whisper_api_sample.ipynb
{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"provenance": [],
"authorship_tag": "ABX9TyOYwi1dF2rJyLYdi5deMgTG",
"include_colab_link": true
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"language_info": {
"name": "python"
}
},
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "view-in-github",
"colab_type": "text"
},
"source": [
"<a href=\"https://colab.research.google.com/gist/reouno/0da974b673e56e17e43d12bef3eb608c/whisper_api_sample.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"cell_type": "markdown",
"source": [
"# OpenAI Whisper API\n", | |
"\n", | |
"ブラウザ経由で音声入力して、それをWhisperで文字起こしするサンプルコードです。\n", | |
"\n", | |
"This is a sample code for voice input via a browser and transcribing it with Whisper." | |
],
"metadata": {
"id": "ZWtcdeYw-QYM"
}
},
{
"cell_type": "code",
"source": [
"# OpenAI API key\n",
"API_KEY = '<API_KEY>'"
],
"metadata": {
"id": "aULzDzp-OuFm"
},
"execution_count": null,
"outputs": []
},
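{
"cell_type": "markdown",
"source": [
"Optionally, store the key in Colab's **Secrets** panel instead of hardcoding it. The cell below is a minimal sketch that assumes a secret named `OPENAI_API_KEY` has been added to this Colab session."
],
"metadata": {}
},
{
"cell_type": "code",
"source": [
"# Optional alternative: read the key from Colab Secrets (assumes a secret\n",
"# named 'OPENAI_API_KEY' was added via the key icon in the left sidebar).\n",
"# from google.colab import userdata\n",
"# API_KEY = userdata.get('OPENAI_API_KEY')"
],
"metadata": {},
"execution_count": null,
"outputs": []
},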
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "CtpbMF5F7MR4"
},
"outputs": [],
"source": [
"!pip install openai --quiet"
]
},
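{
"cell_type": "markdown",
"source": [
"A quick sanity check, added here as a small sketch rather than part of the original flow: the `openai.OpenAI(...)` client used below requires the v1-style SDK, so it can help to verify the installed version."
],
"metadata": {}
},
{
"cell_type": "code",
"source": [
"import openai\n",
"\n",
"# The OpenAI() client used below needs the v1 SDK interface (openai >= 1.0).\n",
"print(openai.__version__)"
],
"metadata": {},
"execution_count": null,
"outputs": []
},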
{
"cell_type": "code",
"source": [
"from IPython.display import display, Javascript\n", | |
"from google.colab.output import eval_js\n", | |
"from base64 import b64decode\n", | |
"import openai\n", | |
"\n", | |
"def record_js(filename='record.webm'):\n", | |
" js = Javascript('''\n", | |
" async function record() {\n", | |
" let rec;\n", | |
" let chunks;\n", | |
"\n", | |
" // 録音用のHTML要素を作成\n", | |
" const div = document.createElement('div');\n", | |
" const startRecord = document.createElement('button');\n", | |
" startRecord.textContent = 'Rec';\n", | |
" div.appendChild(startRecord);\n", | |
"\n", | |
" const stopRecord = document.createElement('button');\n", | |
" stopRecord.textContent = 'Stop';\n", | |
" stopRecord.style.display = 'none';\n", | |
" div.appendChild(stopRecord);\n", | |
"\n", | |
" const audio = document.createElement('audio');\n", | |
" div.appendChild(audio);\n", | |
"\n", | |
" document.body.appendChild(div);\n", | |
"\n", | |
" // Audioの利用許可が得られたら実行されるハンドラ\n", | |
" function handlerFunction(stream, resolve) {\n", | |
" // MIMEタイプをaudio/webmに指定\n", | |
" rec = new MediaRecorder(stream, { mimeType: 'audio/webm' });\n", | |
" rec.ondataavailable = e => {\n", | |
" chunks.push(e.data);\n", | |
" if (rec.state == \"inactive\") {\n", | |
" let blob = new Blob(chunks, { type: 'audio/webm' });\n", | |
" audio.src = URL.createObjectURL(blob);\n", | |
" audio.controls = true;\n", | |
" audio.autoplay = true;\n", | |
" resolve();\n", | |
" }\n", | |
" }\n", | |
" }\n", | |
"\n", | |
" startRecord.onclick = e => {\n", | |
" startRecord.style.display = 'none';\n", | |
" stopRecord.style.display = 'block';\n", | |
" chunks = [];\n", | |
" rec.start();\n", | |
" }\n", | |
"\n", | |
" stopRecord.onclick = e => {\n", | |
" startRecord.style.display = 'block';\n", | |
" stopRecord.style.display = 'none';\n", | |
" rec.stop();\n", | |
" }\n", | |
"\n", | |
" function blobToBase64(blob) {\n", | |
" return new Promise((resolve, _) => {\n", | |
" const reader = new FileReader();\n", | |
" reader.onloadend = () => resolve(reader.result);\n", | |
" reader.readAsDataURL(blob);\n", | |
" });\n", | |
" }\n", | |
"\n", | |
" await new Promise((resolve) => {\n", | |
" navigator.mediaDevices.getUserMedia({ audio: true })\n", | |
" .then(stream => { handlerFunction(stream, resolve) });\n", | |
" });\n", | |
" let blob = new Blob(chunks, { type: 'audio/webm' });\n", | |
" return await blobToBase64(blob);\n", | |
" }\n", | |
" ''')\n", | |
" display(js)\n", | |
" data = eval_js('record()')\n", | |
" binary = b64decode(data.split(',')[1])\n", | |
" with open(filename, 'wb') as f:\n", | |
" f.write(binary)\n", | |
" return filename\n", | |
"\n", | |
"# ① 録音実行(ブラウザ上でRecボタン→Stopボタンで録音完了)\n", | |
"recorded_filename = record_js()\n", | |
"\n", | |
"# ② OpenAI Whisper APIで文字起こし\n", | |
"client = openai.OpenAI(api_key=API_KEY)\n", | |
"\n", | |
"with open(recorded_filename, \"rb\") as audio_file:\n", | |
" transcript = client.audio.transcriptions.create(\n", | |
" file=audio_file,\n", | |
" # model='whisper-1', # low accuracy\n", | |
" # model='gpt-4o-mini-transcribe', # middle accuracy\n", | |
" model=\"gpt-4o-transcribe\" # high accuracy\n", | |
" )\n", | |
"\n", | |
"print(\"【文字起こし結果】\")\n", | |
"print(transcript.text)\n", | |
"print(transcript.to_dict())" | |
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 170
},
"id": "OHCq5LTXBual",
"outputId": "13fa5bb2-6dfd-480b-bd75-978fea7e7602"
},
"execution_count": null,
"outputs": [
{
"output_type": "display_data",
"data": {
"text/plain": [
"<IPython.core.display.Javascript object>"
],
"application/javascript": [
"\n", | |
" async function record() {\n", | |
" let rec;\n", | |
" let chunks;\n", | |
" \n", | |
" // 録音用のHTML要素を作成\n", | |
" const div = document.createElement('div');\n", | |
" const startRecord = document.createElement('button');\n", | |
" startRecord.textContent = 'Rec';\n", | |
" div.appendChild(startRecord);\n", | |
" \n", | |
" const stopRecord = document.createElement('button');\n", | |
" stopRecord.textContent = 'Stop';\n", | |
" stopRecord.style.display = 'none';\n", | |
" div.appendChild(stopRecord);\n", | |
" \n", | |
" const audio = document.createElement('audio');\n", | |
" div.appendChild(audio);\n", | |
" \n", | |
" document.body.appendChild(div);\n", | |
" \n", | |
" // Audioの利用許可が得られたら実行されるハンドラ\n", | |
" function handlerFunction(stream, resolve) {\n", | |
" // MIMEタイプをaudio/webmに指定\n", | |
" rec = new MediaRecorder(stream, { mimeType: 'audio/webm' });\n", | |
" rec.ondataavailable = e => {\n", | |
" chunks.push(e.data);\n", | |
" if (rec.state == \"inactive\") {\n", | |
" let blob = new Blob(chunks, { type: 'audio/webm' });\n", | |
" audio.src = URL.createObjectURL(blob);\n", | |
" audio.controls = true;\n", | |
" audio.autoplay = true;\n", | |
" resolve();\n", | |
" }\n", | |
" }\n", | |
" }\n", | |
" \n", | |
" startRecord.onclick = e => {\n", | |
" startRecord.style.display = 'none';\n", | |
" stopRecord.style.display = 'block';\n", | |
" chunks = [];\n", | |
" rec.start();\n", | |
" }\n", | |
" \n", | |
" stopRecord.onclick = e => {\n", | |
" startRecord.style.display = 'block';\n", | |
" stopRecord.style.display = 'none';\n", | |
" rec.stop();\n", | |
" }\n", | |
" \n", | |
" function blobToBase64(blob) {\n", | |
" return new Promise((resolve, _) => {\n", | |
" const reader = new FileReader();\n", | |
" reader.onloadend = () => resolve(reader.result);\n", | |
" reader.readAsDataURL(blob);\n", | |
" });\n", | |
" }\n", | |
" \n", | |
" await new Promise((resolve) => {\n", | |
" navigator.mediaDevices.getUserMedia({ audio: true })\n", | |
" .then(stream => { handlerFunction(stream, resolve) });\n", | |
" });\n", | |
" let blob = new Blob(chunks, { type: 'audio/webm' });\n", | |
" return await blobToBase64(blob);\n", | |
" }\n", | |
" " | |
]
},
"metadata": {}
},
{
"output_type": "stream",
"name": "stdout",
"text": [
"【文字起こし結果】\n", | |
"本日、APIで新しい音声合成およびテキスト読み上げ音声モデルを発表します。これにより、真の価値を提供する、より強力でカスタマイズ可能なインテリジェント音声エージェントを構築することが可能になります。新しい音声テキスト変換モデル。Whisperのオリジナルモデルと比較して単語誤り率の改善、言語認識と精度の向上を実現した新しいGPT-4O TranscribeおよびGPT-4O Mini Transcribeモデルを導入します。\n", | |
"{'text': '本日、APIで新しい音声合成およびテキスト読み上げ音声モデルを発表します。これにより、真の価値を提供する、より強力でカスタマイズ可能なインテリジェント音声エージェントを構築することが可能になります。新しい音声テキスト変換モデル。Whisperのオリジナルモデルと比較して単語誤り率の改善、言語認識と精度の向上を実現した新しいGPT-4O TranscribeおよびGPT-4O Mini Transcribeモデルを導入します。'}\n" | |
]
}
]
},
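{
"cell_type": "markdown",
"source": [
"The transcription endpoint also accepts optional hints. The cell below is a minimal sketch, not part of the original flow: it assumes `recorded_filename` and `client` from the cell above and passes the optional `language` and `prompt` parameters; with `response_format='text'` the SDK returns a plain string."
],
"metadata": {}
},
{
"cell_type": "code",
"source": [
"# A minimal sketch (assumes the cell above has run): steer the decoding\n",
"# with optional hints instead of relying on auto-detection.\n",
"with open(recorded_filename, 'rb') as audio_file:\n",
"    text = client.audio.transcriptions.create(\n",
"        file=audio_file,\n",
"        model='whisper-1',\n",
"        language='ja',  # hint that the recording is Japanese\n",
"        prompt='OpenAI, Whisper',  # bias toward expected proper nouns\n",
"        response_format='text'  # return a plain string\n",
"    )\n",
"\n",
"print(text)"
],
"metadata": {},
"execution_count": null,
"outputs": []
}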
]
}