#ref. https://qiita.com/dauuricus/items/7da2e5f14c965da18106
diary:トルコ語についてレクを受けると、ウズベクスタンを源流として、地理位置的に横並びの三国と同系統の言語だということらしい。
アナトリアの辺縁として、外来語はフランス語からの影響が大きい。
ケバブを待ちながらトルコ人に、トルコ語は語順が日本語と同じって聞いたんですけど、同じですか?
とたずねたら、ロシア語やアラビア語の子音の例を出して話してくれたが、「ウラル、アルタイ語系だから、日本もそうだからそうでしょ?」と言われて、「あ、(ちがう)そう(いうことにはなっていない)だけど、アルタイ語系?ということに(トルコ語は)なっている?」と聞き返した。だからモンゴル語も近いか。
陸続きなら、割り切ってそうなんだろうけど、島でオリジンを主張することでアイデンティティーを誇示しているとこでは、明らかに同じ言葉であっても(日本語と韓国語のこと)、世間一般が違うということになってしまって、そこまでズバッと同じにして俯瞰できないのだった。
そこまで常識が違うかぁ、ということが印象的だった。
でも、それで google の自動翻訳のざっと言語をリージョンというか種類に分けたときに、日本語と韓国語は一緒ねという分け方になっているという、その当然感が理解できる。
google 2016 https://research.google/pubs/pub45733/
トルコ語がラテン語からの影響もある・・・ということを期待して質問していたが、無いが、フランス語を外来語として取り入れているウズベキスタンあたりからの trie の最後方だという図を描いて示してくれた。
京都大学近所のケバブ屋さんで Dr.ヤルチンが。
中央アジア(アルタイ)からの派生の言語だといわれている族だけれども、なんだかよくしらないトルコの歴史の経過とアナトリアの関係があって、アラビア語、フランス語などと(イランとかギリシャとか)の交流があり、外来語はフランス語からそのまま取り入れていることが多いらしい。ウズベクになると、それはロシアとの交流でロシア系の外来語になるらしい。ウズベクとトルコだとかなり近い関係で、50%くらいはお互い言っていることが解る、とウズベクから来た留学生とトルコから来た留学生が言っていた。
アングロサクソンがメインであるオープンソースのプロジェクトの場合より、ロシア、中国のメンテナのプロジェクトの方が、根柢の部分の冗長性があるかも。英語と日本語の関係は、たんに従属国という関係性においてメインに外国語=英語になっているだけで、実際遠い。
kaldi って名前がいい。( deepspeech とは関係ない。)
#googlecolab
googlecolab ipynb
{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"name": "zh-Hans_deepspeech.ipynb",
"provenance": []
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
}
},
"cells": [
{
"cell_type": "code",
"metadata": {
"id": "LaQRtRPM4T8f"
},
"source": [
"!curl ipecho.net/plain"
],
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"id": "Dfewr3Yw1x17"
},
"source": [
"pip install youtube_transcript_api googletrans==4.0.0-rc1 h2==3.*"
],
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"id": "KHEv4cVx1mX3"
},
"source": [
"from youtube_transcript_api import YouTubeTranscriptApi\n",
"from google.colab import files\n",
"#import time\n",
"import sys\n",
"from urllib.parse import urlparse, parse_qs\n",
"\n",
"urltext ='https://www.youtube.com/watch?v=G2eXQrgq_n8'\n",
"args = [urltext]\n",
"video_id = ''\n",
"\n",
"print()\n",
"print()\n",
"\n",
"\n",
"def extract_video_id(url):\n",
" query = urlparse(url)\n",
" if query.hostname == 'youtu.be': return query.path[1:]\n",
" if query.hostname in {'www.youtube.com', 'youtube.com'}:\n",
" if query.path == '/watch': return parse_qs(query.query)['v'][0]\n",
" if query.path[:7] == '/embed/': return query.path.split('/')[2]\n",
" if query.path[:3] == '/v/': return query.path.split('/')[2]\n",
" # fail?\n",
" return None\n",
"\n",
"for url in args:\n",
" video_id = (extract_video_id(url))\n",
" print('youtube video_id:',video_id)\n",
"\n",
"line =[]\n",
"line[:] = YouTubeTranscriptApi.get_transcript(video_id,languages=['zh-Hans'])\n",
"\n",
"text_list = []\n",
"for l in line:\n",
" ##print(\"text: \", l['text'])\n",
" ##print(\"start:\", l['start'])\n",
" ##print(\"duration:\", l['duration'])\n",
"\n",
" l['text']=l['text'].strip()\n",
" l['text']=l['text'].rstrip('\\n')\n",
" l['text']=l['text'].rstrip('\\r')\n",
" l['text']=l['text'].replace('\\r','')\n",
" l['text']=l['text'].replace('\\n',' ')\n",
" text_list.append(l['text'])\n",
"\n",
"##text_list[:] = [a for a in text_list if a != ' ']\n",
"##text_list[:] = [l.replace('\\n',' ') for l in text_list]\n",
"##print(line) \n",
"del line\n",
"print(text_list)\n",
"\n",
"##original_stdout = sys.stdout ## stdout backup\n",
"filename = 'subtitle.txt' ## print subtitle text to this file\n",
"with open(filename, 'w') as f:\n",
" ##sys.stdout = f # stdout to file\n",
"\n",
" print('youtube video_id:',video_id)\n",
" print()\n",
" print(\"haywhnk-A.K.A-@dauuricus\")\n",
" print(\"- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\")\n",
" print(\"YouTube captions\")\n",
" print(\"- - - - - - - - - - - - - - - - - - - YouTube - - - - - - - - - - - - - - - - - - -\")\n",
" print()\n",
" print()\n",
" line = YouTubeTranscriptApi.list_transcripts(video_id) \n",
"\n",
" transcript = line.find_transcript(['zh-Hans'])\n",
" #print(transcript.fetch())\n",
"\n",
" caption_line =[]\n",
" for count, dict_obj in enumerate(transcript.fetch()):\n",
" ##print(count+1,' ', dict_obj['text'] )\n",
" caption_line.append(dict_obj['text'])\n",
" for count, l in enumerate(caption_line):\n",
" print(count+1,' ',l)\n",
"\n",
" print()\n",
" print()\n",
" print(\"************************************************************************************\")\n",
" print()\n",
" print(\"Youtube captions\")\n",
" print(\"- - - - - - - - - - - - - - - - - - translated - - - - - - - - - - - - - - - - - - -\")\n",
" print()\n",
" print()\n",
"\n",
" translated = transcript.translate('ja')\n",
" for count, dict_obj in enumerate(translated.fetch()):# japanese\n",
" print( count+1,' ', dict_obj['text'] )\n",
"\n",
"\n",
"## print()\n",
"## print(\"-----------------------------------------------------------------------------------\")\n",
"## print()\n",
"## print(\"captions text compositing\")\n",
"## print(\"- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\")\n",
"## print()\n",
"## print()\n",
"\n",
" def line_edit2(textlines): ##2 lines to 1 line\n",
" text_compo = []\n",
" txt = ''\n",
" for count,l in enumerate(textlines):\n",
" if (count+1)%2 == 0:\n",
" txt = text_compo.pop()\n",
" txt += ' ' +l\n",
" text_compo.append(txt)\n",
" else :\n",
" txt = l\n",
" text_compo.append(txt)\n",
" return text_compo\n",
"\n",
" def line_edit3(textlines): ##3 lines to 1 line\n",
" text_compo = []\n",
" txt = ''\n",
" i = 0\n",
" for count,l in enumerate(textlines):\n",
" if i == 0:\n",
" txt += l\n",
" i = i + 1\n",
" text_compo.append(txt)\n",
" elif i == 1:\n",
" txt = text_compo.pop()\n",
" txt += ' ' + l\n",
" i = i + 1\n",
" text_compo.append(txt)\n",
" elif i == 2:\n",
" txt = text_compo.pop()\n",
" txt += ' ' + l\n",
" text_compo.append(txt)\n",
" txt = ''\n",
" i = 0\n",
" return text_compo\n",
"\n",
" def line_edit(textlines): ##2 lines to 1 line\n",
" text_compo = []\n",
" i = 0\n",
" txt = ''\n",
" for count,l in enumerate(textlines):\n",
" if i == 0:\n",
" txt += l\n",
" i = i + 1\n",
" text_compo.append(txt)\n",
" elif i == 1:\n",
" txt += ' ' +l\n",
" text_compo.pop()\n",
" text_compo.append(txt)\n",
" i = 0\n",
" txt = ''\n",
" return text_compo\n",
"\n",
" print()\n",
" print()\n",
" print(\"************************************************************************************\")\n",
" print()\n",
" print()\n",
" print()\n",
" print()\n",
" print(\"************************************************************************************\")\n",
" print(\"shrink text\")\n",
" print()\n",
"## for count, l in enumerate(text_e):\n",
"## print(count+1,l)\n",
" print()\n",
" print()\n",
"# text_compo = (line_edit2(text_list))\n",
"# for count, l in enumerate(text_compo):\n",
"# print(l)\n",
" print()\n",
" print()\n",
" text_compo = (line_edit3(text_list))\n",
" text_compo[:] = (line_edit3(text_compo))\n",
" #for count, l in enumerate(text_compo):\n",
" # print(l)\n",
" text_compo2 = (line_edit2(text_compo))\n",
" text_compo2[:] = (line_edit2(text_compo2))\n",
" for count, l in enumerate(text_compo2):\n",
" print(l)\n",
"\n",
" print()\n",
" print(\"************************************************************************************\")\n",
" print()\n",
" print()\n",
" print(\"************************************************************************************\")\n",
" print()\n",
" print(\"Thank you.\")\n",
"\n",
"\n",
"#sys.stdout = original_stdout # stdout back \n",
"\n",
"##files.download(filename)\n",
"\n",
"#import re\n",
"import h2.connection\n",
"import h2.config\n",
"from googletrans import Translator\n",
"#import sys\n",
"\n",
"#####uploaded = files.upload()\n",
"\n",
"####filename = ''\n",
"####for fn in uploaded.keys():\n",
"#### print('User uploaded file \"{name}\" with length {length} bytes'.format(\n",
"#### name=fn, length=len(uploaded[fn])))\n",
"#### filename = fn\n",
"\n",
"\n",
"#filename = 'subtitle.txt'\n",
"#args= sys.argv\n",
"##args = [('translate.py'),filename]\n",
"\n",
"##print('open '+args[1])\n",
"##with open(args[1]) as f: # uploaded file\n",
"## line = f.readlines() \n",
"\n",
"##line[:] = [l.strip() for l in line]\n",
"##line[:] = [l.rstrip('\\n') for l in line]\n",
"##line[:] = [a for a in line if a != '']\n",
"##line[:] = [l.replace('\\n',' ') for l in line]\n",
"##line[:] = [l.replace('\\r',' ') for l in line]\n",
"#print(line)\n",
"\n",
"#print()\n",
"\n",
"####for line_num,l in enumerate(line):\n",
"#### if re.search(r'.*?i'm$',l):\n",
"#### print(line_num,' ',l)\n",
"#### elif re.search(r'.*?to/Z',l):\n",
"#### print(line_num,' ',l)\n",
"#### if re.search(r'.*?the$',l):\n",
"#### print(line_num,' ',l)\n",
"#### elif re.search(r'.*?the/Z',l):\n",
"#### print(line_num,' ',l)\n",
"\n",
"\n",
"#for line_num,l in enumerate(line):\n",
"# print(line_num,' ',l)\n",
"\n",
"translator = Translator()\n",
"num = 20\n",
"#obj_num = 1\n",
"filename = 'translated.txt'\n",
"backup_stdout = sys.stdout\n",
"print(\"translating...\")\n",
"print()\n",
"\n",
"with open(filename,'w') as f:\n",
" #sys.stdout = f\n",
"\n",
" for count, l in enumerate(text_compo2):\n",
" if count +1< num:\n",
" translated = translator.translate(l, dest='ja')\n",
" ##print(count+1,' ', l) # original text\n",
" print(translated.text)\n",
" else:\n",
" translated = translator.translate(l, dest='ja')\n",
" ##print(count+1,' ', l) # original text\n",
" print(translated.text) \n",
" del translator\n",
" num = num + 20\n",
" #obj_num = obj_num + 1\n",
" #print(\"\")\n",
" #print(\"--- translator :\", obj_num)\n",
" #print(\"\")\n",
" translator = Translator() \n",
" #sys.stdout = backup_stdout # back\n",
"del translator\n",
"print(\"saving...\",filename)\n",
"\n",
"# files.download(filename) # translated.txt"
],
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"id": "GfISM6ahIXQS"
},
"source": [
"from google.colab import drive \n",
"drive.mount('/content/drive')"
],
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"id": "7tCkh__aIpAK"
},
"source": [
"!pip install youtube-dl "
],
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"id": "1YwzQj5I9yJA"
},
"source": [
"!apt install ffmpeg"
],
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"id": "2OC_2-0ZI2Mj"
},
"source": [
"!youtube-dl --extract-audio --audio-format wav --output \"extract.%(ext)s\" {urltext}"
],
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"id": "wPzFui2Bq2Bi"
},
"source": [
"!rm t*.wav\n",
"!ffmpeg -i extract.wav -vn -acodec pcm_s16le -ac 1 -ar 16000 -f wav test.wav"
],
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"id": "MOOctmTk-tVM"
},
"source": [
"from IPython.display import YouTubeVideo"
],
"execution_count": 15,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"id": "m9DneSeisAV3"
},
"source": [
"YouTubeVideo(video_id)"
],
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"id": "eqR3igA0s-lX"
},
"source": [
"import os\n",
"from os.path import exists\n",
"import wave\n",
"\n",
"!pip install -q deepspeech-gpu==0.9.3 youtube-dl\n",
"\n",
"if not exists('deepspeech-0.9.3-models.pbmm'):\n",
" #!wget https://github.com/mozilla/DeepSpeech/releases/download/v0.9.3/deepspeech-0.9.3-models.pbmm \n",
" !wget https://github.com/mozilla/DeepSpeech/releases/download/v0.9.3/deepspeech-0.9.3-models-zh-CN.pbmm \n",
"if not exists('deepspeech-0.9.3-models.scorer'):\n",
" #!wget https://github.com/mozilla/DeepSpeech/releases/download/v0.9.3/deepspeech-0.9.3-models.scorer\n",
" !wget https://github.com/mozilla/DeepSpeech/releases/download/v0.9.3/deepspeech-0.9.3-models-zh-CN.scorer \n",
" #!tar xvfz deepspeech-0.9.3-models.tar.gz"
],
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"id": "DsRCWUNgse1_"
},
"source": [
"#!deepspeech --model deepspeech-0.9.3-models.pbmm --scorer deepspeech-0.9.3-models.scorer --audio test.wav > test.txt\n",
"!deepspeech --model deepspeech-0.9.3-models-zh-CN.pbmm --scorer deepspeech-0.9.3-models-zh-CN.scorer --audio test.wav > test.txt"
],
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"id": "GkFV1OUIve62"
},
"source": [
"##from google.colab import files\n",
"#files.download('test.txt')\n",
"\n",
"with open('test.txt','r') as f:\n",
"  line = f.readline()\n",
" print(line)\n"
],
"execution_count": null,
"outputs": []
}
]
}
##cell 1
!curl ipecho.net/plain
##cell 2
pip install youtube_transcript_api googletrans==4.0.0-rc1 h2==3.*
##cell 3
zh-Hans : Chinese (simplified)
from youtube_transcript_api import YouTubeTranscriptApi
from google.colab import files
#import time
import sys
from urllib.parse import urlparse, parse_qs
# Target video URL (Chinese-language video).
# Fix: the listing had a typo "youtue.com", which extract_video_id() below
# would not recognize (hostname not in its accepted set, returns None).
# The notebook cell itself uses the correct "youtube.com".
urltext = 'https://www.youtube.com/watch?v=G2eXQrgq_n8'  # Chinese language
args = [urltext]  # sys.argv-style list of URLs to process
video_id = ''     # filled in later by extract_video_id()
print()
print()
def extract_video_id(url):
    """Return the YouTube video id embedded in *url*, or None if the URL
    shape is not recognized (youtu.be short link, /watch, /embed/, /v/)."""
    parts = urlparse(url)
    host = parts.hostname
    if host == 'youtu.be':
        # Short links carry the id as the whole path, minus the leading '/'.
        return parts.path[1:]
    if host in ('www.youtube.com', 'youtube.com'):
        if parts.path == '/watch':
            # Standard watch URL: id lives in the 'v' query parameter.
            return parse_qs(parts.query)['v'][0]
        if parts.path.startswith('/embed/') or parts.path.startswith('/v/'):
            # Embed/legacy URLs: id is the second path segment.
            return parts.path.split('/')[2]
    # Unrecognized URL shape.
    return None
for url in args:
video_id = (extract_video_id(url))
print('youtube video_id:',video_id)
line =[]
line[:] = YouTubeTranscriptApi.get_transcript(video_id,languages=['zh-Hans'])
text_list = []
for l in line:
##print("text: ", l['text'])
##print("start:", l['start'])
##print("duration:", l['duration'])
l['text']=l['text'].strip()
l['text']=l['text'].rstrip('\n')
l['text']=l['text'].rstrip('\r')
l['text']=l['text'].replace('\r','')
l['text']=l['text'].replace('\n',' ')
text_list.append(l['text'])
##text_list[:] = [a for a in text_list if a != ' ']
##text_list[:] = [l.replace('\n',' ') for l in text_list]
##print(line)
del line
print(text_list)
##original_stdout = sys.stdout ## stdout backup
filename = 'subtitle.txt' ## print subtitle text to this file
with open(filename, 'w') as f:
##sys.stdout = f # stdout to file
print('youtube video_id:',video_id)
print()
print("haywhnk-A.K.A-@dauuricus")
print("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -")
print("YouTube captions")
print("- - - - - - - - - - - - - - - - - - - YouTube - - - - - - - - - - - - - - - - - - -")
print()
print()
line = YouTubeTranscriptApi.list_transcripts(video_id)
transcript = line.find_transcript(['zh-Hans'])
#print(transcript.fetch())
caption_line =[]
for count, dict_obj in enumerate(transcript.fetch()):
##print(count+1,' ', dict_obj['text'] )
caption_line.append(dict_obj['text'])
for count, l in enumerate(caption_line):
print(count+1,' ',l)
print()
print()
print("************************************************************************************")
print()
print("Youtube captions")
print("- - - - - - - - - - - - - - - - - - translated - - - - - - - - - - - - - - - - - - -")
print()
print()
translated = transcript.translate('ja')
for count, dict_obj in enumerate(translated.fetch()):# japanese
print( count+1,' ', dict_obj['text'] )
## print()
## print("-----------------------------------------------------------------------------------")
## print()
## print("captions text compositing")
## print("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -")
## print()
## print()
def line_edit2(textlines):  ## 2 lines to 1 line
    """Merge consecutive pairs of lines into single space-joined lines.

    A trailing unpaired line is kept as-is."""
    merged = []
    for idx, text in enumerate(textlines):
        if idx % 2:
            # Odd index: fold this line into the previous entry.
            merged[-1] = merged[-1] + ' ' + text
        else:
            merged.append(text)
    return merged
def line_edit3(textlines):  ## 3 lines to 1 line
    """Merge consecutive triples of lines into single space-joined lines.

    A trailing partial group (1 or 2 lines) is merged the same way."""
    merged = []
    for idx, text in enumerate(textlines):
        if idx % 3 == 0:
            # Start of a new group of three.
            merged.append(text)
        else:
            # Fold the second and third lines into the current group.
            merged[-1] = merged[-1] + ' ' + text
    return merged
def line_edit(textlines):  ## 2 lines to 1 line
    """Pairwise-merge lines: every second line is appended (space-joined)
    onto the line before it; a trailing unpaired line is kept as-is."""
    merged = []
    pending = None  # None sentinel: distinguishes "no pending line" from ''
    for text in textlines:
        if pending is None:
            pending = text
            merged.append(text)
        else:
            merged[-1] = pending + ' ' + text
            pending = None
    return merged
print()
print()
print("************************************************************************************")
print()
print()
print()
print()
print("************************************************************************************")
print("shrink text")
print()
## for count, l in enumerate(text_e):
## print(count+1,l)
print()
print()
# text_compo = (line_edit2(text_list))
# for count, l in enumerate(text_compo):
# print(l)
print()
print()
text_compo = (line_edit3(text_list))
text_compo[:] = (line_edit3(text_compo))
#for count, l in enumerate(text_compo):
# print(l)
text_compo2 = (line_edit2(text_compo))
text_compo2[:] = (line_edit2(text_compo2))
for count, l in enumerate(text_compo2):
print(l)
print()
print("************************************************************************************")
print()
print()
print("************************************************************************************")
print()
print("Thank you.")
#sys.stdout = original_stdout # stdout back
##files.download(filename)
#import re
import h2.connection
import h2.config
from googletrans import Translator
#import sys
#####uploaded = files.upload()
####filename = ''
####for fn in uploaded.keys():
#### print('User uploaded file "{name}" with length {length} bytes'.format(
#### name=fn, length=len(uploaded[fn])))
#### filename = fn
#filename = 'subtitle.txt'
#args= sys.argv
##args = [('translate.py'),filename]
##print('open '+args[1])
##with open(args[1]) as f: # uploaded file
## line = f.readlines()
##line[:] = [l.strip() for l in line]
##line[:] = [l.rstrip('\n') for l in line]
##line[:] = [a for a in line if a != '']
##line[:] = [l.replace('\n',' ') for l in line]
##line[:] = [l.replace('\r',' ') for l in line]
#print(line)
#print()
####for line_num,l in enumerate(line):
#### if re.search(r'.*?i'm$',l):
#### print(line_num,' ',l)
#### elif re.search(r'.*?to/Z',l):
#### print(line_num,' ',l)
#### if re.search(r'.*?the$',l):
#### print(line_num,' ',l)
#### elif re.search(r'.*?the/Z',l):
#### print(line_num,' ',l)
#for line_num,l in enumerate(line):
# print(line_num,' ',l)
translator = Translator()
num = 20
#obj_num = 1
filename = 'translated.txt'
backup_stdout = sys.stdout
print("translating...")
print()
with open(filename,'w') as f:
#sys.stdout = f
for count, l in enumerate(text_compo2):
if count +1< num:
translated = translator.translate(l, dest='ja')
##print(count+1,' ', l) # original text
print(translated.text)
else:
translated = translator.translate(l, dest='ja')
##print(count+1,' ', l) # original text
print(translated.text)
del translator
num = num + 20
#obj_num = obj_num + 1
#print("")
#print("--- translator :", obj_num)
#print("")
translator = Translator()
#sys.stdout = backup_stdout # back
del translator
print("saving...",filename)
# files.download(filename) # translated.txt
##cell 4
!pip install youtube-dl
##cell 5
!apt install ffmpeg
##cell 6
!youtube-dl --extract-audio --audio-format wav --output "extract.%(ext)s" {urltext}
##cell 7
!rm t*.wav
!ffmpeg -i extract.wav -vn -acodec pcm_s16le -ac 1 -ar 16000 -f wav test.wav
##cell 8
from IPython.display import YouTubeVideo
##cell 9
YouTubeVideo(video_id)
##cell 10
import os
from os.path import exists
import wave
!pip install -q deepspeech-gpu==0.9.3 youtube-dl
if not exists('deepspeech-0.9.3-models.pbmm'):
#!wget https://github.com/mozilla/DeepSpeech/releases/download/v0.9.3/deepspeech-0.9.3-models.pbmm
!wget https://github.com/mozilla/DeepSpeech/releases/download/v0.9.3/deepspeech-0.9.3-models-zh-CN.pbmm
if not exists('deepspeech-0.9.3-models.scorer'):
#!wget https://github.com/mozilla/DeepSpeech/releases/download/v0.9.3/deepspeech-0.9.3-models.scorer
!wget https://github.com/mozilla/DeepSpeech/releases/download/v0.9.3/deepspeech-0.9.3-models-zh-CN.scorer
#!tar xvfz deepspeech-0.9.3-models.tar.gz
##cell 11
#!deepspeech --model deepspeech-0.9.3-models.pbmm --scorer deepspeech-0.9.3-models.scorer --audio test.wav > test.txt
!deepspeech --model deepspeech-0.9.3-models-zh-CN.pbmm --scorer deepspeech-0.9.3-models-zh-CN.scorer --audio test.wav > test.txt
##cell 12
##from google.colab import files
#files.download('test.txt')
with open('test.txt','r') as f:
line = f.readline()
print(line)