保存时间:2026/4/2 11:38:09
# (原笔记提到 openai-python 库的 audio.transcriptions.create 一行代码方案;以下为火山引擎 WebSocket ASR 示例)
import hmac
import hashlib
import time
import websocket
import json
# ========== Replace these with your real credentials ==========
ACCESS_KEY = "你的火山AccessKey ID"
SECRET_KEY = "你的火山SecretKey"
APPKEY = "你的ASR应用Appkey"
RESOURCE_ID = "你的ASR资源ID 一般是: cn-beijing..."
# ==============================================================
def generate_signature(secret_key, appkey, timestamp):
    """Return the lowercase hex HMAC-SHA256 signature for a Volcengine ASR request.

    Args:
        secret_key: Account SecretKey, used as the HMAC key.
        appkey: ASR application Appkey.
        timestamp: Unix-epoch seconds, as a string.

    Returns:
        64-character lowercase hex digest.
    """
    # String-to-sign in the order Volcengine requires.
    # BUG FIX: the pasted original read f"appkey={appkey}×tamp={timestamp}" —
    # HTML-entity damage where "&times" in "&timestamp" was rendered as "×".
    # Restored to the literal "&timestamp=".
    sign_str = f"appkey={appkey}&timestamp={timestamp}"
    hmac_obj = hmac.new(
        secret_key.encode("utf-8"),
        sign_str.encode("utf-8"),
        hashlib.sha256,
    )
    # hexdigest() is already lowercase; .lower() kept for defensive clarity.
    return hmac_obj.hexdigest().lower()
def run_asr_websocket():
    """Open a signed Volcengine streaming-ASR WebSocket and send the START config.

    Builds a signed URL from the module-level credentials, connects (10 s
    timeout), sends the session START message (16 kHz, mono PCM, zh-CN),
    then closes the socket. Network I/O only; returns nothing.

    Raises:
        Whatever `websocket.create_connection` raises on connect failure
        (e.g. timeout, handshake rejection).
    """
    timestamp = str(int(time.time()))
    signature = generate_signature(SECRET_KEY, APPKEY, timestamp)
    # Signed WebSocket URL (official Volcengine domain).
    # BUG FIX: the pasted original read f"×tamp={timestamp}" — HTML-entity
    # damage of "&timestamp" — which the server rejects. Restored.
    url = (
        f"wss://openspeech.bytedance.com/api/v1/asr/stream"
        f"?access_key={ACCESS_KEY}"
        f"&appkey={APPKEY}"
        f"&resource_id={RESOURCE_ID}"
        f"&timestamp={timestamp}"
        f"&signature={signature}"
    )
    print("URL生成成功,准备连接...")
    print("签名:", signature)
    ws = websocket.create_connection(url, timeout=10)
    try:
        print("连接成功!")
        # Session START config: 16 kHz, mono, Mandarin.
        start_msg = {
            "type": "START",
            "format": "pcm",
            "sample_rate": 16000,
            "channels": 1,
            "language": "zh-CN",
        }
        ws.send(json.dumps(start_msg))
        print("已发送START,可发音频流")
    finally:
        # Always release the socket, even if send() fails.
        ws.close()


if __name__ == "__main__":
    run_asr_websocket()
依赖安装:pip install websocket-client
注意:需要正确填写 ACCESS_KEY、SECRET_KEY、APPKEY、RESOURCE_ID 四项。像 Speech_Recognition_Seed_streaming2020000663199850946 这样的值是实例 ID,不是接口要求的 Resource ID,这就是你一直报错 "配置找不到" 的根本原因。Resource ID 格式一般是:cn-beijing_xxxxxx
RESOURCE_ID 替换成真正的资源 ID(不是实例 ID):# 错误写法(你现在用的)
# RESOURCE_ID = "Speech_Recognition_Seed_streaming2020000663199850946"
# 正确写法示例(格式参考,需替换成你自己的)
RESOURCE_ID = "cn-beijing_123456_asr_streaming"
大模型版接口地址:
wss://openspeech.bytedance.com/api/v3/asr/bigmodel(流式)
wss://openspeech.bytedance.com/api/v3/asr/bigmodel_async(异步)

| Key | 说明 | 对应你控制台的字段 |
|---|---|---|
| X-Api-App-Key | APP ID | 你控制台里的 APP ID |

签名逻辑变了:大模型版不再用 URL 参数签名,而是用 Header + 固定 Resource ID,签名逻辑由 SDK 或文档里的规则生成。