OpenAI Realtime API (realtime voice)

https://openai.com/index/introducing-the-realtime-api/

 

Official demo

https://github.com/openai/openai-realtime-console

Client library used by the official demo

https://github.com/openai/openai-realtime-api-beta?tab=readme-ov-file

Installing and configuring the package

Modify yarn.lock; this package is downloaded directly from GitHub:

"@openai/realtime-api-beta@openai/openai-realtime-api-beta":
  version "0.0.0"
  resolved "https://codeload.github.com/openai/openai-realtime-api-beta/tar.gz/a5cb94824f625423858ebacb9f769226ca98945f"
  dependencies:
    ws "^8.18.0"

Frontend code

import { RealtimeClient } from '@openai/realtime-api-beta'
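A minimal sketch of how the imported client is used. The method and event names are taken from the full ConsolePage.js listing later in this article; the relay URL is an assumption (it matches the /ws location configured in nginx below):

import { RealtimeClient } from '@openai/realtime-api-beta'

const client = new RealtimeClient({
  url: 'wss://chat.xutongbao.top/ws', // assumed relay address, see the nginx section
  apiKey: 'placeholder',              // replaced by a real token later in this article
  dangerouslyAllowAPIKeyInBrowser: true,
})

// Log assistant output as conversation items update
client.on('conversation.updated', ({ item }) => {
  console.log(item.formatted.transcript || item.formatted.text)
})

async function start() {
  await client.connect()
  // Send a plain text message once connected
  client.sendUserMessageContent([{ type: 'input_text', text: '你好!' }])
}

start()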


 

nginx configuration

RealtimeClient needs to be configured with a wss:// address.

wss uses the same TLS encryption as https, so it needs no separate certificate setup; a plain proxy forwarding rule is enough.

# https
server {
    listen       443 ssl;
    server_name  chat.xutongbao.top;

    # paid certificate
    ssl_certificate         /temp/ssl/chat.xutongbao.top/chat.xutongbao.top_cert_chain.pem;   # nginx SSL certificate file
    ssl_certificate_key     /temp/ssl/chat.xutongbao.top/chat.xutongbao.top_key.key;          # nginx SSL certificate key
    # free certificate
    # ssl_certificate         /temp/ssl/cersign/chat.xutongbao.top/chat.xutongbao.top.crt;      # nginx SSL certificate file
    # ssl_certificate_key     /temp/ssl/cersign/chat.xutongbao.top/chat.xutongbao.top_rsa.key;  # nginx SSL certificate key

    proxy_send_timeout 6000s;    # send timeout
    proxy_read_timeout 6000s;    # read timeout

    # site root
    location / {
        root    /temp/yuying;
        index  index.html index.htm;
        add_header Content-Security-Policy upgrade-insecure-requests;
    }

    location /api/ {
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header REMOTE-HOST $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-NginX-Proxy true;
        proxy_set_header Connection '';
        proxy_http_version 1.1;
        chunked_transfer_encoding off;
        proxy_buffering off;
        proxy_cache off;
        proxy_pass http://yuying-api.xutongbao.top;
    }

    location /socket.io/ {
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header REMOTE-HOST $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-NginX-Proxy true;
        proxy_pass http://127.0.0.1:84;
        # key settings: start
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        # key settings: end
    }

    location /ws {
        proxy_pass http://52.247.xxx.xxx:86/;
        proxy_read_timeout              500;
        proxy_set_header                Host    $http_host;
        proxy_set_header                X-Real-IP          $remote_addr;
        proxy_set_header                X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_http_version 1.1;
        # headers required for the ws protocol
        proxy_set_header                Upgrade $http_upgrade;
        proxy_set_header                Connection "Upgrade";
    }

    location /ws-test {
        proxy_pass http://52.247.xxx.xxx:92/;
        proxy_read_timeout              500;
        proxy_set_header                Host    $http_host;
        proxy_set_header                X-Real-IP          $remote_addr;
        proxy_set_header                X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_http_version 1.1;
        # headers required for the ws protocol
        proxy_set_header                Upgrade $http_upgrade;
        proxy_set_header                Connection "Upgrade";
    }

    # match requests starting with /sslCnd/ and strip the sslCnd prefix before forwarding
    location ^~/sslCnd/ {
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header REMOTE-HOST $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-NginX-Proxy true;
        proxy_pass http://cdn.xutongbao.top/;
    }
}

How to verify the user's identity with a token when the connection is established

let apiKeyValue = `${localStorage.getItem('token')}divide${localStorage.getItem('talkId')}`
const clientRef = useRef(
  new RealtimeClient(
    LOCAL_RELAY_SERVER_URL
      ? {
          url: LOCAL_RELAY_SERVER_URL,
          apiKey: apiKeyValue,
          dangerouslyAllowAPIKeyInBrowser: true,
        }
      : {
          apiKey: apiKey,
          dangerouslyAllowAPIKeyInBrowser: true,
        }
  )
)
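Why this works: when dangerouslyAllowAPIKeyInBrowser is set, the beta client appears to pass the apiKey inside the WebSocket subprotocol list rather than in an Authorization header (an assumption inferred from the relay code later in this article, which scans rawHeaders for 'openai-insecure-api-key.'). The handshake request would then carry something like:

Sec-WebSocket-Protocol: realtime, openai-insecure-api-key.<token>divide<talkId>, openai-beta.realtime-v1

The relay strips the surrounding protocol names and splits the remainder on the literal string 'divide' to recover the token and talkId.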

Complete frontend code

realtimePlus/pages/ConsolePage.js:

import { connect } from 'react-redux'
import { withRouter } from 'react-router-dom'
import { useEffect, useRef, useCallback, useState } from 'react'
import { RealtimeClient } from '@openai/realtime-api-beta'
import { WavRecorder, WavStreamPlayer } from '../lib/wavtools/index.js'
import { instructions } from '../utils/conversation_config.js'
import { WavRenderer } from '../utils/wav_renderer'
import { X, ArrowUp, ArrowDown } from 'react-feather'
import { Button, Dropdown, Input, Select } from 'antd'
import { SinglePageHeader, Icon } from '../../../../../../components/light'
import { isPC } from '../../../../../../utils/tools.js'
import { realTimeBaseURL } from '../../../../../../utils/config.js'
import { message as antdMessage } from 'antd'
import Api from '../../../../../../api/index.js'
import './ConsolePage.css'
import './index.css'

const LOCAL_RELAY_SERVER_URL = realTimeBaseURL //'wss://chat.xutongbao.top/ws'
const Option = Select.Option
let isPCFlag = isPC()
let isAddStart = false
let addIdHistory = []

function Index() {
  //#region 配置
  const apiKey = LOCAL_RELAY_SERVER_URL
    ? ''
    : localStorage.getItem('tmp::voice_api_key') ||
      prompt('OpenAI API Key') ||
      ''
  if (apiKey !== '') {
    localStorage.setItem('tmp::voice_api_key', apiKey)
  }
  const wavRecorderRef = useRef(new WavRecorder({ sampleRate: 24000 }))
  const wavStreamPlayerRef = useRef(new WavStreamPlayer({ sampleRate: 24000 }))
  let apiKeyValue = `${localStorage.getItem('token')}divide${localStorage.getItem('talkId')}`
  const clientRef = useRef(
    new RealtimeClient(
      LOCAL_RELAY_SERVER_URL
        ? {
            url: LOCAL_RELAY_SERVER_URL,
            apiKey: apiKeyValue,
            dangerouslyAllowAPIKeyInBrowser: true,
          }
        : {
            apiKey: apiKey,
            dangerouslyAllowAPIKeyInBrowser: true,
          }
    )
  )
  const clientCanvasRef = useRef(null)
  const serverCanvasRef = useRef(null)
  const eventsScrollHeightRef = useRef(0)
  const eventsScrollRef = useRef(null)
  const startTimeRef = useRef(new Date().toISOString())
  const [items, setItems] = useState([])
  const [realtimeEvents, setRealtimeEvents] = useState([])
  const [expandedEvents, setExpandedEvents] = useState({})
  const [isConnected, setIsConnected] = useState(false)
  const [canPushToTalk, setCanPushToTalk] = useState(true)
  const [isRecording, setIsRecording] = useState(false)
  const [message, setMessage] = useState('')
  const [messageType, setMessageType] = useState('none')
  //#endregion

  const getItems = () => {
    const items = [
      {
        key: 'chrome',
        label: (
          <>
            {/* eslint-disable-next-line */}
            <a
              href={`https://static.xutongbao.top/app/ChromeSetup.exe`}
              target="_blank"
            >
              下载chrome浏览器(推荐)
            </a>
          </>
        ),
        icon: <Icon name="chrome" className="m-realtime-menu-icon"></Icon>,
      },
    ]
    return items
  }

  //#region 基础
  const formatTime = useCallback((timestamp) => {
    const startTime = startTimeRef.current
    const t0 = new Date(startTime).valueOf()
    const t1 = new Date(timestamp).valueOf()
    const delta = t1 - t0
    const hs = Math.floor(delta / 10) % 100
    const s = Math.floor(delta / 1000) % 60
    const m = Math.floor(delta / 60_000) % 60
    const pad = (n) => {
      let s = n + ''
      while (s.length < 2) {
        s = '0' + s
      }
      return s
    }
    return `${pad(m)}:${pad(s)}.${pad(hs)}`
  }, [])

  const connectConversation = useCallback(async () => {
    const client = clientRef.current
    const wavRecorder = wavRecorderRef.current
    const wavStreamPlayer = wavStreamPlayerRef.current
    startTimeRef.current = new Date().toISOString()
    setIsConnected(true)
    setRealtimeEvents([])
    setItems(client.conversation.getItems())
    try {
      // Connect to microphone
      await wavRecorder.begin()
    } catch (error) {
      console.log(error)
    }
    // Connect to audio output
    await wavStreamPlayer.connect()
    // Connect to realtime API
    await client.connect()
    // let isAutoAsk = true
    // if (isAutoAsk) {
    // client.sendUserMessageContent([
    //   {
    //     type: `input_text`,
    //     text: `你好!`,
    //   },
    // ])
    if (client.getTurnDetectionType() === 'server_vad') {
      await wavRecorder.record((data) => client.appendInputAudio(data.mono))
    }
  }, [])

  const handleTest = () => {
    const client = clientRef.current
    client.sendUserMessageContent([
      {
        type: `input_text`,
        text: message,
      },
    ])
    setMessage('')
  }

  const handleMessage = (event) => {
    setMessage(event.target.value)
  }

  /**
   * Disconnect and reset conversation state
   */
  const disconnectConversation = useCallback(async () => {
    setIsConnected(false)
    setRealtimeEvents([])
    // setItems([])
    const client = clientRef.current
    client.disconnect()
    const wavRecorder = wavRecorderRef.current
    await wavRecorder.end()
    const wavStreamPlayer = wavStreamPlayerRef.current
    await wavStreamPlayer.interrupt()
  }, [])

  const deleteConversationItem = useCallback(async (id) => {
    const client = clientRef.current
    client.deleteItem(id)
  }, [])

  /**
   * In push-to-talk mode, start recording
   * .appendInputAudio() for each sample
   */
  const startRecording = async () => {
    setIsRecording(true)
    const client = clientRef.current
    const wavRecorder = wavRecorderRef.current
    const wavStreamPlayer = wavStreamPlayerRef.current
    const trackSampleOffset = await wavStreamPlayer.interrupt()
    if (trackSampleOffset?.trackId) {
      const { trackId, offset } = trackSampleOffset
      await client.cancelResponse(trackId, offset)
    }
    try {
      await wavRecorder.record((data) => client.appendInputAudio(data.mono))
    } catch (error) {
      console.log(error)
    }
  }

  /**
   * In push-to-talk mode, stop recording
   */
  const stopRecording = async () => {
    setIsRecording(false)
    const client = clientRef.current
    const wavRecorder = wavRecorderRef.current
    try {
      await wavRecorder.pause()
    } catch (error) {
      console.log(error)
    }
    try {
      client.createResponse()
    } catch (error) {
      console.log(error)
    }
  }

  /**
   * Switch between Manual <> VAD mode for communication
   */
  const changeTurnEndType = async (messageType) => {
    setMessageType(messageType)
    let value
    if (messageType === 'server_vad') {
      value = 'server_vad'
    } else if (messageType === 'none' || messageType === 'input') {
      value = 'none'
    }
    const client = clientRef.current
    const wavRecorder = wavRecorderRef.current
    if (value === 'none' && wavRecorder.getStatus() === 'recording') {
      await wavRecorder.pause()
    }
    client.updateSession({
      turn_detection: value === 'none' ? null : { type: 'server_vad' },
    })
    if (value === 'server_vad' && client.isConnected()) {
      await wavRecorder.record((data) => client.appendInputAudio(data.mono))
    }
    setCanPushToTalk(messageType === 'none')
  }

  const handleSearch = () => {
    let params = {
      talkId: localStorage.getItem('talkId'),
      gptVersion: 'realtime',
      pageNum: 1,
      pageSize: 20,
      isGetNewest: true,
    }
    const client = clientRef.current
    // client.conversation.processEvent({
    //   type: 'conversation.item.created',
    //   event_id: 'item_ARaEpHPCznsNlBGN5DGFp',
    //   item: {
    //     id: 'item_ARaEpHPCznsNlBGN5DGFp',
    //     object: 'realtime.item',
    //     type: 'message',
    //     status: 'completed',
    //     role: 'user',
    //     content: [{ type: 'input_text', text: '你好' }],
    //     formatted: { audio: {}, text: '你好', transcript: '' },
    //   }
    // })
    // let items = client.conversation.getItems()
    // console.log('items', items)
    Api.h5.chatSearch(params).then((res) => {
      // let list = [
      //   {
      //     id: 'item_ARaEpHPCznsNlBGN5DGFp',
      //     object: 'realtime.item',
      //     type: 'message',
      //     status: 'completed',
      //     role: 'user',
      //     content: [{ type: 'input_text', text: '你好' }],
      //     formatted: { audio: {}, text: '你好', transcript: '' },
      //   },
      //   {
      //     id: 'item_ARaEpLuspCKg6raB95pFr',
      //     object: 'realtime.item',
      //     type: 'message',
      //     status: 'in_progress',
      //     role: 'assistant',
      //     content: [{ type: 'audio', transcript: '你好!' }],
      //     formatted: { audio: {}, text: '', transcript: '你好!' },
      //   },
      // ]
      if (res.code === 200) {
        let list = res.data.list.map((item) => {
          return {
            id: item.uid,
            object: 'realtime.item',
            type: 'message',
            status: 'completed',
            role: item.messageType === '1' ? 'user' : 'assistant',
            content: [
              {
                type: item.messageType === '1' ? 'input_text' : 'text',
                text: item.message,
                transcript: item.message,
              },
            ],
            formatted: {
              audio: {},
              text: item.message,
              transcript: item.message,
            },
          }
        })
        setItems(list)
        list.forEach((item) => {
          client.conversation.processEvent({
            type: 'conversation.item.created',
            event_id: item.id,
            item: {
              ...item,
            },
          })
        })
        let items = client.conversation.getItems()
        console.log('items', items)
      }
    })
  }
  //#endregion

  //#region  useEffect
  /**
   * Auto-scroll the event logs
   */
  useEffect(() => {
    if (eventsScrollRef.current) {
      const eventsEl = eventsScrollRef.current
      const scrollHeight = eventsEl.scrollHeight
      // Only scroll if height has just changed
      if (scrollHeight !== eventsScrollHeightRef.current) {
        eventsEl.scrollTop = scrollHeight
        eventsScrollHeightRef.current = scrollHeight
      }
    }
  }, [realtimeEvents])

  /**
   * Auto-scroll the conversation logs
   */
  useEffect(() => {
    const conversationEls = [].slice.call(
      document.body.querySelectorAll('[data-conversation-content]')
    )
    for (const el of conversationEls) {
      const conversationEl = el
      conversationEl.scrollTop = conversationEl.scrollHeight
    }
  }, [items])

  /**
   * Set up render loops for the visualization canvas
   */
  useEffect(() => {
    let isLoaded = true
    const wavRecorder = wavRecorderRef.current
    const clientCanvas = clientCanvasRef.current
    let clientCtx = null
    const wavStreamPlayer = wavStreamPlayerRef.current
    const serverCanvas = serverCanvasRef.current
    let serverCtx = null
    const render = () => {
      if (isLoaded) {
        if (clientCanvas) {
          if (!clientCanvas.width || !clientCanvas.height) {
            clientCanvas.width = clientCanvas.offsetWidth
            clientCanvas.height = clientCanvas.offsetHeight
          }
          clientCtx = clientCtx || clientCanvas.getContext('2d')
          if (clientCtx) {
            clientCtx.clearRect(0, 0, clientCanvas.width, clientCanvas.height)
            const result = wavRecorder.recording
              ? wavRecorder.getFrequencies('voice')
              : { values: new Float32Array([0]) }
            WavRenderer.drawBars(
              clientCanvas,
              clientCtx,
              result.values,
              '#0099ff',
              10,
              0,
              8
            )
          }
        }
        if (serverCanvas) {
          if (!serverCanvas.width || !serverCanvas.height) {
            serverCanvas.width = serverCanvas.offsetWidth
            serverCanvas.height = serverCanvas.offsetHeight
          }
          serverCtx = serverCtx || serverCanvas.getContext('2d')
          if (serverCtx) {
            serverCtx.clearRect(0, 0, serverCanvas.width, serverCanvas.height)
            const result = wavStreamPlayer.analyser
              ? wavStreamPlayer.getFrequencies('voice')
              : { values: new Float32Array([0]) }
            WavRenderer.drawBars(
              serverCanvas,
              serverCtx,
              result.values,
              '#009900',
              10,
              0,
              8
            )
          }
        }
        window.requestAnimationFrame(render)
      }
    }
    render()
    return () => {
      isLoaded = false
    }
  }, [])

  /**
   * Core RealtimeClient and audio capture setup
   * Set all of our instructions, tools, events and more
   */
  useEffect(() => {
    // Get refs
    const wavStreamPlayer = wavStreamPlayerRef.current
    const client = clientRef.current
    // Set instructions
    client.updateSession({ instructions: instructions })
    // Set transcription, otherwise we don't get user transcriptions back
    client.updateSession({ input_audio_transcription: { model: 'whisper-1' } })
    // handle realtime events from client + server for event logging
    client.on('realtime.event', (realtimeEvent) => {
      if (realtimeEvent.event.code === 400) {
        antdMessage.warning(realtimeEvent.event.message)
        disconnectConversation()
        return
      }
      setRealtimeEvents((realtimeEvents) => {
        const lastEvent = realtimeEvents[realtimeEvents.length - 1]
        if (lastEvent?.event.type === realtimeEvent.event.type) {
          // if we receive multiple events in a row, aggregate them for display purposes
          lastEvent.count = (lastEvent.count || 0) + 1
          return realtimeEvents.slice(0, -1).concat(lastEvent)
        } else {
          return realtimeEvents.concat(realtimeEvent)
        }
      })
    })
    client.on('error', (event) => console.error(event))
    client.on('conversation.interrupted', async () => {
      const trackSampleOffset = await wavStreamPlayer.interrupt()
      if (trackSampleOffset?.trackId) {
        const { trackId, offset } = trackSampleOffset
        await client.cancelResponse(trackId, offset)
      }
    })
    client.on('conversation.updated', async ({ item, delta }) => {
      const items = client.conversation.getItems()
      if (delta?.audio) {
        wavStreamPlayer.add16BitPCM(delta.audio, item.id)
      }
      if (item.status === 'completed' && item.formatted.audio?.length) {
        const wavFile = await WavRecorder.decode(
          item.formatted.audio,
          24000,
          24000
        )
        item.formatted.file = wavFile
      }
      setItems(items)
      isAddStart = true
    })
    setItems(client.conversation.getItems())
    handleSearch()
    return () => {
      // cleanup; resets to defaults
      client.reset()
    }
    // eslint-disable-next-line
  }, [])

  useEffect(() => {
    if (Array.isArray(items) && items.length > 0) {
      let lastItem = items[items.length - 1]
      let addIdHistoryIndex = addIdHistory.findIndex(
        (item) => item === lastItem.id
      )
      if (
        lastItem?.status === 'completed' &&
        lastItem?.role === 'assistant' &&
        isAddStart === true &&
        addIdHistoryIndex < 0
      ) {
        addIdHistory.push(lastItem.id)
        let message = items[items.length - 2].formatted.transcript
          ? items[items.length - 2].formatted.transcript
          : items[items.length - 2].formatted.text
        let robotMessage = lastItem.formatted.transcript
        Api.h5
          .chatRealTimeAdd({
            talkId: localStorage.getItem('talkId'),
            name: localStorage.getItem('nickname'),
            message,
            robotMessage,
          })
          .then((res) => {
            if (res.code === 40006) {
              antdMessage.warning(res.message)
              disconnectConversation()
            }
          })
      }
    }
    // eslint-disable-next-line
  }, [items, isAddStart])
  //#endregion

  return (
    <div className="m-realtime-wrap-box">
      <div className={`m-realtime-wrap-chat`}>
        <SinglePageHeader
          goBackPath="/ai/index/home/chatList"
          title="Realtime"
        ></SinglePageHeader>
        <div className="m-realtime-list" id="scrollableDiv">
          {window.platform === 'rn' ? null : (
            <Dropdown
              menu={{ items: getItems() }}
              className="m-realtime-dropdown"
              trigger={['click', 'hover']}
            >
              <Icon name="more" className="m-realtime-menu-btn"></Icon>
            </Dropdown>
          )}
          <div data-component="ConsolePage">
            <div className="content-main">
              <div className="content-logs">
                <div className="content-block events">
                  <div className="visualization">
                    <div className="visualization-entry client">
                      <canvas ref={clientCanvasRef} />
                    </div>
                    <div className="visualization-entry server">
                      <canvas ref={serverCanvasRef} />
                    </div>
                  </div>
                  <div className="content-block-body" ref={eventsScrollRef}>
                    {!realtimeEvents.length && `等待连接...`}
                    {realtimeEvents.map((realtimeEvent, i) => {
                      const count = realtimeEvent.count
                      const event = { ...realtimeEvent.event }
                      if (event.type === 'input_audio_buffer.append') {
                        event.audio = `[trimmed: ${event.audio.length} bytes]`
                      } else if (event.type === 'response.audio.delta') {
                        event.delta = `[trimmed: ${event.delta.length} bytes]`
                      }
                      return (
                        <div className="event" key={event.event_id}>
                          <div className="event-timestamp">
                            {formatTime(realtimeEvent.time)}
                          </div>
                          <div className="event-details">
                            <div
                              className="event-summary"
                              onClick={() => {
                                // toggle event details
                                const id = event.event_id
                                const expanded = { ...expandedEvents }
                                if (expanded[id]) {
                                  delete expanded[id]
                                } else {
                                  expanded[id] = true
                                }
                                setExpandedEvents(expanded)
                              }}
                            >
                              <div
                                className={`event-source ${
                                  event.type === 'error'
                                    ? 'error'
                                    : realtimeEvent.source
                                }`}
                              >
                                {realtimeEvent.source === 'client' ? (
                                  <ArrowUp />
                                ) : (
                                  <ArrowDown />
                                )}
                                <span>
                                  {event.type === 'error'
                                    ? 'error!'
                                    : realtimeEvent.source}
                                </span>
                              </div>
                              <div className="event-type">
                                {event.type}
                                {count && ` (${count})`}
                              </div>
                            </div>
                            {!!expandedEvents[event.event_id] && (
                              <div className="event-payload">
                                {JSON.stringify(event, null, 2)}
                              </div>
                            )}
                          </div>
                        </div>
                      )
                    })}
                  </div>
                </div>
                <div className="content-block conversation">
                  <div className="content-block-body" data-conversation-content>
                    {!items.length && `等待连接...`}
                    {items.map((conversationItem, i) => {
                      return (
                        <div
                          className="conversation-item"
                          key={conversationItem.id}
                        >
                          <div
                            className={`speaker ${conversationItem.role || ''}`}
                          >
                            <div>
                              {(
                                conversationItem.role || conversationItem.type
                              ).replaceAll('_', ' ')}
                            </div>
                            <div
                              className="close"
                              onClick={() =>
                                deleteConversationItem(conversationItem.id)
                              }
                            >
                              <X />
                            </div>
                          </div>
                          <div className={`speaker-content`}>
                            {/* tool response */}
                            {conversationItem.type ===
                              'function_call_output' && (
                              <div>{conversationItem.formatted.output}</div>
                            )}
                            {/* tool call */}
                            {!!conversationItem.formatted.tool && (
                              <div>
                                {conversationItem.formatted.tool.name}(
                                {conversationItem.formatted.tool.arguments})
                              </div>
                            )}
                            {!conversationItem.formatted.tool &&
                              conversationItem.role === 'user' && (
                                <div className="m-realtime-message">
                                  {conversationItem.formatted.transcript ||
                                    (conversationItem.formatted.audio?.length
                                      ? '(awaiting transcript)'
                                      : conversationItem.formatted.text ||
                                        '(item sent)')}
                                </div>
                              )}
                            {!conversationItem.formatted.tool &&
                              conversationItem.role === 'assistant' && (
                                <div className="m-realtime-message">
                                  {conversationItem.formatted.transcript ||
                                    conversationItem.formatted.text ||
                                    '(truncated)'}
                                </div>
                              )}
                            {conversationItem.formatted.file && (
                              <audio
                                src={conversationItem.formatted.file.url}
                                controls
                              />
                            )}
                          </div>
                        </div>
                      )
                    })}
                  </div>
                </div>
                <div className="content-actions">
                  <Select
                    value={messageType}
                    onChange={(value) => changeTurnEndType(value)}
                    placeholder="请选择"
                  >
                    <Option value="none">手动</Option>
                    <Option value="server_vad">自动</Option>
                    <Option value="input">打字</Option>
                  </Select>
                  <div className="spacer" />
                  {isConnected && canPushToTalk && (
                    <>
                      {isPCFlag ? (
                        <Button
                          type="primary"
                          label={isRecording ? 'release to send' : 'push to talk'}
                          disabled={!isConnected || !canPushToTalk}
                          onMouseDown={startRecording}
                          onMouseUp={stopRecording}
                          className={`m-realtime-recorad-btn ${
                            isRecording ? 'active' : ''
                          }`}
                        >
                          {isRecording ? '松开发送' : '按住说话'}
                        </Button>
                      ) : (
                        <Button
                          type="primary"
                          label={isRecording ? 'release to send' : 'push to talk'}
                          disabled={!isConnected || !canPushToTalk}
                          onTouchStart={startRecording}
                          onTouchEnd={stopRecording}
                          className={`m-realtime-recorad-btn ${
                            isRecording ? 'active' : ''
                          }`}
                        >
                          {isRecording ? '松开发送' : '按住说话'}
                        </Button>
                      )}
                    </>
                  )}
                  {isConnected && messageType === 'input' ? (
                    <div className="m-realtime-input-wrap">
                      <Input.TextArea
                        value={message}
                        onChange={(event) => handleMessage(event)}
                        placeholder="请输入"
                      ></Input.TextArea>
                      <Button
                        type="primary"
                        onClick={() => handleTest()}
                        className="m-realtime-send-btn"
                      >
                        发送
                      </Button>
                    </div>
                  ) : null}
                  <div className="spacer" />
                  <Button
                    type="primary"
                    danger={isConnected ? true : false}
                    onClick={
                      isConnected ? disconnectConversation : connectConversation
                    }
                  >
                    {isConnected ? '已连接' : '连接'}
                  </Button>
                </div>
              </div>
            </div>
          </div>
        </div>
      </div>
    </div>
  )
}

const mapStateToProps = (state) => {
  return {
    collapsed: state.getIn(['light', 'collapsed']),
    isRNGotToken: state.getIn(['light', 'isRNGotToken']),
  }
}

const mapDispatchToProps = (dispatch) => {
  return {
    onSetState(key, value) {
      dispatch({ type: 'SET_LIGHT_STATE', key, value })
    },
    onDispatch(action) {
      dispatch(action)
    },
  }
}

export default connect(mapStateToProps, mapDispatchToProps)(withRouter(Index))

Backend: extracting the token from the request headers

  async handleUserAuth(req) {
    let index = req.rawHeaders.findIndex((item) =>
      item.includes('realtime, openai-insecure-api-key.')
    )
    let infoValue = ''
    if (index >= 0) {
      infoValue = req.rawHeaders[index]
    }
    infoValue = infoValue.replace('realtime, openai-insecure-api-key.', '')
    infoValue = infoValue.replace(', openai-beta.realtime-v1', '')
    let infoValueArr = infoValue.split('divide')
    let realTimeAuthRes = await axios.post(
      `${baseURL}/api/light/chat/realTimeAuth`,
      {
        token: infoValueArr[0],
        talkId: infoValueArr[1],
        apiKey,
      }
    )
    return realTimeAuthRes
  }
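The /api/light/chat/realTimeAuth endpoint is internal to this project, so its exact contract is not shown here. Judging from how the result is used (the relay checks data.code === 200, and the frontend's 'realtime.event' handler warns and disconnects on code === 400), its response is assumed to look roughly like:

{ "code": 200, "message": "ok" }            // authorized: the relay bridges the browser to OpenAI
{ "code": 400, "message": "token invalid" } // rejected: sent back over the socket and surfaced by the frontend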

Complete backend code

relay.js:

const { WebSocketServer } = require('ws')
const axios = require('axios')

let baseURL = process.env.aliIPAddressWithPort
let apiKey = process.env.apiKeyOnServer

class RealtimeRelay {
  constructor(apiKey) {
    this.apiKey = apiKey
    this.sockets = new WeakMap()
    this.wss = null
  }

  listen(port) {
    this.wss = new WebSocketServer({ port })
    this.wss.on('connection', this.connectionHandler.bind(this))
    this.log(`Listening on ws://localhost:${port}`)
  }

  async handleUserAuth(req) {
    let index = req.rawHeaders.findIndex((item) =>
      item.includes('realtime, openai-insecure-api-key.')
    )
    let infoValue = ''
    if (index >= 0) {
      infoValue = req.rawHeaders[index]
    }
    infoValue = infoValue.replace('realtime, openai-insecure-api-key.', '')
    infoValue = infoValue.replace(', openai-beta.realtime-v1', '')
    let infoValueArr = infoValue.split('divide')
    let realTimeAuthRes = await axios.post(
      `${baseURL}/api/light/chat/realTimeAuth`,
      {
        token: infoValueArr[0],
        talkId: infoValueArr[1],
        apiKey,
      }
    )
    return realTimeAuthRes
  }

  async connectionHandler(ws, req) {
    if (global.isAzure) {
      let realTimeAuthRes = await this.handleUserAuth(req)
      if (realTimeAuthRes.data.code === 200) {
        let Realtime = await import('@openai/realtime-api-beta')
        const { RealtimeClient } = Realtime
        if (!req.url) {
          this.log('No URL provided, closing connection.')
          ws.close()
          return
        }
        const url = new URL(req.url, `http://${req.headers.host}`)
        const pathname = url.pathname
        if (pathname !== '/') {
          this.log(`Invalid pathname: "${pathname}"`)
          ws.close()
          return
        }
        // Instantiate new client
        this.log(`Connecting with key "${this.apiKey.slice(0, 3)}..."`)
        const client = new RealtimeClient({ apiKey: this.apiKey })
        // Relay: OpenAI Realtime API Event -> Browser Event
        client.realtime.on('server.*', (event) => {
          this.log(`Relaying "${event.type}" to Client`)
          ws.send(JSON.stringify(event))
        })
        client.realtime.on('close', () => ws.close())
        // Relay: Browser Event -> OpenAI Realtime API Event
        // We need to queue data waiting for the OpenAI connection
        const messageQueue = []
        const messageHandler = (data) => {
          try {
            const event = JSON.parse(data)
            this.log(`Relaying "${event.type}" to OpenAI`)
            client.realtime.send(event.type, event)
          } catch (e) {
            console.error(e.message)
            this.log(`Error parsing event from client: ${data}`)
          }
        }
        ws.on('message', (data) => {
          if (!client.isConnected()) {
            messageQueue.push(data)
          } else {
            messageHandler(data)
          }
        })
        ws.on('close', () => client.disconnect())
        // Connect to OpenAI Realtime API
        try {
          this.log(`Connecting to OpenAI...`)
          await client.connect()
        } catch (e) {
          this.log(`Error connecting to OpenAI: ${e.message}`)
          ws.close()
          return
        }
        this.log(`Connected to OpenAI successfully!`)
        while (messageQueue.length) {
          messageHandler(messageQueue.shift())
        }
      } else {
        ws.send(
          JSON.stringify({
            ...realTimeAuthRes.data,
          })
        )
      }
    }
  }

  // eslint-disable-next-line
  log(...args) {
    // console.log(`[RealtimeRelay]`, ...args)
  }
}

module.exports = {
  RealtimeRelay,
}

Calling the code above:

const relay = new RealtimeRelay(process.env.openaiToken)
relay.listen(PORT)
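PORT and the environment variables (openaiToken, aliIPAddressWithPort, apiKeyOnServer) are defined elsewhere in the project. A minimal, self-contained entry file might look like this; the file name, the dotenv usage, the port number and setting global.isAzure at startup are all assumptions:

// server.js (sketch)
require('dotenv').config() // assumed: environment variables loaded from a .env file
const { RealtimeRelay } = require('./relay')

const PORT = process.env.REALTIME_PORT || 86 // assumed env var; 86 matches the nginx /ws proxy_pass above
global.isAzure = true // relay.js only bridges to OpenAI when this flag is truthy

const relay = new RealtimeRelay(process.env.openaiToken)
relay.listen(PORT)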

AI learning website

https://chat.xutongbao.top
