1. What is Zhipu AI
Zhipu AI was founded in 2019 as a spin-off of the technology developed at the Knowledge Engineering Group of Tsinghua University's Department of Computer Science. The company focuses on the research, development, and application of artificial intelligence, and is dedicated to building a new generation of cognitive large language models as a Chinese effort in large-model innovation.
2. Calling the Zhipu open platform API
Official documentation:
https://open.bigmodel.cn/dev/api#http_para
Create an application (to obtain an API key):
https://open.bigmodel.cn/usercenter/apikeys
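The examples below hardcode an API key in the source for clarity. In practice it is better to keep the key out of the code, for example in an environment variable; here is a minimal Python sketch of that pattern (the variable name ZHIPUAI_API_KEY is just an assumption for illustration):

import os

# Read the key from an environment variable instead of hardcoding it.
# ZHIPUAI_API_KEY is an arbitrary name chosen for this sketch.
api_key = os.environ.get("ZHIPUAI_API_KEY")
if not api_key:
    raise RuntimeError("Set ZHIPUAI_API_KEY before running the examples")

# The header shape used by every request in this article.
headers = {
    "Authorization": "Bearer " + api_key,
    "Content-Type": "application/json",
}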
Node.js HTTP call example
cnpm i request --save
const request = require('request')

async function main() {
    const url = "https://open.bigmodel.cn/api/paas/v4/chat/completions"
    const body = {
        "model": "glm-4",       // model to use
        "temperature": 0.9,     // sampling temperature; higher values make answers to the same question more varied, range (0, 1)
        "top_p": 0.7,           // nucleus sampling threshold; balances output quality and diversity, range (0, 1)
        "max_tokens": 1000,     // maximum number of tokens in the model's reply
        "messages": [
            {
                "role": "system",  // sets the conversation background / persona
                "content": "你是一个聪明且富有创造力的小说作家"
            },
            {
                "role": "user",
                "content": "你是谁"
            }
        ]
    }
    const header = {
        "Authorization": "Bearer 8e67b3159ae070a71d71fe97fceb733b.Bf7fRh7Yy7uWxdDp", // replace with your own API key
        "Content-Type": "application/json"
    }
    const options = {
        method: 'POST',
        url: url,
        headers: header,
        body: JSON.stringify(body)
    };
    request(options, function (error, response) {
        if (error) throw new Error(error);
        console.log(response.body);
    });
}
main();
Golang HTTP call example
package main

import (
    "bytes"
    "encoding/json"
    "fmt"
    "net/http"
)

func main() {
    url := "https://open.bigmodel.cn/api/paas/v4/chat/completions"

    // Request body: model, sampling parameters and the conversation messages.
    body := map[string]interface{}{
        "model":       "glm-4",
        "temperature": 0.9,
        "top_p":       0.7,
        "max_tokens":  100,
        "messages": []map[string]interface{}{
            {
                "role":    "system",
                "content": "你是一个聪明且富有创造力的小说作家",
            },
            {
                "role":    "user",
                "content": "你是谁",
            },
        },
    }

    jsonBody, err := json.Marshal(body)
    if err != nil {
        fmt.Println("Error marshalling JSON:", err)
        return
    }

    req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonBody))
    if err != nil {
        fmt.Println("Error creating request:", err)
        return
    }
    // Replace with your own API key.
    req.Header.Set("Authorization", "Bearer 8e67b3159ae070a71d71fe97fceb733b.Bf7fRh7Yy7uWxdDp")
    req.Header.Set("Content-Type", "application/json")

    client := &http.Client{}
    resp, err := client.Do(req)
    if err != nil {
        fmt.Println("Error sending request:", err)
        return
    }
    defer resp.Body.Close()

    var result map[string]interface{}
    err = json.NewDecoder(resp.Body).Decode(&result)
    if err != nil {
        fmt.Println("Error decoding response:", err)
        return
    }
    fmt.Println(result)
}
Python HTTP call example
import requests
import json

# Request URL
url = "https://open.bigmodel.cn/api/paas/v4/chat/completions"

# Request body
body = {
    "model": "glm-4",
    "temperature": 0.9,
    "top_p": 0.7,
    "max_tokens": 100,
    "messages": [
        {
            "role": "system",
            "content": "你是一个聪明且富有创造力的小说作家"
        },
        {
            "role": "user",
            "content": "你是谁"
        }
    ],
}

# Request headers (replace the key with your own)
headers = {
    "Authorization": "Bearer 8e67b3159ae070a71d71fe97fceb733b.Bf7fRh7Yy7uWxdDp",
    "Content-Type": "application/json"
}

# Send the POST request
response = requests.post(url, headers=headers, data=json.dumps(body))

# Print the response
print(response.json())
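The raw HTTP response uses the same OpenAI-style chat-completions shape that the SDK examples below read from, so the assistant's reply can be extracted from the JSON directly; a minimal sketch, assuming the request succeeded and at least one choice was returned:

data = response.json()

# The generated text sits under choices[0].message.content,
# mirroring what the SDK examples below access.
print(data["choices"][0]["message"]["content"])

# Token accounting, if the response includes a usage field.
print(data.get("usage"))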
OpenAI SDK call examples
https://github.com/openai/openai-node
https://github.com/openai/openai-python
https://github.com/sashabaranov/go-openai
For reference, a similar OpenAI-SDK call example from the iFlytek Spark documentation:
https://www.xfyun.cn/doc/spark/HTTP%E8%B0%83%E7%94%A8%E6%96%87%E6%A1%A3.html#_7-%E4%BD%BF%E7%94%A8openai-sdk%E8%AF%B7%E6%B1%82%E7%A4%BA%E4%BE%8B
Python OpenAI SDK
https://github.com/openai/openai-python
1. Install the dependency
pip install openai
2. Make a request
# https://github.com/openai/openai-python
# pip install openai
from openai import OpenAI

client = OpenAI(
    api_key="8e67b3159ae070a71d71fe97fceb733b.Bf7fRh7Yy7uWxdDp",
    base_url="https://open.bigmodel.cn/api/paas/v4/"
)

completion = client.chat.completions.create(
    model="glm-4",
    messages=[
        {"role": "system", "content": "你是一个聪明且富有创造力的小说作家"},
        {"role": "user", "content": "请你作为童话故事大王,写一篇短篇童话故事,故事的主题是要永远保持一颗善良的心,要能够激发儿童的学习兴趣和想象力,同时也能够帮助儿童更好地理解和接受故事中所蕴含的道理和价值观。"}
    ],
    top_p=0.7,
    temperature=0.9
)

print(completion.choices[0].message)
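Because the endpoint is exposed in an OpenAI-compatible form, streaming output should also be possible through the same SDK by passing stream=True; the sketch below assumes the GLM endpoint honors the OpenAI-style streaming protocol (check the Zhipu docs for the exact behavior):

# Streaming sketch: assumes the endpoint supports the OpenAI-style stream flag.
stream = client.chat.completions.create(
    model="glm-4",
    messages=[{"role": "user", "content": "你好"}],
    stream=True,
)
for chunk in stream:
    # Each chunk carries an incremental piece of the reply in delta.content.
    if chunk.choices and chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
print()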
Node.js OpenAI SDK
https://github.com/openai/openai-node
Install the dependency
npm install openai --save
cnpm install openai --save
To use the import syntax, package.json needs "type": "module" configured:
{
    "type": "module",
    "dependencies": {
    }
}
Example code
import OpenAI from 'openai';

const client = new OpenAI({
    apiKey: "8e67b3159ae070a71d71fe97fceb733b.Bf7fRh7Yy7uWxdDp",
    baseURL: "https://open.bigmodel.cn/api/paas/v4/"
});

async function main() {
    const chatCompletion = await client.chat.completions.create({
        messages: [{ role: 'user', content: '你好' }],
        model: 'GLM-4-Flash',
    });
    console.log(chatCompletion.choices[0].message.content);
}

main();
Video walkthrough of calling the Zhipu open platform API:
[Application development] Calling the ChatGLM / Zhipu AI large model API with Node.js, Python, Golang and the OpenAI SDK