Python Example
# Python example: calling a local OpenAI-compatible GLM gateway.
import openai

# Configure the client; api_key corresponds to the server's DEFAULT_KEY.
client = openai.OpenAI(
    api_key="your-api-key",
    base_url="http://localhost:9090/v1"
)

# Example 1: flagship model GLM-5 for complex reasoning.
response = client.chat.completions.create(
    model="GLM-5",
    messages=[{"role": "user", "content": "分析并优化这段代码的时间复杂度"}]
)
print(response.choices[0].message.content)

# Example 2: GLM-4.5-Air for fast responses (simple conversation).
response = client.chat.completions.create(
    model="GLM-4.5-Air",
    messages=[{"role": "user", "content": "今天天气怎么样?"}]
)
print(response.choices[0].message.content)

# Example 3: streaming request with GLM-4.7.
response = client.chat.completions.create(
    model="GLM-4.7",
    messages=[{"role": "user", "content": "请写一首关于春天的诗"}],
    stream=True
)
for chunk in response:
    # Some stream chunks may carry an empty `choices` list (e.g. trailing
    # usage-only chunks), so guard before indexing.
    if chunk.choices and chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="")

# Example 4: enable thinking mode (supported by GLM-4.5/4.6/4.7/5).
# `reasoning` is not a standard Chat Completions parameter; the OpenAI
# Python SDK raises TypeError for unknown keyword arguments, so the custom
# field must be sent through `extra_body` instead of as a bare kwarg.
response = client.chat.completions.create(
    model="GLM-5",
    messages=[{"role": "user", "content": "分析这段算法的时间复杂度并给出优化建议"}],
    extra_body={"reasoning": True}  # enable thinking mode, exposing the detailed reasoning trace
)
print(response.choices[0].message.content)

# Example 5: multimodal model GLM-4.6V (image input).
# response = client.chat.completions.create(
#     model="GLM-4.6V",
#     messages=[{
#         "role": "user",
#         "content": [
#             {"type": "text", "text": "描述这张图片"},
#             {"type": "image_url", "image_url": {"url": "https://example.com/image.jpg"}}
#         ]
#     }]
# )
cURL Example
# Example 1: flagship model GLM-5 (complex tasks)
curl -X POST http://localhost:9090/v1/chat/completions \
  -H "Content-Type: application/json" \
  -H "Authorization: Bearer your-api-key" \
  -d '{
"model": "GLM-5",
"messages": [{"role": "user", "content": "请分析这段代码的性能瓶颈"}],
"stream": false
}'

# Example 2: GLM-4.5-Air for fast responses (simple conversation)
curl -X POST http://localhost:9090/v1/chat/completions \
  -H "Content-Type: application/json" \
  -H "Authorization: Bearer your-api-key" \
  -d '{
"model": "GLM-4.5-Air",
"messages": [{"role": "user", "content": "你好"}],
"stream": false
}'

# Example 3: streaming request with GLM-4.7
curl -X POST http://localhost:9090/v1/chat/completions \
  -H "Content-Type: application/json" \
  -H "Authorization: Bearer your-api-key" \
  -d '{
"model": "GLM-4.7",
"messages": [{"role": "user", "content": "讲一个有趣的故事"}],
"stream": true
}'

# Example 4: thinking mode with GLM-5 (shows the detailed reasoning trace)
curl -X POST http://localhost:9090/v1/chat/completions \
  -H "Content-Type: application/json" \
  -H "Authorization: Bearer your-api-key" \
  -d '{
"model": "GLM-5",
"messages": [{"role": "user", "content": "分析这段算法的时间复杂度并给出优化建议"}],
"reasoning": true,
"stream": false
}'

# Example 5: multimodal request with GLM-4.6V (image input)
# NOTE(review): the original had literal '#' characters spliced into the middle
# of the command line; rewritten with proper backslash continuations so the
# command actually runs once uncommented.
# curl -X POST http://localhost:9090/v1/chat/completions \
#   -H "Content-Type: application/json" \
#   -H "Authorization: Bearer your-api-key" \
#   -d '{
# "model": "GLM-4.6V",
# "messages": [{
# "role": "user",
# "content": [
# {"type": "text", "text": "这张图片里有什么?"},
# {"type": "image_url", "image_url": {"url": "https://example.com/image.jpg"}}
# ]
# }],
# "stream": false
# }'
JavaScript Example
const fetch = require('node-fetch');
// Send a chat request to the local GLM gateway and print the reply.
// model:   model name (e.g. 'GLM-5', 'GLM-4.5-Air')
// message: user message content
// stream:  when true, consume the SSE stream and print tokens as they arrive
async function chatWithGLM(model, message, stream = false) {
  const response = await fetch('http://localhost:9090/v1/chat/completions', {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      'Authorization': 'Bearer your-api-key'
    },
    body: JSON.stringify({
      model: model,
      messages: [{ role: 'user', content: message }],
      stream: stream
    })
  });
  if (stream) {
    // Handle the streamed (SSE) response.
    const reader = response.body.getReader();
    const decoder = new TextDecoder();
    // An SSE event can be split across network chunks, so keep the trailing
    // partial line in a buffer instead of parsing it prematurely.
    let buffered = '';
    while (true) {
      const { done, value } = await reader.read();
      if (done) break;
      buffered += decoder.decode(value, { stream: true });
      // FIX: the original source contained a raw newline inside the string
      // literal (a syntax error); the intended separator is '\n'.
      const lines = buffered.split('\n');
      buffered = lines.pop(); // retain the incomplete last line
      for (const line of lines) {
        if (line.startsWith('data: ')) {
          const data = line.slice(6);
          if (data === '[DONE]') {
            console.log('\n流式响应完成');
            return;
          }
          try {
            const parsed = JSON.parse(data);
            const content = parsed.choices[0]?.delta?.content;
            if (content) {
              process.stdout.write(content);
            }
          } catch (e) {
            // Ignore lines that are not valid JSON.
          }
        }
      }
    }
  } else {
    // Handle the plain (non-streaming) response.
    const data = await response.json();
    console.log(data.choices[0].message.content);
  }
}
// Usage example 1: flagship model GLM-5 (complex task)
chatWithGLM('GLM-5', '请分析并优化这段算法的时间复杂度', false);
// Usage example 2: GLM-4.5-Air (fast response)
chatWithGLM('GLM-4.5-Air', '你好', false);
// Usage example 3: GLM-4.7 with streaming output
chatWithGLM('GLM-4.7', '写一个关于未来的短篇故事', true);
// Usage example 4: thinking mode (supported by GLM-4.5/4.6/4.7/5).
// Sends a non-streaming request with the gateway's custom `reasoning` flag
// enabled and prints the final answer.
async function chatWithReasoning(model, message) {
  const payload = {
    model: model,
    messages: [{ role: 'user', content: message }],
    reasoning: true, // enable thinking mode
    stream: false
  };
  const response = await fetch('http://localhost:9090/v1/chat/completions', {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      'Authorization': 'Bearer your-api-key'
    },
    body: JSON.stringify(payload)
  });
  const data = await response.json();
  console.log(data.choices[0].message.content);
}
chatWithReasoning('GLM-5', '分析这段算法的时间复杂度并给出优化建议');
// Usage example 5: multimodal model GLM-4.6V (image input)
// chatWithGLM('GLM-4.6V', [
// { type: 'text', text: '描述这张图片' },
// { type: 'image_url', image_url: { url: 'https://example.com/image.jpg' } }
// ], false);