"""Extract exam questions from an image with Qwen-VL OCR and save the result.

Sends one image URL to DashScope's OpenAI-compatible endpoint using the
``qwen-vl-ocr-latest`` model, prints the recognized text, and writes it to
``Res/2、识别出结果.md``.
"""
import os

from openai import OpenAI

# SECURITY: never commit an API key in source code. Read it from the
# environment instead (export DASHSCOPE_API_KEY=... before running).
client = OpenAI(
    api_key=os.getenv("DASHSCOPE_API_KEY"),
    base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
)

prompt = "请提取图片中的试题"

completion = client.chat.completions.create(
    model="qwen-vl-ocr-latest",
    messages=[
        {
            "role": "user",
            "content": [
                {
                    "type": "image_url",
                    "image_url": "https://pic1.zhimg.com/v2-c3d7f060bbf3f7c122319350044d8888_1440w.jpg",
                    # Minimum pixel threshold: a smaller image is scaled up
                    # proportionally until its total pixels exceed min_pixels.
                    "min_pixels": 28 * 28 * 4,
                    # Maximum pixel threshold: a larger image is scaled down
                    # proportionally until its total pixels fall below max_pixels.
                    "max_pixels": 28 * 28 * 8192,
                },
                # qwen-vl-ocr-latest accepts a custom prompt in the text field;
                # if omitted, a default prompt is used ("Please output only the
                # text content from the image without any additional
                # descriptions or formatting."). Note: qwen-vl-ocr-1028 uses a
                # fixed prompt and ignores custom text.
                {"type": "text", "text": prompt},
            ],
        }
    ],
)

# content may be None if the model returns no text; normalize to "" for the
# file write below.
result = completion.choices[0].message.content or ""
print(result)

# Save the returned content to 2、识别出结果.md, creating the output
# directory first so the open() call cannot fail with FileNotFoundError.
os.makedirs("Res", exist_ok=True)
with open("Res/2、识别出结果.md", "w", encoding="utf-8") as f:
    f.write(result)

print("保存成功!")