# Utility helpers: image resizing/base64 encoding and DashScope multimodal chat inference.
import base64
import requests
import time
import pdb
import dashscope
from dashscope import MultiModalConversation
from PIL import Image
import io
from openai import OpenAI
import json
def resize_encode_image(image_path, screen_scale_ratio=1):
    """Resize an image by a scale factor and return it as a base64-encoded PNG.

    Args:
        image_path: Path to the image file on disk.
        screen_scale_ratio: Multiplier applied to both width and height
            (default 1, i.e. keep the original size).

    Returns:
        str: Base64 encoding of the (possibly resized) image, re-encoded as PNG.
    """
    with Image.open(image_path) as img:
        new_width = int(img.width * screen_scale_ratio)
        new_height = int(img.height * screen_scale_ratio)
        # LANCZOS resampling preserves detail best when downscaling screenshots.
        resized_img = img.resize((new_width, new_height), Image.LANCZOS)
        buffered = io.BytesIO()
        # Always serialize as PNG so the output format is uniform regardless
        # of the source file's original format.
        resized_img.save(buffered, format="PNG")
        return base64.b64encode(buffered.getvalue()).decode('utf-8')
def inference_chat(chat, model, api_url, token):
    """Send a multi-turn chat to a DashScope multimodal model and return the reply text.

    Args:
        chat: Iterable of (role, content) pairs; content may include image parts.
        model: DashScope model name (e.g. a qwen-vl variant).
        api_url: Base HTTP endpoint for the DashScope API.
        token: DashScope API key.

    Returns:
        str: Text of the first choice in the model's reply.

    Raises:
        RuntimeError: If no valid response is obtained after all retry attempts.
            (Previously this path crashed with UnboundLocalError or returned a
            raw response object instead of text.)
    """
    messages = [{"role": role, "content": content} for role, content in chat]

    dashscope.base_http_api_url = api_url
    num_try = 5
    for _ in range(num_try):
        try:
            response = MultiModalConversation.call(
                api_key=token, model=model, messages=messages
            )
            # An error payload lacks the expected keys and raises here
            # (KeyError/TypeError/IndexError), which triggers a retry.
            text = response['output']['choices'][0]['message']['content'][0]["text"]
        except Exception:
            print("Network Error:")
            try:
                # If the call itself succeeded, show the (error) payload for debugging.
                print(response)
            except NameError:
                # The call never returned, so there is nothing to show.
                print("Request Failed")
            time.sleep(2)
        else:
            return text
    raise RuntimeError(f"inference_chat: no valid response after {num_try} attempts")