# NOTE(review): removed web-scrape residue (file-viewer chrome: "Spaces"/"Running"
# status lines, file size, commit hashes, and gutter line numbers) that preceded
# the imports and made this file invalid Python.
import base64
import requests
import time
import pdb
import dashscope
from dashscope import MultiModalConversation
from PIL import Image
import io
from openai import OpenAI
import json
def resize_encode_image(image_path, screen_scale_ratio=1):
    """Load an image, optionally rescale it, and return it as a base64 PNG string.

    Args:
        image_path: Path to an image file readable by PIL.
        screen_scale_ratio: Factor applied to both width and height.
            Defaults to 1 (original size, no resampling pass).

    Returns:
        str: Base64-encoded PNG bytes of the (possibly resized) image.
    """
    with Image.open(image_path) as img:
        # Clamp to at least 1px so a very small ratio cannot produce a
        # zero-sized target, which would make resize() fail.
        new_width = max(1, int(img.width * screen_scale_ratio))
        new_height = max(1, int(img.height * screen_scale_ratio))
        # Skip the LANCZOS resample entirely when the size is unchanged
        # (the common screen_scale_ratio=1 case) — it is pure wasted work.
        if (new_width, new_height) != (img.width, img.height):
            img = img.resize((new_width, new_height), Image.LANCZOS)
        buffered = io.BytesIO()
        img.save(buffered, format="PNG")
    return base64.b64encode(buffered.getvalue()).decode('utf-8')
def inference_chat(chat, model, api_url, token):
    """Send a chat history to an OpenAI-compatible chat-completions endpoint.

    Args:
        chat: Iterable of (role, content) pairs forming the conversation.
        model: Model name to request from the endpoint.
        api_url: Full URL of the chat-completions API.
        token: Bearer token placed in the Authorization header.

    Returns:
        dict: The parsed JSON response body from the API.

    Raises:
        requests.HTTPError: If the server returns an error status code.
        requests.Timeout: If the server does not respond within the timeout.
    """
    messages = [{"role": role, "content": content} for role, content in chat]
    # BUG FIX: the `model` parameter was previously ignored — the payload
    # hard-coded "gpt-4o". Pass the caller's model through instead.
    payload = json.dumps({
        "model": model,
        "messages": messages
    })
    headers = {
        'Accept': 'application/json',
        'Authorization': f'Bearer {token}',
        'User-Agent': 'Apifox/1.0.0 (https://apifox.com)',
        'Content-Type': 'application/json'
    }
    # A timeout keeps a dead/unreachable endpoint from hanging forever;
    # raise_for_status surfaces HTTP errors before we parse the body
    # (the old code would have hit an opaque KeyError instead).
    response = requests.post(api_url, headers=headers, data=payload, timeout=120)
    response.raise_for_status()
    data = response.json()
    print(data['choices'][0]['message']['content'])
    return data