# Flash-Tool / app.py
# (Hugging Face Space page residue preserved as comments:
#  "ha1772007's picture — Update app.py — f10f8a2 verified")
# Web framework + CORS + local LLM setup.
# NOTE(review): imports are interleaved with app construction; grouping all
# imports at the top of the file would be tidier.
from flask import Flask, request, jsonify
import json
from flask_cors import CORS
app = Flask(__name__)
# Allow cross-origin requests from any origin (browser clients hit this API).
CORS(app)
import urllib.parse  # NOTE(review): appears unused in this file — confirm before removing
from llama_cpp import Llama
# Load the quantized Qwen2.5-0.5B model once at import time.
# "chatml-function-calling" enables the tools/tool_choice API used in the route below.
llm = Llama(model_path="qwen2.5-0.5b-instruct-q4_k_m.gguf", chat_format="chatml-function-calling")
class cget():
    """Defensive subscript wrapper.

    ``cget(obj).get(key, default)`` returns ``obj[key]``, or *default* when
    the lookup fails for any of the usual reasons: missing key, index out of
    range, or a non-subscriptable object (including ``None``).
    """

    def __init__(self, object):
        # NOTE(review): parameter name ``object`` shadows the builtin; kept
        # as-is to preserve the existing call interface.
        self.object = object

    def get(self, subscript, exceptr):
        """Return ``self.object[subscript]``, or *exceptr* on lookup failure."""
        try:
            return self.object[subscript]
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; these three cover every lookup failure mode.
        except (TypeError, KeyError, IndexError):
            return exceptr
def convert_openai_to_langchain(input):
    """Convert an OpenAI-style chat-completion response dict into a
    LangChain-style AIMessage dict.

    Bug fix: the original read ``choices[0]['messages']`` (plural) for the
    content fields, but the OpenAI response shape is ``choices[0]['message']``
    (as the tool_calls extraction in this same function already assumed), so
    ``content``/``DirectResult`` were always None. Missing fields still
    degrade to None/defaults rather than raising.

    Args:
        input: raw chat-completion response dict (name kept for interface
            compatibility despite shadowing the builtin).
    Returns:
        dict with keys content, additional_kwargs, response_metadata, type,
        name, id, example, usage_metadata, DirectResult, original_response.
    """
    def _safe_get(obj, key, default):
        # Tolerant subscript: obj[key], or default when obj is None /
        # non-subscriptable or the key/index is absent.
        try:
            return obj[key]
        except (TypeError, KeyError, IndexError):
            return default

    message = _safe_get(_safe_get(_safe_get(input, 'choices', []), 0, {}), 'message', {})
    usage = _safe_get(input, 'usage', {})

    # additional_kwargs is None unless the model actually emitted tool calls
    # (matches the original "len < 1 -> None" behavior).
    additional_kwargs = None
    raw_calls = _safe_get(message, 'tool_calls', [])
    if raw_calls:
        try:
            additional_kwargs = {
                "tool_calls": [
                    {
                        "id": "No ID",
                        "function": {
                            "arguments": _safe_get(_safe_get(call, 'function', {}), 'arguments', {}),
                            "name": _safe_get(_safe_get(call, 'function', {}), 'name', ''),
                        },
                        "type": "function",
                    }
                    for call in raw_calls
                ]
            }
        except TypeError:
            # raw_calls not iterable (malformed response) — keep None.
            additional_kwargs = None

    content = _safe_get(message, 'content', None)
    json_data = {
        "content": content,
        "additional_kwargs": additional_kwargs,
        "response_metadata": {
            "token_usage": {
                "completion_tokens": _safe_get(usage, 'completion_tokens', None),
                "prompt_tokens": _safe_get(usage, 'prompt_tokens', None),
                "total_tokens": _safe_get(usage, 'total_tokens', None),
            },
            "model_name": _safe_get(input, 'model', None),
        },
        "type": "ai",
        "name": None,
        "id": _safe_get(input, 'id', None),
        "example": False,
        "usage_metadata": {
            "input_tokens": _safe_get(usage, 'prompt_tokens', None),
            "output_tokens": _safe_get(usage, 'completion_tokens', None),
            "total_tokens": _safe_get(usage, 'total_tokens', None),
        },
        # Duplicate of "content", kept because downstream consumers read it.
        "DirectResult": content,
        "original_response": input,
    }
    return json_data
def ToolSelector(other):
    """Resolve requested tool names into OpenAI function-calling tool specs.

    Args:
        other: request options dict; must contain a 'tools' key holding a
            list of tool-name strings (raises KeyError when absent, as before).
    Returns:
        list of tool spec dicts (OpenAI ``tools`` format) for every requested
        name found in the catalog; unknown names are silently skipped.
    Raises:
        Exception: when other['tools'] is present but not a list.

    Fixes over the original: builds each spec as a fresh dict instead of
    mutating the catalog entry (duplicate names no longer KeyError on the
    already-popped 'parameters'), and drops the bare ``except: pass`` that
    silently truncated results.
    """
    AVAIABLETOOLS = [
        {
            "name": "search",
            "description": "Search Internet For Related Query and Provide Uptodate query",
            "parameters": {
                "type": "object",
                "properties": {
                    "query": {
                        "type": "string",
                        "description": "Search Query Follow the General Search Methods to get better result"
                    }
                },
                "required": ["query"]
            }
        }
    ]
    toreturn = []
    if isinstance(other['tools'], list):
        for tool in other['tools']:
            for thistool in AVAIABLETOOLS:
                if thistool['name'] == tool:
                    # Fresh spec dict — catalog entries stay unmodified.
                    toreturn.append({
                        "name": thistool['name'],
                        "description": thistool['description'],
                        "type": "function",
                        "function": {
                            "name": thistool['name'],
                            "parameters": thistool['parameters'],
                        },
                    })
    else:
        # Error message kept byte-identical for client compatibility.
        raise Exception('tools is not provided in list formate')
    print(toreturn)
    return toreturn
def checknu(m):
    """Return True when *m* is a usable value, i.e. neither None nor ''."""
    return m not in (None, '')
@app.route('/', methods=['POST','OPTIONS'])
def process_data():
    """Single chat endpoint.

    Expects a JSON body with non-empty 'conversation', 'provider', 'model',
    'api' and 'other' fields; runs the local model with the requested tools
    and returns the result in LangChain-message form. Error responses are
    returned as JSON strings, matching the original behavior.
    """
    if request.method != 'POST':
        return jsonify({"message": "Invalid request method"})

    payload = request.get_json()
    if not payload:
        return jsonify({"message": "No data received"}), 400

    # All five fields must be present and non-empty ('model'/'api' are only
    # validated here, not otherwise used by this provider).
    required_fields = ('conversation', 'provider', 'model', 'api', 'other')
    if not all(checknu(payload.get(field)) for field in required_fields):
        return json.dumps({'type':'error','message':'missing parameter'},indent=4)

    # NOTE(review): incoming messages carry their text under 'context' and are
    # re-keyed to 'content' for llama-cpp — presumably the client contract;
    # confirm, since a message missing 'context' raises KeyError here.
    messages = [{'role': msg['role'], 'content': msg['context']} for msg in payload.get('conversation')]

    options = payload.get('other')
    print(options)
    selected_tools = ToolSelector(options)

    if payload.get('provider') != "Flash-Tool":
        return json.dumps({'type':'error','message':'Flash-Tool is expected Provider'})

    print(json.dumps({"tools":selected_tools},indent=4))
    completion = llm.create_chat_completion(
        messages = messages,
        tools=selected_tools,
        # Force the model to call the 'search' function.
        tool_choice={
            "type": "function",
            "function": {
                "name": "search"
            }
        },
        max_tokens=100
    )
    return json.dumps(convert_openai_to_langchain(completion),indent=4)
# Run the Flask development server when executed directly.
# NOTE(review): debug=True enables the interactive werkzeug debugger —
# do not deploy with this flag set.
if __name__ == '__main__':
    app.run(debug=True)