Your Hugging Face Username committed
Commit ed1f8da · 1 Parent(s): 03a68eb
First Application Add
- Dockerfile +20 -0
- README.md +1 -10
- app.py +27 -0
- chat.py +18 -0
- requirements.txt +9 -0
Dockerfile
ADDED
@@ -0,0 +1,20 @@
+FROM python:3.10
+
+WORKDIR /code
+
+COPY ./requirements.txt /code/requirements.txt
+
+RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt
+RUN apt update
+RUN apt install -y protobuf-compiler libprotobuf-dev
+RUN useradd -m -u 1000 user
+USER user
+ENV HOME=/home/user \
+    PATH=/home/user/.local/bin:$PATH
+
+WORKDIR $HOME/app
+
+COPY --chown=user . $HOME/app
+
+
+CMD ["flask", "run", "--host", "0.0.0.0", "--port", "7860"]
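For reference, a container built from this Dockerfile serves the Flask app on port 7860. The snippet below is a minimal client-side sketch against such a container; the image name, model name and API key are illustrative, and `requests` is a client-side dependency that is not part of requirements.txt.

# Client-side sketch for a container started from this image, e.g.
#   docker build -t myai-backend . && docker run -p 7860:7860 myai-backend
# (image name, model name and API key below are illustrative placeholders).
import requests  # client-side dependency, not listed in requirements.txt

payload = {
    "conversation": [{"role": "human", "context": "Ping?"}],
    "provider": "groq",
    "model": "llama3-8b-8192",      # placeholder model name
    "api": "YOUR_GROQ_API_KEY",     # placeholder key
}
# The endpoint returns the model's reply serialised as an indented JSON string.
print(requests.post("http://localhost:7860/", json=payload).text)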
README.md
CHANGED
@@ -1,10 +1 @@
----
-title: Langchain Simple Server
-emoji: 👀
-colorFrom: red
-colorTo: blue
-sdk: docker
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+# MyAI_backend
app.py
ADDED
@@ -0,0 +1,27 @@
+import mdtex2html
+from flask import Flask, request
+from chat import converse
+import json
+
+
+app = Flask(__name__)
+
+@app.route("/",methods=['GET','POST'])
+def home():
+    par = request.json
+    try:
+        conversation = par.get('conversation')
+        provider = par.get('provider')
+        model = par.get('model')
+        api = par.get('api')
+        load = json.loads(converse(conversation,provider,model,api))
+        try:
+            load['content'] = mdtex2html.convert(load['content'])
+            toreturn = json.dumps(load,indent=4)
+        except:
+            toreturn = json.dumps(load,indent=4)
+        return toreturn
+    except Exception as e:
+        return str(e)
+if __name__ == "__main__":
+    app.run(host='0.0.0.0',debug=True,port=1777)
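As a usage sketch, the `/` route can be exercised with Flask's test client without starting a server; the payload keys mirror what `home()` reads, and the model name and API key below are placeholders rather than part of this commit.

# Sketch of exercising the / route with Flask's built-in test client
# (no running server needed); model name and API key are placeholders.
from app import app

payload = {
    "conversation": [{"role": "human", "context": "Hello!"}],
    "provider": "groq",
    "model": "llama3-8b-8192",      # placeholder model name
    "api": "YOUR_GROQ_API_KEY",     # placeholder key
}

with app.test_client() as client:
    resp = client.post("/", json=payload)
    # Response body: the model's message as indented JSON, with 'content'
    # converted to HTML by mdtex2html when that conversion succeeds.
    print(resp.get_data(as_text=True))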
chat.py
ADDED
@@ -0,0 +1,18 @@
+import json
+from langchain_core.prompts import ChatPromptTemplate
+from langchain_groq import ChatGroq
+from langchain.chains import ConversationChain
+from langchain_google_genai import ChatGoogleGenerativeAI
+from langchain_core.output_parsers import JsonOutputParser
+def langchainConversation(conversation):
+    prompts = []
+    for message in conversation:
+        prompts.append((message['role'],message['context']))
+    chat_template = ChatPromptTemplate.from_messages(prompts)
+    return chat_template.format_messages()
+def converse(conversation,provider,model,key):
+    if(provider=='groq'):
+        chat = ChatGroq(temperature=0, groq_api_key=key, model_name=model)
+    elif(provider=='google'):
+        chat = ChatGoogleGenerativeAI(model=model,google_api_key=key)
+    return json.dumps(json.loads(chat.invoke(langchainConversation(conversation)).json()),indent=4)
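`converse` can also be called directly, bypassing Flask. In the sketch below the model name and key are placeholders, and each message uses the `role`/`context` keys that `langchainConversation` expects.

# Sketch of calling chat.converse directly (model name and key are placeholders).
from chat import converse

conversation = [
    {"role": "system", "context": "You are a terse assistant."},
    {"role": "human", "context": "What is 2 + 2?"},
]

# Returns the model's response message serialised as an indented JSON string.
print(converse(conversation, provider="groq", model="llama3-8b-8192", key="YOUR_GROQ_API_KEY"))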
requirements.txt
ADDED
@@ -0,0 +1,9 @@
+langchain
+langchain_groq
+langchain-google-genai
+pillow
+langchain-community
+jq
+flask
+mdtex2html
+protobuf