# NOTE(review): the three lines originally here ("Spaces:" / "Runtime error" /
# "Runtime error") were Hugging Face Spaces status output accidentally pasted
# into the source file — they are not code and broke the module at import time.
# --- Imports and environment configuration ----------------------------------
# Flask serves the HTTP API; LangChain + HuggingFace Hub provide the LLM used
# to summarize web pages.
import os
import sys

import flask
import requests
from bs4 import BeautifulSoup
from dotenv import load_dotenv
from flask import Flask, request, jsonify
from huggingface_hub import InferenceClient
from langchain.chains.summarize import load_summarize_chain
from langchain.document_loaders import WebBaseLoader

# Populate os.environ from a local .env file BEFORE the reads below, so the
# HF Space (or any deployment) can configure credentials without code changes.
load_dotenv()

from langchain import HuggingFaceHub

# HuggingFace Hub API token and model repository, both supplied via env vars.
# NOTE(review): either may be None if the variables are unset — HuggingFaceHub
# construction below will then fail; confirm both are set in the deployment.
hf_token = os.environ.get('HUGGINGFACEHUB_API_TOKEN')
repo_id = os.environ.get('repo_id')
# Build the LLM client and summarization chain once at import time so every
# request reuses the same objects.
# NOTE(review): the original comments said min_length/max_new_tokens were tuned
# per-model (Llama2 vs StarChat), and eos_token_id=49155 was marked as a
# StarChat setting — confirm these kwargs actually match the configured
# repo_id before relying on the output quality.
llm = HuggingFaceHub(
    repo_id=repo_id,
    huggingfacehub_api_token=hf_token,
    model_kwargs={
        "min_length": 1024,
        "max_new_tokens": 3072,
        "do_sample": True,
        "temperature": 0.1,
        "top_k": 50,
        "top_p": 0.95,
        "eos_token_id": 49155,
    },
)

# "stuff" chain type: concatenates all loaded documents into a single prompt.
chain = load_summarize_chain(llm, chain_type="stuff")

app = Flask(__name__)
# NOTE(review): the original defined this function but never registered it as
# a route, so it was unreachable dead code; confirm the intended path/method.
@app.route('/', methods=['POST'])
def home_api():
    """Deployment health-check endpoint.

    Logs the posted 'user_question' (if any) and returns a fixed success
    message confirming the Flask app is deployed and reachable.
    """
    data = request.get_json()
    # .get() instead of data['user_question'] so a payload without that key
    # (or a non-JSON body yielding None) no longer raises and 500s.
    user_query = data.get('user_question', '') if data else ''
    print(user_query)
    return {"Message": "Flask Home API Deploy Success on HF"}
# NOTE(review): the original defined this function but never registered it as
# a route, so it was unreachable; confirm the intended path with the caller.
@app.route('/chat', methods=['POST'])
def chat():
    """Summarize the web page at the URL posted as 'user_question'.

    Expects JSON {"user_question": "<url>"}. Returns {"response": <summary>}
    on success, or {"response": "Wrong URL or URL not parsable."} when the
    input is empty or the page cannot be loaded/summarized.
    """
    data = request.get_json()
    user_query = data.get('user_question', '') if data else ''
    print(user_query)
    # The original chained five redundant emptiness checks; a strip() test
    # covers empty and whitespace-only input. It also fell through with an
    # implicit None return (a Flask error) — return a JSON error instead.
    if not user_query.strip():
        return jsonify({'response': "Wrong URL or URL not parsable."})
    try:
        # BUG FIX: the original never assigned `loader` (the assignment was
        # commented out), so every request raised NameError and landed in the
        # except branch — the "Runtime error" seen on the Space. Load the
        # user-supplied URL here.
        loader = WebBaseLoader(user_query)
        docs = loader.load()
        result = chain.run(docs)
        print("AI Summarization: " + result)
        return jsonify({'response': result})
    except Exception:
        err_msg = "Wrong URL or URL not parsable."
        print(err_msg)
        return jsonify({'response': err_msg})
# (Removed: commented-out remnants of the earlier Streamlit prototype — the
#  st.text_input URL prompt, the WebBaseLoader / st.spinner / summarize flow,
#  and its try/except error display — all superseded by the Flask endpoints
#  above. Recover from version control if needed.)