import os
import sys
import subprocess

# Fetch the quantized ONNX export of ChatGLM-6B on first run, then make the
# repo's Python code importable.
if not os.path.exists("ChatGLM-6b-onnx-u8s8"):
    subprocess.run(["git", "lfs", "install"])
    subprocess.run(["git", "clone", "https://huggingface.co/K024/ChatGLM-6b-onnx-u8s8"])
    os.chdir("ChatGLM-6b-onnx-u8s8")
    subprocess.run(["pip", "install", "-r", "requirements.txt"])
    sys.path.append(os.getcwd())
else:
    sys.path.append(os.path.join(os.getcwd(), "ChatGLM-6b-onnx-u8s8"))

from model import ChatGLMModel  # , chat_template

model = ChatGLMModel()
# history = []

# Sampling parameters for generation.
max_tokens = 2048
temperature = 1.0
top_p = 0.7
top_k = 50

from typing import Any, List, Mapping, Optional

from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM


class CustomLLM(LLM):
    """Wrap the ONNX ChatGLM model in LangChain's LLM interface."""

    model: ChatGLMModel
    # history: List

    @property
    def _llm_type(self) -> str:
        return "custom"

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
    ) -> str:
        if stop is not None:
            raise ValueError("stop kwargs are not permitted.")
        # prompt = chat_template(self.history, prompt)
        # generate_iterate streams partial outputs; exhaust it and keep the
        # final (complete) answer.
        answer = ""
        for answer in self.model.generate_iterate(
            prompt,
            max_generated_tokens=max_tokens,
            top_k=top_k,
            top_p=top_p,
            temperature=temperature,
        ):
            pass
        # self.history = self.history + [(prompt, answer)]
        return answer

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        return {"model": "ChatGLMModel"}


llm = CustomLLM(model=model)

import gradio as gr
from langchain.prompts import PromptTemplate
from langchain.docstore.document import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.chains.summarize import load_summarize_chain
# from langchain.chains.question_answering import load_qa_chain
# from langchain.embeddings import HuggingFaceEmbeddings
# from langchain.vectorstores import Chroma

# embeddings = HuggingFaceEmbeddings()

# Prompts stay in Chinese because ChatGLM is a Chinese-language model.
# query gloss: "Summarize and give the key points as a bulleted list"
query = "總結並以點列形式舉出重點"

# prompt_template gloss: "Summarize the text below and list the key points:
# {text} -- Summary and key points:"
prompt_template = """總結下文並列舉出重點:

{text}

摘要及各項重點:"""
PROMPT = PromptTemplate(template=prompt_template, input_variables=["text"])

# chain = load_summarize_chain(llm, chain_type="stuff", prompt=PROMPT)
chain = load_summarize_chain(llm, chain_type="map_reduce", map_prompt=PROMPT, combine_prompt=PROMPT)

# Alternative "refine" chain (prompt gloss: "Your task is to produce a summary
# and list every key point; we already have {existing_answer}; refine it with
# the additional context in {text}, or repeat the original if the new material
# adds nothing."):
# refine_template = (
#     "你的任務是整理出一段摘要以及例舉所有重點\n"
#     "我們之前已經整理出這些內容: {existing_answer}\n"
#     "請再整合這些摘要並將重點整理到一個列表"
#     "(如果需要) 下文這裡有更多的參考資料:\n"
#     "------------\n"
#     "{text}\n"
#     "------------\n"
#     "根據新的資料,完善原有的摘要和重點列表"
#     "如果新資料對已經整理出的文字沒有補充,請重複原來的重點文字。"
# )
# refine_prompt = PromptTemplate(
#     input_variables=["existing_answer", "text"],
#     template=refine_template,
# )
# chain = load_summarize_chain(llm, chain_type="refine", question_prompt=PROMPT, refine_prompt=refine_prompt)
# chain = load_qa_chain(llm, chain_type="map_reduce", map_prompt=PROMPT, combine_prompt=PROMPT)
# chain = load_qa_chain(llm, chain_type="refine", question_prompt=PROMPT, refine_prompt=refine_prompt)


def greet(text):
    docs = [Document(page_content=text)]
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=256,    # maximum characters per chunk
        chunk_overlap=32,  # characters of overlap between adjacent chunks
        length_function=len,
    )
    texts = text_splitter.split_documents(docs)
    # docsearch = Chroma.from_documents(texts, embeddings).as_retriever()
    # docs = docsearch.get_relevant_documents(query)
    return chain.run(texts)
    # return chain.run(input_documents=texts, question=query)


iface = gr.Interface(fn=greet, inputs=gr.Textbox(lines=20, placeholder="Text Here..."), outputs="text")
iface.launch()
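
# Usage sketch (the filename app.py and the input path article.txt are
# illustrative, not part of the repo): run `python app.py`. The first run
# clones the quantized model from Hugging Face (several GB of weights via
# git-lfs); afterwards Gradio serves the summarizer locally, at
# http://127.0.0.1:7860 by default. Paste text into the textbox; the
# map_reduce chain splits it into 256-character chunks, summarizes each with
# PROMPT, then merges the partial summaries with the same prompt.
#
# To exercise the chain without the UI (kept commented so launch() stays the
# script's only entry point):
#
#     sample_text = open("article.txt", encoding="utf-8").read()
#     print(greet(sample_text))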