# https://chat.lmsys.org/?leaderboard
# https://huggingface.co/spaces/joyson072/LLm-Langchain/blob/main/app.py
from langchain.llms import HuggingFaceHub

# for the chain and prompt
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain

# https://colab.research.google.com/drive/1hrS6_g14EcOD4ezwSGlGX2zxJegX5uNX#scrollTo=NUwUR9U7qkld
# HuggingFaceHub reads the HUGGINGFACEHUB_API_TOKEN environment variable for authentication.
llm_hf = HuggingFaceHub(
    repo_id="HenryJJ/vincua-13b",
    model_kwargs={"temperature": 0.9},
)

# Quick smoke test: send a single prompt straight to the hosted model.
text = "Why did the chicken cross the road?"
output_question_1 = llm_hf(text)
print(output_question_1)

###
## FACT EXTRACTION
###

# Prompt that asks the model to pull out numbered, opinion-free facts.
fact_extraction_prompt = PromptTemplate(
    input_variables=["text_input"],
    template=(
        "Extract the key facts out of this text. Don't include opinions. "
        "Give each fact a number and keep them short sentences. :\n\n {text_input}"
    ),
)

fact_extraction_chain = LLMChain(llm=llm_hf, prompt=fact_extraction_prompt)

# Run fact extraction over the question plus the model's answer from above.
facts = fact_extraction_chain.run(text + " " + output_question_1)
print(facts)
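
# --- Example: fact extraction on a longer passage ---------------------------
# A minimal sketch applying the same chain to article-style input, which is
# what the fact-extraction prompt is intended for. The sample_text below is a
# placeholder added for illustration; it is not from the notebook linked above.
sample_text = (
    "The James Webb Space Telescope launched on 25 December 2021. "
    "It observes in the infrared and orbits the Sun near the second "
    "Lagrange point, roughly 1.5 million kilometres from Earth. "
    "Its segmented primary mirror is about 6.5 metres across."
)
sample_facts = fact_extraction_chain.run(sample_text)
print(sample_facts)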