###
# Elo-based comparison of models
# https://chat.lmsys.org/?leaderboard
###
##
# Libraries
# Langchain - https://python.langchain.com/docs/get_started/introduction.html
# Used for simplifying LLM calls and task chaining
##
import langchain
import transformers
# https://huggingface.co/spaces/joyson072/LLm-Langchain/blob/main/app.py
from langchain.llms import HuggingFaceHub
# for the chain and prompt
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
# import model class and tokenizer
from transformers import BlenderbotTokenizer, BlenderbotForConditionalGeneration
# Gradio powers the web UI used further down, so import it with the other libraries
import gradio as gr
###
# Definitions of the different purpose prompts
# https://huggingface.co/spaces/Chris4K/rlhf-arena/edit/main/app.py
####
def prompt_human_instruct(system_msg, history):
    return system_msg.strip() + "\n" + \
        "\n".join(["\n".join(["###Human: "+item[0], "###Assistant: "+item[1]])
                   for item in history])

def prompt_instruct(system_msg, history):
    return system_msg.strip() + "\n" + \
        "\n".join(["\n".join(["### Instruction: "+item[0], "### Response: "+item[1]])
                   for item in history])

def prompt_chat(system_msg, history):
    return system_msg.strip() + "\n" + \
        "\n".join(["\n".join(["USER: "+item[0], "ASSISTANT: "+item[1]])
                   for item in history])

def prompt_roleplay(system_msg, history):
    return "<|system|>" + system_msg.strip() + "\n" + \
        "\n".join(["\n".join(["<|user|>"+item[0], "<|model|>"+item[1]])
                   for item in history])
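# Example usage of the prompt builders (the system message and history here
# are illustrative placeholders, not part of the original app).
# `history` is a list of (user_message, assistant_message) pairs.
example_history = [("Hi, who are you?", "I am a helpful assistant.")]
print(prompt_chat("You are a friendly chatbot.", example_history))
# Prints:
# You are a friendly chatbot.
# USER: Hi, who are you?
# ASSISTANT: I am a helpful assistant.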
####
## Sentiment models
# https://huggingface.co/spaces/CK42/sentiment-model-comparison
# Models 1 and 4 seem best for German
####
model_id_1 = "nlptown/bert-base-multilingual-uncased-sentiment"
model_id_2 = "microsoft/deberta-xlarge-mnli"
model_id_3 = "distilbert-base-uncased-finetuned-sst-2-english"
model_id_4 = "lordtt13/emo-mobilebert"
model_id_5 = "juliensimon/reviews-sentiment-analysis"
model_id_6 = "sbcBI/sentiment_analysis_model"
model_id_7 = "oliverguhr/german-sentiment-bert"
# https://colab.research.google.com/drive/1hrS6_g14EcOD4ezwSGlGX2zxJegX5uNX#scrollTo=NUwUR9U7qkld
#llm_hf_sentiment = HuggingFaceHub(
# repo_id= model_id_7,
# model_kwargs={"temperature":0.9 }
#)
from transformers import pipeline

# The Transformers task name is "sentiment-analysis"; the pipeline returns a
# list of {"label": ..., "score": ...} dicts, so predict() reads the label.
pipe = pipeline("sentiment-analysis", model=model_id_7)
#pipe = pipeline("translation", model="Helsinki-NLP/opus-mt-en-es")

def predict(text):
    return pipe(text)[0]["label"]
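# Illustrative output shape: pipe("Das ist großartig!") returns something like
# [{'label': 'positive', 'score': 0.99}]; the exact label names come from the
# model's config (oliverguhr/german-sentiment-bert uses positive/negative/neutral).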
demo = gr.Interface(
    fn=predict,
    inputs="text",
    outputs="text",
)
# demo.launch() is deliberately not called here; the fact-extraction
# interface at the bottom of the file is the one that gets launched.
# Load the hosted sentiment model as a callable Gradio interface. Note the
# "models/" prefix that gr.load expects, and the distinct variable name so
# the sentiment() function below does not shadow (and recurse into) it.
sentiment_model = gr.load("models/" + model_id_7)

def sentiment(message):
    sentiment_label = sentiment_model(message)
    print(sentiment_label)
    return sentiment_label
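# Example (illustrative; requires network access to the hosted model):
# sentiment("Der Film war ausgezeichnet!")  # -> e.g. "positive"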
#sentiment_prompt = PromptTemplate(
# input_variables=["text_input"],
# template="Extract the key facts out of this text. Don't include opinions. Give each fact a number and keep them short sentences. :\n\n {text_input}"
#)
#def sentiment ( message):
# sentiment_chain = LLMChain(llm=llm_hf_sentiment, prompt=sentiment_prompt)
# facts = sentiment_chain.run(message)
# print(facts)
# return facts
####
## Chat models
# https://huggingface.co/spaces/CK42/sentiment-model-comparison
# Model 1 seems best for testing
####
chat_model_facebook_blenderbot_400M_distill = "facebook/blenderbot-400M-distill"
chat_model_HenryJJ_vincua_13b = "HenryJJ/vincua-13b"
text = "Why did the chicken cross the road?"
#output_question_1 = llm_hf(text)
#print(output_question_1)
###
## FACT EXTRACTION
###
# https://colab.research.google.com/drive/1hrS6_g14EcOD4ezwSGlGX2zxJegX5uNX#scrollTo=NUwUR9U7qkld
llm_factextract = HuggingFaceHub(
    # repo_id="google/flan-ul2",
    repo_id="google/flan-t5-small",
    model_kwargs={"temperature": 0.1,
                  "max_new_tokens": 250})
fact_extraction_prompt = PromptTemplate(
    input_variables=["text_input"],
    template="Extract the key facts out of this text. Don't include opinions. Give each fact a number and keep them short sentences:\n\n {text_input}"
)
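# Rendered prompt for a given input (illustrative example text):
# print(fact_extraction_prompt.format(text_input="Berlin is the capital of Germany."))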
def factextraction(message):
    fact_extraction_chain = LLMChain(llm=llm_factextract, prompt=fact_extraction_prompt)
    facts = fact_extraction_chain.run(message)
    print(facts)
    return facts
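# Example call (illustrative input; HuggingFaceHub needs a valid
# HUGGINGFACEHUB_API_TOKEN in the environment):
# factextraction("The Eiffel Tower is 330 metres tall. It was completed in 1889.")
# -> e.g. "1. The Eiffel Tower is 330 metres tall. 2. It was completed in 1889."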
####
## Chat model
# Model 1 (BlenderBot) seems best for testing
####
# download and set up the model and tokenizer
model_name = 'facebook/blenderbot-400M-distill'
tokenizer = BlenderbotTokenizer.from_pretrained(model_name)
model = BlenderbotForConditionalGeneration.from_pretrained(model_name)

def func(message):
    inputs = tokenizer(message, return_tensors="pt")
    result = model.generate(**inputs)
    # skip_special_tokens=True strips the <s>/</s> markers from the reply
    return tokenizer.decode(result[0], skip_special_tokens=True)
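# Quick local sanity check with the example question defined above
# (kept commented out so the Space does not run generation at startup):
# print(func(text))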
# The launched app serves the fact-extraction chain defined above
app = gr.Interface(fn=factextraction, inputs="textbox", outputs="textbox", title="Fact Extraction Bot")
app.launch()