# 1. The RoBERTa base model, fine-tuned on the SQuAD 2.0 dataset.
# It has been trained on question-answer pairs, including unanswerable questions, for the task of question answering.
# from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline
# import gradio as grad
# import ast
# mdl_name = "deepset/roberta-base-squad2"
# my_pipeline = pipeline('question-answering', model=mdl_name, tokenizer=mdl_name)
# def answer_question(question, context):
#     text = "{" + "'question': '" + question + "','context': '" + context + "'}"
#     di = ast.literal_eval(text)
#     response = my_pipeline(di)
#     return response
# grad.Interface(answer_question, inputs=["text","text"], outputs="text").launch()
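# Note: building the input as a string and parsing it with ast.literal_eval breaks if the question
# or context contains quotes. A simpler sketch (assuming the question-answering pipeline's standard
# question/context keyword arguments) would be:
# def answer_question(question, context):
#     response = my_pipeline(question=question, context=context)
#     return response["answer"]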
#---------------------------------------------------------------------------------
# 2. Same task, different model.
# from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline
# import gradio as grad
# import ast
# mdl_name = "distilbert-base-cased-distilled-squad"
# my_pipeline = pipeline('question-answering', model=mdl_name, tokenizer=mdl_name)
# def answer_question(question, context):
#     text = "{" + "'question': '" + question + "','context': '" + context + "'}"
#     di = ast.literal_eval(text)
#     response = my_pipeline(di)
#     return response
# grad.Interface(answer_question, inputs=["text","text"], outputs="text").launch()
#---------------------------------------------------------------------------------
# 3. Different task: language translation.
# from transformers import pipeline
# import gradio as grad
# First model translates English to German.
# mdl_name = "Helsinki-NLP/opus-mt-en-de"
# opus_translator = pipeline("translation", model=mdl_name)
# def translate(text):
#     response = opus_translator(text)
#     return response
# grad.Interface(translate, inputs=["text"], outputs="text").launch()
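# The translation pipeline returns a list of dicts rather than a plain string. To display only the
# translated text, the function could unpack the 'translation_text' field, as in this sketch
# (based on the usual pipeline output format):
# def translate(text):
#     response = opus_translator(text)
#     return response[0]["translation_text"]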
#----------------------------------------------------------------------------------
# 4. Language translation without pipeline API.
# Second model translates English to French.
# from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
# import gradio as grad
# mdl_name = "Helsinki-NLP/opus-mt-en-fr"
# mdl = AutoModelForSeq2SeqLM.from_pretrained(mdl_name)
# my_tkn = AutoTokenizer.from_pretrained(mdl_name)
# def translate(text):
#     inputs = my_tkn(text, return_tensors="pt")
#     trans_output = mdl.generate(**inputs)
#     response = my_tkn.decode(trans_output[0], skip_special_tokens=True)
#     return response
# txt = grad.Textbox(lines=1, label="English", placeholder="English Text here")
# out = grad.Textbox(lines=1, label="French")
# grad.Interface(translate, inputs=txt, outputs=out).launch()
#-----------------------------------------------------------------------------------
# 5. Different task: abstractive summarization.
# Abstractive summarization is harder than extractive summarization, which pulls key sentences
# out of a document and combines them to form a "summary." Because abstractive summarization
# paraphrases the text rather than copying sentences, it is also slower;
# however, it has the potential to produce a more polished and coherent summary.
# from transformers import PegasusForConditionalGeneration, PegasusTokenizer
# import gradio as grad
# mdl_name = "google/pegasus-xsum"
# pegasus_tkn = PegasusTokenizer.from_pretrained(mdl_name)
# mdl = PegasusForConditionalGeneration.from_pretrained(mdl_name)
# def summarize(text):
#     tokens = pegasus_tkn(text, truncation=True, padding="longest", return_tensors="pt")
#     txt_summary = mdl.generate(**tokens)
#     response = pegasus_tkn.batch_decode(txt_summary, skip_special_tokens=True)
#     return response
# txt = grad.Textbox(lines=10, label="English", placeholder="English Text here")
# out = grad.Textbox(lines=10, label="Summary")
# grad.Interface(summarize, inputs=txt, outputs=out).launch()
#------------------------------------------------------------------------------------------
# 6. The same model, tuned with a few generation parameters: num_return_sequences=5, max_length=200, temperature=1.5, num_beams=10.
# from transformers import PegasusForConditionalGeneration, PegasusTokenizer
# import gradio as grad
# mdl_name = "google/pegasus-xsum"
# pegasus_tkn = PegasusTokenizer.from_pretrained(mdl_name)
# mdl = PegasusForConditionalGeneration.from_pretrained(mdl_name)
# def summarize(text):
#     tokens = pegasus_tkn(text, truncation=True, padding="longest", return_tensors="pt")
#     translated_txt = mdl.generate(**tokens, num_return_sequences=5, max_length=200, temperature=1.5, num_beams=10)
#     response = pegasus_tkn.batch_decode(translated_txt, skip_special_tokens=True)
#     return response
# txt = grad.Textbox(lines=10, label="English", placeholder="English Text here")
# out = grad.Textbox(lines=10, label="Summary")
# grad.Interface(summarize, inputs=txt, outputs=out).launch()
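# With num_return_sequences=5, batch_decode returns five candidate summaries, so the Textbox shows a
# raw Python list. To display one candidate per line instead, the function could join them, as in this
# sketch (an alternative, not part of the original code):
# def summarize(text):
#     tokens = pegasus_tkn(text, truncation=True, padding="longest", return_tensors="pt")
#     txt_summary = mdl.generate(**tokens, num_return_sequences=5, max_length=200, temperature=1.5, num_beams=10)
#     candidates = pegasus_tkn.batch_decode(txt_summary, skip_special_tokens=True)
#     return "\n".join(f"{i + 1}. {s}" for i, s in enumerate(candidates))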
#-----------------------------------------------------------------------------------
# 7. Zero-Shot Learning:
# Zero-shot learning, as the name implies, means using a pretrained model, trained on one set of data,
# for inference on a different set of data that it has not seen during training. For example, we can take
# a model from Hugging Face that was trained on a certain dataset and use it for inference on examples it has never seen before.
# Zero-shot classification implementations are most frequently found among transformer models;
# the Hugging Face library hosts more than 60 transformer models for zero-shot classification.
# One additional thing springs to mind when we discuss zero-shot text classification: few-shot classification.
# It is very similar, but in contrast with zero-shot classification, few-shot classification makes use of
# a small number of labeled samples during the training process. A well-known example of a few-shot classifier
# is OpenAI's GPT-3.
# Deploying the following code works but comes with a warning: "No model was supplied, defaulted to
# facebook/bart-large-mnli and revision c626438 (https://huggingface.co/facebook/bart-large-mnli).
# Using a pipeline without specifying a model name and revision in production is not recommended."
# from transformers import pipeline
# import gradio as grad
# zero_shot_classifier = pipeline("zero-shot-classification")
# def classify(text, labels):
#     classifier_labels = labels.split(",")
#     # e.g. ["software", "politics", "love", "movies", "emergency", "advertisement", "sports"]
#     response = zero_shot_classifier(text, classifier_labels)
#     return response
# txt=grad.Textbox(lines=1, label="English", placeholder="text to be classified")
# labels=grad.Textbox(lines=1, label="Labels", placeholder="comma separated labels")
# out=grad.Textbox(lines=1, label="Classification")
# grad.Interface(classify, inputs=[txt,labels], outputs=out).launch()
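# To avoid the warning above, the model (and optionally the revision) can be pinned explicitly.
# A minimal sketch, using the facebook/bart-large-mnli checkpoint and revision mentioned in the warning:
# zero_shot_classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli", revision="c626438")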
#-----------------------------------------------------------------------------------
# 8. Text Generation Task/Models
# The earliest text generation models were based on Markov chains. A Markov chain is like a state machine
# in which the next state is predicted using only the previous state, much as in the bigram models we studied earlier.
# After Markov chains came recurrent neural networks (RNNs), which are based on recurrent neural network
# architectures and can retain a greater context of the text seen so far.
# Nevertheless, the amount of information these networks can remember is limited, and they are also difficult to train,
# which means they are not effective at generating lengthy texts. To counter these issues with RNNs, LSTM architectures
# evolved, which can capture long-term dependencies in text. Finally, we came to transformers, whose decoder
# architecture became popular for generative models such as the text generation models used here.
# from transformers import GPT2LMHeadModel,GPT2Tokenizer
# import gradio as grad
# mdl = GPT2LMHeadModel.from_pretrained('gpt2')
# gpt2_tkn=GPT2Tokenizer.from_pretrained('gpt2')
# def generate(starting_text):
#     tkn_ids = gpt2_tkn.encode(starting_text, return_tensors='pt')
#     # When no generation parameter is specified, the model performs a greedy search for the next word,
#     # i.e., it always selects the word with the highest probability. This process is deterministic,
#     # so the resulting text is the same every time the same input and parameters are used.
#     # The num_beams parameter enables beam search: it keeps the num_beams most probable sequences at each
#     # step and finally returns the sequence with the highest overall probability.
#     # The do_sample parameter selects the next word at random from the probability distribution.
#     # The temperature parameter controls how greedy the generative model is.
#     # If the temperature is low, the probabilities of words other than the most likely one are suppressed.
#     # As a result, the model will probably output the most correct text, but it will be rather monotonous,
#     # with only a small amount of variation.
#     # If the temperature is high, the model has a greater chance of outputting words other than those with
#     # the highest probability. The generated text will feature a greater variety of topics, but it is also
#     # more likely to be nonsense and contain grammatical errors.
#     # Lowering the temperature (1.5 --> 0.1) therefore makes the output less varied.
#     # Note: no_repeat_ngram_size expects an integer n (the size of the n-grams that may not repeat), not a boolean.
#     gpt2_tensors = mdl.generate(tkn_ids, max_length=100, no_repeat_ngram_size=2, num_beams=3, do_sample=True, temperature=0.1)
#     response = ""
#     for i, x in enumerate(gpt2_tensors):
#         response = response + f"{i}: {gpt2_tkn.decode(x, skip_special_tokens=True)}"  # decode tensors into text
#     return gpt2_tensors, response
# txt=grad.Textbox(lines=1, label="English", placeholder="English Text here")
# out_tensors=grad.Textbox(lines=1, label="Generated Tensors")
# out_text=grad.Textbox(lines=1, label="Generated Text")
# grad.Interface(generate, inputs=txt, outputs=[out_tensors, out_text]).launch()
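# To see the effect of these parameters, one could generate from the same prompt with different decoding
# strategies and compare the outputs. A small sketch (not part of the original app; it reuses mdl and gpt2_tkn):
# def compare_decoding(starting_text):
#     tkn_ids = gpt2_tkn.encode(starting_text, return_tensors='pt')
#     greedy = mdl.generate(tkn_ids, max_length=50)                                      # deterministic greedy search
#     beams = mdl.generate(tkn_ids, max_length=50, num_beams=5, no_repeat_ngram_size=2)  # beam search
#     sampled = mdl.generate(tkn_ids, max_length=50, do_sample=True, temperature=1.2)    # random sampling
#     return [gpt2_tkn.decode(t[0], skip_special_tokens=True) for t in (greedy, beams, sampled)]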
#-----------------------------------------------------------------------------------
# 9. Text Generation: different model "distilgpt2"
from transformers import pipeline, set_seed
import gradio as grad
gpt2_pipe = pipeline('text-generation', model='distilgpt2')
set_seed(42)
def generate(starting_text):
    response = gpt2_pipe(starting_text, max_length=20, num_return_sequences=5)
    return response
txt=grad.Textbox(lines=1, label="English", placeholder="English Text here")
out=grad.Textbox(lines=1, label="Generated Text")
grad.Interface(generate, inputs=txt, outputs=out).launch()
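# The text-generation pipeline returns a list of dicts, one per returned sequence, each with a
# 'generated_text' key. To show the five continuations one per line instead of the raw list, the
# function above could be changed as in this sketch (an alternative, not what the running app does):
# def generate(starting_text):
#     response = gpt2_pipe(starting_text, max_length=20, num_return_sequences=5)
#     return "\n".join(r["generated_text"] for r in response)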