Update app.py
app.py
CHANGED
@@ -12,46 +12,20 @@ def get_model():
    #model = AutoModelForCausalLM.from_pretrained("BigSalmon/GPTNeo1.3BPointsLincolnFormalInformal")
    #model = AutoModelForCausalLM.from_pretrained("BigSalmon/MediumInformalToFormalLincoln")
    #model = AutoModelForCausalLM.from_pretrained("BigSalmon/GPTNeo350MInformalToFormalLincoln7")
-   model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincolnConciseWordy")
+   #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincolnConciseWordy")
+   model = AutoModelForCausalLM.from_pretrained("BigSalmon/MediumInformalToFormalLincoln2")
    tokenizer = AutoTokenizer.from_pretrained("BigSalmon/Points2")
    return model, tokenizer

model, tokenizer = get_model()

- st.text('''
-
- ***
-
- Translated into the Style of Abraham Lincoln: at a time when nintendo has become inflexible, ( stubbornly bent on / firmly set on / unyielding in its insistence on / steadfastly transfixed by / uncompromising in its commitment to / rigidly decided on ) consoles that are tethered to a fixed iteration, sega diligently curates its legacy of classic video games on handheld devices.
- informal english: garage band has made people who know nothing about music good at creating music.
-
- ***
-
- Translated into the Style of Abraham Lincoln: garage band ( offers the uninitiated in music the ability to produce professional-quality compositions / catapults those for whom music is an uncharted art the ability the realize masterpieces / stimulates music novice's competency to yield sublime arrangements / begets individuals of rudimentary musical talent the proficiency to fashion elaborate suites ).
- informal english: chrome extensions can make doing regular tasks much easier to get done.
-
- ***
-
- Translated into the Style of Abraham Lincoln: corn fields ( permeate illinois / span the state of illinois / ( occupy / persist in ) all corners of illinois / line the horizon of illinois / envelop the landscape of illinois ), manifesting themselves visibly as one ventures beyond chicago.
- informal english:
-
- ***
-
- OR
-
- ***
- - declining viewership facing the nba.
- - does not have to be this way.
- - in fact, many solutions exist.
- - the four point line would surely draw in eyes.
- text: failing to draw in the masses, the nba has ( fallen into / succumb to / bowed to ) disrepair. such does not have to be the case, however. in fact, a myriad of simple, relatively cheap ( solutions / interventions / enhancements ) could revive the league. the addition of the much-hyped four-point line would surely juice viewership.
- ***
- ''')
+ st.text('''For Prompt Templates: https://huggingface.co/BigSalmon/InformalToFormalLincoln35''')

temp = st.sidebar.slider("Temperature", 0.7, 1.5)
number_of_outputs = st.sidebar.slider("Number of Outputs", 5, 50)
lengths = st.sidebar.slider("Length", 3, 10)
bad_words = st.text_input("Words You Do Not Want Generated", " core lemon height time ")
+ logs_outputs = st.sidebar.slider("Logit Outputs", 50, 300)

def run_generate(text, bad_words):
    yo = []
@@ -95,6 +69,6 @@ with st.form(key='my_form'):
    logits, past_key_values = model(myinput, past_key_values = past_key_values, return_dict=False)
    logits = logits[0,-1]
    probabilities = torch.nn.functional.softmax(logits)
-   best_logits, best_indices = logits.topk(
+   best_logits, best_indices = logits.topk(logs_outputs)
    best_words = [tokenizer.decode([idx.item()]) for idx in best_indices]
    st.write(best_words)
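A side note on the slider calls used throughout: st.sidebar.slider is given only a label, a minimum, and a maximum, so each widget starts at its minimum value (0.7 for Temperature, 5 for Number of Outputs, 3 for Length, 50 for the new Logit Outputs control). A minimal sketch of that call pattern, separate from the Space's own code:

import streamlit as st

# With st.slider(label, min_value, max_value) and no explicit value,
# the widget initialises at min_value; int bounds step by 1, float bounds by 0.01 by default.
temp = st.sidebar.slider("Temperature", 0.7, 1.5)                   # starts at 0.7
number_of_outputs = st.sidebar.slider("Number of Outputs", 5, 50)   # starts at 5
lengths = st.sidebar.slider("Length", 3, 10)                        # starts at 3
logs_outputs = st.sidebar.slider("Logit Outputs", 50, 300)          # starts at 50
st.sidebar.write(temp, number_of_outputs, lengths, logs_outputs)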
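For context on what the new logs_outputs slider feeds, here is a minimal, self-contained sketch of a slider-driven top-k next-token readout in the same style. The gpt2 checkpoint, the st.cache_resource decorator, the button label, and the default prompt are illustrative assumptions, not part of this commit, which keeps the BigSalmon checkpoints and the Space's existing form-based layout.

import streamlit as st
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

@st.cache_resource  # assumption: the Space may use an older caching decorator instead
def get_model():
    # "gpt2" stands in here for the BigSalmon checkpoints loaded in the Space.
    model = AutoModelForCausalLM.from_pretrained("gpt2")
    tokenizer = AutoTokenizer.from_pretrained("gpt2")
    return model, tokenizer

model, tokenizer = get_model()
logs_outputs = st.sidebar.slider("Logit Outputs", 50, 300)
text = st.text_input("Prompt", "informal english: chrome extensions can make doing regular tasks much easier to get done.")

if st.button("Show top tokens"):
    myinput = tokenizer.encode(text, return_tensors="pt")
    with torch.no_grad():
        logits = model(myinput).logits                           # shape (1, seq_len, vocab_size)
    logits = logits[0, -1]                                       # scores for the next token only
    probabilities = torch.nn.functional.softmax(logits, dim=-1)  # mirrors the Space's softmax call; unused below
    best_logits, best_indices = logits.topk(logs_outputs)        # the slider sets how many candidates to show
    best_words = [tokenizer.decode([idx.item()]) for idx in best_indices]
    st.write(best_words)

Wiring the slider directly into topk makes the size of the readout a user-facing choice rather than a hard-coded constant, which is the point of the second hunk above.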