ahmadmac committed on
Commit 089fa15 · verified · 1 Parent(s): e55cc4d

Update app.py

Files changed (1)
  1. app.py +22 -6
app.py CHANGED
@@ -7,12 +7,28 @@ st.set_page_config(page_title="GPT-2 Text Generator", layout="centered")
#model = AutoModelForCausalLM.from_pretrained("ahmadmac/Pretrained-GPT2")
tokenizer = AutoTokenizer.from_pretrained("ahmadmac/Pretrain-CSV-GPT2")
model = AutoModelForCausalLM.from_pretrained("ahmadmac/results")
+ #google_api_key=
+ import google.generativeai as genai
+ GOOGLE_API_KEY=os.environ["google_api_key"]
+ genai.configure(api_key=GOOGLE_API_KEY)
+
+ # def generate_text(prompt):
+ #     inputs = tokenizer(prompt, return_tensors="pt")
+ #     outputs = model.generate(**inputs, max_length=50)
+ #     generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
+ #     return generated_text
+ gemini_model = genai.GenerativeModel('gemini-1.5-pro')
+
+ def generate_text(input_text):
+     input_ids = tokenizer.encode(input_text, return_tensors="pt")
+     trained_output = model.generate(input_ids, max_length=100, num_return_sequences=1, pad_token_id=tokenizer.eos_token_id)
+     trained_response = tokenizer.decode(trained_output[0], skip_special_tokens=True)
+     prompt = f"Improve this text to make it clearer and more concise: {trained_response}"
+     generated_text = gemini_model.generate_content(prompt)
+
+     return generated_text
+

- def generate_text(prompt):
-     inputs = tokenizer(prompt, return_tensors="pt")
-     outputs = model.generate(**inputs, max_length=50)
-     generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
-     return generated_text

st.title("GPT-2 Text Generator")
st.write("Enter a prompt to generate text using GPT-2")
@@ -23,6 +39,6 @@ if st.button("Generate"):
    if user_input:
        with st.spinner("Generating..."):
            generated_text = generate_text(user_input)
-             st.write(generated_text)
+             st.write(generated_text.text)
    else:
        st.warning("Please enter a prompt")
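
With this change the app generates in two stages: the fine-tuned GPT-2 checkpoint (ahmadmac/results) drafts a continuation, then Gemini ('gemini-1.5-pro') is prompted to tighten the draft. Because generate_content() returns a response object rather than a plain string, the Streamlit call becomes st.write(generated_text.text). Below is a minimal sketch of that pipeline outside Streamlit, not the full app; it assumes that import os appears near the top of app.py (outside the hunk shown) and that the google_api_key environment variable holds a valid Gemini API key. The demo prompt at the end is illustrative only.

# Minimal sketch of the two-stage pipeline introduced by this commit.
# Model IDs, prompt wording, and the environment-variable name come from
# the diff above; everything else is an assumption for illustration.
import os

import google.generativeai as genai
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("ahmadmac/Pretrain-CSV-GPT2")
model = AutoModelForCausalLM.from_pretrained("ahmadmac/results")

genai.configure(api_key=os.environ["google_api_key"])
gemini_model = genai.GenerativeModel("gemini-1.5-pro")


def generate_text(input_text):
    # Stage 1: draft a continuation with the fine-tuned GPT-2 checkpoint.
    input_ids = tokenizer.encode(input_text, return_tensors="pt")
    trained_output = model.generate(
        input_ids,
        max_length=100,
        num_return_sequences=1,
        pad_token_id=tokenizer.eos_token_id,
    )
    trained_response = tokenizer.decode(trained_output[0], skip_special_tokens=True)

    # Stage 2: ask Gemini to rewrite the draft. generate_content() returns a
    # response object; the caller reads .text, as the app does in st.write().
    prompt = f"Improve this text to make it clearer and more concise: {trained_response}"
    return gemini_model.generate_content(prompt)


if __name__ == "__main__":
    # Hypothetical usage: prints the Gemini rewrite of the GPT-2 draft.
    print(generate_text("Once upon a time").text)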