MarwanAshraf22 committed
Commit c040d38 · 1 Parent(s): 0b3905f

Update app.py

Files changed (1)
  1. app.py +17 -16
app.py CHANGED
@@ -1,36 +1,37 @@
+from transformers import AutoTokenizer
+import transformers
 import torch
 import streamlit as st
-from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
 
-model_name = "gpt2"
+model_name = "tiiuae/falcon-7b-instruct"
 tokenizer = AutoTokenizer.from_pretrained(model_name)
-model = AutoModelForCausalLM.from_pretrained(model_name)
 
-generator = pipeline(
+generator = transformers.pipeline(
     "text-generation",
-    model=model,
+    model=model_name,
     tokenizer=tokenizer,
-    device=0 if torch.cuda.is_available() else -1
+    torch_dtype=torch.bfloat16,
+    trust_remote_code=True,
+    device_map="auto"
 )
 
-# Function for text generation with filtering of repeated sequences
-def generate_text(prompt, section, max_length=200, temperature=0.7, top_k=50, repetition_penalty=1.2):
-    return generator(
+def generate_text(prompt, section, max_length=200, top_k=50, temperature=0.7):
+    sequences = generator(
         f"{section} - {prompt}",
         max_length=max_length,
         do_sample=True,
         top_k=top_k,
         temperature=temperature,
-        repetition_penalty=repetition_penalty,
         num_return_sequences=1,
         eos_token_id=tokenizer.eos_token_id,
-    )[0]["generated_text"]
+    )
+    return sequences[0]["generated_text"]
 
 # Streamlit app
 st.title("AI-Generated Blog Post")
 
 # Keyword selection input
-keywords_input = st.text_input("Step 1: Keyword Selection (Separate keywords with commas)","Artificial Intelligence")
+keywords_input = st.text_input("Step 1: Keyword Selection (Separate keywords with commas)", "Artificial Intelligence")
 keywords = [word.strip() for word in keywords_input.split(',')]
 
 # Display generated content on button click
@@ -38,11 +39,11 @@ if st.button('Generate Article'):
     if keywords_input:
 
         generated_text = " ".join(keywords)
-        intro_text = generate_text(generated_text, "Introduction", max_length=200, temperature=0.7, top_k=50)
-        body_text = generate_text(generated_text, "Body", max_length=500, temperature=0.7, top_k=50)
-        conclusion_text = generate_text(generated_text, "Conclusion", max_length=150, temperature=0.7, top_k=50)
+        intro_text = generate_text(generated_text, "Introduction", max_length=200, top_k=50, temperature=0.7)
+        body_text = generate_text(generated_text, "Body", max_length=500, top_k=50, temperature=0.7)
+        conclusion_text = generate_text(generated_text, "Conclusion", max_length=150, top_k=50, temperature=0.7)
 
-        # Displaying the sections with adjusted parameters
+        # Displaying the sections
         st.header("Introduction")
         st.write(intro_text)
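
For context, here is a minimal sketch of how the updated pipeline could be smoke-tested outside Streamlit, reusing the model name, loading options, and sampling parameters from the new app.py. It assumes enough GPU memory for the 7B checkpoint and that the accelerate package is installed, which device_map="auto" relies on; the prompt string is a made-up example, not part of the commit.

# Minimal smoke test for the new Falcon pipeline (assumptions: GPU memory for a
# 7B model and the `accelerate` package available for device_map="auto").
import torch
import transformers
from transformers import AutoTokenizer

model_name = "tiiuae/falcon-7b-instruct"
tokenizer = AutoTokenizer.from_pretrained(model_name)
generator = transformers.pipeline(
    "text-generation",
    model=model_name,
    tokenizer=tokenizer,
    torch_dtype=torch.bfloat16,   # halves memory versus float32
    trust_remote_code=True,
    device_map="auto",            # lets accelerate place layers on available devices
)

# Same call shape as generate_text() in the updated app: "<section> - <prompt>"
out = generator(
    "Introduction - Artificial Intelligence",  # hypothetical prompt for testing
    max_length=200,
    do_sample=True,
    top_k=50,
    temperature=0.7,
    num_return_sequences=1,
    eos_token_id=tokenizer.eos_token_id,
)
print(out[0]["generated_text"])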