SoSa123456 committed on
Commit cc2185f · 1 Parent(s): e049fa7

Update app.py

Files changed (1)
  app.py  +21 -32
app.py CHANGED
@@ -1,10 +1,6 @@
-
 import warnings
 from huggingface_hub import InferenceClient
 import gradio as gr
-from huggingface_hub import InferenceClient
-
-
 
 warnings.filterwarnings('ignore')
 
@@ -14,41 +10,35 @@ generator = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
 def generate_script(host_name, listener_location, causes_climate_change, co2_level, effects_climate_change,
                     sea_level_rise, warming_rate, potential_solutions, individual_role, call_to_action,
                     TOPIC, DESCRIPTION):
-    # Variables
-    introduction_template = f"{host_name}, good morning! This is {listener_location}'s local radio station. Today we're talking about an issue that affects us all - {TOPIC}. It's a pressing issue that requires our immediate attention..."
-    causes_template = f"The causes of {TOPIC} are {causes_climate_change}. Today, the level of CO2 in our atmosphere is {co2_level}, which is concerning..."
-    effects_template = f"These activities result in {effects_climate_change}, leading to drastic changes in our environment. For instance, sea levels are rising at a rate of {sea_level_rise} per year, and global temperatures are increasing at a rate of {warming_rate} per decade..."
-    solutions_template = f"But don't worry, there are solutions. {potential_solutions} are all steps we can take to mitigate these effects..."
-    role_template = f"Each one of us plays a role in combating {TOPIC}. Even small actions can make a big difference. In fact, our location, {listener_location}, is particularly vulnerable to {TOPIC} due to its geographical features..."
-    action_template = f"So, {listener_location}, why wait? Start taking steps today towards a greener future. Support local businesses that prioritize sustainability, reduce your carbon footprint, and voice your opinion to policy makers..."
-    summary_template = f"In conclusion, {TOPIC} is a serious issue that requires our immediate attention. But by understanding its causes, effects, and potential solutions, we can all play a part in mitigating its impact. Thank you for joining us today, and remember, every small action counts!"
+    try:
+        # Variables and template definitions...
 
-    # Combine templates based on the DESCRIPTION
-    prompt_template = f"""{introduction_template} {causes_template} {effects_template} {solutions_template} {role_template} {action_template} {summary_template}
-
-    TOPIC: {TOPIC}. DESCRIPTION: {DESCRIPTION}"""
+        # Combine templates based on the DESCRIPTION
+        prompt_template = f"""{introduction_template} {causes_template} {effects_template} {solutions_template} {role_template} {action_template} {summary_template}
+
+        TOPIC: {TOPIC}. DESCRIPTION: {DESCRIPTION}"""
 
-    # Generate the script using the language model
-    script = generator(prompt_template, max_length=1000)[0]['generated_text']
+        # Generate the script using the language model
+        script = generator(prompt_template, max_length=1000)[0]['generated_text']
 
-    # Split the script into sections
-    sections = script.split("\n")
+        # Split the script into sections
+        sections = script.split("\n")
 
-    # Calculate the word count for each section
-    word_counts = [len(section.split()) for section in sections]
+        # Calculate the word count for each section
+        word_counts = [len(section.split()) for section in sections]
 
-    # Check if any section exceeds the target word count
-    for i, count in enumerate(word_counts):
-        if count > 200:
-            print(f"Warning: Section {i + 1} exceeds the target word count. You may need to shorten this section.")
+        # Check if any section exceeds the target word count
+        for i, count in enumerate(word_counts):
+            if count > 200:
+                print(f"Warning: Section {i + 1} exceeds the target word count. You may need to shorten this section.")
 
-    return script
+        return script
 
-# Example usage
-#generate_script("Host Name", "Listener Location", "Causes of Climate Change", 415, "Effects of Climate Change",
-#                2.5, 1.5, "Potential Solutions", "Individual Role", "Call to Action", "Environmental Issues", "Description of the environmental issues")
-
+    except Exception as e:
+        print(f"Error: {e}")
+        return "An error occurred. Please check the console for details."
 
+# Gradio interface setup...
 iface = gr.Interface(fn=generate_script,
                      inputs=[gr.Textbox(label="Host Name", value="John"),
                              gr.Textbox(label="Listener Location", value="City"),
@@ -64,4 +54,3 @@ iface = gr.Interface(fn=generate_script,
 
 # Launch the interface
 iface.launch(debug=True)
-
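
Note on the new revision: the try/except block added in this commit replaces the seven template definitions with the placeholder comment "# Variables and template definitions...", while prompt_template still interpolates introduction_template, causes_template, and the other names, so the function would fail with a NameError as committed. The code below is a minimal, hypothetical sketch of a runnable generate_script, not part of commit cc2185f, under two stated assumptions: the template strings are restored from the previous revision, and the pipeline-style call generator(prompt_template, max_length=1000)[0]['generated_text'] is swapped for InferenceClient.text_generation(), which returns the generated text as a string (max_new_tokens=1000 stands in for the original max_length=1000).

# Hypothetical sketch only; not part of commit cc2185f.
import warnings

import gradio as gr
from huggingface_hub import InferenceClient

warnings.filterwarnings('ignore')

generator = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")


def generate_script(host_name, listener_location, causes_climate_change, co2_level, effects_climate_change,
                    sea_level_rise, warming_rate, potential_solutions, individual_role, call_to_action,
                    TOPIC, DESCRIPTION):
    # individual_role and call_to_action are accepted but unused, as in both revisions of app.py.
    try:
        # Template definitions restored from the previous revision of app.py
        introduction_template = f"{host_name}, good morning! This is {listener_location}'s local radio station. Today we're talking about an issue that affects us all - {TOPIC}. It's a pressing issue that requires our immediate attention..."
        causes_template = f"The causes of {TOPIC} are {causes_climate_change}. Today, the level of CO2 in our atmosphere is {co2_level}, which is concerning..."
        effects_template = f"These activities result in {effects_climate_change}, leading to drastic changes in our environment. For instance, sea levels are rising at a rate of {sea_level_rise} per year, and global temperatures are increasing at a rate of {warming_rate} per decade..."
        solutions_template = f"But don't worry, there are solutions. {potential_solutions} are all steps we can take to mitigate these effects..."
        role_template = f"Each one of us plays a role in combating {TOPIC}. Even small actions can make a big difference. In fact, our location, {listener_location}, is particularly vulnerable to {TOPIC} due to its geographical features..."
        action_template = f"So, {listener_location}, why wait? Start taking steps today towards a greener future. Support local businesses that prioritize sustainability, reduce your carbon footprint, and voice your opinion to policy makers..."
        summary_template = f"In conclusion, {TOPIC} is a serious issue that requires our immediate attention. But by understanding its causes, effects, and potential solutions, we can all play a part in mitigating its impact. Thank you for joining us today, and remember, every small action counts!"

        # Combine the templates into a single prompt, as in the original code
        prompt_template = f"""{introduction_template} {causes_template} {effects_template} {solutions_template} {role_template} {action_template} {summary_template}

        TOPIC: {TOPIC}. DESCRIPTION: {DESCRIPTION}"""

        # Assumed substitute for the original pipeline-style call:
        # text_generation() returns the generated text as a string.
        script = generator.text_generation(prompt_template, max_new_tokens=1000)

        # Keep the original 200-word-per-section check
        for i, section in enumerate(script.split("\n")):
            if len(section.split()) > 200:
                print(f"Warning: Section {i + 1} exceeds the target word count. You may need to shorten this section.")

        return script
    except Exception as e:
        print(f"Error: {e}")
        return "An error occurred. Please check the console for details."

The Gradio wiring shown in the diff, gr.Interface(fn=generate_script, inputs=[gr.Textbox(label="Host Name", value="John"), gr.Textbox(label="Listener Location", value="City"), ...]) followed by iface.launch(debug=True), would work unchanged with this version of the function; keeping the whole body inside the try block preserves the commit's intent of logging failures to the console while returning a friendly message to the UI.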