Pooja P committed on
Commit
fb7f1c2
·
1 Parent(s): bfe3b07

commented app.py

Browse files
Files changed (1) hide show
  1. app.py +51 -51
app.py CHANGED
@@ -1,29 +1,46 @@
1
- import os
2
- import requests
3
- from transformers import pipeline
4
- import gradio as gr
5
 
6
- # Use the token from environment variable (secret)
7
- token = os.environ.get("OPENAI_API_KEY")
8
- # generator = pipeline("text-generation", model="HuggingFaceH4/zephyr-7b-beta", token=token)
9
 
10
- # API_URL = "https://api-inference.huggingface.co/models/HuggingFaceH4/zephyr-7b-beta"
11
- API_URL = "https://api-inference.huggingface.co/models/tiiuae/falcon-rw-1b"
12
- headers = {"Authorization": f"Bearer {token}"}
13
 
14
- # generator = pipeline("text-generation", model="tiiuae/falcon-rw-1b")
15
 
16
- def query(payload):
17
- response = requests.post(API_URL, headers=headers, json=payload)
18
- return response.json()
19
 
20
- def clean_topic(topic):
21
- topic = topic.lower()
22
- if "write a blog on" in topic:
23
- topic = topic.replace("write a blog on", "").strip()
24
- elif "write a blog about" in topic:
25
- topic = topic.replace("write a blog about", "").strip()
26
- return topic.capitalize()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
27
 
28
  # def generate_blog(topic):
29
  # topic = clean_topic(topic)
@@ -32,38 +49,21 @@ def clean_topic(topic):
32
  # Include an introduction, 2–3 subheadings with paragraphs, and a conclusion.
33
  # Make it informative and conversational.
34
  # """
35
- # # output = query({"inputs": prompt})
36
- # # return output[0]["generated_text"]
37
- # # result = generator(prompt, max_length=700, do_sample=True, temperature=0.7, top_p=0.9)
38
- # # result = generator(prompt, max_length=300, do_sample=True, temperature=0.7, top_p=0.9)
39
-
40
- # # return result[0]['generated_text']
41
-
42
  # output = query({"inputs": prompt})
43
- # return output[0]["generated_text"]
44
-
45
- def generate_blog(topic):
46
- topic = clean_topic(topic)
47
- prompt = f"""
48
- Write a detailed and engaging blog post about "{topic}".
49
- Include an introduction, 2–3 subheadings with paragraphs, and a conclusion.
50
- Make it informative and conversational.
51
- """
52
- output = query({"inputs": prompt})
53
- print("API Response:", output) # <-- Add this for debugging
54
 
55
- if isinstance(output, list) and "generated_text" in output[0]:
56
- return output[0]["generated_text"]
57
- elif "error" in output:
58
- return f"Error from model: {output['error']}"
59
- else:
60
- return "Failed to generate blog. Please try again."
61
 
62
 
63
- gr.Interface(
64
- fn=generate_blog,
65
- inputs="text",
66
- outputs="text",
67
- title="AI Blog Writer"
68
- ).launch(share=True)
69
 
 
1
+ # import os
2
+ # import requests
3
+ # from transformers import pipeline
4
+ # import gradio as gr
5
 
6
+ # # Use the token from environment variable (secret)
7
+ # token = os.environ.get("OPENAI_API_KEY")
8
+ # # generator = pipeline("text-generation", model="HuggingFaceH4/zephyr-7b-beta", token=token)
9
 
10
+ # # API_URL = "https://api-inference.huggingface.co/models/HuggingFaceH4/zephyr-7b-beta"
11
+ # API_URL = "https://api-inference.huggingface.co/models/tiiuae/falcon-rw-1b"
12
+ # headers = {"Authorization": f"Bearer {token}"}
13
 
14
+ # # generator = pipeline("text-generation", model="tiiuae/falcon-rw-1b")
15
 
16
+ # def query(payload):
17
+ # response = requests.post(API_URL, headers=headers, json=payload)
18
+ # return response.json()
19
 
20
+ # def clean_topic(topic):
21
+ # topic = topic.lower()
22
+ # if "write a blog on" in topic:
23
+ # topic = topic.replace("write a blog on", "").strip()
24
+ # elif "write a blog about" in topic:
25
+ # topic = topic.replace("write a blog about", "").strip()
26
+ # return topic.capitalize()
27
+
28
+ # # def generate_blog(topic):
29
+ # # topic = clean_topic(topic)
30
+ # # prompt = f"""
31
+ # # Write a detailed and engaging blog post about "{topic}".
32
+ # # Include an introduction, 2–3 subheadings with paragraphs, and a conclusion.
33
+ # # Make it informative and conversational.
34
+ # # """
35
+ # # # output = query({"inputs": prompt})
36
+ # # # return output[0]["generated_text"]
37
+ # # # result = generator(prompt, max_length=700, do_sample=True, temperature=0.7, top_p=0.9)
38
+ # # # result = generator(prompt, max_length=300, do_sample=True, temperature=0.7, top_p=0.9)
39
+
40
+ # # # return result[0]['generated_text']
41
+
42
+ # # output = query({"inputs": prompt})
43
+ # # return output[0]["generated_text"]
44
 
45
  # def generate_blog(topic):
46
  # topic = clean_topic(topic)
 
49
  # Include an introduction, 2–3 subheadings with paragraphs, and a conclusion.
50
  # Make it informative and conversational.
51
  # """
 
 
 
 
 
 
 
52
  # output = query({"inputs": prompt})
53
+ # print("API Response:", output) # <-- Add this for debugging
 
 
 
 
 
 
 
 
 
 
54
 
55
+ # if isinstance(output, list) and "generated_text" in output[0]:
56
+ # return output[0]["generated_text"]
57
+ # elif "error" in output:
58
+ # return f"Error from model: {output['error']}"
59
+ # else:
60
+ # return "Failed to generate blog. Please try again."
61
 
62
 
63
+ # gr.Interface(
64
+ # fn=generate_blog,
65
+ # inputs="text",
66
+ # outputs="text",
67
+ # title="AI Blog Writer"
68
+ # ).launch(share=True)
69