Pooja P committed on
Commit
0d2da3e
Β·
1 Parent(s): fb7f1c2

renamed file names

Browse files
Files changed (2) hide show
  1. app.py +58 -69
  2. app1.py +69 -58
app.py CHANGED
@@ -1,69 +1,58 @@
1
- # import os
2
- # import requests
3
- # from transformers import pipeline
4
- # import gradio as gr
5
-
6
- # # Use the token from environment variable (secret)
7
- # token = os.environ.get("OPENAI_API_KEY")
8
- # # generator = pipeline("text-generation", model="HuggingFaceH4/zephyr-7b-beta", token=token)
9
-
10
- # # API_URL = "https://api-inference.huggingface.co/models/HuggingFaceH4/zephyr-7b-beta"
11
- # API_URL = "https://api-inference.huggingface.co/models/tiiuae/falcon-rw-1b"
12
- # headers = {"Authorization": f"Bearer {token}"}
13
-
14
- # # generator = pipeline("text-generation", model="tiiuae/falcon-rw-1b")
15
-
16
- # def query(payload):
17
- # response = requests.post(API_URL, headers=headers, json=payload)
18
- # return response.json()
19
-
20
- # def clean_topic(topic):
21
- # topic = topic.lower()
22
- # if "write a blog on" in topic:
23
- # topic = topic.replace("write a blog on", "").strip()
24
- # elif "write a blog about" in topic:
25
- # topic = topic.replace("write a blog about", "").strip()
26
- # return topic.capitalize()
27
-
28
- # # def generate_blog(topic):
29
- # # topic = clean_topic(topic)
30
- # # prompt = f"""
31
- # # Write a detailed and engaging blog post about "{topic}".
32
- # # Include an introduction, 2–3 subheadings with paragraphs, and a conclusion.
33
- # # Make it informative and conversational.
34
- # # """
35
- # # # output = query({"inputs": prompt})
36
- # # # return output[0]["generated_text"]
37
- # # # result = generator(prompt, max_length=700, do_sample=True, temperature=0.7, top_p=0.9)
38
- # # # result = generator(prompt, max_length=300, do_sample=True, temperature=0.7, top_p=0.9)
39
-
40
- # # # return result[0]['generated_text']
41
-
42
- # # output = query({"inputs": prompt})
43
- # # return output[0]["generated_text"]
44
-
45
- # def generate_blog(topic):
46
- # topic = clean_topic(topic)
47
- # prompt = f"""
48
- # Write a detailed and engaging blog post about "{topic}".
49
- # Include an introduction, 2–3 subheadings with paragraphs, and a conclusion.
50
- # Make it informative and conversational.
51
- # """
52
- # output = query({"inputs": prompt})
53
- # print("API Response:", output) # <-- Add this for debugging
54
-
55
- # if isinstance(output, list) and "generated_text" in output[0]:
56
- # return output[0]["generated_text"]
57
- # elif "error" in output:
58
- # return f"Error from model: {output['error']}"
59
- # else:
60
- # return "Failed to generate blog. Please try again."
61
-
62
-
63
- # gr.Interface(
64
- # fn=generate_blog,
65
- # inputs="text",
66
- # outputs="text",
67
- # title="AI Blog Writer"
68
- # ).launch(share=True)
69
-
 
1
+ import os
2
+ import requests
3
+ import gradio as gr
4
+
5
# Hugging Face Inference API configuration.
# NOTE(review): the token is read from a secret named "OPENAI_API_KEY" even
# though it is a Hugging Face token — confirm the Space secret name matches.
HF_TOKEN = os.environ.get("OPENAI_API_KEY")
API_URL = "https://api-inference.huggingface.co/models/HuggingFaceH4/zephyr-7b-beta"
headers = {"Authorization": "Bearer " + str(HF_TOKEN)}
9
+
10
def clean_topic(topic):
    """Strip a leading "write a blog on/about" request phrase and capitalize the topic.

    The input is lowercased first, so the returned topic is capitalized on its
    first character only (the rest stays lowercase).
    """
    topic = topic.lower()
    # Remove the first matching request phrase, if any (mirrors the if/elif chain).
    for phrase in ("write a blog on", "write a blog about"):
        if phrase in topic:
            topic = topic.replace(phrase, "").strip()
            break
    return topic.capitalize()
17
+
18
def generate_blog(topic):
    """Generate a markdown blog post for *topic* via the Hugging Face Inference API.

    The raw topic is normalized with clean_topic(), wrapped in an
    instruction-style prompt, and sent to the zephyr-7b-beta endpoint
    (module-level API_URL / headers).

    Returns:
        The generated blog text (everything after the "### Response:" marker),
        or a human-readable error string when the request or response
        parsing fails.
    """
    topic = clean_topic(topic)
    # NOTE(review): some characters below ("2–3", "❌") look mis-encoded in the
    # source — preserved as-is; confirm against the original file encoding.
    prompt = f"""### Instruction:
Write a complete, friendly, and engaging blog post about "{topic}".
Structure the blog with:
- A title
- An introduction
- 2–3 subheadings with paragraphs
- A conclusion

Use markdown formatting with ## for subheadings. Keep the tone conversational.

### Response:
"""

    payload = {
        "inputs": prompt,
        "parameters": {
            "max_new_tokens": 700,
            "do_sample": True,
            "temperature": 0.7,
            "top_p": 0.9,
        },
    }

    try:
        # timeout added: without it requests.post can block the Space forever
        # on a stalled inference endpoint.
        response = requests.post(API_URL, headers=headers, json=payload, timeout=60)
        response.raise_for_status()
        generated_text = response.json()[0]["generated_text"]
        # The model echoes the prompt; keep only the text after the marker.
        return generated_text.split("### Response:")[-1].strip()
    except (requests.RequestException, KeyError, IndexError, TypeError, ValueError) as e:
        # Narrowed from a bare `except Exception`: covers network/HTTP errors
        # and malformed/err-shaped JSON bodies, without hiding real bugs.
        return f"❌ Failed to generate blog: {str(e)}"
50
+
51
# Gradio UI: a single textbox in, generated blog text out.
demo = gr.Interface(
    fn=generate_blog,
    inputs=gr.Textbox(label="Blog Topic"),
    outputs=gr.Textbox(label="Generated Blog"),
    title="πŸ“ AI Blog Writer",
    description="Enter a topic and get a well-written blog post.",
)
demo.launch(share=True)
 
 
 
 
 
 
 
 
 
 
 
app1.py CHANGED
@@ -1,58 +1,69 @@
1
- import os
2
- import requests
3
- import gradio as gr
4
-
5
- # Get token from environment variable (must be set in Hugging Face Secrets for Spaces)
6
- HF_TOKEN = os.environ.get("OPENAI_API_KEY")
7
- API_URL = "https://api-inference.huggingface.co/models/HuggingFaceH4/zephyr-7b-beta"
8
- headers = {"Authorization": f"Bearer {HF_TOKEN}"}
9
-
10
- def clean_topic(topic):
11
- topic = topic.lower()
12
- if "write a blog on" in topic:
13
- topic = topic.replace("write a blog on", "").strip()
14
- elif "write a blog about" in topic:
15
- topic = topic.replace("write a blog about", "").strip()
16
- return topic.capitalize()
17
-
18
- def generate_blog(topic):
19
- topic = clean_topic(topic)
20
- prompt = f"""### Instruction:
21
- Write a complete, friendly, and engaging blog post about "{topic}".
22
- Structure the blog with:
23
- - A title
24
- - An introduction
25
- - 2–3 subheadings with paragraphs
26
- - A conclusion
27
-
28
- Use markdown formatting with ## for subheadings. Keep the tone conversational.
29
-
30
- ### Response:
31
- """
32
-
33
- payload = {
34
- "inputs": prompt,
35
- "parameters": {
36
- "max_new_tokens": 700,
37
- "do_sample": True,
38
- "temperature": 0.7,
39
- "top_p": 0.9,
40
- }
41
- }
42
-
43
- try:
44
- response = requests.post(API_URL, headers=headers, json=payload)
45
- response.raise_for_status()
46
- generated_text = response.json()[0]['generated_text']
47
- return generated_text.split("### Response:")[-1].strip()
48
- except Exception as e:
49
- return f"❌ Failed to generate blog: {str(e)}"
50
-
51
- # Gradio Interface
52
- gr.Interface(
53
- fn=generate_blog,
54
- inputs=gr.Textbox(label="Blog Topic"),
55
- outputs=gr.Textbox(label="Generated Blog"),
56
- title="πŸ“ AI Blog Writer",
57
- description="Enter a topic and get a well-written blog post.",
58
- ).launch(share=True)
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # import os
2
+ # import requests
3
+ # from transformers import pipeline
4
+ # import gradio as gr
5
+
6
+ # # Use the token from environment variable (secret)
7
+ # token = os.environ.get("OPENAI_API_KEY")
8
+ # # generator = pipeline("text-generation", model="HuggingFaceH4/zephyr-7b-beta", token=token)
9
+
10
+ # # API_URL = "https://api-inference.huggingface.co/models/HuggingFaceH4/zephyr-7b-beta"
11
+ # API_URL = "https://api-inference.huggingface.co/models/tiiuae/falcon-rw-1b"
12
+ # headers = {"Authorization": f"Bearer {token}"}
13
+
14
+ # # generator = pipeline("text-generation", model="tiiuae/falcon-rw-1b")
15
+
16
+ # def query(payload):
17
+ # response = requests.post(API_URL, headers=headers, json=payload)
18
+ # return response.json()
19
+
20
+ # def clean_topic(topic):
21
+ # topic = topic.lower()
22
+ # if "write a blog on" in topic:
23
+ # topic = topic.replace("write a blog on", "").strip()
24
+ # elif "write a blog about" in topic:
25
+ # topic = topic.replace("write a blog about", "").strip()
26
+ # return topic.capitalize()
27
+
28
+ # # def generate_blog(topic):
29
+ # # topic = clean_topic(topic)
30
+ # # prompt = f"""
31
+ # # Write a detailed and engaging blog post about "{topic}".
32
+ # # Include an introduction, 2–3 subheadings with paragraphs, and a conclusion.
33
+ # # Make it informative and conversational.
34
+ # # """
35
+ # # # output = query({"inputs": prompt})
36
+ # # # return output[0]["generated_text"]
37
+ # # # result = generator(prompt, max_length=700, do_sample=True, temperature=0.7, top_p=0.9)
38
+ # # # result = generator(prompt, max_length=300, do_sample=True, temperature=0.7, top_p=0.9)
39
+
40
+ # # # return result[0]['generated_text']
41
+
42
+ # # output = query({"inputs": prompt})
43
+ # # return output[0]["generated_text"]
44
+
45
+ # def generate_blog(topic):
46
+ # topic = clean_topic(topic)
47
+ # prompt = f"""
48
+ # Write a detailed and engaging blog post about "{topic}".
49
+ # Include an introduction, 2–3 subheadings with paragraphs, and a conclusion.
50
+ # Make it informative and conversational.
51
+ # """
52
+ # output = query({"inputs": prompt})
53
+ # print("API Response:", output) # <-- Add this for debugging
54
+
55
+ # if isinstance(output, list) and "generated_text" in output[0]:
56
+ # return output[0]["generated_text"]
57
+ # elif "error" in output:
58
+ # return f"Error from model: {output['error']}"
59
+ # else:
60
+ # return "Failed to generate blog. Please try again."
61
+
62
+
63
+ # gr.Interface(
64
+ # fn=generate_blog,
65
+ # inputs="text",
66
+ # outputs="text",
67
+ # title="AI Blog Writer"
68
+ # ).launch(share=True)
69
+