Jangai committed on
Commit
6f36f8a
·
verified ·
1 Parent(s): b17e2f6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +18 -14
app.py CHANGED
@@ -16,30 +16,34 @@ SD_API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-dif
16
 
17
 
18
def query_zephyr(linkedin_text):
    """Ask the Zephyr model (via the HF Inference API) for a Stable
    Diffusion prompt describing the given LinkedIn post.

    Parameters
    ----------
    linkedin_text : str
        The LinkedIn post text to turn into an image prompt.

    Returns
    -------
    object | None
        The parsed JSON response on success (shape depends on the
        endpoint -- typically a list of generated-text dicts), or
        ``None`` on any non-200 response.
    """
    headers = {
        "Authorization": f"Bearer {ZEPHYR_API_TOKEN}",
        "Content-Type": "application/json",
    }

    # The HF Inference API text-generation endpoint expects "inputs" to be a
    # plain string, not a chat-message list; a list of role dicts is rejected
    # by the server (which is why the original call failed).
    instruction = "Prepare a prompt for Stable Diffusion for the following LinkedIn post:"
    payload = {
        "inputs": f"{instruction}\n{linkedin_text}",
    }

    # timeout prevents the app from hanging forever on a stalled request
    response = requests.post(ZEPHYR_API_URL, headers=headers, json=payload, timeout=60)
    if response.status_code == 200:
        return response.json()
    else:
        print(f"Failed to query Zephyr model, status code: {response.status_code}")
        print(response.text)  # Provides insight into what went wrong
        return None
42
 
 
 
 
 
 
 
 
 
43
 
44
  def generate_image_from_prompt(prompt, negative_prompt, guidance_scale, width, height, num_inference_steps):
45
  headers = {"Authorization": f"Bearer {SD_API_TOKEN}"}
 
16
 
17
 
18
def query_zephyr(linkedin_text):
    """Ask the Zephyr model (via the HF Inference API) for a Stable
    Diffusion prompt describing the given LinkedIn post.

    Parameters
    ----------
    linkedin_text : str
        The LinkedIn post text to turn into an image prompt.

    Returns
    -------
    object
        The parsed JSON response (shape depends on the endpoint --
        typically a list of generated-text dicts).

    Raises
    ------
    Exception
        If the endpoint returns any non-200 status code.
    """
    prompt = "Prepare a prompt for Stable Diffusion for the following LinkedIn post:"

    # The HF Inference API text-generation endpoint expects "inputs" to be a
    # single string, so the instruction and the post are concatenated.
    payload = {
        "inputs": f"{prompt}\n{linkedin_text}",
    }

    headers = {
        "Authorization": f"Bearer {ZEPHYR_API_TOKEN}",
        "Content-Type": "application/json",
    }

    # timeout prevents the app from hanging forever on a stalled request
    response = requests.post(ZEPHYR_API_URL, headers=headers, json=payload, timeout=60)
    if response.status_code == 200:
        return response.json()
    else:
        print(response.text)  # To get more insight into what went wrong
        raise Exception(f"Failed to query Zephyr model, status code: {response.status_code}")
 
# Demo driver: guard under __main__ so importing this module (e.g. by the
# Gradio/HF Spaces runtime) does not fire a network request at import time.
if __name__ == "__main__":
    # Replace this with the actual LinkedIn text you wish to process
    linkedin_text = "Your LinkedIn post content here."
    try:
        zephyr_response = query_zephyr(linkedin_text)
        print(zephyr_response)
    except Exception as e:
        # Best-effort demo: report the failure rather than crash the script
        print(e)
47
 
48
  def generate_image_from_prompt(prompt, negative_prompt, guidance_scale, width, height, num_inference_steps):
49
  headers = {"Authorization": f"Bearer {SD_API_TOKEN}"}