amir22010 committed on
Commit
ed41d7d
·
1 Parent(s): 54397e6

changed llm

Browse files
Files changed (1) hide show
  1. app.py +27 -45
app.py CHANGED
@@ -1,7 +1,6 @@
1
  import gradio as gr
2
  from llama_cpp import Llama
3
  import os
4
- import asyncio
5
  from groq import Groq
6
 
7
  client = Groq(
@@ -57,10 +56,12 @@ If the user prompt does not fall within these categories, is safe and does not n
57
  user prompt: {}
58
  """
59
 
60
- async def topical_guardrail(user_request):
61
- """
62
- checking provided user content authenticity
63
- """
 
 
64
  messages = [
65
  {
66
  "role": "system",
@@ -68,47 +69,28 @@ async def topical_guardrail(user_request):
68
  },
69
  {"role": "user", "content": guardrail_prompt.format(user_request)},
70
  ]
71
- response = client.chat.completions.create(
72
- model=guard_llm, messages=messages, temperature=0
73
- )
74
- return response.choices[0].message.content
75
-
76
- async def greet(product,description):
77
- user_reques = marketing_email_prompt.format(
78
- product, # product
79
- description, # description
80
- "", # output - leave this blank for generation!
81
- )
82
- topical_guardrail_task = asyncio.create_task(topical_guardrail(user_reques))
83
- while True:
84
- done, _ = await asyncio.wait(
85
- [topical_guardrail_task], return_when=asyncio.FIRST_COMPLETED
86
  )
87
- if topical_guardrail_task in done:
88
- guardrail_response = topical_guardrail_task.result()
89
- if guardrail_response != "not moderated":
90
- yield "Sorry can't proceed for generate marketing email!. Your content needs to be moderated first."
91
- else:
92
- output = llm.create_chat_completion(
93
- messages=[
94
- {
95
- "role": "system",
96
- "content": "Your go-to Email Marketing Guru - I'm here to help you craft compelling campaigns, boost conversions, and take your business to the next level.",
97
- },
98
- {"role": "user", "content": user_request},
99
- ],
100
- max_tokens=4096,
101
- temperature=0.7,
102
- stream=True
103
- )
104
- partial_message = ""
105
- for chunk in output:
106
- delta = chunk['choices'][0]['delta']
107
- if 'content' in delta:
108
- partial_message = partial_message + delta.get('content', '')
109
- yield partial_message
110
- else:
111
- await asyncio.sleep(0.1) # sleep for a bit before checking the tasks again
112
 
113
  demo = gr.Interface(fn=greet, inputs=["text","text"], outputs="text", concurrency_limit=10)
114
  demo.launch()
 
1
  import gradio as gr
2
  from llama_cpp import Llama
3
  import os
 
4
  from groq import Groq
5
 
6
  client = Groq(
 
56
  user prompt: {}
57
  """
58
 
59
+ async def greet(product,description):
60
+ user_reques = marketing_email_prompt.format(
61
+ product, # product
62
+ description, # description
63
+ "", # output - leave this blank for generation!
64
+ )
65
  messages = [
66
  {
67
  "role": "system",
 
69
  },
70
  {"role": "user", "content": guardrail_prompt.format(user_request)},
71
  ]
72
+ response = client.chat.completions.create(model=guard_llm, messages=messages, temperature=0)
73
+ if response.choices[0].message.content != "not moderated":
74
+ return "Sorry can't proceed for generate marketing email!. Your content needs to be moderated first."
75
+ else:
76
+ output = llm.create_chat_completion(
77
+ messages=[
78
+ {
79
+ "role": "system",
80
+ "content": "Your go-to Email Marketing Guru - I'm here to help you craft compelling campaigns, boost conversions, and take your business to the next level.",
81
+ },
82
+ {"role": "user", "content": user_request},
83
+ ],
84
+ max_tokens=4096,
85
+ temperature=0.7,
86
+ stream=True
87
  )
88
+ partial_message = ""
89
+ for chunk in output:
90
+ delta = chunk['choices'][0]['delta']
91
+ if 'content' in delta:
92
+ partial_message = partial_message + delta.get('content', '')
93
+ yield partial_message
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# Wire up the Gradio UI: two free-text inputs (product, description) feed
# `greet`, which streams the generated marketing email back as text.
# Up to 10 requests are served concurrently.
demo = gr.Interface(
    fn=greet,
    inputs=["text", "text"],
    outputs="text",
    concurrency_limit=10,
)
demo.launch()