Z3ktrix commited on
Commit
37566bc
·
verified ·
1 Parent(s): afca611

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -5
app.py CHANGED
@@ -16,9 +16,16 @@ API_URL = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Inst
16
  headers = {"Authorization": f"Bearer {HF_API_KEY}"}
17
 
18
  # Function to query the Hugging Face model with a structured prompt
19
- def query_huggingface(prompt):
 
 
 
 
 
 
 
20
  try:
21
- response = requests.post(API_URL, headers=headers, json={"inputs": prompt})
22
  response.raise_for_status()
23
  return response.json()
24
  except requests.exceptions.RequestException as e:
@@ -37,13 +44,13 @@ async def on_ready():
37
  print(f'Bot is ready. Logged in as {bot.user}')
38
 
39
  @bot.command(name='ask')
40
- async def ask(ctx, *, question: str):
41
  """
42
  Command to ask a question to the Hugging Face model with an instructive prompt.
43
  """
44
  # Create a structured prompt
45
- prompt = f"Answer the following question as accurately and concisely as possible: {question}"
46
- response = query_huggingface(prompt)
47
 
48
  # Extract and clean the response
49
  if isinstance(response, dict) and 'generated_text' in response:
 
16
  headers = {"Authorization": f"Bearer {HF_API_KEY}"}
17
 
18
  # Function to query the Hugging Face model with a structured prompt
19
+ def query_huggingface(prompt, temperature=0.5, max_tokens=100):
20
+ payload = {
21
+ "inputs": prompt,
22
+ "parameters": {
23
+ "temperature": temperature,
24
+ "max_new_tokens": max_tokens
25
+ }
26
+ }
27
  try:
28
+ response = requests.post(API_URL, headers=headers, json=payload)
29
  response.raise_for_status()
30
  return response.json()
31
  except requests.exceptions.RequestException as e:
 
44
  print(f'Bot is ready. Logged in as {bot.user}')
45
 
46
  @bot.command(name='ask')
47
+ async def ask(ctx, question: str, temperature: float = 0.5, max_tokens: int = 100):
48
  """
49
  Command to ask a question to the Hugging Face model with an instructive prompt.
50
  """
51
  # Create a structured prompt
52
+ prompt = f"Do not under any circumstance, finish or add to the input. Excluded input correction or finishing at all times. Answer the following question as accurately and concisely as possible: {question}"
53
+ response = query_huggingface(prompt, temperature, max_tokens)
54
 
55
  # Extract and clean the response
56
  if isinstance(response, dict) and 'generated_text' in response: