Z3ktrix committed on
Commit
211da2c
·
verified ·
1 Parent(s): 1165c6a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +44 -41
app.py CHANGED
@@ -1,51 +1,54 @@
1
  #import gradio as gr
2
  #gr.load("models/mistralai/Mistral-7B-Instruct-v0.3").launch()
3
-
4
  import os
5
- import gradio as gr
6
  import requests
 
 
7
  from dotenv import load_dotenv
8
 
9
import gradio_client as grc

# Load the environment variables from the .env file *before* anything reads
# them — the original called deploy_discord() first, so os.getenv('DSTOK')
# could return None because the .env file had not been loaded yet.
load_dotenv()

# Deploy a Discord bridge for the hosted model via gradio_client.
# NOTE(review): passing an Authorization-header dict as the `id=` kwarg looks
# suspicious — confirm against the gradio_client deploy_discord API.
grc.Client("Z3ktrix/mistralai-Mistral-7B-Instruct-v0.3").deploy_discord(id={"Authorization": f"Bearer {os.getenv('DSTOK')}"})
13
 
14
- API_URL = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.3"
15
- headers = {"Authorization": f"Bearer {os.getenv('HFREAD')}"}
16
-
17
def query(payload):
    """Send *payload* to the Hugging Face inference API and return the decoded JSON.

    payload: dict in the HF inference format, e.g. {"inputs": "..."}.
    Raises requests.exceptions.RequestException on network failure or timeout.
    """
    # A timeout keeps a stalled API call from hanging the app forever;
    # requests.post() without one waits indefinitely.
    response = requests.post(API_URL, headers=headers, json=payload, timeout=30)
    return response.json()
20
 
21
def chatbot_response(input_text):
    """Ask the model about *input_text* and extract the generated text.

    Falls back to a fixed message when the API reply carries no usable text.
    """
    reply = query({"inputs": input_text})
    # The inference API may answer with a single dict or a list of candidates.
    if isinstance(reply, dict) and 'generated_text' in reply:
        return reply['generated_text']
    if isinstance(reply, list) and len(reply) > 0 and 'generated_text' in reply[0]:
        return reply[0]['generated_text']
    return 'No response generated.'
29
-
30
# Gradio interface
def main():
    """Build the Gradio chat UI and start serving it."""
    with gr.Blocks() as app:
        gr.Markdown("Chatty")

        with gr.Row():
            question_box = gr.Textbox(label="Input Text", placeholder="Type your question here...", lines=2)

        with gr.Row():
            answer_box = gr.Textbox(label="Response", placeholder="The response will appear here...", lines=5)

        send_button = gr.Button("Submit")
        send_button.click(fn=chatbot_response, inputs=question_box, outputs=answer_box)

    app.launch()

if __name__ == "__main__":
    main()
 
1
  #import gradio as gr
2
  #gr.load("models/mistralai/Mistral-7B-Instruct-v0.3").launch()
 
3
  import os
 
4
  import requests
5
+ import discord
6
+ from discord.ext import commands
7
  from dotenv import load_dotenv
8
 
9
+ # Load environment variables from the .env file
 
 
10
  load_dotenv()
11
 
12
+ DISCORD_TOKEN = os.getenv('DSTOK')
13
+ HF_API_KEY = os.getenv('HFREAD')
 
 
 
 
14
 
15
+ API_URL = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.3"
16
+ headers = {"Authorization": f"Bearer {HF_API_KEY}"}
17
+
18
# Function to query the Hugging Face model
def query_huggingface(payload):
    """POST *payload* to the HF inference API and return the decoded JSON.

    On any network/HTTP error the problem is logged and {"error": str(e)} is
    returned instead of raising, so callers can degrade gracefully.
    """
    try:
        # A timeout keeps a stalled API call from hanging the bot forever;
        # requests.post() without one waits indefinitely.
        response = requests.post(API_URL, headers=headers, json=payload, timeout=30)
        response.raise_for_status()
        return response.json()
    except requests.exceptions.RequestException as e:
        print(f"Error querying the API: {e}")
        return {"error": str(e)}
27
+
28
# Initialize the Discord bot.
# NOTE: message-content access must also be enabled in the Discord developer
# portal for prefix commands to receive text.
intents = discord.Intents.default()
intents.message_content = True
intents.messages = True

bot = commands.Bot(command_prefix="!", intents=intents)
34
+
35
@bot.event
async def on_ready():
    """Log a confirmation line once the gateway connection is established."""
    print(f'Bot is ready. Logged in as {bot.user}')
38
+
39
@bot.command(name='ask')
async def ask(ctx, *, question: str):
    """
    Command to ask a question to the Hugging Face model.

    Echoes the question, queries the model, and sends back either the
    generated text or an apology when no usable reply came back.
    """
    await ctx.send(f"Question: {question}")
    response = query_huggingface({"inputs": question})
    # The API may return a single dict or a list of candidate dicts. The
    # isinstance guards keep a malformed reply (e.g. None, or a list whose
    # first element is not a dict) from raising a TypeError here — the old
    # bare `'generated_text' in response` crashed on non-container replies.
    if isinstance(response, dict) and 'generated_text' in response:
        await ctx.send(f"Response: {response['generated_text']}")
    elif isinstance(response, list) and len(response) > 0 and isinstance(response[0], dict) and 'generated_text' in response[0]:
        await ctx.send(f"Response: {response[0]['generated_text']}")
    else:
        await ctx.send("Sorry, I couldn't generate a response.")
52
+
53
# Run the bot
if not DISCORD_TOKEN:
    # Fail fast with a clear message instead of discord.py's opaque login
    # failure when the DSTOK environment variable was never provided.
    raise RuntimeError("DSTOK environment variable is not set; cannot start the Discord bot.")
bot.run(DISCORD_TOKEN)