Spaces: Runtime error

Update app.py
app.py CHANGED
@@ -5,14 +5,7 @@ import os
 import requests
 from transformers import AutoModelForCausalLM, AutoTokenizer
 import torch
-
-
-client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
-
-# Load the model and tokenizer
-model_name = "mistralai/Mixtral-8x7B-Instruct-v0.1"
-tokenizer = AutoTokenizer.from_pretrained(model_name)
-model = AutoModelForCausalLM.from_pretrained(model_name)
+from huggingface_hub import InferenceClient, HfApi
 
 # If you have a GPU, move the model to it
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
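Before this commit, the module's top level called `InferenceClient(...)` before any import or definition of that name existed, so Python raised a `NameError` the moment the Space imported app.py; that is consistent with the "Runtime error" status shown above. The fix brings the name into scope from `huggingface_hub` (`HfApi` is imported alongside it but is not used in the hunks shown). A minimal sketch of the failure and the corrected import order:

```python
# Before the commit, app.py's top level effectively ran this line first,
# which fails at import time because nothing has defined the name yet:
#
#     client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
#     # NameError: name 'InferenceClient' is not defined

# After the commit, the name is imported before it is used:
from huggingface_hub import InferenceClient

client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
```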
@@ -50,8 +43,12 @@ class InferenceClient:
     def get_endpoint_metrics(self, repo_id, handler_path):
         pass
 
+client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
 
-
+# Load the model and tokenizer
+model_name = "mistralai/Mixtral-8x7B-Instruct-v0.1"
+tokenizer = AutoTokenizer.from_pretrained(model_name)
+model = AutoModelForCausalLM.from_pretrained(model_name)
 
 # Define input prompt
 input_prompt = "(input value = highest-level-quality code content invocation ; True)"
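One caveat about the moved code, visible in the hunk context above: app.py defines its own `class InferenceClient:`, and that definition rebinds the name after the new `from huggingface_hub import InferenceClient` runs. The `client = InferenceClient(...)` call placed below the class therefore instantiates the local class, not the Hub client, and will fail with a `TypeError` if the local class has no `__init__` accepting a model id. A sketch of one way to keep the two names apart, using a hypothetical `HubInferenceClient` alias:

```python
from huggingface_hub import InferenceClient as HubInferenceClient


class InferenceClient:
    """Local helper class from app.py; only one method appears in the diff."""

    def get_endpoint_metrics(self, repo_id, handler_path):
        pass


# Binds explicitly to the Hub client, so the local class cannot shadow it.
client = HubInferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
```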
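Taken together, the file after this commit imports its dependencies, picks a device, defines the helper class, creates the client, loads the Mixtral weights locally, and defines `input_prompt`. The diff never shows the prompt being consumed or the model being moved to `device`, so the final steps below are assumptions for illustration; note also that loading Mixtral-8x7B locally needs on the order of 100 GB of memory in half precision, which few Spaces can provide:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# If you have a GPU, move the model to it
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load the model and tokenizer
model_name = "mistralai/Mixtral-8x7B-Instruct-v0.1"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name).to(device)

# Define input prompt
input_prompt = "(input value = highest-level-quality code content invocation ; True)"

# Assumed usage (not shown in the diff): tokenize, generate, decode.
inputs = tokenizer(input_prompt, return_tensors="pt").to(device)
outputs = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```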