Update app.py
app.py CHANGED
@@ -4,9 +4,18 @@ from auto_gptq import AutoGPTQForCausalLM
 import torch
 import optimum
 from transformers import (AutoModelForCausalLM, AutoTokenizer, LlamaForCausalLM, LlamaTokenizer, GenerationConfig, pipeline,)
+from fastapi.middleware.cors import CORSMiddleware
 
 app = FastAPI(title="Deploying FastAPI Apps on Huggingface")
 
+app.add_middleware(
+    CORSMiddleware,
+    allow_origins=origins,
+    allow_credentials=True,
+    allow_methods=["*"],
+    allow_headers=["*"],
+)
+
 
 # Load the model and tokenizer
 model_name_or_path = "TheBloke/Wizard-Vicuna-7B-Uncensored-GPTQ"
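Two notes on the code this diff produces. First, the new add_middleware call passes allow_origins=origins, but origins is not defined anywhere in this hunk; unless it is assigned elsewhere in app.py, importing the module will raise a NameError. A minimal sketch of the missing definition (the allow-all list is an assumption, not part of this commit):

# Assumption: permit any origin; a real deployment would list specific frontend URLs
origins = ["*"]

Second, the hunk header shows the file imports AutoGPTQForCausalLM from auto_gptq, and the visible context ends right after model_name_or_path is set, just before the model is loaded. A sketch of how a GPTQ checkpoint like TheBloke/Wizard-Vicuna-7B-Uncensored-GPTQ is commonly loaded with these imports; the from_quantized arguments and the pipeline wrapper are assumptions based on typical auto-gptq usage, not taken from this commit:

from auto_gptq import AutoGPTQForCausalLM
from transformers import AutoTokenizer, pipeline

model_name_or_path = "TheBloke/Wizard-Vicuna-7B-Uncensored-GPTQ"

# Assumed loading pattern for a GPTQ-quantized checkpoint (not shown in the diff)
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=True)
model = AutoGPTQForCausalLM.from_quantized(
    model_name_or_path,
    use_safetensors=True,  # assumption: the repo ships safetensors weights
    device="cuda:0",       # assumption: a single CUDA device is available
)

# Wrap the model in a text-generation pipeline for the FastAPI routes to call
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)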