alaa-ahmed14 committed on
Commit
bc5f9c0
·
verified ·
1 Parent(s): 5f7bb6e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +26 -2
app.py CHANGED
@@ -1,5 +1,29 @@
1
- from transformers import AutoModelForCausalLM, AutoTokenizer
 
 
 
2
  import os
3
 
 
 
 
 
 
4
  tokenizer = AutoTokenizer.from_pretrained("matsant01/STEMerald-2b")
5
- model = AutoModelForCausalLM.from_pretrained("matsant01/STEMerald-2b")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fastapi import FastAPI
2
+ from transformers import AutoTokenizer, AutoModelForCausalLM
3
+ import torch
4
+
5
  import os
6
 
7
+
8
+ # Set cache directory for Hugging Face Transformers
9
+ os.environ["TRANSFORMERS_CACHE"] = "/home/user/.cache"
10
+
11
+ # Load the tokenizer and model
12
  tokenizer = AutoTokenizer.from_pretrained("matsant01/STEMerald-2b")
13
+ model = AutoModelForCausalLM.from_pretrained("matsant01/STEMerald-2b")
14
+
15
+ # Initialize FastAPI app
16
+ app = FastAPI()
17
+
18
+
19
+
20
+ @app.get("/")
21
+ def read_root():
22
+ return {"message": "Welcome to the STEMerald-2b API"}
23
+
24
+ #@app.post("/generate/")
25
+ #def generate_text(prompt: str):
26
+ # inputs = tokenizer(prompt, return_tensors="pt")
27
+ # outputs = model.generate(inputs["input_ids"], max_length=50)
28
+ # generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
29
+ # return {"generated_text": generated_text}