woods-today committed on
Commit
5948e4d
·
1 Parent(s): ffd715f

Workin on it

Browse files
Files changed (1) hide show
  1. routers/training.py +8 -8
routers/training.py CHANGED
@@ -14,10 +14,10 @@ from transformers import AutoTokenizer, AutoModelForCausalLM
14
 
15
  from diffusers import StableDiffusionImg2ImgPipeline
16
 
17
- tokenizer = AutoTokenizer.from_pretrained("openlm-research/open_llama_7b")
18
- model = AutoModelForCausalLM.from_pretrained(
19
- "openlm-research/open_llama_7b", device_map="auto", load_in_4bit=True
20
- )
21
 
22
  model_id_or_path = "runwayml/stable-diffusion-v1-5"
23
  pipe = StableDiffusionImg2ImgPipeline.from_pretrained(model_id_or_path, torch_dtype=torch.float16)
@@ -37,9 +37,9 @@ class ActionBody(BaseModel):
37
  @router.post("/perform-action")
38
  async def performAction(actionBody: ActionBody):
39
 
40
- model_inputs = tokenizer(["A list of colors: red, blue"], return_tensors="pt").to("cuda")
41
- generated_ids = model.generate(**model_inputs)
42
- output = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
43
 
44
  response = requests.get(actionBody.url)
45
  init_image = Image.open(BytesIO(response.content)).convert("RGB")
@@ -55,7 +55,7 @@ async def performAction(actionBody: ActionBody):
55
  return {
56
  "imageName" : imgUUID+".png",
57
  "image": "data:image/jpeg;base64,"+img_str.decode(),
58
- "output": output
59
  }
60
 
61
 
 
14
 
15
  from diffusers import StableDiffusionImg2ImgPipeline
16
 
17
+ # tokenizer = AutoTokenizer.from_pretrained("openlm-research/open_llama_7b")
18
+ # model = AutoModelForCausalLM.from_pretrained(
19
+ # "openlm-research/open_llama_7b", device_map="auto", load_in_4bit=True
20
+ # )
21
 
22
  model_id_or_path = "runwayml/stable-diffusion-v1-5"
23
  pipe = StableDiffusionImg2ImgPipeline.from_pretrained(model_id_or_path, torch_dtype=torch.float16)
 
37
  @router.post("/perform-action")
38
  async def performAction(actionBody: ActionBody):
39
 
40
+ # model_inputs = tokenizer(["A list of colors: red, blue"], return_tensors="pt").to("cuda")
41
+ # generated_ids = model.generate(**model_inputs)
42
+ # output = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
43
 
44
  response = requests.get(actionBody.url)
45
  init_image = Image.open(BytesIO(response.content)).convert("RGB")
 
55
  return {
56
  "imageName" : imgUUID+".png",
57
  "image": "data:image/jpeg;base64,"+img_str.decode(),
58
+ # "output": output
59
  }
60
 
61