oleksandrfluxon committed on
Commit
4023b2d
1 Parent(s): 4d0044f

Update handler.py

Browse files
Files changed (1) hide show
  1. handler.py +13 -16
handler.py CHANGED
@@ -11,7 +11,6 @@ class EndpointHandler:
11
  self.model = AutoModelForCausalLM.from_pretrained(
12
  path, device_map="auto", torch_dtype=torch.float16, trust_remote_code=True
13
  )
14
- print('===> cuda.is_available', torch.cuda.is_available())
15
  self.device = "cuda" if torch.cuda.is_available() else "cpu"
16
 
17
  def __call__(self, data: Dict[str, Any]) -> Dict[str, str]:
@@ -19,18 +18,16 @@ class EndpointHandler:
19
  inputs = data.pop("inputs", data)
20
  parameters = data.pop("parameters", None)
21
 
22
- with torch.autocast('cuda'):
23
- # preprocess
24
- inputs = self.tokenizer(inputs, return_tensors="pt").to('cuda')
25
- self.model.to('cuda')
26
-
27
- # pass inputs with all kwargs in data
28
- if parameters is not None:
29
- outputs = self.model.generate(**inputs, **parameters)
30
- else:
31
- outputs = self.model.generate(**inputs)
32
-
33
- # postprocess the prediction
34
- prediction = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
35
-
36
- return [{"generated_text": prediction}]
 
11
  self.model = AutoModelForCausalLM.from_pretrained(
12
  path, device_map="auto", torch_dtype=torch.float16, trust_remote_code=True
13
  )
 
14
  self.device = "cuda" if torch.cuda.is_available() else "cpu"
15
 
16
  def __call__(self, data: Dict[str, Any]) -> Dict[str, str]:
 
18
  inputs = data.pop("inputs", data)
19
  parameters = data.pop("parameters", None)
20
 
21
+ # preprocess
22
+ inputs = self.tokenizer(inputs, return_tensors="pt").to(self.device)
23
+
24
+ # pass inputs with all kwargs in data
25
+ if parameters is not None:
26
+ outputs = self.model.generate(**inputs, **parameters)
27
+ else:
28
+ outputs = self.model.generate(**inputs)
29
+
30
+ # postprocess the prediction
31
+ prediction = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
32
+
33
+ return [{"generated_text": prediction}]