rifatramadhani committed
Commit afcc761 · Parent(s): 5f2cf96

fix: set truncation to maximum length of model input size

Files changed (1): app.py (+3, -1)
app.py CHANGED
@@ -13,6 +13,8 @@ sentiment_task = pipeline("sentiment-analysis", model=model_path, tokenizer=mode
 @spaces.GPU
 def classify(query):
     torch_device = 0 if torch.cuda.is_available() else -1
+    tokenizer_kwargs = {'truncation':True,'max_length':512}
+
     sentiment_task = pipeline("sentiment-analysis", model=model_path, tokenizer=model_path, device=torch_device)

     request_type = type(query)
@@ -29,7 +31,7 @@ def classify(query):

     start_time = datetime.datetime.now()

-    result = sentiment_task(data, batch_size=128, top_k=3)
+    result = sentiment_task(data, batch_size=128, top_k=3, **tokenizer_kwargs)

     end_time = datetime.datetime.now()
     elapsed_time = end_time - start_time
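
For reference, the change threads tokenizer keyword arguments through the pipeline call so that inputs longer than the model's 512-token limit are truncated instead of failing at inference time. Below is a minimal, self-contained sketch of the same pattern; the model path shown is a hypothetical stand-in, since the Space's real model_path is defined earlier in app.py and is not part of this diff:

import torch
from transformers import pipeline

# Hypothetical model path for illustration only; the Space defines its own model_path.
model_path = "cardiffnlp/twitter-roberta-base-sentiment-latest"

# Select GPU 0 when available, otherwise fall back to CPU (-1).
torch_device = 0 if torch.cuda.is_available() else -1
sentiment_task = pipeline(
    "sentiment-analysis", model=model_path, tokenizer=model_path, device=torch_device
)

# Tokenizer kwargs forwarded through the pipeline call: over-length inputs are
# cut down to the model's 512-token maximum instead of overflowing its inputs.
tokenizer_kwargs = {"truncation": True, "max_length": 512}

long_text = "word " * 2000  # deliberately far longer than 512 tokens
result = sentiment_task(long_text, top_k=3, **tokenizer_kwargs)
print(result)

Passing truncation and max_length at call time leaves pipeline construction unchanged, which fits this app's structure: the pipeline is rebuilt inside the @spaces.GPU-decorated classify for each request, so only the call site needs the new kwargs.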