lamhieu committed
Commit 53c6eb3 · 1 Parent(s): 74bbc64

chore: update gpus duration and max tokens default

Files changed (1): app.py (+4 −4)
app.py CHANGED
@@ -7,8 +7,8 @@ import spaces
 import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
 
-MAX_MAX_NEW_TOKENS = 2048
-DEFAULT_MAX_NEW_TOKENS = 1024
+MAX_MAX_NEW_TOKENS = 4096
+DEFAULT_MAX_NEW_TOKENS = 1536
 MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "8192"))
 
 DESCRIPTION = """\
@@ -78,12 +78,12 @@ if torch.cuda.is_available():
     )
 
 
-@spaces.GPU(duration=60)
+@spaces.GPU(duration=120)
 def generate(
     message: str,
     chat_history: list[tuple[str, str]],
     system_prompt: str,
-    max_new_tokens: int = 1024,
+    max_new_tokens: int = 1536,
     temperature: float = 0.4,
     top_p: float = 0.95,
     top_k: int = 50,
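
For context: raising `@spaces.GPU(duration=60)` to `duration=120` requests a longer ZeroGPU allocation per call, which matches the higher token ceiling (a 4096-token generation can easily exceed 60 seconds). The two module-level constants typically feed a Gradio slider elsewhere in app.py; that part is not shown in this diff, so the following is a minimal sketch with assumed slider parameters, not the app's actual code:

import gradio as gr

MAX_MAX_NEW_TOKENS = 4096      # hard ceiling the user can request (new value)
DEFAULT_MAX_NEW_TOKENS = 1536  # pre-selected value in the UI (new value)

# Hypothetical wiring — the real slider definition lies outside this diff.
max_new_tokens = gr.Slider(
    label="Max new tokens",
    minimum=1,
    maximum=MAX_MAX_NEW_TOKENS,
    step=1,
    value=DEFAULT_MAX_NEW_TOKENS,
)

Keeping the function's `max_new_tokens` default (1536) in sync with DEFAULT_MAX_NEW_TOKENS ensures direct calls to generate() behave the same as calls driven by the UI default.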