zR committed
Commit f375ead
1 Parent(s): ca14f13
Files changed (3):
  1. config.json +1 -1
  2. generation_config.json +1 -1
  3. modeling_cogvlm.py +3 -1
config.json CHANGED
@@ -21,7 +21,7 @@
   "template_version": "base",
   "tie_word_embeddings": false,
   "torch_dtype": "bfloat16",
-  "transformers_version": "4.41.0",
+  "transformers_version": "4.43.1",
   "use_cache": true,
   "vision_config": {
     "dropout_prob": 0.0,
generation_config.json CHANGED
@@ -7,5 +7,5 @@
   "max_length": 2048,
   "top_p": 0.1,
   "top_k": 1,
-  "transformers_version": "4.41.0"
+  "transformers_version": "4.43.1"
 }
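
Both config files bump the recorded transformers_version from 4.41.0 to 4.43.1, matching the generation-API fix in modeling_cogvlm.py below. A quick sanity check of the local environment (illustrative, not part of the commit):

    import transformers

    # The modeling fix below relies on the tuple-returning cache-extraction
    # API, available in transformers >= 4.42.
    print(transformers.__version__)
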
modeling_cogvlm.py CHANGED
@@ -723,9 +723,11 @@ class CogVLMVideoForCausalLM(CogVLMPreTrainedModel):
         standardize_cache_format: bool = False,
     ) -> Dict[str, Any]:
         # update past_key_values
-        model_kwargs["past_key_values"] = self._extract_past_from_model_output(
+        cache_name, cache = self._extract_past_from_model_output(
             outputs, standardize_cache_format=standardize_cache_format
         )
+        model_kwargs[cache_name] = cache
+
         if getattr(outputs, "state", None) is not None:
             model_kwargs["state"] = outputs.state
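
Context for the modeling change (not stated in the commit itself): in newer transformers releases (around v4.42 and later, including the 4.43.1 now pinned in the configs), GenerationMixin._extract_past_from_model_output returns a (cache_name, cache) tuple rather than the cache object alone, so the old single assignment to model_kwargs["past_key_values"] no longer worked. A minimal sketch of a version-tolerant variant; the helper name update_cache_kwargs and the pre-4.42 fallback branch are assumptions for illustration, not part of the committed code:

    from typing import Any, Dict

    def update_cache_kwargs(model, outputs, model_kwargs: Dict[str, Any],
                            standardize_cache_format: bool = False) -> Dict[str, Any]:
        # Extract the cache from the model output; the return type of this
        # private helper differs across transformers versions.
        extracted = model._extract_past_from_model_output(
            outputs, standardize_cache_format=standardize_cache_format
        )
        if isinstance(extracted, tuple):
            # transformers >= 4.42: (cache_name, cache),
            # e.g. ("past_key_values", <cache object>)
            cache_name, cache = extracted
        else:
            # transformers < 4.42: the cache object alone (assumed fallback)
            cache_name, cache = "past_key_values", extracted
        model_kwargs[cache_name] = cache
        return model_kwargs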