codewithdark committed on
Commit
9fad8af
·
verified ·
1 Parent(s): 55c4358

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +7 -4
README.md CHANGED
@@ -78,22 +78,25 @@ probs = torch.softmax(logits[:, -1, :], dim=-1)
78
  next_token = torch.multinomial(probs, num_samples=1)
79
  generated_ids = torch.cat([input_ids, next_token], dim=1)
80
  generated_text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
 
81
  print(generated_text)
82
  ```
83
 
84
  ### Alternative: Using the `generate()` Method
85
 
86
  ```python
87
- from transformers import AutoTokenizer, AutoModelForCausalLM
88
 
89
  tokenizer = AutoTokenizer.from_pretrained("codewithdark/latent-recurrent-depth-lm")
90
- model = AutoModelForCausalLM.from_pretrained("codewithdark/latent-recurrent-depth-lm")
91
 
92
  prompt = "In the realm of language modeling"
93
  input_ids = tokenizer(prompt, return_tensors="pt").input_ids
94
- generated_ids = model.generate(input_ids, max_length=50, num_iterations=3, temperature=0.8, top_k=50)
95
  generated_text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
96
- print(generated_text)
 
 
97
  ```
98
 
99
  ## Ethical Considerations
 
78
  next_token = torch.multinomial(probs, num_samples=1)
79
  generated_ids = torch.cat([input_ids, next_token], dim=1)
80
  generated_text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
81
+ clean_text = generated_text.replace('Ġ','')
82
  print(generated_text)
83
  ```
84
 
85
  ### Alternative: Using the `generate()` Method
86
 
87
  ```python
88
+ from transformers import AutoTokenizer, AutoModel, AutoModelForCausalLM
89
 
90
  tokenizer = AutoTokenizer.from_pretrained("codewithdark/latent-recurrent-depth-lm")
91
+ model = AutoModel.from_pretrained("codewithdark/latent-recurrent-depth-lm", trust_remote_code=True)
92
 
93
  prompt = "In the realm of language modeling"
94
  input_ids = tokenizer(prompt, return_tensors="pt").input_ids
95
+ generated_ids = model.generate(input_ids, max_length=50, num_iterations=10, temperature=0.5, top_k=50)
96
  generated_text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
97
+ clean_text = generated_text.replace('Ġ','')
98
+ print(clean_text)
99
+
100
  ```
101
 
102
  ## Ethical Considerations