Update README.md
README.md CHANGED
@@ -52,8 +52,8 @@ def get_detailed_instruct(task_description: str, query: str) -> str:
     return f'{task_description}\nQuery: {query}'
 
 
-model = AutoModel.from_pretrained('yuri-no/gemma-
-tokenizer = AutoTokenizer.from_pretrained('yuri-no/gemma-
+model = AutoModel.from_pretrained('yuri-no/gemma-palm', torch_dtype=torch.bfloat16).to('cuda')
+tokenizer = AutoTokenizer.from_pretrained('yuri-no/gemma-palm')
 
 # Each query must come with a one-sentence instruction that describes the task
 task = 'Given a search query, retrieve relevant passages that answer the query'
@@ -80,7 +80,7 @@ embeddings = last_token_pool(outputs.last_hidden_state, batch_dict['attention_ma
 embeddings = F.normalize(embeddings, p=2, dim=1)
 scores = (embeddings[:2] @ embeddings[2:].T) * 100
 print(scores.tolist())
-# [[
-# [
+# [[60.5, 16.375, 17.5],
+# [26.0, 58.25, 15.0625]]
 ```
 ---
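
For context, the changed lines belong to the README's Transformers usage snippet. The sketch below shows one way the pieces referenced in the hunk headers could fit together, assuming the common last-token-pooling pattern for instruction-tuned embedding models. The `last_token_pool` body and the query/document strings are illustrative placeholders, not taken from the README.

```python
# Hypothetical sketch reconstructed from the diff context; the actual README may differ.
import torch
import torch.nn.functional as F
from torch import Tensor
from transformers import AutoModel, AutoTokenizer


def last_token_pool(last_hidden_states: Tensor, attention_mask: Tensor) -> Tensor:
    # Assumed helper: take the hidden state of the last non-padded token of each sequence.
    left_padding = (attention_mask[:, -1].sum() == attention_mask.shape[0])
    if left_padding:
        return last_hidden_states[:, -1]
    sequence_lengths = attention_mask.sum(dim=1) - 1
    batch_size = last_hidden_states.shape[0]
    return last_hidden_states[torch.arange(batch_size, device=last_hidden_states.device), sequence_lengths]


def get_detailed_instruct(task_description: str, query: str) -> str:
    return f'{task_description}\nQuery: {query}'


model = AutoModel.from_pretrained('yuri-no/gemma-palm', torch_dtype=torch.bfloat16).to('cuda')
tokenizer = AutoTokenizer.from_pretrained('yuri-no/gemma-palm')

# Each query must come with a one-sentence instruction that describes the task
task = 'Given a search query, retrieve relevant passages that answer the query'
queries = [
    get_detailed_instruct(task, 'example query one'),  # placeholder query
    get_detailed_instruct(task, 'example query two'),  # placeholder query
]
documents = ['passage one', 'passage two', 'passage three']  # placeholder passages

batch_dict = tokenizer(queries + documents, padding=True, truncation=True,
                       max_length=512, return_tensors='pt').to('cuda')
with torch.no_grad():
    outputs = model(**batch_dict)

embeddings = last_token_pool(outputs.last_hidden_state, batch_dict['attention_mask'])
embeddings = F.normalize(embeddings, p=2, dim=1)
scores = (embeddings[:2] @ embeddings[2:].T) * 100  # queries vs. documents
print(scores.tolist())
```

With two queries and three documents, `scores` is a 2×3 matrix, which matches the shape of the updated example output in the diff.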