Update README.md

File changed: README.md (@@ -1,5 +1,77 @@)
Removed (old README, lines 1-5):

---
license: other
license_name: seniru-epasinghe
license_link: LICENSE
Added (new README):

---
license: other
license_name: seniru-epasinghe
license_link: LICENSE
language:
- en
base_model:
- Qwen/Qwen2.5-Coder-0.5B
pipeline_tag: text-generation
---

### Fine-tuned Qwen2.5-Coder-0.5B model for Git commit-message generation

Usage example with `llama-cpp-python`:
```
from llama_cpp import Llama

# Path to the quantized GGUF checkpoint on disk.
MODEL_FILE = "qwen0.5-finetuned.gguf"

# Minimal raw-completion prompt: no chat/assistant scaffolding, and nothing
# after "### Commit Message:" so the model is not handed pre-filled text.
PROMPT_TEMPLATE = """Generate a meaningful commit message explaining all the changes in the provided Git diff.

### Git Diff:
{}

### Commit Message:"""

# A small example diff to feed the model: an HTML table replaced by a paragraph.
SAMPLE_DIFF = """
diff --git a/index.html b/index.html
index 89abcde..f123456 100644
--- a/index.html
+++ b/index.html
@@ -5,16 +5,6 @@ <body>
    <h1>Welcome to My Page</h1>

-   <table border="1">
-       <tr>
-           <th>Name</th>
-           <th>Age</th>
-       </tr>
-       <tr>
-           <td>John Doe</td>
-           <td>30</td>
-       </tr>
-   </table>

+   <p>This is a newly added paragraph replacing the table.</p>
</body>
</html>
"""

# Load the GGUF model for raw text completion.
llm = Llama(
    model_path=MODEL_FILE,
    rope_scaling={"type": "linear", "factor": 2.0},
    chat_format=None,  # raw completion — no chat template applied
    n_ctx=32768,       # explicit context window, large enough for long diffs
)

# Fill the diff into the prompt template.
prompt = PROMPT_TEMPLATE.format(SAMPLE_DIFF)

# Sample a short completion for the commit message.
result = llm(
    prompt,
    max_tokens=64,
    temperature=0.6,  # balanced randomness
    top_p=0.8,        # nucleus sampling cutoff
    top_k=50,         # vocabulary shortlist size
)

# Extract the generated text and display it.
message = result["choices"][0]["text"].strip()
print("\nGenerated Commit Message:\n{}".format(message))
```
|