## This script helps pull some info from the Hugging Face API
## to speed up the addition of new models to the gallery.
## It accepts a repo_id as input and appends part of the YAML gallery entry to result.yaml.
## Use it as:
## OPENAI_BASE_URL="<api_url>" OPENAI_MODEL="<model>" python scripts/model_gallery_info.py <repo_id>
## Example:
# local-ai run hermes-2-theta-llama-3-8b
# OPENAI_BASE_URL="http://192.168.xx.xx:8080" OPENAI_MODEL="hermes-2-theta-llama-3-8b" python scripts/model_gallery_info.py mradermacher/HaloMaidRP-v1.33-15B-L3-i1-GGUF

import sys
import os
from openai import OpenAI
from huggingface_hub import HfFileSystem, get_paths_info

templated_yaml = """
- !!merge <<: *llama3
  name: "{model_name}"
  urls:
    - https://huggingface.co/{repo_id}
  description: |
    {description}
  overrides:
    parameters:
      model: {file_name}
  files:
    - filename: {file_name}
      sha256: {checksum}
      uri: huggingface://{repo_id}/{file_name}
"""

# The OpenAI client reads OPENAI_BASE_URL and OPENAI_API_KEY from the
# environment, so it can target a local OpenAI-compatible endpoint.
client = OpenAI()

# Model used to summarize the README, defaulting to a local model name.
model = os.environ.get("OPENAI_MODEL", "hermes-2-theta-llama-3-8b")


def summarize(text: str) -> str:
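    """Ask the configured LLM to extract only the model description from the README text."""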
    chat_completion = client.chat.completions.create(
        messages=[
            {
                "role": "user",
                "content": "You are a bot which extracts the description of the LLM model from the following text. Return ONLY the description of the model, and nothing else.\n" + text,
            },
        ],
        model=model,
    )

    return chat_completion.choices[0].message.content

def format_description(description):
    """Indent continuation lines so the text nests under the 'description: |' block scalar."""
    return '\n    '.join(description.split('\n'))
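
# For instance (hypothetical input): format_description("line one\nline two")
# yields "line one\n    line two", so each continuation line lines up under
# the "description: |" field of the template above.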

if __name__ == "__main__":
    # Get the repo id from the first CLI argument
    if len(sys.argv) < 2:
        print(f"Usage: {sys.argv[0]} <repo_id>", file=sys.stderr)
        sys.exit(1)
    repo_id = sys.argv[1]

    fs = HfFileSystem()
    all_files = fs.ls(repo_id, detail=False)

    print(all_files)

    # Find the README and a file with Q4_K_M in the name
    file_path = None
    readme_file = None
    for file in all_files:
        print(f"File found: {file}")
        if "readme" in file.lower():
            readme_file = file
            print(f"Found README file: {readme_file}")
        if "q4_k_m" in file.lower():
            file_path = file

    if file_path is None:
        print("No file with Q4_K_M found in the repository, exiting.", file=sys.stderr)
        sys.exit(1)

    # Extract the file name from the full path (it is the last element)
    file_name = file_path.split("/")[-1]

    model_name = repo_id.split("/")[-1]

    # Read the sha256 from the file's LFS metadata (GGUF weights are LFS-tracked)
    checksum = None
    for file in get_paths_info(repo_id, [file_name], repo_type='model'):
        try:
            checksum = file.lfs.sha256
            break
        except Exception as e:
            # file.lfs is None for non-LFS files, which also ends up here
            print(f'Could not read sha256 from Hugging Face Hub: {str(e)}', file=sys.stderr)
            sys.exit(2)

    print(checksum)
    print(file_name)
    print(file_path)

    summarized_readme = ""

    if readme_file:
        # If there is a README file, read it and summarize it with the LLM
        readme = fs.read_text(readme_file)
        summarized_readme = summarize(readme)
        summarized_readme = format_description(summarized_readme)

    print("Model correctly processed")
    ## Append to the result YAML file
    with open("result.yaml", "a") as f:
        f.write(templated_yaml.format(model_name=model_name.lower().replace("-GGUF","").replace("-gguf",""), repo_id=repo_id, description=summarized_readme, file_name=file_name, checksum=checksum, file_path=file_path))