#!/bin/bash
set -euo pipefail
IFS=$'\n\t'
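# Strict mode: -e aborts on any failing command, -u flags unset variables,
# -o pipefail makes a pipeline fail if any stage fails; the tight IFS keeps
# word-splitting from mangling values that contain spaces.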
# === ENV VARIABLES ===
export HF_HOME="$HOME/.cache/huggingface"
export MODEL_NAME="EleutherAI/gpt-neo-1.3B"
export WORK_DIR="$HOME/dev/shx-hfspace"
export VENV_DIR="$WORK_DIR/shx-venv"
export LOG_FILE="$WORK_DIR/shx-setup.log"
export CONFIG_FILE="$WORK_DIR/shx-config.json"
export HF_SPACE_NAME="SHX-Auto"
export HF_USERNAME="subatomicERROR"
# === COLORS ===
RED="\e[91m"
GREEN="\e[92m"
YELLOW="\e[93m"
CYAN="\e[96m"
RESET="\e[0m"
# === SELF-HEAL ===
trap 'echo -e "\n${RED}❌ Error occurred at line $LINENO: $BASH_COMMAND${RESET}" >> "$LOG_FILE"; echo -e "${YELLOW}🔧 Triggering SHX Self-Healing...${RESET}"; shx_self_heal $LINENO "$BASH_COMMAND"' ERR
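# How the ERR trap works: bash expands $LINENO and $BASH_COMMAND at the moment
# a command fails, so the handler below can pattern-match the failed command
# and retry just that step (pip install, HF login, or git push).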
shx_self_heal() {
local line=$1
local cmd="$2"
echo -e "${CYAN}π Self-Healing (Line $line | Command: $cmd)${RESET}"
if [[ "$cmd" == *"pip install"* ]]; then
echo -e "${YELLOW}π Retrying pip install with --no-cache-dir...${RESET}"
pip install --no-cache-dir transformers torch gradio git-lfs huggingface_hub || true
fi
if [[ "$cmd" == *"huggingface-cli login"* ]]; then
echo -e "${YELLOW}π Retrying interactive Hugging Face login...${RESET}"
huggingface-cli login || true
fi
if [[ "$cmd" == *"git push"* ]]; then
echo -e "${YELLOW}π Retrying git push...${RESET}"
git push -u origin main || true
fi
echo -e "${GREEN}β
Self-Heal Complete. Please rerun if needed.${RESET}"
exit 1
}
# === START ===
echo -e "${CYAN}\nπ [SHX] Launching Hyper-Intelligent Setup...\n${RESET}"
# === CLEAN + VENV ===
echo -e "${CYAN}π§Ή Preparing Virtual Environment...${RESET}"
rm -rf "$VENV_DIR"
python3 -m venv "$VENV_DIR"
source "$VENV_DIR/bin/activate"
echo -e "${GREEN}β
Venv activated at $VENV_DIR${RESET}"
# === DEPENDENCIES ===
echo -e "${CYAN}\nπ¦ Installing Python packages...${RESET}"
pip install --upgrade pip
pip install --no-cache-dir transformers torch gradio git-lfs huggingface_hub
# === CHECK TORCH ===
echo -e "${CYAN}π§ Verifying PyTorch...\n${RESET}"
PYTORCH_VERSION=$(python3 -c "import torch; print(torch.__version__)")
echo -e "${GREEN}β
PyTorch: $PYTORCH_VERSION${RESET}"
# === AUTHENTICATION ===
echo -e "\n${CYAN}π Enter your Hugging Face token:${RESET}"
read -s hf_token
huggingface-cli login --token "$hf_token"
export HF_TOKEN="$hf_token"
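# HF_TOKEN is also read by huggingface_hub itself, so later downloads and
# pushes in this script stay authenticated without another login prompt.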
whoami_output=$(huggingface-cli whoami)
echo -e "${GREEN}β
Logged in as: $whoami_output${RESET}"
# === MODEL SELECTION ===
echo -e "\n${CYAN}π§ Select a model (default: EleutherAI/gpt-neo-1.3B):${RESET}"
read -p "Model name: " selected_model
MODEL_NAME=${selected_model:-EleutherAI/gpt-neo-1.3B}
export HF_MODEL="$MODEL_NAME"
# === CLEAR BROKEN CACHE ===
echo -e "${CYAN}\nπ Clearing broken cache for $MODEL_NAME...${RESET}"
rm -rf ~/.cache/huggingface/hub/models--EleutherAI--gpt-neo-1.3B
# === MODEL DOWNLOAD ===
echo -e "${CYAN}\nπ Downloading $MODEL_NAME Model (via GPTNeoForCausalLM)...\n${RESET}"
python3 - <<EOF
from transformers import GPT2Tokenizer, GPTNeoForCausalLM
print("π Downloading tokenizer & model (GPTNeoForCausalLM)...")
tokenizer = GPT2Tokenizer.from_pretrained("$MODEL_NAME")
tokenizer.pad_token = tokenizer.eos_token
model = GPTNeoForCausalLM.from_pretrained("$MODEL_NAME")
print("β
Model ready (GPTNeoForCausalLM).")
EOF
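# from_pretrained caches the weights under $HF_HOME, so the Gradio app and
# the final test below reuse this download instead of fetching again.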
# === GRADIO APP ===
echo -e "${CYAN}π₯οΈ Writing Gradio Interface...${RESET}"
cat <<EOF > "$WORK_DIR/app.py"
import gradio as gr
from transformers import GPT2Tokenizer, GPTNeoForCausalLM
import torch
import json
import os

# Load generation settings from the config file that sits next to this script,
# so the app also works once the repo is pushed to a Space.
CONFIG_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), "shx-config.json")
with open(CONFIG_PATH, "r") as f:
    config = json.load(f)

tokenizer = GPT2Tokenizer.from_pretrained(config["model_name"])
tokenizer.pad_token = tokenizer.eos_token
model = GPTNeoForCausalLM.from_pretrained(config["model_name"])

chat_history = []

def shx_terminal(prompt):
    inputs = tokenizer(prompt, return_tensors="pt", padding=True)
    input_ids = inputs.input_ids
    attention_mask = inputs.attention_mask
    pad_token_id = tokenizer.eos_token_id
    try:
        with torch.no_grad():
            output = model.generate(
                input_ids=input_ids,
                attention_mask=attention_mask,
                pad_token_id=pad_token_id,
                max_length=config["max_length"],
                temperature=config["temperature"],
                top_k=config["top_k"],
                top_p=config["top_p"],
                do_sample=True
            )
        response = tokenizer.decode(output[0], skip_special_tokens=True)
        chat_history.append((prompt, response))
        return response, chat_history
    except Exception as e:
        return f"⚠️ SHX caught an error during generation:\\n{str(e)}", chat_history

with gr.Blocks(css="body { background-color: black; color: #00FF41; font-family: monospace; }") as demo:
    gr.Markdown("## 🤖 **SHX-Auto: Multiversal System Builder**")
    with gr.Row():
        with gr.Column():
            input_box = gr.Textbox(label="Your Command")
            output_box = gr.Textbox(label="SHX Response")
            run_btn = gr.Button("Run")
        with gr.Column():
            chat_box = gr.Chatbot(label="Chat History")
    # Wire the click after both columns exist so chat_box is defined; the
    # Chatbot renders the returned (prompt, response) pairs directly.
    run_btn.click(shx_terminal, inputs=[input_box], outputs=[output_box, chat_box])

demo.launch()
EOF
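# To preview the interface locally before pushing, run the app from WORK_DIR:
#   (cd "$WORK_DIR" && python3 app.py)
# Gradio serves on http://127.0.0.1:7860 by default.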
# === REQUIREMENTS & README ===
echo -e "${CYAN}π¦ Writing requirements.txt and README.md...${RESET}"
cat <<EOF > "$WORK_DIR/requirements.txt"
transformers
torch
gradio
git-lfs
huggingface_hub
EOF
cat <<EOF > "$WORK_DIR/README.md"
# SHX-Auto: Multiversal System Builder
## 🤯 GPT-Neo-based automation terminal agent for quantum-native devs.
✨ By: subatomicERROR
EOF
# === CONFIGURATION FILE ===
echo -e "${CYAN}βοΈ Writing configuration file...${RESET}"
cat <<EOF > "$WORK_DIR/shx-config.json"
{
"model_name": "$MODEL_NAME",
"max_length": 150,
"temperature": 0.7,
"top_k": 50,
"top_p": 0.9
}
EOF
# === FINAL TEST ===
echo -e "${CYAN}\nπ§ͺ Running Final Test...${RESET}"
python3 - <<EOF
from transformers import GPT2Tokenizer, GPTNeoForCausalLM
import json
# Load configuration
with open("$WORK_DIR/shx-config.json", "r") as f:
config = json.load(f)
tokenizer = GPT2Tokenizer.from_pretrained(config["model_name"])
tokenizer.pad_token = tokenizer.eos_token
model = GPTNeoForCausalLM.from_pretrained(config["model_name"])
prompt = "SHX is"
inputs = tokenizer(prompt, return_tensors="pt", padding=True)
output = model.generate(
    input_ids=inputs.input_ids,
    attention_mask=inputs.attention_mask,
    pad_token_id=tokenizer.eos_token_id,
    max_length=config["max_length"],
    temperature=config["temperature"],
    top_k=config["top_k"],
    top_p=config["top_p"],
    do_sample=True
)
print("🧠 SHX Test Output:", tokenizer.decode(output[0], skip_special_tokens=True))
EOF
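# Note: do_sample=True means this smoke test prints different text on every
# run; any coherent completion of "SHX is" counts as a pass.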
echo -e "\n${GREEN}β
SHX is FULLY ONLINE and OPERATIONAL (with $MODEL_NAME)!${RESET}"
echo -e "${CYAN}π Access: https://huggingface.co/spaces/$HF_USERNAME/$HF_SPACE_NAME${RESET}"
# === AI-DRIVEN AUTOMATION ===
echo -e "${CYAN}\nπ€ Initializing AI-Driven Automation...${RESET}"
cat <<EOF > "$WORK_DIR/shx-ai.py"
import json
import subprocess
from transformers import GPT2Tokenizer, GPTNeoForCausalLM

# Load configuration
with open("$WORK_DIR/shx-config.json", "r") as f:
    config = json.load(f)

tokenizer = GPT2Tokenizer.from_pretrained(config["model_name"])
tokenizer.pad_token = tokenizer.eos_token
model = GPTNeoForCausalLM.from_pretrained(config["model_name"])

def run_command(command):
    # Helper for executing shell commands that SHX proposes.
    try:
        result = subprocess.run(command, shell=True, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
        return result.stdout
    except subprocess.CalledProcessError as e:
        return f"⚠️ Error: {e.stderr}"

def shx_ai(prompt):
    # Generate a response with the model directly; app.py only serves the
    # Gradio UI and does not accept a --prompt flag.
    inputs = tokenizer(prompt, return_tensors="pt", padding=True)
    output = model.generate(
        input_ids=inputs.input_ids,
        attention_mask=inputs.attention_mask,
        pad_token_id=tokenizer.eos_token_id,
        max_length=config["max_length"],
        temperature=config["temperature"],
        top_k=config["top_k"],
        top_p=config["top_p"],
        do_sample=True
    )
    return tokenizer.decode(output[0], skip_special_tokens=True)

# Example usage
if __name__ == "__main__":
    prompt = "Create a simple web application with a form to collect user data."
    response = shx_ai(prompt)
    print(f"🤖 SHX Response: {response}")
EOF
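# A quick smoke test of the generated helper (assumes the venv is still active):
#   python3 "$WORK_DIR/shx-ai.py"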
echo -e "${GREEN}β
AI-Driven Automation Initialized. Ready to build almost anything!${RESET}"
# === FINAL MESSAGE ===
echo ""
echo "π βοΈ Boom your SHX is ready! And now fully configured."
echo ""
echo "β
PyTorch: $PYTORCH_VERSION"
echo "β
Model: $HF_MODEL"
echo "β
Hugging Face Token saved for: $HF_USERNAME"
echo ""
echo "π οΈ Now to push your SHX Space manually to Hugging Face, follow these final steps:"
echo ""
echo "1. Initialize git in this folder:"
echo " git init"
echo ""
echo "2. Commit your SHX files:"
echo " git add . && git commit -m \"Initial SHX commit\""
echo ""
echo "3. Create the Space manually (choose SDK: gradio/static/etc):"
echo " huggingface-cli repo create SHX-Auto --type space --space-sdk gradio"
echo ""
echo "4. Add remote:"
echo " git remote add origin https://huggingface.co/spaces/$HF_USERNAME/SHX-Auto"
echo ""
echo "5. Push your space:"
echo " git branch -M main && git push -u origin main"
echo ""
echo "π After that, visit: https://huggingface.co/spaces/$HF_USERNAME/SHX-Auto"
echo ""
echo "SHX interface will now be live on Hugging Face. HAPPY CODING!"
echo ""
echo "For more information and support, visit our GitHub repository:"
echo "https://github.com/subatomicERROR"
echo "" |