#!/usr/bin/env bash
set -euo pipefail
IFS=$'\n\t'
# === ENV VARIABLES ===
export HF_HOME="$HOME/.cache/huggingface"
export MODEL_NAME="EleutherAI/gpt-neo-1.3B"
export WORK_DIR="$HOME/dev/shx-hfspace"
export VENV_DIR="$WORK_DIR/shx-venv"
export LOG_FILE="$WORK_DIR/shx-setup.log"
export CONFIG_FILE="$WORK_DIR/shx-config.json"
export HF_SPACE_NAME="SHX-Auto"
export HF_USERNAME="subatomicERROR"
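# The work directory must exist before the ERR trap below appends to $LOG_FILE
# (python3 -m venv would create it later, but an early failure would not).
mkdir -p "$WORK_DIR"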
# === COLORS ===
RED="\e[91m"
GREEN="\e[92m"
YELLOW="\e[93m"
CYAN="\e[96m"
RESET="\e[0m"
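# Hedged nicety (assumption: output may be piped or logged): drop the ANSI
# colors when stdout is not a terminal so log files stay readable.
if [[ ! -t 1 ]]; then
    RED="" GREEN="" YELLOW="" CYAN="" RESET=""
fi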
# === SELF-HEAL === | |
trap 'echo -e "\n${RED}β Error occurred at line $LINENO: $BASH_COMMAND${RESET}" >> "$LOG_FILE"; echo -e "${YELLOW}π§ Triggering SHX Self-Healing...${RESET}"; shx_self_heal $LINENO "$BASH_COMMAND"' ERR | |
shx_self_heal() { | |
local line=$1 | |
local cmd="$2" | |
echo -e "${CYAN}π Self-Healing (Line $line | Command: $cmd)${RESET}" | |
if [[ "$cmd" == *"pip install"* ]]; then | |
echo -e "${YELLOW}π Retrying pip install with --no-cache-dir...${RESET}" | |
pip install --no-cache-dir transformers torch gradio git-lfs huggingface_hub || true | |
fi | |
if [[ "$cmd" == *"huggingface-cli login"* ]]; then | |
echo -e "${YELLOW}π Retrying interactive Hugging Face login...${RESET}" | |
huggingface-cli login || true | |
fi | |
if [[ "$cmd" == *"git push"* ]]; then | |
echo -e "${YELLOW}π Retrying git push...${RESET}" | |
git push -u origin main || true | |
fi | |
echo -e "${GREEN}β Self-Heal Complete. Please rerun if needed.${RESET}" | |
exit 1 | |
} | |
# === START ===
echo -e "${CYAN}\n🚀 [SHX] Launching Hyper-Intelligent Setup...\n${RESET}"
# === CLEAN + VENV ===
echo -e "${CYAN}🧹 Preparing Virtual Environment...${RESET}"
rm -rf "$VENV_DIR"
python3 -m venv "$VENV_DIR"
source "$VENV_DIR/bin/activate"
echo -e "${GREEN}✅ Venv activated at $VENV_DIR${RESET}"
# === DEPENDENCIES ===
echo -e "${CYAN}\n📦 Installing Python packages...${RESET}"
pip install --upgrade pip
pip install --no-cache-dir transformers torch gradio git-lfs huggingface_hub
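# Optional sanity check (a minimal sketch): confirm the key imports resolve
# inside the venv before continuing. Torch is verified separately below.
python3 - <<'EOF'
import transformers, gradio, huggingface_hub
print(f"transformers {transformers.__version__}, gradio {gradio.__version__}, huggingface_hub {huggingface_hub.__version__}")
EOF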
# === CHECK TORCH ===
echo -e "${CYAN}🧠 Verifying PyTorch...\n${RESET}"
PYTORCH_VERSION=$(python3 -c "import torch; print(torch.__version__)")
echo -e "${GREEN}✅ PyTorch: $PYTORCH_VERSION${RESET}"
# === AUTHENTICATION ===
echo -e "\n${CYAN}🔑 Enter your Hugging Face token:${RESET}"
read -s hf_token
echo
huggingface-cli login --token "$hf_token"
export HF_TOKEN="$hf_token"
whoami_output=$(huggingface-cli whoami)
echo -e "${GREEN}✅ Logged in as: $whoami_output${RESET}"
# === MODEL SELECTION ===
echo -e "\n${CYAN}🧠 Select a model (default: EleutherAI/gpt-neo-1.3B):${RESET}"
read -p "Model name: " selected_model
MODEL_NAME=${selected_model:-EleutherAI/gpt-neo-1.3B}
export HF_MODEL="$MODEL_NAME"
# === CLEAR BROKEN CACHE ===
echo -e "${CYAN}\n🔄 Clearing broken cache for $MODEL_NAME...${RESET}"
# The hub cache dir replaces "/" with "--"; derive it from the selected model
# instead of hardcoding the default one.
rm -rf "$HF_HOME/hub/models--${MODEL_NAME//\//--}"
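# Alternative: inspect what is actually cached before deleting anything
# (huggingface_hub's CLI ships this subcommand; kept non-fatal here).
huggingface-cli scan-cache || true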
# === MODEL DOWNLOAD ===
echo -e "${CYAN}\n📥 Downloading $MODEL_NAME Model (via GPTNeoForCausalLM)...\n${RESET}"
python3 - <<EOF
from transformers import GPT2Tokenizer, GPTNeoForCausalLM
print("📥 Downloading tokenizer & model (GPTNeoForCausalLM)...")
tokenizer = GPT2Tokenizer.from_pretrained("$MODEL_NAME")
tokenizer.pad_token = tokenizer.eos_token
model = GPTNeoForCausalLM.from_pretrained("$MODEL_NAME")
print("✅ Model ready (GPTNeoForCausalLM).")
EOF
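# Note: GPTNeoForCausalLM only loads GPT-Neo checkpoints. If you selected a
# different model above, the Auto classes are the safer route; a commented
# sketch (same download, model-agnostic):
# python3 - <<EOF
# from transformers import AutoTokenizer, AutoModelForCausalLM
# tokenizer = AutoTokenizer.from_pretrained("$MODEL_NAME")
# model = AutoModelForCausalLM.from_pretrained("$MODEL_NAME")
# EOF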
# === GRADIO APP ===
echo -e "${CYAN}🖥️ Writing Gradio Interface...${RESET}"
cat <<EOF > "$WORK_DIR/app.py"
import gradio as gr
from transformers import GPT2Tokenizer, GPTNeoForCausalLM
import torch
import json
import os

# Load configuration from alongside app.py, so the path also resolves once
# the file is pushed to the Space (an absolute local path would not).
config_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "shx-config.json")
with open(config_path, "r") as f:
    config = json.load(f)

tokenizer = GPT2Tokenizer.from_pretrained(config["model_name"])
tokenizer.pad_token = tokenizer.eos_token
model = GPTNeoForCausalLM.from_pretrained(config["model_name"])

def shx_terminal(prompt, history):
    inputs = tokenizer(prompt, return_tensors="pt", padding=True)
    try:
        with torch.no_grad():
            output = model.generate(
                input_ids=inputs.input_ids,
                attention_mask=inputs.attention_mask,
                pad_token_id=tokenizer.eos_token_id,
                max_length=config["max_length"],
                temperature=config["temperature"],
                top_k=config["top_k"],
                top_p=config["top_p"],
                do_sample=True
            )
        response = tokenizer.decode(output[0], skip_special_tokens=True)
    except Exception as e:
        response = f"⚠️ SHX caught an error during generation:\n{str(e)}"
    # Return the updated history twice: once for the State, once for the Chatbot.
    history = history + [(prompt, response)]
    return response, history, history

with gr.Blocks(css="body { background-color: black; color: #00FF41; font-family: monospace; }") as demo:
    gr.Markdown("## 🤖 **SHX-Auto: Multiversal System Builder**")
    state = gr.State([])
    with gr.Row():
        with gr.Column():
            input_box = gr.Textbox(label="Your Command")
            output_box = gr.Textbox(label="SHX Response")
            run_btn = gr.Button("Run")
        with gr.Column():
            chat_box = gr.Chatbot(label="Chat History")
    run_btn.click(shx_terminal, inputs=[input_box, state], outputs=[output_box, state, chat_box])

demo.launch()
EOF
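# To preview the interface locally before pushing (assumes Gradio's default
# port 7860 is free), run: python3 "$WORK_DIR/app.py"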
# === REQUIREMENTS & README === | |
echo -e "${CYAN}π¦ Writing requirements.txt and README.md...${RESET}" | |
cat <<EOF > "$WORK_DIR/requirements.txt" | |
transformers | |
torch | |
gradio | |
git-lfs | |
huggingface_hub | |
EOF | |
cat <<EOF > "$WORK_DIR/README.md" | |
# SHX-Auto: Multiversal System Builder | |
## π€― GPT-Neo-based automation terminal agent for quantum-native devs. | |
β¨ By: subatomicERROR | |
EOF | |
# === CONFIGURATION FILE ===
echo -e "${CYAN}⚙️ Writing configuration file...${RESET}"
cat <<EOF > "$WORK_DIR/shx-config.json"
{
  "model_name": "$MODEL_NAME",
  "max_length": 150,
  "temperature": 0.7,
  "top_k": 50,
  "top_p": 0.9
}
EOF
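# Quick validation that the generated config is well-formed JSON.
python3 -m json.tool "$WORK_DIR/shx-config.json" > /dev/null && echo -e "${GREEN}✅ shx-config.json is valid JSON${RESET}"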
# === FINAL TEST ===
echo -e "${CYAN}\n🧪 Running Final Test...${RESET}"
python3 - <<EOF
from transformers import GPT2Tokenizer, GPTNeoForCausalLM
import json

# Load configuration
with open("$WORK_DIR/shx-config.json", "r") as f:
    config = json.load(f)

tokenizer = GPT2Tokenizer.from_pretrained(config["model_name"])
tokenizer.pad_token = tokenizer.eos_token
model = GPTNeoForCausalLM.from_pretrained(config["model_name"])

prompt = "SHX is"
inputs = tokenizer(prompt, return_tensors="pt", padding=True)
output = model.generate(
    input_ids=inputs.input_ids,
    attention_mask=inputs.attention_mask,
    pad_token_id=tokenizer.eos_token_id,
    max_length=config["max_length"],
    temperature=config["temperature"],
    top_k=config["top_k"],
    top_p=config["top_p"],
    do_sample=True
)
print("🧠 SHX Test Output:", tokenizer.decode(output[0], skip_special_tokens=True))
EOF
echo -e "\n${GREEN}β SHX is FULLY ONLINE and OPERATIONAL (with $MODEL_NAME)!${RESET}" | |
echo -e "${CYAN}π Access: https://huggingface.co/spaces/$HF_USERNAME/$HF_SPACE_NAME${RESET}" | |
# === AI-DRIVEN AUTOMATION ===
echo -e "${CYAN}\n🤖 Initializing AI-Driven Automation...${RESET}"
cat <<EOF > "$WORK_DIR/shx-ai.py"
import json
import os
import subprocess

# Load configuration from alongside this script
config_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "shx-config.json")
with open(config_path, "r") as f:
    config = json.load(f)

def run_command(command):
    """Run a shell command and return its output (or the error text)."""
    try:
        result = subprocess.run(command, shell=True, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
        return result.stdout
    except subprocess.CalledProcessError as e:
        return f"⚠️ Error: {e.stderr}"

def shx_ai(prompt):
    # Generate with the model directly: app.py launches the Gradio UI and does
    # not parse a --prompt flag, so it cannot be shelled out to for this.
    from transformers import GPT2Tokenizer, GPTNeoForCausalLM
    tokenizer = GPT2Tokenizer.from_pretrained(config["model_name"])
    tokenizer.pad_token = tokenizer.eos_token
    model = GPTNeoForCausalLM.from_pretrained(config["model_name"])
    inputs = tokenizer(prompt, return_tensors="pt", padding=True)
    output = model.generate(
        input_ids=inputs.input_ids,
        attention_mask=inputs.attention_mask,
        pad_token_id=tokenizer.eos_token_id,
        max_length=config["max_length"],
        temperature=config["temperature"],
        top_k=config["top_k"],
        top_p=config["top_p"],
        do_sample=True,
    )
    return tokenizer.decode(output[0], skip_special_tokens=True)

# Example usage
if __name__ == "__main__":
    prompt = "Create a simple web application with a form to collect user data."
    response = shx_ai(prompt)
    print(f"🤖 SHX Response: {response}")
EOF
echo -e "${GREEN}✅ AI-Driven Automation Initialized. Ready to build almost anything!${RESET}"
# === FINAL MESSAGE ===
echo ""
echo "🎉 ✔️ Boom! Your SHX is ready and fully configured."
echo ""
echo "✅ PyTorch: $PYTORCH_VERSION"
echo "✅ Model: $HF_MODEL"
echo "✅ Hugging Face token saved for: $HF_USERNAME"
echo ""
echo "🛠️ To push your SHX Space manually to Hugging Face, follow these final steps:"
echo ""
echo "1. Initialize git in your SHX folder:"
echo "   cd $WORK_DIR && git init"
echo ""
echo "2. Commit your SHX files:"
echo "   git add . && git commit -m \"Initial SHX commit\""
echo ""
echo "3. Create the Space manually (choose SDK: gradio/static/etc):"
echo "   huggingface-cli repo create $HF_SPACE_NAME --type space --space_sdk gradio"
echo ""
echo "4. Add the remote:"
echo "   git remote add origin https://huggingface.co/spaces/$HF_USERNAME/$HF_SPACE_NAME"
echo ""
echo "5. Push your Space:"
echo "   git branch -M main && git push -u origin main"
echo ""
echo "🌐 After that, visit: https://huggingface.co/spaces/$HF_USERNAME/$HF_SPACE_NAME"
echo ""
echo "The SHX interface will then be live on Hugging Face. HAPPY CODING!"
echo ""
echo "For more information and support, visit our GitHub repository:"
echo "https://github.com/subatomicERROR"
echo ""