Changed to a new API endpoint
app.py
CHANGED
@@ -12,7 +12,6 @@ with open('style.css') as f:
 
 # Initialize the HuggingFace Inference Client
 text_client = InferenceClient(model="mistralai/Mistral-7B-Instruct-v0.1")
-#image_client = Client("Boboiazumi/animagine-xl-3.1")
 image_client = Client("phenixrhyder/nsfw-waifu-gradio")
 
 def format_prompt_for_description(name, hair_color, personality, outfit_style, hobbies, favorite_food, background_story):
@@ -49,35 +48,16 @@ def generate_text(prompt, temperature=0.9, max_new_tokens=512, top_p=0.95, repet
         st.error(f"Error generating text: {e}")
         return ""
 
+# Updated part for the new API
 def generate_image(prompt):
     try:
         result = image_client.predict(
-
-            negative_prompt="",
-            seed=0,
-            custom_width=1024,
-            custom_height=1024,
-            guidance_scale=7.0,
-            num_inference_steps=28,
-            sampler="Euler a",
-            aspect_ratio_selector="896 x 1152",
-            style_selector="(None)",
-            quality_selector="Standard v3.1",
-            use_upscaler=False,
-            upscaler_strength=0.55,
-            upscale_by=1.5,
-            add_quality_tags=True,
-            isImg2Img=False,
-            img_path=None,
-            img2img_strength=0.65,
+            param_0=prompt,
             api_name="/predict"
         )
         # Process and display the result
         if result:
-
-            for image_data in result[0]:
-                images.append(image_data['image'])
-            return images
+            return [result]  # Assuming the API returns a single image path as a result
         else:
            st.error("Unexpected result format from the Gradio API.")
            return None
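For context, the two clients configured in the first hunk come from different libraries: InferenceClient is the huggingface_hub client used for text generation, while Client is the gradio_client class used to call another Space's API. The import statements fall outside the diff, so the following is only a minimal setup sketch of what those lines assume:

import streamlit as st                       # implied by the st.error calls in the hunks
from huggingface_hub import InferenceClient  # hosted text-generation client
from gradio_client import Client             # generic client for a Gradio Space's API

# Text model for character descriptions (unchanged by this commit)
text_client = InferenceClient(model="mistralai/Mistral-7B-Instruct-v0.1")

# Image generation now goes through the phenixrhyder/nsfw-waifu-gradio Space
image_client = Client("phenixrhyder/nsfw-waifu-gradio")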
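Reading the second hunk as a whole: the Animagine-specific keyword arguments (negative_prompt, seed, sampler, upscaler settings, and so on) are dropped, the new Space's /predict endpoint is called with a single param_0 argument, and the raw result is wrapped in a list instead of being unpacked image by image. Here is a sketch of how the patched function reads; the except branch sits outside the hunk, so the handler shown below is an assumption:

# Updated part for the new API
def generate_image(prompt):
    try:
        result = image_client.predict(
            param_0=prompt,      # the new endpoint exposes one unnamed input as param_0
            api_name="/predict"
        )
        # Process and display the result
        if result:
            return [result]  # Assuming the API returns a single image path as a result
        else:
            st.error("Unexpected result format from the Gradio API.")
            return None
    except Exception as e:  # not shown in the diff; a generic handler is assumed here
        st.error(f"Error generating image: {e}")
        return None

If the endpoint's exact signature is in doubt, Client("phenixrhyder/nsfw-waifu-gradio").view_api() prints the parameters and return type that /predict expects, which is a quick way to confirm that param_0 and the single return value match what the Space actually provides.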
|