seokhyun119 committed
Commit 4944c37 · 1 Parent(s): df599aa

Model upload
app3.py CHANGED
@@ -1,3 +1,44 @@
- asdasdasd
- asdasdsa
- asdasdasdas
+ import torch
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+ import re  # for sentence splitting
+
+ # 1. Device setup
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+ # 2. Load the Korean GPT-2 model and tokenizer
+ tokenizer = AutoTokenizer.from_pretrained("skt/kogpt2-base-v2")
+ model = AutoModelForCausalLM.from_pretrained("skt/kogpt2-base-v2").to(device)
+
+ # 3. Korean novel generator (returns only the first num_sentences sentences)
+ def generate_korean_story(prompt, max_length=300, num_sentences=4):
+     input_ids = tokenizer.encode(prompt, return_tensors="pt").to(device)
+
+     outputs = model.generate(
+         input_ids,
+         max_length=max_length,
+         min_length=100,
+         do_sample=True,
+         temperature=0.9,
+         top_k=50,
+         top_p=0.95,
+         repetition_penalty=1.2,
+         no_repeat_ngram_size=3,
+         eos_token_id=tokenizer.eos_token_id
+     )
+
+     full_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+     # Split into sentences on sentence-final . / ? / !
+     sentences = re.split(r'(?<=[.?!])\s+', full_text.strip())
+
+     # Keep the first num_sentences sentences and rejoin them
+     story = " ".join(sentences[:num_sentences])
+     return story
+
+ # 4. Entry point
+ if __name__ == "__main__":
+     user_prompt = input("📜 Enter the opening sentence of your novel (in Korean): ")
+     result = generate_korean_story(user_prompt, max_length=500, num_sentences=4)
+
+     print("\n📖 Generated Korean novel (4 sentences):\n")
+     print(result)
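
Note: the skt/kogpt2-base-v2 model card loads its tokenizer as a PreTrainedTokenizerFast with the special tokens spelled out explicitly; if the AutoTokenizer defaults leave eos_token or pad_token unset, generate() can run to max_length instead of stopping cleanly. A minimal sketch following the model card (the token strings are the card's, not new assumptions):

    from transformers import PreTrainedTokenizerFast

    # Special tokens as listed on the skt/kogpt2-base-v2 model card
    tokenizer = PreTrainedTokenizerFast.from_pretrained(
        "skt/kogpt2-base-v2",
        bos_token="</s>", eos_token="</s>", unk_token="<unk>",
        pad_token="<pad>", mask_token="<mask>",
    )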
model/animagine_xl.py ADDED
@@ -0,0 +1,21 @@
+ from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler
+ import torch
+
+ def generate_animagine_xl(prompt: str):
+     model_id = "Linaqruf/animagine-xl"
+     pipe = StableDiffusionXLPipeline.from_pretrained(
+         model_id,
+         torch_dtype=torch.float16,
+         use_safetensors=True,
+         variant="fp16"
+     )
+     pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
+     pipe = pipe.to("cuda")
+     image = pipe(prompt=prompt, width=1024, height=1024).images[0]
+     image.save("output_animagine_xl.png")
+     print("✅ Saved: output_animagine_xl.png")
+     return image
+
+ if __name__ == "__main__":
+     prompt = "the ambition of a man gazing at her"
+     generate_animagine_xl(prompt)
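
Note: Animagine XL is SDXL-sized, so even the fp16 weights take several gigabytes of VRAM. Where pipe.to("cuda") runs out of memory, diffusers' model CPU offload (backed by the accelerate package) is a drop-in alternative; a small sketch against the pipeline above:

    # Instead of pipe = pipe.to("cuda"):
    pipe.enable_model_cpu_offload()  # keeps each submodule on the GPU only while it runs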
model/animesai.py ADDED
@@ -0,0 +1,24 @@
+ from diffusers import StableDiffusionXLPipeline, AutoencoderKL
+ import torch
+
+ def generate_animesai(prompt: str):
+     model_id = "enhanceaiteam/AnimeSAI"
+     # fp16-safe SDXL VAE: avoids the black-image/NaN issue of the stock VAE in float16
+     vae = AutoencoderKL.from_pretrained(
+         "madebyollin/sdxl-vae-fp16-fix",
+         torch_dtype=torch.float16
+     ).to("cuda")
+     pipe = StableDiffusionXLPipeline.from_pretrained(
+         model_id,
+         vae=vae,
+         torch_dtype=torch.float16,
+         use_safetensors=True
+     ).to("cuda")
+     image = pipe(prompt=prompt, width=1024, height=1024, guidance_scale=7).images[0]
+     image.save("output_animesai.png")
+     print("✅ Saved: output_animesai.png")
+     return image
+
+ if __name__ == "__main__":
+     prompt = "the ambition of a man gazing at her"
+     generate_animesai(prompt)
model/generate_sdxl_with_refiner.py ADDED
@@ -0,0 +1,39 @@
+ from diffusers import StableDiffusionXLPipeline, StableDiffusionXLImg2ImgPipeline
+ import torch
+ from PIL import Image
+
+ def generate_sdxl_with_refiner(prompt: str):
+     # Stage 1: generate an initial image with the base model
+     base_model_id = "stabilityai/stable-diffusion-xl-base-1.0"
+     base_pipe = StableDiffusionXLPipeline.from_pretrained(
+         base_model_id,
+         torch_dtype=torch.float16,
+         variant="fp16",
+         use_safetensors=True
+     ).to("cuda")
+
+     base_image = base_pipe(prompt=prompt, num_inference_steps=30).images[0]
+
+     # Stage 2: polish the image with the refiner model
+     refiner_model_id = "stabilityai/stable-diffusion-xl-refiner-1.0"
+     refiner_pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained(
+         refiner_model_id,
+         torch_dtype=torch.float16,
+         variant="fp16",
+         use_safetensors=True
+     ).to("cuda")
+
+     # The refiner takes the PIL image and post-processes it
+     refined_image = refiner_pipe(
+         prompt=prompt,
+         image=base_image,
+         strength=0.3  # how strongly to rework the base image (0-1)
+     ).images[0]
+
+     refined_image.save("output_sdxl_refined.png")
+     return refined_image
+
+ if __name__ == "__main__":
+     prompt = "the ambition of a man gazing at her"
+     img = generate_sdxl_with_refiner(prompt)
+     img.show()
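
Note: as written, the refiner re-denoises a fully decoded base image, i.e. plain img2img with strength=0.3. The diffusers documentation also describes a latent-space handoff, where the base pipeline stops partway through the denoising schedule and the refiner finishes it. A sketch of that documented pattern, reusing base_pipe and refiner_pipe from above (the 0.8 split point is illustrative):

    # Base covers the first 80% of the schedule and returns latents
    latents = base_pipe(prompt=prompt, num_inference_steps=30,
                        denoising_end=0.8, output_type="latent").images
    # Refiner picks up at the same point and finishes the last 20%
    refined = refiner_pipe(prompt=prompt, image=latents,
                           num_inference_steps=30, denoising_start=0.8).images[0]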
model/ghibli.py ADDED
@@ -0,0 +1,18 @@
+ from diffusers import StableDiffusionPipeline
+ import torch
+
+ def generate_ghibli(prompt: str):
+     model_id = "nitrosocke/Ghibli-Diffusion"
+     pipe = StableDiffusionPipeline.from_pretrained(
+         model_id,
+         torch_dtype=torch.float16,
+         use_safetensors=True
+     ).to("cuda")
+     image = pipe(prompt=prompt).images[0]
+     image.save("output_ghibli.png")
+     print("✅ Saved: output_ghibli.png")
+     return image
+
+ if __name__ == "__main__":
+     prompt = "the ambition of a man gazing at her"
+     generate_ghibli(prompt)
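
Note: the nitrosocke/Ghibli-Diffusion model card asks for the trigger phrase "ghibli style" in the prompt; without it, the fine-tuned style may not activate. Prepending it is a cheap safeguard:

    image = pipe(prompt="ghibli style, " + prompt).images[0]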
model/realistic.py ADDED
@@ -0,0 +1,21 @@
+ from diffusers import StableDiffusionPipeline
+ import torch
+
+ def generate_realistic(prompt: str):
+     model_id = "SG161222/Realistic_Vision_V6.0_B1_noVAE"
+     pipe = StableDiffusionPipeline.from_pretrained(
+         model_id,
+         torch_dtype=torch.float16,
+         use_safetensors=False,
+         # variant="fp16"
+     ).to("cuda")
+
+     image = pipe(prompt=prompt).images[0]
+     return image
+
+ if __name__ == "__main__":
+     prompt = "the ambition of a man gazing at her"
+     image = generate_realistic(prompt)
+     image.save("output_realistic.png")
+     print("✅ Saved: output_realistic.png")
+     image.show()
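
Note: the noVAE suffix means this checkpoint ships without a tuned VAE, so outputs can look washed out; the model author recommends attaching an external one. A sketch of doing so (stabilityai/sd-vae-ft-mse is a common pairing, an assumption here rather than something this commit pins):

    from diffusers import AutoencoderKL

    vae = AutoencoderKL.from_pretrained(
        "stabilityai/sd-vae-ft-mse", torch_dtype=torch.float16
    )
    pipe = StableDiffusionPipeline.from_pretrained(
        model_id, vae=vae, torch_dtype=torch.float16, use_safetensors=False
    ).to("cuda")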
model/sd_turbo.py ADDED
@@ -0,0 +1,21 @@
+ from diffusers import StableDiffusionPipeline
+ import torch
+
+ def generate_sd_turbo(prompt: str):
+     model_id = "stabilityai/sd-turbo"
+
+     pipe = StableDiffusionPipeline.from_pretrained(
+         model_id,
+         torch_dtype=torch.float16,
+         use_safetensors=True
+     ).to("cuda")
+
+     image = pipe(prompt=prompt, guidance_scale=0.0).images[0]
+     image.save("output_sd_turbo.png")
+     return image
+
+ if __name__ == "__main__":
+     prompt = "the ambition of a man gazing at her"
+     img = generate_sd_turbo(prompt)
+     print("✅ Saved: output_sd_turbo.png")
+     img.show()
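
Note: sd-turbo is a distilled model built for 1-4 sampling steps; guidance_scale=0.0 above matches the model card, but without also lowering num_inference_steps the call still runs the pipeline's default 50 steps. The card's own usage is closer to:

    from diffusers import AutoPipelineForText2Image

    pipe = AutoPipelineForText2Image.from_pretrained(
        "stabilityai/sd-turbo", torch_dtype=torch.float16, variant="fp16"
    ).to("cuda")
    image = pipe(prompt=prompt, num_inference_steps=1, guidance_scale=0.0).images[0]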
model/waifu.py ADDED
@@ -0,0 +1,21 @@
+ from diffusers import StableDiffusionPipeline
+ import torch
+
+ def generate_waifu(prompt: str):
+     model_id = "hakurei/waifu-diffusion"
+     pipe = StableDiffusionPipeline.from_pretrained(
+         model_id,
+         torch_dtype=torch.float16,
+         use_safetensors=False,
+         revision="fp16"
+     ).to("cuda")
+
+     image = pipe(prompt=prompt).images[0]
+     return image
+
+ if __name__ == "__main__":
+     prompt = "the ambition of a man gazing at her"
+     image = generate_waifu(prompt)
+     image.save("output_waifu.png")
+     print("✅ Saved: output_waifu.png")
+     image.show()