Spaces: Running on Zero

Upload 2 files

- app.py +13 -12
- modutils.py +9 -8
app.py
CHANGED

@@ -344,8 +344,9 @@ class GuiSD:
             vae_model=None,
             type_model_precision=torch.float16,
             retain_task_model_in_cache=False,
-            device="cpu",
+            #device="cpu",
         )
+        self.model.device = torch.device("cpu") #

     def infer_short(self, model, pipe_params, progress=gr.Progress(track_tqdm=True)):
         progress(0, desc="Start inference...")
@@ -725,12 +726,12 @@ class GuiSD:
 sd_gen = GuiSD()

 #@spaces.GPU
-def sd_gen_load_new_model(*args, progress=gr.Progress(track_tqdm=True)):
-    yield from sd_gen.load_new_model(*args, progress)
+#def sd_gen_load_new_model(*args, progress=gr.Progress(track_tqdm=True)):
+#    yield from sd_gen.load_new_model(*args, progress)

-
-def sd_gen_generate_pipeline(*args, progress=gr.Progress(track_tqdm=True)):
-    yield from sd_gen.generate_pipeline(*args, progress)
+#@spaces.GPU
+#def sd_gen_generate_pipeline(*args, progress=gr.Progress(track_tqdm=True)):
+#    yield from sd_gen.generate_pipeline(*args, progress)

 ## BEGIN MOD
 CSS ="""
@@ -1250,8 +1251,8 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', elem_id="main", fill_width=True, cs
                 "votepurchase/animagine-xl-3.1",
             ],
         ],
-
-        fn=sd_gen_generate_pipeline,
+        fn=sd_gen.generate_pipeline,
+        #fn=sd_gen_generate_pipeline,
         inputs=[
             prompt_gui,
             neg_prompt_gui,
@@ -1430,8 +1431,8 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', elem_id="main", fill_width=True, cs
     translate_prompt_button.click(translate_prompt, [series_dbt], [series_dbt], queue=False)

     generate_button.click(
-
-        fn=sd_gen_load_new_model,
+        fn=sd_gen.load_new_model,
+        #fn=sd_gen_load_new_model,
         inputs=[
             model_name_gui,
             vae_model_gui,
@@ -1441,8 +1442,8 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', elem_id="main", fill_width=True, cs
         queue=True,
         show_progress="minimal",
     ).success(
-
-        fn=sd_gen_generate_pipeline,
+        fn=sd_gen.generate_pipeline,
+        #fn=sd_gen_generate_pipeline,
         inputs=[
             prompt_gui,
             neg_prompt_gui,
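The first hunk stops passing device="cpu" into the loader call and instead pins the device attribute after construction; on ZeroGPU Spaces, CUDA is only usable inside functions decorated with @spaces.GPU, so the model has to sit on CPU between requests. A minimal sketch of that pattern, using a hypothetical stand-in class rather than the Space's real loader:

import torch

class DummyLoader:
    # hypothetical stand-in for the pipeline wrapper built in GuiSD.__init__
    def __init__(self, type_model_precision=torch.float16):
        self.net = torch.nn.Linear(4, 4).to(type_model_precision)  # weights start on CPU
        self.device = torch.device("cuda")  # a wrapper might default to CUDA here

model = DummyLoader()
model.device = torch.device("cpu")  # the post-construction override from the hunk
assert model.device.type == "cpu"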
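The remaining app.py hunks retire the module-level wrapper functions and bind the GuiSD methods directly into the event chain. A self-contained sketch of the .click().success() pattern used above, with illustrative names (Worker, load_model, and generate are not the Space's real identifiers):

import gradio as gr

class Worker:
    def load_model(self, name, progress=gr.Progress(track_tqdm=True)):
        yield f"{name} loaded"

    def generate(self, prompt, progress=gr.Progress(track_tqdm=True)):
        yield f"image for: {prompt}"

worker = Worker()

with gr.Blocks() as demo:
    model_name = gr.Textbox(value="some/model")
    prompt = gr.Textbox(value="1girl, solo")
    status = gr.Textbox(label="status")
    result = gr.Textbox(label="result")
    btn = gr.Button("Generate")
    # bound methods work as event handlers; .success() fires only if
    # the preceding handler finished without raising
    btn.click(worker.load_model, [model_name], [status],
              queue=True, show_progress="minimal",
              ).success(worker.generate, [prompt], [result])

if __name__ == "__main__":
    demo.launch()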
modutils.py
CHANGED

@@ -39,7 +39,6 @@ def get_local_model_list(dir_path):

 def download_things(directory, url, hf_token="", civitai_api_key=""):
     url = url.strip()
-
     if "drive.google.com" in url:
         original_dir = os.getcwd()
         os.chdir(directory)
@@ -187,10 +186,10 @@ def get_model_id_list():
     try:
         models_likes = []
         for author in HF_MODEL_USER_LIKES:
-            models_likes.extend(api.list_models(author=author, cardData=True, sort="likes"))
+            models_likes.extend(api.list_models(author=author, task="text-to-image", cardData=True, sort="likes"))
         models_ex = []
         for author in HF_MODEL_USER_EX:
-            models_ex = api.list_models(author=author, cardData=True, sort="last_modified")
+            models_ex = api.list_models(author=author, task="text-to-image", cardData=True, sort="last_modified")
     except Exception as e:
         print(f"Error: Failed to list {author}'s models.")
         print(e)
@@ -200,8 +199,8 @@ def get_model_id_list():
     anime_models = []
     real_models = []
     for model in models_ex:
-        if not model.private:
-            anime_models.append(model.id) if
+        if not model.private and not model.gated and "diffusers:FluxPipeline" not in model.tags:
+            anime_models.append(model.id) if "anime" in model.tags else real_models.append(model.id)
     model_ids.extend(anime_models)
     model_ids.extend(real_models)
     model_id_list = model_ids.copy()
@@ -252,6 +251,8 @@ def get_tupled_model_list(model_list):
         tags = model.tags
         info = []
         if not 'diffusers' in tags: continue
+        if 'diffusers:FluxPipeline' in tags:
+            info.append("FLUX.1")
         if 'diffusers:StableDiffusionXLPipeline' in tags:
             info.append("SDXL")
         elif 'diffusers:StableDiffusionPipeline' in tags:
@@ -858,7 +859,7 @@ def find_similar_lora(q: str):
 def change_interface_mode(mode: str):
     if mode == "Fast":
         return gr.update(open=False), gr.update(visible=True), gr.update(open=False), gr.update(open=False),\
-            gr.update(visible=True), gr.update(open=False), gr.update(visible=True), gr.update(open=
+            gr.update(visible=True), gr.update(open=False), gr.update(visible=True), gr.update(open=False),\
             gr.update(visible=True), gr.update(value="Fast")
     elif mode == "Simple": # t2i mode
         return gr.update(open=True), gr.update(visible=True), gr.update(open=False), gr.update(open=False),\
@@ -866,11 +867,11 @@ def change_interface_mode(mode: str):
             gr.update(visible=False), gr.update(value="Standard")
     elif mode == "LoRA": # t2i LoRA mode
         return gr.update(open=True), gr.update(visible=True), gr.update(open=True), gr.update(open=False),\
-            gr.update(visible=True), gr.update(open=True), gr.update(visible=True), gr.update(open=
+            gr.update(visible=True), gr.update(open=True), gr.update(visible=True), gr.update(open=False),\
             gr.update(visible=False), gr.update(value="Standard")
     else: # Standard
         return gr.update(open=False), gr.update(visible=True), gr.update(open=False), gr.update(open=False),\
-            gr.update(visible=True), gr.update(open=False), gr.update(visible=True), gr.update(open=
+            gr.update(visible=True), gr.update(open=False), gr.update(visible=True), gr.update(open=False),\
             gr.update(visible=True), gr.update(value="Standard")
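The get_model_id_list hunks narrow the Hub query to text-to-image repositories and skip private, gated, and FLUX checkpoints. A minimal sketch of that filtering, assuming a recent huggingface_hub (the author below is only an example value):

from huggingface_hub import HfApi

api = HfApi()
# task= restricts results to one pipeline tag, as the commit does
models = api.list_models(author="Linaqruf", task="text-to-image",
                         cardData=True, sort="likes", limit=10)
anime_models, real_models = [], []
for m in models:
    tags = m.tags or []
    # same gating as the hunk: no private/gated repos, no FLUX checkpoints
    if not m.private and not getattr(m, "gated", False) and "diffusers:FluxPipeline" not in tags:
        (anime_models if "anime" in tags else real_models).append(m.id)
print(anime_models, real_models)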
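The change_interface_mode hunks complete return lines that had apparently been cut off mid-call (ending in gr.update(open=), which would have been a syntax error in the old file. Each gr.update(...) in those tuples targets one output component, so the tuple length and order must match the listener's outputs list. A tiny sketch of that contract, with a hypothetical component set:

import gradio as gr

def change_mode(mode: str):
    fast = mode == "Fast"
    # one update per output component, in the same order as outputs=
    return gr.update(open=not fast), gr.update(visible=True), gr.update(value=mode)

with gr.Blocks() as demo:
    mode = gr.Radio(["Fast", "Standard"], value="Standard")
    acc = gr.Accordion("Advanced", open=True)
    box = gr.Textbox(visible=True)
    label = gr.Textbox()
    mode.change(change_mode, [mode], [acc, box, label])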