import torch
import spaces
import gradio as gr
import random
import numpy as np
from diffusers import ZImagePipeline

# Load the pipeline once at startup
print("Loading Z-Image-Turbo pipeline...")
pipe = ZImagePipeline.from_pretrained(
    "Tongyi-MAI/Z-Image-Turbo",
    torch_dtype=torch.bfloat16,
    low_cpu_mem_usage=False,
)
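# Second, unmodified copy of the pipeline, kept LoRA-free as the baseline for side-by-side comparison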
pipe_no_lora = ZImagePipeline.from_pretrained(
    "Tongyi-MAI/Z-Image-Turbo",
    torch_dtype=torch.bfloat16,
    low_cpu_mem_usage=False,
)
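# Load the AWPortrait-Z LoRA, fuse it into the base weights at scale 0.9, then drop the adapter so only the fused weights stay in memory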
pipe.load_lora_weights("Shakker-Labs/AWPortrait-Z", weight_name="AWPortrait-Z.safetensors", adapter_name="lora")
pipe.set_adapters(["lora"], adapter_weights=[1.0])
pipe.fuse_lora(adapter_names=["lora"], lora_scale=0.9)
pipe.unload_lora_weights()
pipe.to("cuda")
pipe_no_lora.to("cuda")

# ======== AoTI compilation + FA3 ========
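# Mark the repeated transformer block class and load pre-compiled AoTInductor graphs (FlashAttention-3 variant) from zerogpu-aoti/Z-Image for both pipelines.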
pipe.transformer.layers._repeated_blocks = ["ZImageTransformerBlock"]
spaces.aoti_blocks_load(pipe.transformer.layers, "zerogpu-aoti/Z-Image", variant="fa3")

pipe_no_lora.transformer.layers._repeated_blocks = ["ZImageTransformerBlock"]
spaces.aoti_blocks_load(pipe_no_lora.transformer.layers, "zerogpu-aoti/Z-Image", variant="fa3")
MAX_SEED = np.iinfo(np.int32).max
print("Pipeline loaded!")

@spaces.GPU
def generate_image(prompt, height, width, num_inference_steps, seed=42, randomize_seed=True, progress=gr.Progress(track_tqdm=True)):
    """Generate an image from the given prompt."""
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
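    # Run the same prompt through both pipelines with an identical seed so the LoRA and baseline outputs are directly comparable.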
    image = pipe(
        prompt=prompt,
        height=int(height),
        width=int(width),
        num_inference_steps=int(num_inference_steps),
        guidance_scale=0.0,  # Guidance should be 0 for Turbo models
        generator=torch.Generator(device="cuda").manual_seed(seed)
    ).images[0]

    image_no_lora = pipe_no_lora(
        prompt=prompt,
        height=int(height),
        width=int(width),
        num_inference_steps=int(num_inference_steps),
        guidance_scale=0.0,  # Guidance should be 0 for Turbo models
        generator=torch.Generator(device="cuda").manual_seed(seed)
    ).images[0]
    
    return (image_no_lora, image), seed


# Example prompts
examples = [
    ["A dramatic close-up high-fashion portrait with avant-garde futuristic styling, metallic accents, sculptural makeup, glowing rim light, hyperreal detail, cool-toned color palette, glossy finish, fashion campaign quality."],
]
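# Custom CSS: constrain the layout width and keep text readable in dark mode.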
css = """
#col-container { max-width: 950px; margin: 0 auto; }
.dark .progress-text { color: white !important; }

#examples { max-width: 950px; margin: 0 auto; }

.dark #examples button,
.dark #examples .example,
.dark #examples span {
    color: white !important;
}
"""
# Build the Gradio interface
with gr.Blocks(theme=gr.themes.Citrus(), css=css) as demo:
    
    with gr.Column(elem_id="col-container"):
        gr.Markdown(
        """
        # Z-Image-Turbo Portrait✨
        
        Generate high-quality portrait images with [Tongyi-MAI/Z-Image-Turbo](https://huggingface.co/Tongyi-MAI/Z-Image-Turbo) and the [AWPortrait-Z portrait LoRA by @dynamicwangs and Shakker Labs](https://huggingface.co/Shakker-Labs/AWPortrait-Z) for fast inference with enhanced detail.
        This turbo model generates images in just 8 inference steps!
        """
    )
    
        with gr.Row():
            with gr.Column(scale=1):
                prompt = gr.Textbox(
                        label="Prompt",
                        placeholder="Enter your image description...",
                        max_lines=4,
                    )
                generate_btn = gr.Button("Generate", variant="primary")
                with gr.Accordion("Advanced Settings", open=False):
                    with gr.Row():
                        height = gr.Slider(
                            minimum=512,
                            maximum=2048,
                            value=1024,
                            step=64,
                            label="Height",
                        )
                        width = gr.Slider(
                            minimum=512,
                            maximum=2048,
                            value=1024,
                            step=64,
                            label="Width",
                        )
                    
                    with gr.Row():
                        num_inference_steps = gr.Slider(
                            minimum=1,
                            maximum=20,
                            value=9,
                            step=1,
                            label="Inference Steps",
                            info="9 steps results in 8 DiT forwards",
                        )
                    
                    with gr.Row():
                        seed = gr.Number(
                            label="Seed",
                            value=42,
                            precision=0,
                        )
                        randomize_seed = gr.Checkbox(
                            label="Randomize Seed",
                            value=True,
                        )
            with gr.Column(scale=1):    
                output_image = gr.ImageSlider(
                        label="Output (Left-with the LoRA, Right-without)",
                        type="pil",
                    )
            
                
        
        gr.Examples(
            examples=examples,
            inputs=[prompt],
            cache_examples=False,
            elem_id="examples", 
        )
    
        
    # Connect the generate button
    generate_btn.click(
        fn=generate_image,
        inputs=[prompt, height, width, num_inference_steps, seed, randomize_seed],
        outputs=[output_image, seed],
    )
    
    # Also allow generating by pressing Enter in the prompt box
    prompt.submit(
        fn=generate_image,
        inputs=[prompt, height, width, num_inference_steps, seed, randomize_seed],
        outputs=[output_image, seed],
    )

if __name__ == "__main__":
    demo.launch(mcp_server=True)