Spaces: Running on Zero
Fix CUDA initialization error: call compile_models_with_aoti() inside main block after ZeroGPU init
app.py CHANGED
@@ -1726,15 +1726,14 @@ def _pipeline_artistic(
     log_progress(msg, gr_progress, 1.0)
     yield (final_image, msg)

-# Call AOT compilation during startup (only on CUDA, not MPS)
-# This must be after pipeline function definitions
-if not torch.backends.mps.is_available():
-    compile_models_with_aoti()
-else:
-    print("ℹ️ AOT compilation skipped on MPS (MacBook) - using eager mode\n")
-
-
 if __name__ == "__main__" and not os.environ.get("QR_TESTING_MODE"):
+    # Call AOT compilation during startup (only on CUDA, not MPS)
+    # Must be called after module init but before Gradio app launch
+    if not torch.backends.mps.is_available():
+        compile_models_with_aoti()
+    else:
+        print("ℹ️ AOT compilation skipped on MPS (MacBook) - using eager mode\n")
+
     # Start your Gradio app with automatic cache cleanup
     # delete_cache=(3600, 3600) means: check every hour and delete files older than 1 hour
     with gr.Blocks(delete_cache=(3600, 3600)) as app:
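For context, the moved call performs ahead-of-time (AOT) compilation, which touches CUDA and therefore must not run at module import time on a ZeroGPU Space. Below is a minimal sketch of how such a step can be wired up, assuming the `spaces` package's ZeroGPU AoTI helpers (`spaces.GPU`, `aoti_capture`, `aoti_compile`, `aoti_apply`) and a placeholder `pipe` pipeline object; it illustrates the pattern only and is not this Space's actual compile_models_with_aoti() implementation.

import spaces
import torch

# Assumption: `pipe` is a preloaded diffusers-style pipeline defined elsewhere.

@spaces.GPU(duration=1500)  # CUDA is only initialized inside this ZeroGPU task
def compile_models_with_aoti():
    # Record one example call so we have the transformer's real args/kwargs
    with spaces.aoti_capture(pipe.transformer) as call:
        pipe("example prompt")
    # Export the captured call, AOT-compile it, and patch it back into the pipeline
    exported = torch.export.export(pipe.transformer, args=call.args, kwargs=call.kwargs)
    compiled = spaces.aoti_compile(exported)
    spaces.aoti_apply(compiled, pipe.transformer)

if __name__ == "__main__":
    # Calling this at import time would initialize CUDA before ZeroGPU is ready;
    # calling it here, inside the main block, avoids that initialization error.
    if not torch.backends.mps.is_available():
        compile_models_with_aoti()

The key design point matches the commit: keep every CUDA-touching call out of module scope and behind the MPS guard, so the same app.py still starts in eager mode on a MacBook.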