Skip to content

Commit 388b306

Browse files
authored
feat(api-nodes): network client v2: async ops, cancellation, downloads, refactor (Comfy-Org#10390)
* feat(api-nodes): implement new API client for V3 nodes * feat(api-nodes): implement new API client for V3 nodes * feat(api-nodes): implement new API client for V3 nodes * converted WAN nodes to use new client; polishing * fix(auth): do not leak authentication for the absolute urls * convert BFL API nodes to use new API client; remove deprecated BFL nodes * converted Google Veo nodes * fix(Veo3.1 model): take into account "generate_audio" parameter
1 parent 24188b3 commit 388b306

29 files changed

+2933
-2296
lines changed

comfy_api_nodes/apinode_utils.py

Lines changed: 2 additions & 433 deletions
Large diffs are not rendered by default.

comfy_api_nodes/apis/bfl_api.py

Lines changed: 3 additions & 48 deletions
Original file line numberDiff line numberDiff line change
@@ -50,44 +50,6 @@ class BFLFluxFillImageRequest(BaseModel):
5050
mask: str = Field(None, description='A Base64-encoded string representing the mask of the areas you with to modify.')
5151

5252

53-
class BFLFluxCannyImageRequest(BaseModel):
54-
prompt: str = Field(..., description='Text prompt for image generation')
55-
prompt_upsampling: Optional[bool] = Field(
56-
None, description='Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation.'
57-
)
58-
canny_low_threshold: Optional[int] = Field(None, description='Low threshold for Canny edge detection')
59-
canny_high_threshold: Optional[int] = Field(None, description='High threshold for Canny edge detection')
60-
seed: Optional[int] = Field(None, description='The seed value for reproducibility.')
61-
steps: conint(ge=15, le=50) = Field(..., description='Number of steps for the image generation process')
62-
guidance: confloat(ge=1, le=100) = Field(..., description='Guidance strength for the image generation process')
63-
safety_tolerance: Optional[conint(ge=0, le=6)] = Field(
64-
6, description='Tolerance level for input and output moderation. Between 0 and 6, 0 being most strict, 6 being least strict. Defaults to 2.'
65-
)
66-
output_format: Optional[BFLOutputFormat] = Field(
67-
BFLOutputFormat.png, description="Output format for the generated image. Can be 'jpeg' or 'png'.", examples=['png']
68-
)
69-
control_image: Optional[str] = Field(None, description='Base64 encoded image to use as control input if no preprocessed image is provided')
70-
preprocessed_image: Optional[str] = Field(None, description='Optional pre-processed image that will bypass the control preprocessing step')
71-
72-
73-
class BFLFluxDepthImageRequest(BaseModel):
74-
prompt: str = Field(..., description='Text prompt for image generation')
75-
prompt_upsampling: Optional[bool] = Field(
76-
None, description='Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation.'
77-
)
78-
seed: Optional[int] = Field(None, description='The seed value for reproducibility.')
79-
steps: conint(ge=15, le=50) = Field(..., description='Number of steps for the image generation process')
80-
guidance: confloat(ge=1, le=100) = Field(..., description='Guidance strength for the image generation process')
81-
safety_tolerance: Optional[conint(ge=0, le=6)] = Field(
82-
6, description='Tolerance level for input and output moderation. Between 0 and 6, 0 being most strict, 6 being least strict. Defaults to 2.'
83-
)
84-
output_format: Optional[BFLOutputFormat] = Field(
85-
BFLOutputFormat.png, description="Output format for the generated image. Can be 'jpeg' or 'png'.", examples=['png']
86-
)
87-
control_image: Optional[str] = Field(None, description='Base64 encoded image to use as control input if no preprocessed image is provided')
88-
preprocessed_image: Optional[str] = Field(None, description='Optional pre-processed image that will bypass the control preprocessing step')
89-
90-
9153
class BFLFluxProGenerateRequest(BaseModel):
9254
prompt: str = Field(..., description='The text prompt for image generation.')
9355
prompt_upsampling: Optional[bool] = Field(
@@ -160,15 +122,8 @@ class BFLStatus(str, Enum):
160122
error = "Error"
161123

162124

163-
class BFLFluxProStatusResponse(BaseModel):
125+
class BFLFluxStatusResponse(BaseModel):
    """Status payload returned when polling a BFL Flux generation task."""
    id: str = Field(..., description="The unique identifier for the generation task.")
    status: BFLStatus = Field(..., description="The status of the task.")
    # Populated only once the task has completed; shape depends on the endpoint,
    # hence the loose Dict[str, Any] typing.
    result: Optional[Dict[str, Any]] = Field(None, description="The result of the task (null if not completed).")
    # Fractional completion; constrained to [0.0, 1.0] by pydantic, optional
    # because the API may omit it.
    progress: Optional[float] = Field(None, description="The progress of the task (0.0 to 1.0).", ge=0.0, le=1.0)

comfy_api_nodes/apis/veo_api.py

Lines changed: 111 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,111 @@
1+
from typing import Optional, Union
2+
from enum import Enum
3+
4+
from pydantic import BaseModel, Field
5+
6+
7+
class Image2(BaseModel):
    """Guide image supplied inline: base64 bytes are required, GCS URI optional.

    NOTE(review): companion model ``Image3`` inverts the requirement
    (``gcsUri`` required, base64 optional); together they cover both forms.
    """
    # Required: raw image content, base64-encoded.
    bytesBase64Encoded: str
    # Optional Cloud Storage URI — presumably an alternate location for the
    # same image; confirm against the Veo API schema.
    gcsUri: Optional[str] = None
    # Image MIME type (e.g. "image/png") — inferred from the field name.
    mimeType: Optional[str] = None
11+
12+
13+
class Image3(BaseModel):
    """Guide image referenced by a required Cloud Storage URI; inline base64 optional."""
    # Optional inline copy of the image bytes.
    bytesBase64Encoded: Optional[str] = None
    # Required: Cloud Storage URI pointing at the image.
    gcsUri: str
    # Image MIME type — inferred from the field name.
    mimeType: Optional[str] = None
17+
18+
19+
class Instance1(BaseModel):
    """One generation instance: a text prompt plus an optional guide image."""
    # Either an inline-base64 image (Image2) or a GCS-hosted one (Image3).
    image: Optional[Union[Image2, Image3]] = Field(
        None, description='Optional image to guide video generation'
    )
    prompt: str = Field(..., description='Text description of the video')
24+
25+
26+
class PersonGeneration1(str, Enum):
    """Policy switch for whether people may appear in generated media."""
    ALLOW = 'ALLOW'
    BLOCK = 'BLOCK'
29+
30+
31+
class Parameters1(BaseModel):
    """Optional tuning parameters shared by all instances of a Veo request."""
    aspectRatio: Optional[str] = Field(None, examples=['16:9'])
    # Length of the generated clip in seconds.
    durationSeconds: Optional[int] = None
    # Presumably lets the service rewrite/augment the prompt — TODO confirm.
    enhancePrompt: Optional[bool] = None
    generateAudio: Optional[bool] = Field(
        None,
        description='Generate audio for the video. Only supported by veo 3 models.',
    )
    negativePrompt: Optional[str] = None
    personGeneration: Optional[PersonGeneration1] = None
    # Number of video samples to generate for this request.
    sampleCount: Optional[int] = None
    # Seed for reproducibility — semantics defined by the remote service.
    seed: Optional[int] = None
    storageUri: Optional[str] = Field(
        None, description='Optional Cloud Storage URI to upload the video'
    )
46+
47+
48+
class VeoGenVidRequest(BaseModel):
    """Top-level request body: per-instance prompts/images plus shared parameters."""
    instances: Optional[list[Instance1]] = None
    parameters: Optional[Parameters1] = None
51+
52+
53+
class VeoGenVidResponse(BaseModel):
    """Immediate response to the generate call: a long-running operation handle."""
    name: str = Field(
        ...,
        description='Operation resource name',
        examples=[
            'projects/PROJECT_ID/locations/us-central1/publishers/google/models/MODEL_ID/operations/a1b07c8e-7b5a-4aba-bb34-3e1ccb8afcc8'
        ],
    )
61+
62+
63+
class VeoGenVidPollRequest(BaseModel):
    """Request body for polling a previously started generation operation."""
    operationName: str = Field(
        ...,
        description='Full operation name (from predict response)',
        examples=[
            'projects/PROJECT_ID/locations/us-central1/publishers/google/models/MODEL_ID/operations/OPERATION_ID'
        ],
    )
71+
72+
73+
class Video(BaseModel):
    """A generated video, delivered inline (base64) and/or via Cloud Storage."""
    bytesBase64Encoded: Optional[str] = Field(
        None, description='Base64-encoded video content'
    )
    gcsUri: Optional[str] = Field(None, description='Cloud Storage URI of the video')
    mimeType: Optional[str] = Field(None, description='Video MIME type')
79+
80+
81+
class Error1(BaseModel):
    """Error detail attached to a failed long-running operation."""
    code: Optional[int] = Field(None, description='Error code')
    message: Optional[str] = Field(None, description='Error message')
84+
85+
86+
class Response1(BaseModel):
    """Payload of a completed operation, tagged with a google '@type' discriminator."""
    # '@type' is not a valid Python identifier, hence the aliased field name.
    field_type: Optional[str] = Field(
        None,
        alias='@type',
        examples=[
            'type.googleapis.com/cloud.ai.large_models.vision.GenerateVideoResponse'
        ],
    )
    raiMediaFilteredCount: Optional[int] = Field(
        None, description='Count of media filtered by responsible AI policies'
    )
    raiMediaFilteredReasons: Optional[list[str]] = Field(
        None, description='Reasons why media was filtered by responsible AI policies'
    )
    videos: Optional[list[Video]] = None
101+
102+
103+
class VeoGenVidPollResponse(BaseModel):
    """Poll result for a generation operation; `response` is populated once `done`."""
    done: Optional[bool] = None
    error: Optional[Error1] = Field(
        None, description='Error details if operation failed'
    )
    # Operation resource name, echoing the value from VeoGenVidResponse.
    name: Optional[str] = None
    response: Optional[Response1] = Field(
        None, description='The actual prediction response if done is true'
    )

0 commit comments

Comments
 (0)