{
  "_class_name": "AllegroTransformer3DModel",
  "_diffusers_version": "0.28.0",
  "_name_or_path": "/cpfs/data/user/yanghuan/expr/rsora/RSoraT2V_L32AH24AD96_122_20240918_88x720x1280_fps15_t5/checkpoint-38000/model",
  "activation_fn": "gelu-approximate",
  "attention_bias": true,
  "attention_head_dim": 96,
  "ca_attention_mode": "xformers",
  "caption_channels": 4096,
  "cross_attention_dim": 2304,
  "double_self_attention": false,
  "downsampler": null,
  "dropout": 0.0,
  "in_channels": 4,
  "interpolation_scale_h": 2.0,
  "interpolation_scale_t": 2.2,
  "interpolation_scale_w": 2.0,
  "model_max_length": 300,
  "norm_elementwise_affine": false,
  "norm_eps": 1e-06,
  "norm_type": "ada_norm_single",
  "num_attention_heads": 24,
  "num_embeds_ada_norm": 1000,
  "num_layers": 32,
  "only_cross_attention": false,
  "out_channels": 4,
  "patch_size": 2,
  "patch_size_t": 1,
  "sa_attention_mode": "flash",
  "sample_size": [
    90,
    160
  ],
  "sample_size_t": 22,
  "upcast_attention": false,
  "use_additional_conditions": null,
  "use_linear_projection": false,
  "use_rope": true
}
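For reference, a minimal sketch of how a checkpoint carrying this config might be loaded. This is not part of the original file: it assumes a diffusers build that ships AllegroTransformer3DModel and a hypothetical local checkpoint directory holding this config.json next to the weight files.

# Minimal sketch, assuming AllegroTransformer3DModel is available in the installed
# diffusers version and "./allegro_transformer" is a hypothetical local folder
# containing this config.json plus the corresponding weights.
import torch
from diffusers import AllegroTransformer3DModel

transformer = AllegroTransformer3DModel.from_pretrained(
    "./allegro_transformer",  # hypothetical path; substitute your own checkpoint
    torch_dtype=torch.bfloat16,
)
print(transformer.config.num_layers)  # expected to print 32, matching "num_layers" above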