# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# This file was automatically generated from src/transformers/models/edgetam/modular_edgetam.py.
# Do NOT edit this file manually as any edits will be overwritten by the generation of
# the file from the modular. If any change should be done, please apply the change to the
# modular_edgetam.py file directly. One of our CI enforces this.
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# coding=utf-8
# Copyright 2025 The Meta AI Authors and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...configuration_utils import PretrainedConfig
from ..auto import CONFIG_MAPPING, AutoConfig


class EdgeTamVisionConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`EdgeTamVisionModel`]. It is used to instantiate
    an EdgeTAM vision encoder according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the EdgeTAM
    [facebook/EdgeTAM](https://huggingface.co/facebook/EdgeTAM) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        backbone_config (`Union[dict, "PretrainedConfig"]`, *optional*):
            Configuration for the vision backbone. This is used to instantiate the backbone using
            `AutoModel.from_config`.
        backbone_channel_list (`List[int]`, *optional*, defaults to `[384, 192, 96, 48]`):
            The list of channel dimensions for the backbone.
        backbone_feature_sizes (`List[List[int]]`, *optional*, defaults to `[[256, 256], [128, 128], [64, 64]]`):
            The spatial sizes of the feature maps from the backbone.
        fpn_hidden_size (`int`, *optional*, defaults to 256):
            The hidden dimension of the FPN.
        fpn_kernel_size (`int`, *optional*, defaults to 1):
            The kernel size for the convolutions in the neck.
        fpn_stride (`int`, *optional*, defaults to 1):
            The stride for the convolutions in the neck.
        fpn_padding (`int`, *optional*, defaults to 0):
            The padding for the convolutions in the neck.
        fpn_top_down_levels (`List[int]`, *optional*, defaults to `[2, 3]`):
            The levels for the top-down FPN connections.
        num_feature_levels (`int`, *optional*, defaults to 3):
            The number of feature levels from the FPN to use.
        hidden_act (`str`, *optional*, defaults to `"gelu"`):
            The non-linear activation function in the neck.
        layer_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon for the layer normalization.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
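
    Example:

    A minimal usage sketch (assumes the standard `model = Model(config)` pattern shown for [`EdgeTamModel`] below):

    ```python
    >>> from transformers import EdgeTamVisionConfig, EdgeTamVisionModel

    >>> # Initializing a configuration with default values; when no `backbone_config` is
    >>> # given, a timm RepViT backbone config is loaded via `AutoConfig`
    >>> configuration = EdgeTamVisionConfig()

    >>> # Initializing a EdgeTamVisionModel (with random weights) from the configuration
    >>> model = EdgeTamVisionModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```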
""" base_config_key = "vision_config" model_type = "edgetam_vision_model" sub_configs = { "backbone_config": AutoConfig, } def __init__( self, backbone_config=None, backbone_channel_list=None, backbone_feature_sizes=None, fpn_hidden_size=256, fpn_kernel_size=1, fpn_stride=1, fpn_padding=0, fpn_top_down_levels=None, num_feature_levels=3, hidden_act="gelu", layer_norm_eps=1e-6, initializer_range=0.02, **kwargs, ): super().__init__(**kwargs) backbone_channel_list = [384, 192, 96, 48] if backbone_channel_list is None else backbone_channel_list backbone_feature_sizes = ( [[256, 256], [128, 128], [64, 64]] if backbone_feature_sizes is None else backbone_feature_sizes ) fpn_top_down_levels = [2, 3] if fpn_top_down_levels is None else fpn_top_down_levels if isinstance(backbone_config, dict): backbone_config["model_type"] = backbone_config.get("model_type", "timm_wrapper") backbone_config = CONFIG_MAPPING[backbone_config["model_type"]](**backbone_config) elif isinstance(backbone_config, AutoConfig): backbone_config = backbone_config elif backbone_config is None: backbone_config = AutoConfig.from_pretrained( "timm/repvit_m1.dist_in1k", model_args={"in_chans": 3, "features_only": True, "out_indices": [0, 1, 2, 3]}, ) self.backbone_config = backbone_config # Neck self.backbone_channel_list = backbone_channel_list self.backbone_feature_sizes = backbone_feature_sizes self.fpn_hidden_size = fpn_hidden_size self.fpn_kernel_size = fpn_kernel_size self.fpn_stride = fpn_stride self.fpn_padding = fpn_padding self.fpn_top_down_levels = fpn_top_down_levels self.num_feature_levels = num_feature_levels self.hidden_act = hidden_act self.layer_norm_eps = layer_norm_eps self.initializer_range = initializer_range class EdgeTamPromptEncoderConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`EdgeTamPromptEncoder`]. The [`EdgeTamPromptEncoder`] module is used to encode the input 2D points and bounding boxes. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: hidden_size (`int`, *optional*, defaults to 256): Dimensionality of the hidden states. image_size (`int`, *optional*, defaults to 1024): The expected output resolution of the image. patch_size (`int`, *optional*, defaults to 16): The size (resolution) of each patch. mask_input_channels (`int`, *optional*, defaults to 16): The number of channels to be fed to the `MaskDecoder` module. num_point_embeddings (`int`, *optional*, defaults to 4): The number of point embeddings to be used. hidden_act (`str`, *optional*, defaults to `"gelu"`): The non-linear activation function in the encoder and pooler. layer_norm_eps (`float`, *optional*, defaults to 1e-06): The epsilon used by the layer normalization layers. scale (`float`, *optional*, defaults to 1): The scale factor for the prompt encoder. 
""" base_config_key = "prompt_encoder_config" def __init__( self, hidden_size=256, image_size=1024, patch_size=16, mask_input_channels=16, num_point_embeddings=4, hidden_act="gelu", layer_norm_eps=1e-6, scale=1, **kwargs, ): super().__init__(**kwargs) self.hidden_size = hidden_size self.image_size = image_size self.patch_size = patch_size self.mask_input_channels = mask_input_channels self.num_point_embeddings = num_point_embeddings self.hidden_act = hidden_act self.layer_norm_eps = layer_norm_eps self.scale = scale class EdgeTamMaskDecoderConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`EdgeTamMaskDecoder`]. It is used to instantiate a EDGETAM memory encoder according to the specified arguments, defining the model architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: hidden_size (`int`, *optional*, defaults to 256): Dimensionality of the hidden states. hidden_act (`str`, *optional*, defaults to `"gelu"`): The non-linear activation function in the EDGETAM mask decoder. mlp_dim (`int`, *optional*, defaults to 2048): The dimension of the MLP in the two-way transformer. num_hidden_layers (`int`, *optional*, defaults to 2): The number of hidden layers in the two-way transformer. num_attention_heads (`int`, *optional*, defaults to 8): The number of attention heads in the two-way transformer. attention_downsample_rate (`int`, *optional*, defaults to 2): The downsample rate for the attention layers. num_multimask_outputs (`int`, *optional*, defaults to 3): The number of multimask outputs. iou_head_depth (`int`, *optional*, defaults to 3): The depth of the IoU head. iou_head_hidden_dim (`int`, *optional*, defaults to 256): The hidden dimension of the IoU head. dynamic_multimask_via_stability (`bool`, *optional*, defaults to `True`): Whether to use dynamic multimask via stability. dynamic_multimask_stability_delta (`float`, *optional*, defaults to 0.05): The stability delta for the dynamic multimask. dynamic_multimask_stability_thresh (`float`, *optional*, defaults to 0.98): The stability threshold for the dynamic multimask. """ base_config_key = "mask_decoder_config" def __init__( self, hidden_size=256, hidden_act="gelu", mlp_dim=2048, num_hidden_layers=2, num_attention_heads=8, attention_downsample_rate=2, num_multimask_outputs=3, iou_head_depth=3, iou_head_hidden_dim=256, dynamic_multimask_via_stability=True, dynamic_multimask_stability_delta=0.05, dynamic_multimask_stability_thresh=0.98, **kwargs, ): super().__init__(**kwargs) self.hidden_size = hidden_size self.num_multimask_outputs = num_multimask_outputs self.hidden_act = hidden_act self.iou_head_depth = iou_head_depth self.iou_head_hidden_dim = iou_head_hidden_dim self.dynamic_multimask_via_stability = dynamic_multimask_via_stability self.dynamic_multimask_stability_delta = dynamic_multimask_stability_delta self.dynamic_multimask_stability_thresh = dynamic_multimask_stability_thresh # TwoWayTransformer configuration self.num_hidden_layers = num_hidden_layers self.hidden_size = hidden_size self.num_attention_heads = num_attention_heads self.mlp_dim = mlp_dim self.attention_downsample_rate = attention_downsample_rate class EdgeTamConfig(PretrainedConfig): r""" [`EdgeTamConfig`] is the configuration class to store the configuration of a [`EdgeTamModel`]. 
    """

    base_config_key = "mask_decoder_config"

    def __init__(
        self,
        hidden_size=256,
        hidden_act="gelu",
        mlp_dim=2048,
        num_hidden_layers=2,
        num_attention_heads=8,
        attention_downsample_rate=2,
        num_multimask_outputs=3,
        iou_head_depth=3,
        iou_head_hidden_dim=256,
        dynamic_multimask_via_stability=True,
        dynamic_multimask_stability_delta=0.05,
        dynamic_multimask_stability_thresh=0.98,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_multimask_outputs = num_multimask_outputs
        self.hidden_act = hidden_act
        self.iou_head_depth = iou_head_depth
        self.iou_head_hidden_dim = iou_head_hidden_dim
        self.dynamic_multimask_via_stability = dynamic_multimask_via_stability
        self.dynamic_multimask_stability_delta = dynamic_multimask_stability_delta
        self.dynamic_multimask_stability_thresh = dynamic_multimask_stability_thresh

        # TwoWayTransformer configuration
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_dim = mlp_dim
        self.attention_downsample_rate = attention_downsample_rate


class EdgeTamConfig(PretrainedConfig):
    r"""
    [`EdgeTamConfig`] is the configuration class to store the configuration of a [`EdgeTamModel`]. It is used to
    instantiate an EDGETAM model according to the specified arguments, defining the vision encoder, prompt encoder,
    and mask decoder configs. Instantiating a configuration with the defaults will yield a similar configuration to
    that of the EdgeTAM [facebook/EdgeTAM](https://huggingface.co/facebook/EdgeTAM) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vision_config (Union[`dict`, `EdgeTamVisionConfig`], *optional*):
            Dictionary of configuration options used to initialize [`EdgeTamVisionConfig`].
        prompt_encoder_config (Union[`dict`, `EdgeTamPromptEncoderConfig`], *optional*):
            Dictionary of configuration options used to initialize [`EdgeTamPromptEncoderConfig`].
        mask_decoder_config (Union[`dict`, `EdgeTamMaskDecoderConfig`], *optional*):
            Dictionary of configuration options used to initialize [`EdgeTamMaskDecoderConfig`].
        initializer_range (`float`, *optional*, defaults to 0.02):
            Standard deviation for parameter initialization.

    Example:

    ```python
    >>> from transformers import (
    ...     EdgeTamConfig,
    ...     EdgeTamVisionConfig,
    ...     EdgeTamPromptEncoderConfig,
    ...     EdgeTamMaskDecoderConfig,
    ...     EdgeTamModel,
    ... )

    >>> # Initializing an EdgeTamConfig with `"facebook/EdgeTAM"` style configuration
    >>> configuration = EdgeTamConfig()

    >>> # Initializing an EdgeTamModel (with random weights) from the `"facebook/EdgeTAM"` style configuration
    >>> model = EdgeTamModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config

    >>> # We can also initialize an EdgeTamConfig from an EdgeTamVisionConfig, EdgeTamPromptEncoderConfig, and EdgeTamMaskDecoderConfig

    >>> # Initializing EDGETAM vision encoder, prompt encoder, and mask decoder configurations
    >>> vision_config = EdgeTamVisionConfig()
    >>> prompt_encoder_config = EdgeTamPromptEncoderConfig()
    >>> mask_decoder_config = EdgeTamMaskDecoderConfig()

    >>> config = EdgeTamConfig(vision_config, prompt_encoder_config, mask_decoder_config)
    ```"""

    model_type = "edgetam"
    sub_configs = {
        "vision_config": AutoConfig,
        "prompt_encoder_config": EdgeTamPromptEncoderConfig,
        "mask_decoder_config": EdgeTamMaskDecoderConfig,
    }

    def __init__(
        self,
        vision_config=None,
        prompt_encoder_config=None,
        mask_decoder_config=None,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        vision_config = vision_config if vision_config is not None else {}
        prompt_encoder_config = prompt_encoder_config if prompt_encoder_config is not None else {}
        mask_decoder_config = mask_decoder_config if mask_decoder_config is not None else {}

        if isinstance(vision_config, dict):
            vision_config["model_type"] = vision_config.get("model_type", "edgetam_vision_model")
            vision_config = CONFIG_MAPPING[vision_config["model_type"]](**vision_config)
        if isinstance(prompt_encoder_config, EdgeTamPromptEncoderConfig):
            prompt_encoder_config = prompt_encoder_config.to_dict()
        if isinstance(mask_decoder_config, EdgeTamMaskDecoderConfig):
            mask_decoder_config = mask_decoder_config.to_dict()

        self.vision_config = vision_config
        self.prompt_encoder_config = EdgeTamPromptEncoderConfig(**prompt_encoder_config)
        self.mask_decoder_config = EdgeTamMaskDecoderConfig(**mask_decoder_config)
        self.initializer_range = initializer_range


__all__ = ["EdgeTamConfig", "EdgeTamVisionConfig", "EdgeTamPromptEncoderConfig", "EdgeTamMaskDecoderConfig"]