# coding=utf-8
# Copyright 2022 Facebook AI and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch ViT MAE (masked autoencoder) model."""

import collections.abc
from copy import deepcopy
from dataclasses import dataclass
from typing import Callable, Optional

import numpy as np
import torch
from torch import nn

from ...activations import ACT2FN
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutput
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, logging, torch_int
from ...utils.generic import can_return_tuple, check_model_inputs
from .configuration_vit_mae import ViTMAEConfig


logger = logging.get_logger(__name__)


@dataclass
@auto_docstring(
    custom_intro="""
    Class for ViTMAEModel's outputs, with potential hidden states and attentions.
    """
)
class ViTMAEModelOutput(ModelOutput):
    r"""
    mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
        Tensor indicating which patches are masked (1) and which are not (0).
    ids_restore (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
        Tensor containing the original index of the (shuffled) masked patches.
    """

    last_hidden_state: Optional[torch.FloatTensor] = None
    mask: Optional[torch.LongTensor] = None
    ids_restore: Optional[torch.LongTensor] = None
    hidden_states: Optional[tuple[torch.FloatTensor]] = None
    attentions: Optional[tuple[torch.FloatTensor]] = None


@dataclass
@auto_docstring(
    custom_intro="""
    Class for ViTMAEDecoder's outputs, with potential hidden states and attentions.
    """
)
class ViTMAEDecoderOutput(ModelOutput):
    r"""
    logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, patch_size ** 2 * num_channels)`):
        Pixel reconstruction logits.
    """

    logits: Optional[torch.FloatTensor] = None
    hidden_states: Optional[tuple[torch.FloatTensor]] = None
    attentions: Optional[tuple[torch.FloatTensor]] = None


@dataclass
@auto_docstring(
    custom_intro="""
    Class for ViTMAEForPreTraining's outputs, with potential hidden states and attentions.
    """
)
class ViTMAEForPreTrainingOutput(ModelOutput):
    r"""
    loss (`torch.FloatTensor` of shape `(1,)`):
        Pixel reconstruction loss.
    logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, patch_size ** 2 * num_channels)`):
        Pixel reconstruction logits.
    mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
        Tensor indicating which patches are masked (1) and which are not (0).
    ids_restore (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
        Tensor containing the original index of the (shuffled) masked patches.
""" loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None mask: Optional[torch.LongTensor] = None ids_restore: Optional[torch.LongTensor] = None hidden_states: Optional[tuple[torch.FloatTensor]] = None attentions: Optional[tuple[torch.FloatTensor]] = None def get_2d_sincos_pos_embed(embed_dim, grid_size, add_cls_token=False): """ Create 2D sin/cos positional embeddings. Args: embed_dim (`int`): Embedding dimension. grid_size (`int`): The grid height and width. add_cls_token (`bool`, *optional*, defaults to `False`): Whether or not to add a classification (CLS) token. Returns: (`torch.FloatTensor` of shape (grid_size*grid_size, embed_dim) or (1+grid_size*grid_size, embed_dim): the position embeddings (with or without classification token) """ grid_h = np.arange(grid_size, dtype=np.float32) grid_w = np.arange(grid_size, dtype=np.float32) grid = np.meshgrid(grid_w, grid_h) # here w goes first grid = np.stack(grid, axis=0) grid = grid.reshape([2, 1, grid_size, grid_size]) pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid) if add_cls_token: pos_embed = np.concatenate([np.zeros([1, embed_dim]), pos_embed], axis=0) return pos_embed def get_2d_sincos_pos_embed_from_grid(embed_dim, grid): if embed_dim % 2 != 0: raise ValueError("embed_dim must be even") # use half of dimensions to encode grid_h emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0]) # (H*W, D/2) emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1]) # (H*W, D/2) emb = np.concatenate([emb_h, emb_w], axis=1) # (H*W, D) return emb def get_1d_sincos_pos_embed_from_grid(embed_dim, pos): """ embed_dim: output dimension for each position pos: a list of positions to be encoded: size (M,) out: (M, D) """ if embed_dim % 2 != 0: raise ValueError("embed_dim must be even") omega = np.arange(embed_dim // 2, dtype=float) omega /= embed_dim / 2.0 omega = 1.0 / 10000**omega # (D/2,) pos = pos.reshape(-1) # (M,) out = np.einsum("m,d->md", pos, omega) # (M, D/2), outer product emb_sin = np.sin(out) # (M, D/2) emb_cos = np.cos(out) # (M, D/2) emb = np.concatenate([emb_sin, emb_cos], axis=1) # (M, D) return emb class ViTMAEEmbeddings(nn.Module): """ Construct the CLS token, position and patch embeddings. """ def __init__(self, config): super().__init__() self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) self.patch_embeddings = ViTMAEPatchEmbeddings(config) self.num_patches = self.patch_embeddings.num_patches # fixed sin-cos embedding self.position_embeddings = nn.Parameter( torch.zeros(1, self.num_patches + 1, config.hidden_size), requires_grad=False ) self.patch_size = config.patch_size self.config = config def initialize_weights(self): # initialize (and freeze) position embeddings by sin-cos embedding pos_embed = get_2d_sincos_pos_embed( self.position_embeddings.shape[-1], int(self.patch_embeddings.num_patches**0.5), add_cls_token=True ) self.position_embeddings.data.copy_(torch.from_numpy(pos_embed).float().unsqueeze(0)) # initialize patch_embeddings like nn.Linear (instead of nn.Conv2d) w = self.patch_embeddings.projection.weight.data torch.nn.init.xavier_uniform_(w.view([w.shape[0], -1])) # timm's trunc_normal_(std=.02) is effectively normal_(std=0.02) as cutoff is too big (2.) 


class ViTMAEEmbeddings(nn.Module):
    """
    Construct the CLS token, position and patch embeddings.
    """

    def __init__(self, config):
        super().__init__()

        self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
        self.patch_embeddings = ViTMAEPatchEmbeddings(config)
        self.num_patches = self.patch_embeddings.num_patches
        # fixed sin-cos embedding
        self.position_embeddings = nn.Parameter(
            torch.zeros(1, self.num_patches + 1, config.hidden_size), requires_grad=False
        )
        self.patch_size = config.patch_size
        self.config = config

    def initialize_weights(self):
        # initialize (and freeze) position embeddings by sin-cos embedding
        pos_embed = get_2d_sincos_pos_embed(
            self.position_embeddings.shape[-1], int(self.patch_embeddings.num_patches**0.5), add_cls_token=True
        )
        self.position_embeddings.data.copy_(torch.from_numpy(pos_embed).float().unsqueeze(0))

        # initialize patch_embeddings like nn.Linear (instead of nn.Conv2d)
        w = self.patch_embeddings.projection.weight.data
        torch.nn.init.xavier_uniform_(w.view([w.shape[0], -1]))

        # timm's trunc_normal_(std=.02) is effectively normal_(std=0.02) as cutoff is too big (2.)
        torch.nn.init.normal_(self.cls_token, std=self.config.initializer_range)

    # Copied from transformers.models.vit.modeling_vit.ViTEmbeddings.interpolate_pos_encoding
    def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
        """
        This method interpolates the pre-trained position encodings so that the model can be used on higher
        resolution images. It is also adapted to support torch.jit tracing.

        Adapted from:
        - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
        - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
        """
        num_patches = embeddings.shape[1] - 1
        num_positions = self.position_embeddings.shape[1] - 1

        # always interpolate when tracing to ensure the exported model works for dynamic input shapes
        if not torch.jit.is_tracing() and num_patches == num_positions and height == width:
            return self.position_embeddings

        class_pos_embed = self.position_embeddings[:, :1]
        patch_pos_embed = self.position_embeddings[:, 1:]

        dim = embeddings.shape[-1]

        new_height = height // self.patch_size
        new_width = width // self.patch_size

        sqrt_num_positions = torch_int(num_positions**0.5)
        patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
        patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)

        patch_pos_embed = nn.functional.interpolate(
            patch_pos_embed,
            size=(new_height, new_width),
            mode="bicubic",
            align_corners=False,
        )

        patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)

        return torch.cat((class_pos_embed, patch_pos_embed), dim=1)

    def random_masking(self, sequence, noise=None):
        """
        Perform per-sample random masking by per-sample shuffling. Per-sample shuffling is done by argsort random
        noise.

        Args:
            sequence (`torch.LongTensor` of shape `(batch_size, sequence_length, dim)`)
            noise (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mainly used for testing purposes to control randomness and maintain reproducibility.
        """
        batch_size, seq_length, dim = sequence.shape
        len_keep = int(seq_length * (1 - self.config.mask_ratio))

        if noise is None:
            noise = torch.rand(batch_size, seq_length, device=sequence.device)  # noise in [0, 1]

        # sort noise for each sample
        ids_shuffle = torch.argsort(noise, dim=1).to(sequence.device)  # ascend: small is keep, large is remove
        ids_restore = torch.argsort(ids_shuffle, dim=1).to(sequence.device)

        # keep the first subset
        ids_keep = ids_shuffle[:, :len_keep]
        sequence_unmasked = torch.gather(sequence, dim=1, index=ids_keep.unsqueeze(-1).repeat(1, 1, dim))

        # generate the binary mask: 0 is keep, 1 is remove
        mask = torch.ones([batch_size, seq_length], device=sequence.device)
        mask[:, :len_keep] = 0
        # unshuffle to get the binary mask
        mask = torch.gather(mask, dim=1, index=ids_restore)

        return sequence_unmasked, mask, ids_restore

    def forward(self, pixel_values, noise=None, interpolate_pos_encoding: bool = False):
        batch_size, num_channels, height, width = pixel_values.shape
        embeddings = self.patch_embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
        if interpolate_pos_encoding:
            position_embeddings = self.interpolate_pos_encoding(embeddings, height, width)
        else:
            position_embeddings = self.position_embeddings

        # add position embeddings w/o cls token
        embeddings = embeddings + position_embeddings[:, 1:, :]

        # masking: length -> length * config.mask_ratio
        embeddings, mask, ids_restore = self.random_masking(embeddings, noise)

        # append cls token
        cls_token = self.cls_token + position_embeddings[:, :1, :]
        cls_tokens = cls_token.expand(embeddings.shape[0], -1, -1)
        embeddings = torch.cat((cls_tokens, embeddings), dim=1)

        return embeddings, mask, ids_restore
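

# Illustrative sketch (not part of the model): the per-sample shuffling trick used by
# `ViTMAEEmbeddings.random_masking` above, shown on a toy tensor. With a mask ratio of
# 0.75 and 8 patches, 2 patches are kept; `ids_restore` is the inverse permutation that
# later lets the decoder put mask tokens back at the originally masked positions.
# The helper name and toy shapes below are hypothetical.
def _example_random_masking(mask_ratio: float = 0.75) -> None:
    batch_size, seq_length, dim = 1, 8, 4
    sequence = torch.arange(batch_size * seq_length * dim, dtype=torch.float32).reshape(batch_size, seq_length, dim)
    len_keep = int(seq_length * (1 - mask_ratio))

    noise = torch.rand(batch_size, seq_length)
    ids_shuffle = torch.argsort(noise, dim=1)  # ascending: lowest-noise patches are kept
    ids_restore = torch.argsort(ids_shuffle, dim=1)  # inverse permutation

    ids_keep = ids_shuffle[:, :len_keep]
    kept = torch.gather(sequence, dim=1, index=ids_keep.unsqueeze(-1).repeat(1, 1, dim))

    mask = torch.ones(batch_size, seq_length)
    mask[:, :len_keep] = 0
    mask = torch.gather(mask, dim=1, index=ids_restore)  # 1 = masked, 0 = kept, in the original patch order

    assert kept.shape == (batch_size, len_keep, dim)
    assert int(mask.sum()) == seq_length - len_keep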


class ViTMAEPatchEmbeddings(nn.Module):
    """
    This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
    `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
    Transformer.
    """

    def __init__(self, config):
        super().__init__()
        image_size, patch_size = config.image_size, config.patch_size
        num_channels, hidden_size = config.num_channels, config.hidden_size
        image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_patches = num_patches

        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size)

    def forward(self, pixel_values, interpolate_pos_encoding: bool = False):
        batch_size, num_channels, height, width = pixel_values.shape
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        if not interpolate_pos_encoding and (height != self.image_size[0] or width != self.image_size[1]):
            raise ValueError(
                f"Input image size ({height}*{width}) doesn't match model ({self.image_size[0]}*{self.image_size[1]})."
            )
        x = self.projection(pixel_values).flatten(2).transpose(1, 2)
        return x


# Copied from transformers.models.vit.modeling_vit.eager_attention_forward
def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs,
):
    # Take the dot product between "query" and "key" to get the raw attention scores.
    attn_weights = torch.matmul(query, key.transpose(-1, -2)) * scaling

    # Normalize the attention scores to probabilities.
    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)

    # This is actually dropping out entire tokens to attend to, which might
    # seem a bit unusual, but is taken from the original Transformer paper.
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)

    # Mask heads if we want to
    if attention_mask is not None:
        attn_weights = attn_weights * attention_mask

    attn_output = torch.matmul(attn_weights, value)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights


# Copied from transformers.models.vit.modeling_vit.ViTSelfAttention with ViT->ViTMAE
class ViTMAESelfAttention(nn.Module):
    def __init__(self, config: ViTMAEConfig):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
                f"heads {config.num_attention_heads}."
            )

        self.config = config
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.dropout_prob = config.attention_probs_dropout_prob
        self.scaling = self.attention_head_size**-0.5
        self.is_causal = False

        self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
        self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
        self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)

    def forward(
        self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None
    ) -> tuple[torch.Tensor, torch.Tensor]:
        batch_size = hidden_states.shape[0]
        new_shape = batch_size, -1, self.num_attention_heads, self.attention_head_size

        key_layer = self.key(hidden_states).view(*new_shape).transpose(1, 2)
        value_layer = self.value(hidden_states).view(*new_shape).transpose(1, 2)
        query_layer = self.query(hidden_states).view(*new_shape).transpose(1, 2)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        context_layer, attention_probs = attention_interface(
            self,
            query_layer,
            key_layer,
            value_layer,
            head_mask,
            is_causal=self.is_causal,
            scaling=self.scaling,
            dropout=0.0 if not self.training else self.dropout_prob,
        )

        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.reshape(new_context_layer_shape)

        return context_layer, attention_probs


# Copied from transformers.models.vit.modeling_vit.ViTSelfOutput with ViT->ViTMAE
class ViTMAESelfOutput(nn.Module):
    """
    The residual connection is defined in ViTMAELayer instead of here (as is the case with other models), due to the
    layernorm applied before each block.
""" def __init__(self, config: ViTMAEConfig): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states # Copied from transformers.models.vit.modeling_vit.ViTAttention with ViT->ViTMAE class ViTMAEAttention(nn.Module): def __init__(self, config: ViTMAEConfig): super().__init__() self.attention = ViTMAESelfAttention(config) self.output = ViTMAESelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads: set[int]): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads ) # Prune linear layers self.attention.query = prune_linear_layer(self.attention.query, index) self.attention.key = prune_linear_layer(self.attention.key, index) self.attention.value = prune_linear_layer(self.attention.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads) self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None) -> torch.Tensor: self_attn_output, _ = self.attention(hidden_states, head_mask) output = self.output(self_attn_output, hidden_states) return output # Copied from transformers.models.vit.modeling_vit.ViTIntermediate ViT->ViTMAE class ViTMAEIntermediate(nn.Module): def __init__(self, config: ViTMAEConfig): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.vit.modeling_vit.ViTOutput ViT->ViTMAE class ViTMAEOutput(nn.Module): def __init__(self, config: ViTMAEConfig): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = hidden_states + input_tensor return hidden_states # Copied from transformers.models.vit.modeling_vit.ViTLayer with ViT->ViTMAE,VIT->VITMAE class ViTMAELayer(GradientCheckpointingLayer): """This corresponds to the Block class in the timm implementation.""" def __init__(self, config: ViTMAEConfig): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = ViTMAEAttention(config) self.intermediate = ViTMAEIntermediate(config) self.output = ViTMAEOutput(config) self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None) -> 
        hidden_states_norm = self.layernorm_before(hidden_states)
        attention_output = self.attention(hidden_states_norm, head_mask)

        # first residual connection
        hidden_states = attention_output + hidden_states

        # in ViTMAE, layernorm is also applied after self-attention
        layer_output = self.layernorm_after(hidden_states)
        layer_output = self.intermediate(layer_output)

        # second residual connection is done here
        layer_output = self.output(layer_output, hidden_states)

        return layer_output


# Copied from transformers.models.vit.modeling_vit.ViTEncoder with ViT->ViTMAE
class ViTMAEEncoder(nn.Module):
    def __init__(self, config: ViTMAEConfig):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([ViTMAELayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None) -> BaseModelOutput:
        for i, layer_module in enumerate(self.layer):
            layer_head_mask = head_mask[i] if head_mask is not None else None
            hidden_states = layer_module(hidden_states, layer_head_mask)

        return BaseModelOutput(last_hidden_state=hidden_states)


@auto_docstring
class ViTMAEPreTrainedModel(PreTrainedModel):
    config: ViTMAEConfig
    base_model_prefix = "vit"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True
    _supports_sdpa = True
    _supports_flash_attn = True
    _supports_flex_attn = True
    _supports_attention_backend = True
    _can_record_outputs = {
        "hidden_states": ViTMAELayer,
        "attentions": ViTMAESelfAttention,
    }

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, ViTMAEEmbeddings):
            module.initialize_weights()
        elif isinstance(module, ViTMAEDecoder):
            module.mask_token.data.zero_()
            module.decoder_pos_embed.data.zero_()


@auto_docstring
class ViTMAEModel(ViTMAEPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = ViTMAEEmbeddings(config)
        self.encoder = ViTMAEEncoder(config)

        self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @check_model_inputs
    @auto_docstring
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        noise: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        interpolate_pos_encoding: bool = False,
        **kwargs: Unpack[TransformersKwargs],
    ) -> ViTMAEModelOutput:
        r"""
        noise (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mainly used for testing purposes to control randomness and maintain reproducibility.
        interpolate_pos_encoding (`bool`, *optional*, defaults to `False`):
            Whether to interpolate the pre-trained position encodings. This is mainly used to run the model on
            higher resolution images.

        Examples:

        ```python
        >>> from transformers import AutoImageProcessor, ViTMAEModel
        >>> from PIL import Image
        >>> import requests

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> image_processor = AutoImageProcessor.from_pretrained("facebook/vit-mae-base")
        >>> model = ViTMAEModel.from_pretrained("facebook/vit-mae-base")

        >>> inputs = image_processor(images=image, return_tensors="pt")
        >>> outputs = model(**inputs)
        >>> last_hidden_states = outputs.last_hidden_state
        ```"""
        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output, mask, ids_restore = self.embeddings(
            pixel_values, noise=noise, interpolate_pos_encoding=interpolate_pos_encoding
        )

        encoder_outputs: BaseModelOutput = self.encoder(embedding_output, head_mask=head_mask)
        sequence_output = encoder_outputs.last_hidden_state
        sequence_output = self.layernorm(sequence_output)

        return ViTMAEModelOutput(last_hidden_state=sequence_output, mask=mask, ids_restore=ids_restore)
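

# Illustrative sketch (not exported): with the default mask ratio of 0.75, the encoder
# above only processes the CLS token plus the kept patch tokens, so `last_hidden_state`
# has sequence length 1 + int(num_patches * (1 - mask_ratio)). The `mask` field
# (1 = masked, 0 = visible) and `ids_restore` let downstream code map the kept tokens
# back to their original patch positions. The helper name below is hypothetical.
def _example_visible_patch_positions(outputs: ViTMAEModelOutput) -> list:
    # per sample, the original indices of the patches the encoder actually saw
    return [torch.nonzero(sample_mask == 0).flatten() for sample_mask in outputs.mask]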


class ViTMAEDecoder(nn.Module):
    def __init__(self, config: ViTMAEConfig, num_patches: int):
        super().__init__()
        self.decoder_embed = nn.Linear(config.hidden_size, config.decoder_hidden_size, bias=True)
        self.mask_token = nn.Parameter(torch.zeros(1, 1, config.decoder_hidden_size))
        self.decoder_pos_embed = nn.Parameter(
            torch.zeros(1, num_patches + 1, config.decoder_hidden_size), requires_grad=False
        )  # fixed sin-cos embedding

        decoder_config = deepcopy(config)
        decoder_config.hidden_size = config.decoder_hidden_size
        decoder_config.num_hidden_layers = config.decoder_num_hidden_layers
        decoder_config.num_attention_heads = config.decoder_num_attention_heads
        decoder_config.intermediate_size = config.decoder_intermediate_size
        self.decoder_layers = nn.ModuleList(
            [ViTMAELayer(decoder_config) for _ in range(config.decoder_num_hidden_layers)]
        )

        self.decoder_norm = nn.LayerNorm(config.decoder_hidden_size, eps=config.layer_norm_eps)
        self.decoder_pred = nn.Linear(
            config.decoder_hidden_size, config.patch_size**2 * config.num_channels, bias=True
        )  # decoder to patch pixels
        self.gradient_checkpointing = False
        self.config = decoder_config
        self.initialize_weights(num_patches)

    def interpolate_pos_encoding(self, embeddings: torch.Tensor) -> torch.Tensor:
        """
        A decoder-side variant of the interpolation method in `ViTMAEEmbeddings`: it interpolates the pre-trained
        decoder position encodings so that the model can be used on higher resolution images.

        Adapted from:
        https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174
        """
        # -1 removes the class dimension since we later append it without interpolation
        embeddings_positions = embeddings.shape[1] - 1

        # Separation of class token and patch tokens
        class_pos_embed = self.decoder_pos_embed[:, :1]
        patch_pos_embed = self.decoder_pos_embed[:, 1:]

        # To retain the final 3d tensor with the required dimensions
        dim = self.decoder_pos_embed.shape[-1]

        # Increasing a dimension to enable bicubic interpolation
        patch_pos_embed = patch_pos_embed.reshape(1, 1, -1, dim)

        # permute to bring the dimension to be interpolated, to the last
        patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)

        # Interpolating the decoder position embeddings shape wrt embeddings shape i.e (x).
        # we keep the second last dimension constant
        patch_pos_embed = nn.functional.interpolate(
            patch_pos_embed,
            size=(patch_pos_embed.shape[-2], embeddings_positions),
            mode="bicubic",
            align_corners=False,
        )

        # Converting back to the original shape
        patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)

        # Adding the class token back
        return torch.cat((class_pos_embed, patch_pos_embed), dim=1)

    def initialize_weights(self, num_patches):
        # initialize (and freeze) position embeddings by sin-cos embedding
        decoder_pos_embed = get_2d_sincos_pos_embed(
            self.decoder_pos_embed.shape[-1], int(num_patches**0.5), add_cls_token=True
        )
        self.decoder_pos_embed.data.copy_(torch.from_numpy(decoder_pos_embed).float().unsqueeze(0))

        # timm's trunc_normal_(std=.02) is effectively normal_(std=0.02) as cutoff is too big (2.)
        torch.nn.init.normal_(self.mask_token, std=self.config.initializer_range)

    def forward(self, hidden_states: torch.Tensor, ids_restore: torch.Tensor, interpolate_pos_encoding: bool = False):
        # Embed tokens
        x = self.decoder_embed(hidden_states)

        # Append mask tokens to sequence
        mask_tokens = self.mask_token.repeat(x.shape[0], ids_restore.shape[1] + 1 - x.shape[1], 1)
        x_ = torch.cat([x[:, 1:, :], mask_tokens], dim=1)  # no cls token

        # Unshuffle
        x_ = torch.gather(x_, dim=1, index=ids_restore.unsqueeze(-1).repeat(1, 1, x.shape[2]).to(x_.device))
        x = torch.cat([x[:, :1, :], x_], dim=1)  # append cls token

        # Add pos embed
        if interpolate_pos_encoding:
            decoder_pos_embed = self.interpolate_pos_encoding(x)
        else:
            decoder_pos_embed = self.decoder_pos_embed
        hidden_states = x + decoder_pos_embed

        # Apply Transformer layers (blocks)
        for layer_module in self.decoder_layers:
            hidden_states = layer_module(hidden_states, head_mask=None)

        hidden_states = self.decoder_norm(hidden_states)

        # Predictor projection
        logits = self.decoder_pred(hidden_states)

        # Remove cls token
        logits = logits[:, 1:, :]

        return ViTMAEDecoderOutput(logits=logits)
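

# Illustrative sketch (not part of the model): the unshuffle step in `ViTMAEDecoder.forward`
# above. Gathering with `ids_restore` is the inverse of the encoder's shuffle, so mask
# tokens end up exactly at the originally masked positions. Names here are hypothetical.
def _example_unshuffle_roundtrip(seq_length: int = 6) -> None:
    original = torch.arange(seq_length).unsqueeze(0)  # (1, seq_length)
    ids_shuffle = torch.randperm(seq_length).unsqueeze(0)  # how the encoder shuffled the patches
    ids_restore = torch.argsort(ids_shuffle, dim=1)  # what the encoder hands to the decoder
    shuffled = torch.gather(original, dim=1, index=ids_shuffle)
    restored = torch.gather(shuffled, dim=1, index=ids_restore)
    assert torch.equal(restored, original)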


@auto_docstring(
    custom_intro="""
    The ViTMAE Model transformer with the decoder on top for self-supervised pre-training.

    Note that we provide a script to pre-train this model on custom data in our [examples
    directory](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-pretraining).
    """
)
class ViTMAEForPreTraining(ViTMAEPreTrainedModel):
    def __init__(self, config: ViTMAEConfig):
        super().__init__(config)
        self.config = config

        self.vit = ViTMAEModel(config)
        self.decoder = ViTMAEDecoder(config, num_patches=self.vit.embeddings.num_patches)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.vit.embeddings.patch_embeddings

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.vit.encoder.layer[layer].attention.prune_heads(heads)

    def patchify(self, pixel_values, interpolate_pos_encoding: bool = False):
        """
        Args:
            pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
                Pixel values.
            interpolate_pos_encoding (`bool`, *optional*, defaults to `False`):
                Interpolation flag passed during the forward pass.

        Returns:
            `torch.FloatTensor` of shape `(batch_size, num_patches, patch_size**2 * num_channels)`:
                Patchified pixel values.
        """
        patch_size, num_channels = self.config.patch_size, self.config.num_channels
        # sanity checks
        if not interpolate_pos_encoding and (
            pixel_values.shape[2] != pixel_values.shape[3] or pixel_values.shape[2] % patch_size != 0
        ):
            raise ValueError("Make sure the pixel values have a squared size that is divisible by the patch size")
        if pixel_values.shape[1] != num_channels:
            raise ValueError(
                "Make sure the number of channels of the pixel values is equal to the one set in the configuration"
            )

        # patchify
        batch_size = pixel_values.shape[0]
        num_patches_h = pixel_values.shape[2] // patch_size
        num_patches_w = pixel_values.shape[3] // patch_size
        patchified_pixel_values = pixel_values.reshape(
            batch_size, num_channels, num_patches_h, patch_size, num_patches_w, patch_size
        )
        patchified_pixel_values = torch.einsum("nchpwq->nhwpqc", patchified_pixel_values)
        patchified_pixel_values = patchified_pixel_values.reshape(
            batch_size, num_patches_h * num_patches_w, patch_size**2 * num_channels
        )
        return patchified_pixel_values

    def unpatchify(self, patchified_pixel_values, original_image_size: Optional[tuple[int, int]] = None):
        """
        Args:
            patchified_pixel_values (`torch.FloatTensor` of shape `(batch_size, num_patches, patch_size**2 * num_channels)`):
                Patchified pixel values.
            original_image_size (`tuple[int, int]`, *optional*):
                Original image size.

        Returns:
            `torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`:
                Pixel values.
        """
        patch_size, num_channels = self.config.patch_size, self.config.num_channels
        original_image_size = (
            original_image_size
            if original_image_size is not None
            else (self.config.image_size, self.config.image_size)
        )
        original_height, original_width = original_image_size
        num_patches_h = original_height // patch_size
        num_patches_w = original_width // patch_size
        # sanity check
        if num_patches_h * num_patches_w != patchified_pixel_values.shape[1]:
            raise ValueError(
                f"The number of patches in the patchified pixel values, {patchified_pixel_values.shape[1]}, does not"
                f" match the number of patches of the original image, {num_patches_h}*{num_patches_w}."
            )

        # unpatchify
        batch_size = patchified_pixel_values.shape[0]
        patchified_pixel_values = patchified_pixel_values.reshape(
            batch_size,
            num_patches_h,
            num_patches_w,
            patch_size,
            patch_size,
            num_channels,
        )
        patchified_pixel_values = torch.einsum("nhwpqc->nchpwq", patchified_pixel_values)
        pixel_values = patchified_pixel_values.reshape(
            batch_size,
            num_channels,
            num_patches_h * patch_size,
            num_patches_w * patch_size,
        )
        return pixel_values

    def forward_loss(self, pixel_values, pred, mask, interpolate_pos_encoding: bool = False):
        """
        Args:
            pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
                Pixel values.
            pred (`torch.FloatTensor` of shape `(batch_size, num_patches, patch_size**2 * num_channels)`):
                Predicted pixel values.
            mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
                Tensor indicating which patches are masked (1) and which are not (0).
            interpolate_pos_encoding (`bool`, *optional*, defaults to `False`):
                Interpolation flag passed during the forward pass.

        Returns:
            `torch.FloatTensor`: Pixel reconstruction loss.
        """
        target = self.patchify(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
        if self.config.norm_pix_loss:
            mean = target.mean(dim=-1, keepdim=True)
            var = target.var(dim=-1, keepdim=True)
            target = (target - mean) / (var + 1.0e-6) ** 0.5

        loss = (pred - target) ** 2
        loss = loss.mean(dim=-1)  # [N, L], mean loss per patch

        loss = (loss * mask).sum() / mask.sum()  # mean loss on removed patches
        return loss

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        noise: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        interpolate_pos_encoding: bool = False,
        **kwargs: Unpack[TransformersKwargs],
    ) -> ViTMAEForPreTrainingOutput:
        r"""
        noise (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mainly used for testing purposes to control randomness and maintain reproducibility.
        interpolate_pos_encoding (`bool`, *optional*, defaults to `False`):
            Whether to interpolate the pre-trained position encodings. This is mainly used to run the model on
            higher resolution images.

        Examples:

        ```python
        >>> from transformers import AutoImageProcessor, ViTMAEForPreTraining
        >>> from PIL import Image
        >>> import requests

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> image_processor = AutoImageProcessor.from_pretrained("facebook/vit-mae-base")
        >>> model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")

        >>> inputs = image_processor(images=image, return_tensors="pt")
        >>> outputs = model(**inputs)
        >>> loss = outputs.loss
        >>> mask = outputs.mask
        >>> ids_restore = outputs.ids_restore
        ```"""
        outputs: ViTMAEModelOutput = self.vit(
            pixel_values,
            noise=noise,
            head_mask=head_mask,
            interpolate_pos_encoding=interpolate_pos_encoding,
            **kwargs,
        )

        latent = outputs.last_hidden_state
        ids_restore = outputs.ids_restore
        mask = outputs.mask

        decoder_outputs: ViTMAEDecoderOutput = self.decoder(
            latent, ids_restore, interpolate_pos_encoding=interpolate_pos_encoding
        )
        logits = decoder_outputs.logits  # shape (batch_size, num_patches, patch_size*patch_size*num_channels)

        loss = self.forward_loss(pixel_values, logits, mask, interpolate_pos_encoding=interpolate_pos_encoding)

        return ViTMAEForPreTrainingOutput(
            loss=loss,
            logits=logits,
            mask=mask,
            ids_restore=ids_restore,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


__all__ = ["ViTMAEForPreTraining", "ViTMAELayer", "ViTMAEModel", "ViTMAEPreTrainedModel"]
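

# Illustrative sketch (appendix, not exported): the masked reconstruction objective
# implemented in `ViTMAEForPreTraining.forward_loss` above, reproduced on toy tensors.
# It is a per-patch mean-squared error that is averaged only over the masked patches
# (mask == 1); visible patches do not contribute to the loss. Shapes and names below
# are hypothetical.
def _example_masked_reconstruction_loss() -> torch.Tensor:
    pred = torch.randn(2, 16, 48)  # (batch_size, num_patches, patch_size**2 * num_channels)
    target = torch.randn(2, 16, 48)
    mask = (torch.rand(2, 16) > 0.25).float()  # 1 = masked / to be reconstructed
    loss_per_patch = ((pred - target) ** 2).mean(dim=-1)  # (batch_size, num_patches)
    return (loss_per_patch * mask).sum() / mask.sum()  # mean over masked patches only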