# Copyright 2023 Haotian Liu & Qinghao Ye (Modified from LLaVA)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from abc import ABC, abstractmethod
from typing import List, Optional, Tuple, Union

import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss
from transformers import AutoConfig, AutoModelForCausalLM, LlamaModel, LlamaForCausalLM
from transformers.modeling_outputs import CausalLMOutputWithPast

from extensions.sd_smartprocess.mplug_owl2.constants import IMAGE_TOKEN_INDEX, IGNORE_INDEX
from .configuration_mplug_owl2 import MPLUGOwl2Config, MplugOwlVisionConfig, MplugOwlVisualAbstractorConfig, \
    MPLUGOwl2QwenConfig
from .modeling_llama2 import replace_llama_modality_adaptive
from .modeling_qwen import QWenLMHeadModel, QWenModel
from .visual_encoder import MplugOwlVisionModel, MplugOwlVisualAbstractorModel


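# MPLUGOwl2MetaModel attaches the vision encoder and the visual abstractor (a
# query-based resampler that condenses patch features into a fixed number of
# visual tokens) to whichever language backbone it is mixed into (LLaMA and
# Qwen variants below).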
class MPLUGOwl2MetaModel:
    def __init__(self, config):
        super(MPLUGOwl2MetaModel, self).__init__(config)
        self.vision_model = MplugOwlVisionModel(
            MplugOwlVisionConfig(**config.visual_config["visual_model"])
        )
        self.visual_abstractor = MplugOwlVisualAbstractorModel(
            MplugOwlVisualAbstractorConfig(**config.visual_config["visual_abstractor"]), config.hidden_size
        )

    def get_vision_tower(self):
        vision_model = getattr(self, 'vision_model', None)
        if type(vision_model) is list:
            vision_model = vision_model[0]
        return vision_model

    def get_visual_abstractor(self):
        visual_abstractor = getattr(self, 'visual_abstractor', None)
        if type(visual_abstractor) is list:
            visual_abstractor = visual_abstractor[0]
        return visual_abstractor


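# MPLUGOwl2MetaForCausalLM is the mixin shared by the LLaMA- and Qwen-based
# causal-LM wrappers below: it encodes images and rewrites token-level inputs
# (input_ids, attention_mask, labels) into embedding-level inputs with the
# image features spliced in.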
class MPLUGOwl2MetaForCausalLM(ABC):
    @abstractmethod
    def get_model(self):
        pass

    def encode_images(self, images):
        image_features = self.get_model().vision_model(images).last_hidden_state
        image_features = self.get_model().visual_abstractor(encoder_hidden_states=image_features).last_hidden_state
        return image_features

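    # prepare_inputs_labels_for_multimodal() splices the features returned by
    # encode_images() into the text embedding sequence wherever an
    # IMAGE_TOKEN_INDEX placeholder occurs, and builds a parallel modality
    # indicator (0 = text position, 1 = image position) consumed downstream by
    # the modality-adaptive language-model layers.
    #
    # Rough sketch (token counts are illustrative, not exact):
    #   input_ids : [t1, t2, <image>, t3]
    #   embeds    : [e(t1), e(t2), v1 ... vQ, e(t3)]   # Q visual query tokens
    #   labels    : [l1, l2, IGNORE_INDEX x Q, l3]     # no loss on image span
    #   indicator : [0, 0, 1 ... 1, 0]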
    def prepare_inputs_labels_for_multimodal(
            self, input_ids, attention_mask, past_key_values, labels, images
    ):
        if images is None or input_ids.shape[1] == 1:
            if past_key_values is not None and images is not None and input_ids.shape[1] == 1:
                # print(attention_mask)
                if attention_mask is not None:
                    attention_mask = torch.ones((attention_mask.shape[0], past_key_values[-1][-1].shape[-2] + 1),
                                                dtype=attention_mask.dtype, device=attention_mask.device)
            multiway_indices = torch.zeros_like(input_ids).long().to(self.device)
            return input_ids, multiway_indices, attention_mask, past_key_values, None, labels

        if type(images) is list or images.ndim == 5:
            concat_images = torch.cat([image for image in images], dim=0)
            image_features = self.encode_images(concat_images)
            split_sizes = [image.shape[0] for image in images]
            image_features = torch.split(image_features, split_sizes, dim=0)
            image_features = [x.flatten(0, 1) for x in image_features]
        else:
            image_features = self.encode_images(images)

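        # Walk the batch sample by sample, replacing each IMAGE_TOKEN_INDEX
        # placeholder with the corresponding block of image features and tracking
        # which positions are text (0) versus image (1).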
        new_input_embeds = []
        new_modality_indicators = []
        new_labels = [] if labels is not None else None
        cur_image_idx = 0
        for batch_idx, cur_input_ids in enumerate(input_ids):
            if (cur_input_ids == IMAGE_TOKEN_INDEX).sum() == 0:
                # multimodal LLM, but the current sample is not multimodal
                # FIXME: this is a hacky fix, for deepspeed zero3 to work
                half_len = cur_input_ids.shape[0] // 2
                cur_image_features = image_features[cur_image_idx]
                cur_input_embeds_1 = self.get_model().embed_tokens(cur_input_ids[:half_len])
                cur_input_embeds_2 = self.get_model().embed_tokens(cur_input_ids[half_len:])
                cur_input_embeds = torch.cat([cur_input_embeds_1, cur_image_features[0:0], cur_input_embeds_2], dim=0)
                new_input_embeds.append(cur_input_embeds)

                cur_modality_indicators = torch.zeros(len(cur_input_embeds)).long().to(self.device)
                new_modality_indicators.append(cur_modality_indicators)
                if labels is not None:
                    new_labels.append(labels[batch_idx])
                cur_image_idx += 1
                continue
            image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]
            cur_new_input_embeds = []
            cur_modality_indicators = []
            if labels is not None:
                cur_labels = labels[batch_idx]
                cur_new_labels = []
                assert cur_labels.shape == cur_input_ids.shape
            while image_token_indices.numel() > 0:
                cur_image_features = image_features[cur_image_idx]
                image_token_start = image_token_indices[0]
                cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start]))
                cur_new_input_embeds.append(cur_image_features)

                # Add modality indicator
                assert image_token_start == len(cur_input_ids[:image_token_start])
                cur_modality_indicators.append(torch.zeros(len(cur_input_ids[:image_token_start])).long())
                cur_modality_indicators.append(torch.ones(len(cur_image_features)).long())

                if labels is not None:
                    cur_new_labels.append(cur_labels[:image_token_start])
                    cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device,
                                                     dtype=labels.dtype))
                    cur_labels = cur_labels[image_token_start + 1:]
                cur_image_idx += 1
                cur_input_ids = cur_input_ids[image_token_start + 1:]
                image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]
            if cur_input_ids.numel() > 0:
                cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids))
                cur_modality_indicators.append(torch.zeros(len(cur_input_ids)).long())
                if labels is not None:
                    cur_new_labels.append(cur_labels)
            cur_new_input_embeds = [x.to(device=self.device) for x in cur_new_input_embeds]
            cur_new_input_embeds = torch.cat(cur_new_input_embeds, dim=0)
            new_input_embeds.append(cur_new_input_embeds)

            # Modality
            cur_modality_indicators = [x.to(device=self.device) for x in cur_modality_indicators]
            cur_modality_indicators = torch.cat(cur_modality_indicators, dim=0)
            new_modality_indicators.append(cur_modality_indicators)

            if labels is not None:
                cur_new_labels = torch.cat(cur_new_labels, dim=0)
                new_labels.append(cur_new_labels)

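        # Each image placeholder expands into many visual tokens, so samples may now
        # have different lengths; right-pad embeddings, modality indicators, labels,
        # and the attention mask to the longest sequence in the batch.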
        if any(x.shape != new_input_embeds[0].shape for x in new_input_embeds):
            max_len = max(x.shape[0] for x in new_input_embeds)

            # Embedding
            new_input_embeds_align = []
            for cur_new_embed in new_input_embeds:
                cur_new_embed = torch.cat((cur_new_embed,
                                           torch.zeros((max_len - cur_new_embed.shape[0], cur_new_embed.shape[1]),
                                                       dtype=cur_new_embed.dtype, device=cur_new_embed.device)), dim=0)
                new_input_embeds_align.append(cur_new_embed)
            new_input_embeds = torch.stack(new_input_embeds_align, dim=0)

            # Modality
            new_modality_indicators_align = []
            for cur_modality_indicator in new_modality_indicators:
                cur_new_embed = torch.cat((cur_modality_indicator,
                                           torch.zeros(max_len - cur_modality_indicator.shape[0],
                                                       dtype=cur_modality_indicator.dtype,
                                                       device=cur_modality_indicator.device)), dim=0)
                new_modality_indicators_align.append(cur_new_embed)
            new_modality_indicators = torch.stack(new_modality_indicators_align, dim=0)

            # Label
            if labels is not None:
                new_labels_align = []
                _new_labels = new_labels
                for cur_new_label in new_labels:
                    cur_new_label = torch.cat((cur_new_label,
                                               torch.full((max_len - cur_new_label.shape[0],), IGNORE_INDEX,
                                                          dtype=cur_new_label.dtype, device=cur_new_label.device)),
                                              dim=0)
                    new_labels_align.append(cur_new_label)
                new_labels = torch.stack(new_labels_align, dim=0)

            # Attention Mask
            if attention_mask is not None:
                new_attention_mask = []
                for cur_attention_mask, cur_new_labels, cur_new_labels_align in zip(attention_mask, _new_labels,
                                                                                    new_labels):
                    new_attn_mask_pad_left = torch.full((cur_new_labels.shape[0] - labels.shape[1],), True,
                                                        dtype=attention_mask.dtype, device=attention_mask.device)
                    new_attn_mask_pad_right = torch.full((cur_new_labels_align.shape[0] - cur_new_labels.shape[0],),
                                                         False, dtype=attention_mask.dtype,
                                                         device=attention_mask.device)
                    cur_new_attention_mask = torch.cat(
                        (new_attn_mask_pad_left, cur_attention_mask, new_attn_mask_pad_right), dim=0)
                    new_attention_mask.append(cur_new_attention_mask)
                attention_mask = torch.stack(new_attention_mask, dim=0)
                assert attention_mask.shape == new_labels.shape
        else:
            new_input_embeds = torch.stack(new_input_embeds, dim=0)
            new_modality_indicators = torch.stack(new_modality_indicators, dim=0)
            if labels is not None:
                new_labels = torch.stack(new_labels, dim=0)

            if attention_mask is not None:
                new_attn_mask_pad_left = torch.full(
                    (attention_mask.shape[0], new_input_embeds.shape[1] - input_ids.shape[1]), True,
                    dtype=attention_mask.dtype, device=attention_mask.device)
                attention_mask = torch.cat((new_attn_mask_pad_left, attention_mask), dim=1)
                assert attention_mask.shape == new_input_embeds.shape[:2]
        return None, new_modality_indicators, attention_mask, past_key_values, new_input_embeds, new_labels


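# The two helpers below follow the attention-mask utilities used in Hugging Face's
# LLaMA implementation: _expand_mask turns a [bsz, seq_len] padding mask into an
# additive [bsz, 1, tgt_len, src_len] bias, and _make_causal_mask builds the
# additive causal bias that hides future positions.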
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
    """
    Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
    """
    bsz, src_len = mask.size()
    tgt_len = tgt_len if tgt_len is not None else src_len

    expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)

    inverted_mask = 1.0 - expanded_mask

    return inverted_mask.masked_fill(
        inverted_mask.to(torch.bool), torch.finfo(dtype).min
    )
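# Example (illustrative): _expand_mask(torch.tensor([[1, 1, 0]]), torch.float32)
# returns a [1, 1, 3, 3] tensor in which every query row is [0, 0, finfo.min],
# i.e. real tokens contribute 0 and the padded position is pushed towards -inf
# before the softmax.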


def _make_causal_mask(
        input_ids_shape: torch.Size, dtype: torch.dtype, past_key_values_length: int = 0
):
    """
    Make causal mask used for uni-directional (causal) self-attention.
    """
    bsz, tgt_len = input_ids_shape
    mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min))
    mask_cond = torch.arange(mask.size(-1))
    mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
    mask = mask.to(dtype)

    if past_key_values_length > 0:
        mask = torch.cat(
            [torch.zeros(tgt_len, past_key_values_length, dtype=dtype), mask], dim=-1
        )
    return mask[None, None, :, :].expand(
        bsz, 1, tgt_len, tgt_len + past_key_values_length
    )
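# Example (illustrative): _make_causal_mask((1, 3), torch.float32) yields a
# [1, 1, 3, 3] tensor whose rows are
#   [0, min, min]
#   [0,   0, min]
#   [0,   0,   0]
# With past_key_values_length=2, two all-zero columns are prepended (cached past
# tokens stay visible to every query), giving shape [1, 1, 3, 5].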


class MPLUGOwl2LlamaModel(MPLUGOwl2MetaModel, LlamaModel):
    config_class = MPLUGOwl2Config

    def __init__(self, config: MPLUGOwl2Config):
        super(MPLUGOwl2LlamaModel, self).__init__(config)

    def _prepare_decoder_attention_mask(
            self, attention_mask, input_shape, inputs_embeds, past_key_values_length
    ):
        # create causal mask
        # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
        combined_attention_mask = None
        if input_shape[-1] > 1:
            combined_attention_mask = _make_causal_mask(
                input_shape,
                inputs_embeds.dtype,
                past_key_values_length=past_key_values_length,
            ).to(inputs_embeds.device)

        if attention_mask is not None:
            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
            expanded_attn_mask = _expand_mask(
                attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]
            ).to(inputs_embeds.device)
            combined_attention_mask = (
                expanded_attn_mask
                if combined_attention_mask is None
                else expanded_attn_mask + combined_attention_mask
            )

        return combined_attention_mask


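# Qwen-based counterpart of the LLaMA backbone above; QWenModel is the local
# modality-adaptive implementation from .modeling_qwen, not the stock
# transformers class.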
class MPLUGOwl2QWenModel(MPLUGOwl2MetaModel, QWenModel):
    config_class = MPLUGOwl2QwenConfig

    def __init__(self, config: MPLUGOwl2QwenConfig):
        super(MPLUGOwl2QWenModel, self).__init__(config)


class MPLUGOwl2LlamaForCausalLM(LlamaForCausalLM, MPLUGOwl2MetaForCausalLM):
    config_class = MPLUGOwl2Config

    def __init__(self, config):
        super(LlamaForCausalLM, self).__init__(config)
        self.model = MPLUGOwl2LlamaModel(config)

        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def encode_images(self, images):
        image_features = self.get_model().vision_model(images).last_hidden_state
        image_features = self.get_model().visual_abstractor(encoder_hidden_states=image_features).last_hidden_state
        return image_features

    def get_model(self):
        return self.model

    def forward(
            self,
            input_ids: torch.LongTensor = None,
            attention_mask: Optional[torch.Tensor] = None,
            past_key_values: Optional[List[torch.FloatTensor]] = None,
            inputs_embeds: Optional[torch.FloatTensor] = None,
            labels: Optional[torch.LongTensor] = None,
            use_cache: Optional[bool] = None,
            output_attentions: Optional[bool] = None,
            output_hidden_states: Optional[bool] = None,
            images: Optional[torch.FloatTensor] = None,
            return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        input_ids, modality_indicators, attention_mask, past_key_values, inputs_embeds, labels = \
            self.prepare_inputs_labels_for_multimodal(input_ids, attention_mask, past_key_values, labels, images)

        # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
        outputs = self.model(
            input_ids=input_ids,
            modality_indicators=modality_indicators,
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict
        )

        hidden_states = outputs[0]
        logits = self.lm_head(hidden_states)

        loss = None
        if labels is not None:
            # Shift so that tokens < n predict n
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss()
            shift_logits = shift_logits.view(-1, self.config.vocab_size)
            shift_labels = shift_labels.view(-1)
            # Enable model/pipeline parallelism
            shift_labels = shift_labels.to(shift_logits.device)
            loss = loss_fct(shift_logits, shift_labels)

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def prepare_inputs_for_generation(
            self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
    ):
        if past_key_values:
            input_ids = input_ids[:, -1:]

        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
        if inputs_embeds is not None and past_key_values is None:
            model_inputs = {"inputs_embeds": inputs_embeds}
        else:
            model_inputs = {"input_ids": input_ids}

        model_inputs.update(
            {
                "past_key_values": past_key_values,
                "use_cache": kwargs.get("use_cache"),
                "attention_mask": attention_mask,
                "images": kwargs.get("images", None),
            }
        )
        return model_inputs
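    # Generation sketch (illustrative; prompt preprocessing is handled elsewhere in
    # the repo and may differ):
    #
    #   input_ids  : prompt tokens with IMAGE_TOKEN_INDEX at the image position
    #   output_ids = model.generate(input_ids, images=image_tensor, max_new_tokens=64)
    #
    # The `images` kwarg passed to generate() is forwarded here, consumed by
    # forward() / prepare_inputs_labels_for_multimodal() on the first decoding
    # step, and later steps reuse the visual context through past_key_values.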


class MPLUGOwl2QWenForCausalLM(QWenLMHeadModel, MPLUGOwl2MetaForCausalLM):
    config_class = MPLUGOwl2QwenConfig

    def __init__(self, config):
        super(QWenLMHeadModel, self).__init__(config)
        from .modeling_qwen import SUPPORT_BF16, logger, SUPPORT_FP16, SUPPORT_CUDA, _import_flash_attn
        autoset_precision = config.bf16 + config.fp16 + config.fp32 == 0

        if autoset_precision:
            if SUPPORT_BF16:
                logger.warn(
                    "The model is automatically converting to bf16 for faster inference. "
                    "If you want to disable the automatic precision, please manually add bf16/fp16/fp32=True to \"AutoModelForCausalLM.from_pretrained\"."
                )
                config.bf16 = True
            elif SUPPORT_FP16:
                logger.warn(
                    "The model is automatically converting to fp16 for faster inference. "
                    "If you want to disable the automatic precision, please manually add bf16/fp16/fp32=True to \"AutoModelForCausalLM.from_pretrained\"."
                )
                config.fp16 = True
            else:
                config.fp32 = True

        if config.bf16 and SUPPORT_CUDA and not SUPPORT_BF16:
            logger.warn(
                "Your device does NOT seem to support bf16; you can switch to fp16 or fp32 by passing fp16/fp32=True in \"AutoModelForCausalLM.from_pretrained\".")
        if config.fp16 and SUPPORT_CUDA and not SUPPORT_FP16:
            logger.warn(
                "Your device does NOT support faster inference with fp16; please switch to fp32, which is likely to be faster.")
        if config.fp32:
            if SUPPORT_BF16:
                logger.warn(
                    "Your device supports faster inference by passing bf16=True in \"AutoModelForCausalLM.from_pretrained\".")
            elif SUPPORT_FP16:
                logger.warn(
                    "Your device supports faster inference by passing fp16=True in \"AutoModelForCausalLM.from_pretrained\".")

        if config.use_flash_attn == "auto":
            if config.bf16 or config.fp16:
                logger.warn("Trying to import flash-attention for faster inference...")
                config.use_flash_attn = True
            else:
                config.use_flash_attn = False
        if config.use_flash_attn and config.fp32:
            logger.warn("Flash attention will be disabled because it does NOT support fp32.")

        if config.use_flash_attn:
            _import_flash_attn()

        self.transformer = MPLUGOwl2QWenModel(config)
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        if config.bf16:
            self.transformer.bfloat16()
            self.lm_head.bfloat16()
        if config.fp16:
            self.transformer.half()
            self.lm_head.half()
        self.post_init()

    def get_model(self):
        return self.transformer

    def forward(
            self,
            input_ids: Optional[torch.LongTensor] = None,
            past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
            attention_mask: Optional[torch.FloatTensor] = None,
            token_type_ids: Optional[torch.LongTensor] = None,
            position_ids: Optional[torch.LongTensor] = None,
            head_mask: Optional[torch.FloatTensor] = None,
            inputs_embeds: Optional[torch.FloatTensor] = None,
            encoder_hidden_states: Optional[torch.Tensor] = None,
            encoder_attention_mask: Optional[torch.FloatTensor] = None,
            labels: Optional[torch.LongTensor] = None,
            use_cache: Optional[bool] = None,
            output_attentions: Optional[bool] = None,
            output_hidden_states: Optional[bool] = None,
            images=None,
            return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:

        return_dict = (
            return_dict if return_dict is not None else self.config.use_return_dict
        )
        input_ids, modality_indicators, attention_mask, past_key_values, inputs_embeds, labels = \
            self.prepare_inputs_labels_for_multimodal(input_ids, attention_mask, past_key_values, labels, images)
        # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
        outputs = self.transformer(
            input_ids,
            modality_indicators=modality_indicators,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = outputs[0]
        logits = self.lm_head(hidden_states)

        loss = None
        if labels is not None:
            # Shift so that tokens < n predict n
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss()
            shift_logits = shift_logits.view(-1, self.config.vocab_size)
            shift_labels = shift_labels.view(-1)
            # Enable model/pipeline parallelism
            shift_labels = shift_labels.to(shift_logits.device)
            loss = loss_fct(shift_logits, shift_labels)

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


AutoConfig.register("mplug_owl2", MPLUGOwl2Config)
AutoModelForCausalLM.register(MPLUGOwl2Config, MPLUGOwl2LlamaForCausalLM)
AutoConfig.register("mplug_owl2_1", MPLUGOwl2QwenConfig)
AutoModelForCausalLM.register(MPLUGOwl2QwenConfig, MPLUGOwl2QWenForCausalLM)

replace_llama_modality_adaptive()
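# With the registrations above, checkpoints whose config declares model_type
# "mplug_owl2" (or "mplug_owl2_1") dispatch to the classes in this file, and
# replace_llama_modality_adaptive() patches the LLaMA layers so they accept the
# modality_indicators produced by prepare_inputs_labels_for_multimodal().
#
# Loading sketch (illustrative; the path is a placeholder):
#   model = AutoModelForCausalLM.from_pretrained("path/to/mplug-owl2-checkpoint")
#   assert isinstance(model, MPLUGOwl2LlamaForCausalLM)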

if __name__ == "__main__":
    config = MPLUGOwl2Config.from_pretrained('/cpfs01/shared/public/test/vicuna-7b-v1.5/')
    from icecream import ic

    # config = MPLUGOwl2Config()
    model = MPLUGOwl2LlamaForCausalLM(config)

    images = torch.randn(2, 3, 448, 448)
    input_ids = torch.cat([
        torch.ones(8).long(), torch.tensor([-1] * 1).long(), torch.ones(8).long(), torch.tensor([-1] * 1).long(),
        torch.ones(8).long()
    ], dim=0).unsqueeze(0)
    labels = input_ids.clone()
    labels[labels < 0] = -100

    # image_feature = model.encode_images(images)
    # ic(image_feature.shape)

    output = model(images=images, input_ids=input_ids, labels=labels)
    ic(output.loss)
    ic(output.logits.shape)

    model.save_pretrained('/cpfs01/shared/public/test/tmp_owl')