# coding=utf-8
# Adapted from
# https://github.com/huggingface/transformers/blob/v4.28.0/src/transformers/models/gptj/modeling_gptj.py
# Copyright 2023 The vLLM team.
# Copyright 2021 The EleutherAI and HuggingFace Teams. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inference-only GPT-J model compatible with HuggingFace weights.

The input of the model is flattened to a 1D tensor of tokens. The model uses
InputMetadata to extract the original 2D shape of the input.
"""
from typing import Dict, List, Optional, Tuple

import torch
from torch import nn
from transformers import GPTJConfig

from vllm.model_executor.input_metadata import InputMetadata
from vllm.model_executor.layers.activation import get_act_fn
from vllm.model_executor.layers.attention import PagedAttentionWithRoPE
from vllm.model_executor.layers.sampler import Sampler
from vllm.model_executor.weight_utils import (hf_model_weights_iterator,
                                              load_tensor_parallel_weights)
from vllm.model_executor.parallel_utils.parallel_state import (
    get_tensor_model_parallel_rank, get_tensor_model_parallel_world_size)
from vllm.model_executor.parallel_utils.tensor_parallel import (
    VocabParallelEmbedding, ColumnParallelLinear, RowParallelLinear)
from vllm.sequence import SequenceOutputs

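# A KVCache holds the (key_cache, value_cache) tensor pair for one decoder
# layer; it is passed through to the paged attention layer.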
KVCache = Tuple[torch.Tensor, torch.Tensor]


class GPTJAttention(nn.Module):

    def __init__(self, config: GPTJConfig):
        super().__init__()
        self.total_num_heads = config.num_attention_heads
        self.hidden_size = config.hidden_size
        self.head_size = self.hidden_size // self.total_num_heads

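        # The Q, K, and V projections are fused into a single column-parallel
        # linear layer; each tensor-parallel rank owns an equal slice of the
        # attention heads.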
        self.qkv_proj = ColumnParallelLinear(config.hidden_size,
                                             3 * config.hidden_size,
                                             bias=False,
                                             gather_output=False,
                                             perform_initialization=False)
        self.out_proj = RowParallelLinear(config.hidden_size,
                                          config.hidden_size,
                                          bias=False,
                                          input_is_parallel=True,
                                          perform_initialization=False)

        tp_world_size = get_tensor_model_parallel_world_size()
        assert self.total_num_heads % tp_world_size == 0
        self.num_heads = self.total_num_heads // tp_world_size

        scaling = self.head_size**-0.5
        assert config.rotary
        assert config.rotary_dim % 2 == 0
        self.attn = PagedAttentionWithRoPE(self.num_heads, self.head_size,
                                           scaling, config.rotary_dim)
        self.warmup = False

    def forward(
        self,
        position_ids: torch.Tensor,
        hidden_states: torch.Tensor,
        kv_cache: KVCache,
        input_metadata: InputMetadata,
        cache_event: Optional[torch.cuda.Event],
    ) -> torch.Tensor:
        qkv, _ = self.qkv_proj(hidden_states)
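        # The fused projection output is laid out as [Q | K | V] along the
        # last dimension (see load_weights below), so a 3-way chunk recovers
        # the per-rank query, key, and value tensors.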
        q, k, v = qkv.chunk(chunks=3, dim=-1)
        k_cache, v_cache = kv_cache
        attn_output = self.attn(position_ids, q, k, v, k_cache, v_cache,
                                input_metadata, cache_event)
        attn_output, _ = self.out_proj(attn_output)
        return attn_output


class GPTJMLP(nn.Module):

    def __init__(self, intermediate_size: int, config: GPTJConfig):
        super().__init__()
        hidden_size = config.n_embd
        self.fc_in = ColumnParallelLinear(hidden_size,
                                          intermediate_size,
                                          gather_output=False,
                                          perform_initialization=False)
        self.fc_out = RowParallelLinear(intermediate_size,
                                        hidden_size,
                                        input_is_parallel=True,
                                        perform_initialization=False)
        self.act = get_act_fn(config.activation_function)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states, _ = self.fc_in(hidden_states)
        hidden_states = self.act(hidden_states)
        hidden_states, _ = self.fc_out(hidden_states)
        return hidden_states


class GPTJBlock(nn.Module):

    def __init__(self, config: GPTJConfig):
        super().__init__()
        if config.n_inner is None:
            inner_dim = 4 * config.n_embd
        else:
            inner_dim = config.n_inner
        self.ln_1 = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
        self.attn = GPTJAttention(config)
        self.mlp = GPTJMLP(inner_dim, config)

    def forward(
        self,
        position_ids: torch.Tensor,
        hidden_states: torch.Tensor,
        kv_cache: KVCache,
        input_metadata: InputMetadata,
        cache_event: Optional[torch.cuda.Event],
    ) -> torch.Tensor:
        residual = hidden_states
        hidden_states = self.ln_1(hidden_states)
        attn_output = self.attn(
            position_ids=position_ids,
            hidden_states=hidden_states,
            kv_cache=kv_cache,
            input_metadata=input_metadata,
            cache_event=cache_event,
        )
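        # GPT-J uses a parallel residual: the attention and MLP branches both
        # consume the same LayerNorm output, and their outputs are summed
        # with the residual in a single step.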
        mlp_output = self.mlp(hidden_states)
        hidden_states = attn_output + mlp_output + residual
        return hidden_states


class GPTJModel(nn.Module):

    def __init__(self, config: GPTJConfig):
        super().__init__()
        self.config = config
        self.embed_dim = config.n_embd
        self.wte = VocabParallelEmbedding(config.vocab_size,
                                          self.embed_dim,
                                          perform_initialization=False)
        self.h = nn.ModuleList(
            [GPTJBlock(config) for _ in range(config.n_layer)])
        self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)

    def forward(
        self,
        input_ids: torch.Tensor,
        position_ids: torch.Tensor,
        kv_caches: List[KVCache],
        input_metadata: InputMetadata,
        cache_events: Optional[List[torch.cuda.Event]],
    ) -> torch.Tensor:
        hidden_states = self.wte(input_ids)
        for i in range(len(self.h)):
            if cache_events is None:
                cache_event = None
            else:
                cache_event = cache_events[i]
            layer = self.h[i]
            hidden_states = layer(
                position_ids,
                hidden_states,
                kv_caches[i],
                input_metadata,
                cache_event,
            )
        hidden_states = self.ln_f(hidden_states)
        return hidden_states


class GPTJForCausalLM(nn.Module):

    def __init__(self, config: GPTJConfig):
        super().__init__()
        self.config = config
        assert not config.tie_word_embeddings
        self.transformer = GPTJModel(config)
        self.lm_head = ColumnParallelLinear(config.n_embd,
                                            config.vocab_size,
                                            gather_output=False,
                                            perform_initialization=False)
        self.sampler = Sampler(config.vocab_size)

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        kv_caches: List[KVCache],
        input_metadata: InputMetadata,
        cache_events: Optional[List[torch.cuda.Event]],
    ) -> Dict[int, SequenceOutputs]:
        hidden_states = self.transformer(input_ids, positions, kv_caches,
                                         input_metadata, cache_events)
        next_tokens = self.sampler(self.lm_head.weight, hidden_states,
                                   input_metadata, self.lm_head.bias)
        return next_tokens

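    # Column-parallel parameters are sharded along the output dimension
    # (dim 0) and row-parallel parameters along the input dimension (dim 1)
    # when loading checkpoints under tensor parallelism.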
    _column_parallel_weights = [
        "wte.weight", "fc_in.weight", "fc_in.bias", "lm_head.weight",
        "lm_head.bias"
    ]
    _row_parallel_weights = ["out_proj.weight", "fc_out.weight"]

    def load_weights(self,
                     model_name_or_path: str,
                     cache_dir: Optional[str] = None,
                     use_np_cache: bool = False):
        tp_rank = get_tensor_model_parallel_rank()
        state_dict = self.state_dict()
        for name, loaded_weight in hf_model_weights_iterator(
                model_name_or_path, cache_dir, use_np_cache):
            if "attn.bias" in name or "attn.masked_bias" in name:
                continue

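            # The HF checkpoint stores separate q_proj/k_proj/v_proj weights;
            # copy each into the matching third of the fused qkv_proj
            # parameter, keeping only this rank's shard.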
            is_attention_weight = False
            for stride_id, att_weight_name in enumerate(
                    ["q_proj", "k_proj", "v_proj"]):
                if att_weight_name not in name:
                    continue
                param = state_dict[name.replace(att_weight_name, "qkv_proj")]
                # Each of Q, K, and V occupies one third of this rank's
                # fused qkv_proj rows.
                shard_size = param.shape[0] // 3
                loaded_weight = loaded_weight[shard_size * tp_rank:shard_size *
                                              (tp_rank + 1)]
                param_slice = param.data[shard_size * stride_id:shard_size *
                                         (stride_id + 1)]
                assert param_slice.shape == loaded_weight.shape
                param_slice.copy_(loaded_weight)
                is_attention_weight = True
                break
            if is_attention_weight:
                continue

            param = state_dict[name]
            load_tensor_parallel_weights(param, loaded_weight, name,
                                         self._column_parallel_weights,
                                         self._row_parallel_weights, tp_rank)