jupyterjazz committed on
Commit: ae40cb9
Parent: 3eb20d0

fix: 0 is not none


Signed-off-by: jupyterjazz <[email protected]>

Files changed (4)
  1. mha.py +1 -1
  2. mlp.py +1 -1
  3. modeling_lora.py +2 -2
  4. modeling_xlm_roberta.py +2 -2
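
The commit message points at a Python truthiness pitfall: `if task:` and `if task_idx:` treat 0, empty strings, and other falsy values the same as None, so an adapter mapped to index 0 silently falls back to the base weights. A minimal sketch of the failure mode and the fix, using a hypothetical adaptation_map (the concrete task names are assumed, not taken from the repository):

# Hypothetical task-to-adapter mapping; the real adaptation_map lives in modeling_lora.py.
adaptation_map = {"retrieval": 0, "classification": 1}

task = "retrieval"
task_idx = adaptation_map[task] if task else None  # task_idx == 0

# Buggy check: 0 is falsy, so the first adapter is never selected.
if task_idx:
    branch = "lora_forward(weight, current_task=task_idx)"
else:
    branch = "base weights"
print(branch)  # -> "base weights" (wrong for a valid index of 0)

# Fixed check: only a genuinely missing task falls back to the base weights.
if task_idx is not None:
    branch = "lora_forward(weight, current_task=task_idx)"
else:
    branch = "base weights"
print(branch)  # -> "lora_forward(...)" as intended
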
mha.py CHANGED
@@ -646,7 +646,7 @@ class MHA(nn.Module):
         if not self.cross_attn and self.num_heads_kv == self.num_heads:
             assert x_kv is None and mixer_subset is None
             lora_kwargs = {}
-            if task:
+            if task is not None:
                 lora_kwargs['task'] = task
                 lora_kwargs['residual'] = self.return_residual
mlp.py CHANGED
@@ -49,7 +49,7 @@ class Mlp(nn.Module):

     def forward(self, x, task):
         lora_kwargs = {}
-        if task:
+        if task is not None:
             lora_kwargs['task'] = task
         y = self.fc1(x, **lora_kwargs)
         y = self.activation(y)
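
For context, the layers touched here all build lora_kwargs the same way: the task key is added only when a task was passed, and the dict is then unpacked into the layer call, so layers that accept no task argument keep working. A small self-contained sketch of that pattern with a stand-in module (this Mlp is simplified and hypothetical; only the kwargs handling mirrors the diff):

import torch
import torch.nn as nn

class Mlp(nn.Module):
    """Simplified stand-in: forwards an optional task only when it is given."""
    def __init__(self, dim=4):
        super().__init__()
        self.fc1 = nn.Linear(dim, dim)
        self.activation = nn.GELU()

    def forward(self, x, task=None):
        lora_kwargs = {}
        if task is not None:  # `if task:` would drop a task index of 0
            lora_kwargs['task'] = task
        # With no task the dict stays empty, so a plain nn.Linear works;
        # a LoRA-parametrized layer would consume the forwarded task kwarg.
        y = self.fc1(x, **lora_kwargs)
        return self.activation(y)

mlp = Mlp()
out = mlp(torch.randn(2, 4))            # no task: fc1 called without extra kwargs
# out = mlp(torch.randn(2, 4), task=0)  # with a LoRA-wrapped fc1, task 0 now reaches it
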
modeling_lora.py CHANGED
@@ -181,7 +181,7 @@ class LoRAParametrization(nn.Module):

         def new_forward(self, input, task, residual=False):
             task_idx = adaptation_map[task] if task else None
-            if task_idx:
+            if task_idx is not None:
                 weights = self.parametrizations.weight[0].lora_forward(self.weight, current_task=task_idx)
             else:
                 weights = self.weight
@@ -210,7 +210,7 @@ class LoRAParametrization(nn.Module):

         def new_forward(self, input, task):
             task_idx = adaptation_map[task] if task else None
-            if task_idx:
+            if task_idx is not None:
                 weights = self.parametrizations.weight[0].lora_forward(self.weight, current_task=task_idx)
             else:
                 weights = self.weight
modeling_xlm_roberta.py CHANGED
@@ -314,7 +314,7 @@ class XLMRobertaPooler(nn.Module):
         # We "pool" the model by simply taking the hidden state corresponding
         # to the first token.
         lora_kwargs = {}
-        if task:
+        if task is not None:
             lora_kwargs['task'] = task

         first_token_tensor = hidden_states[:, 0] if pool else hidden_states
@@ -551,7 +551,7 @@ class XLMRobertaModel(XLMRobertaPreTrainedModel):
         else:
             range_iter = range(0, len(sentences), batch_size)
         lora_kwargs = {}
-        if task:
+        if task is not None:
             lora_kwargs['task'] = task
         for i in range_iter:
             encoded_input = self.tokenizer(