yfdeng committed on
Commit ff1db35 · 1 Parent(s): 4f39c09

disable diffusion to avoid hf version bug

Files changed (1)
  1. Anymate/model.py +15 -15
Anymate/model.py CHANGED
@@ -4,11 +4,11 @@ from ThirdParty.michelangelo.utils.misc import get_config_from_file, instantiate
 # from ThirdParty.PointLLM.pointllm.model.pointllm import PointLLMLlamaForCausalLM
 from ThirdParty.michelangelo.models.modules.distributions import DiagonalGaussianDistribution
 from ThirdParty.michelangelo.models.modules.embedder import components_from_spherical_harmonics
-from Anymate.utils.diffusion_encoder import TransformerEncoder
+# from Anymate.utils.diffusion_encoder import TransformerEncoder
 from Anymate.models.joint import TransformerDecoder, ImplicitTransformerDecoder, TriPlaneDecoder
 from Anymate.models.conn import AttendjointsDecoder_con_combine, AttendjointsDecoder_con_token
 from Anymate.models.skin import AttendjointsDecoder_combine, AttendjointsDecoder_multi
-from Anymate.models.diffusion import Pointe_Diffusion, Cross_Attention_Diffusion
+# from Anymate.models.diffusion import Pointe_Diffusion, Cross_Attention_Diffusion
 
 class Encoder(nn.Module):
     def __init__(self,
@@ -172,15 +172,15 @@ class EncoderDecoder(nn.Module):
                 synthesis_kwargs = {'num_fp16_res': 0, 'conv_clamp': None, 'fused_modconv_default': 'inference_only'}
             )
 
-        elif decoder == 'Pointe_Diffusion':
-            self.only_embed = False
-            self.return_latents = True
-            self.decoder = Pointe_Diffusion(**kwargs)
+        # elif decoder == 'Pointe_Diffusion':
+        #     self.only_embed = False
+        #     self.return_latents = True
+        #     self.decoder = Pointe_Diffusion(**kwargs)
 
-        elif decoder == 'Cross_Attention_Diffusion':
-            self.only_embed = False
-            self.return_latents = True
-            self.decoder = Cross_Attention_Diffusion(**kwargs)
+        # elif decoder == 'Cross_Attention_Diffusion':
+        #     self.only_embed = False
+        #     self.return_latents = True
+        #     self.decoder = Cross_Attention_Diffusion(**kwargs)
 
         elif decoder == 'attendjoints_combine':
             self.only_embed = False
@@ -294,11 +294,11 @@ class EncoderDecoder(nn.Module):
             ),
             nn.Linear(513, 1, dtype=dtype)
         ])
-        if encoder == 'transformer':
-            self.points_cloud_embed = nn.Linear(
-                768, 768, device=device, dtype=dtype
-            )
-            self.encoder = TransformerEncoder(device=device,dtype=dtype, num_latents=kwargs['num_latents'])
+        # if encoder == 'transformer':
+        #     self.points_cloud_embed = nn.Linear(
+        #         768, 768, device=device, dtype=dtype
+        #     )
+        #     self.encoder = TransformerEncoder(device=device,dtype=dtype, num_latents=kwargs['num_latents'])
 
 
 
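
The commit disables the diffusion code paths by commenting out both the imports and the decoder branches that use them. A lighter-touch alternative, sketched below purely as a hypothetical (it is not what this commit does), would be to guard the imports so that Anymate/model.py still loads when the diffusion modules clash with the installed Hugging Face library versions, while the non-diffusion decoders keep working; the module paths are taken from the diff, everything else is assumed.

# Hypothetical alternative sketch (not part of this commit): guard the
# diffusion imports instead of commenting them out, so the module still
# imports when the diffusion stack is incompatible with the installed
# Hugging Face package versions.
try:
    from Anymate.utils.diffusion_encoder import TransformerEncoder
    from Anymate.models.diffusion import Pointe_Diffusion, Cross_Attention_Diffusion
    DIFFUSION_AVAILABLE = True
except Exception:  # e.g. ImportError raised by an incompatible dependency
    TransformerEncoder = Pointe_Diffusion = Cross_Attention_Diffusion = None
    DIFFUSION_AVAILABLE = False

# The disabled branches in EncoderDecoder.__init__ could then fail only
# when a diffusion decoder is actually requested, e.g.:
#
#     elif decoder == 'Pointe_Diffusion':
#         if not DIFFUSION_AVAILABLE:
#             raise RuntimeError('diffusion modules unavailable in this environment')
#         self.only_embed = False
#         self.return_latents = True
#         self.decoder = Pointe_Diffusion(**kwargs)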