runtime error

ipelines/pixart_alpha/pipeline_pixart_alpha.py", line 371, in encode_prompt
    prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=prompt_attention_mask)
  File "/usr/local/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "/usr/local/lib/python3.10/site-packages/transformers/models/t5/modeling_t5.py", line 1975, in forward
    encoder_outputs = self.encoder(
  File "/usr/local/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "/usr/local/lib/python3.10/site-packages/transformers/models/t5/modeling_t5.py", line 1110, in forward
    layer_outputs = layer_module(
  File "/usr/local/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "/usr/local/lib/python3.10/site-packages/transformers/models/t5/modeling_t5.py", line 694, in forward
    self_attention_outputs = self.layer[0](
  File "/usr/local/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "/usr/local/lib/python3.10/site-packages/transformers/models/t5/modeling_t5.py", line 601, in forward
    attention_output = self.SelfAttention(
  File "/usr/local/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "/usr/local/lib/python3.10/site-packages/transformers/models/t5/modeling_t5.py", line 520, in forward
    query_states = shape(self.q(hidden_states))  # (batch_size, n_heads, seq_length, dim_per_head)
  File "/usr/local/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "/usr/local/lib/python3.10/site-packages/torch/nn/modules/linear.py", line 114, in forward
    return F.linear(input, self.weight, self.bias)
RuntimeError: "addmm_impl_cpu_" not implemented for 'Half'
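For context, `"addmm_impl_cpu_" not implemented for 'Half'` is raised when float16 tensors reach a matrix multiply on CPU, which PyTorch does not support; here the fp16 T5 text encoder is being run on CPU inside encode_prompt. Below is a minimal sketch of one common workaround: only request fp16 when a GPU is actually available and fall back to fp32 on CPU. The model id and prompt are illustrative assumptions, not taken from the logs above.

```python
import torch
from diffusers import PixArtAlphaPipeline

# Use the GPU if the Space/runtime has one; otherwise stay on CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"

# fp16 matmuls are only implemented on GPU, so force fp32 on CPU to avoid
# the "addmm_impl_cpu_ not implemented for 'Half'" error seen above.
dtype = torch.float16 if device == "cuda" else torch.float32

pipe = PixArtAlphaPipeline.from_pretrained(
    "PixArt-alpha/PixArt-XL-2-1024-MS",  # assumed checkpoint, for illustration only
    torch_dtype=dtype,
).to(device)

image = pipe("an astronaut riding a horse").images[0]
```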
