AttributeError: cannot assign module before Module.__init__() call

Cause

A custom subclass of nn.Module was instantiated, but its __init__ method never calls super(XXX, self).__init__() before assigning parameters or submodules to self. nn.Module.__setattr__ registers parameters and submodules in internal dictionaries (self._parameters, self._modules) that are created only by nn.Module.__init__(), so assigning them without that call raises the AttributeError above.
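A minimal sketch that reproduces the message (the Broken class and the nn.Linear layer are purely illustrative):

import torch.nn as nn

class Broken(nn.Module):
    def __init__(self):
        # super(Broken, self).__init__() is missing here
        self.fc = nn.Linear(4, 2)  # raises: cannot assign module before Module.__init__() call

Broken()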

Incorrect example

import numpy as np
import torch
import torch.nn as nn

class SelfAttention(nn.Module):
    """ Self-Attention """

    def __init__(self, n_head, d_k, d_v, d_x, d_o):
        # super(SelfAttention, self).__init__()  # missing: SelfAttention is the class name
        self.wq = nn.Parameter(torch.Tensor(d_x, d_k))
        self.wk = nn.Parameter(torch.Tensor(d_x, d_k))
        self.wv = nn.Parameter(torch.Tensor(d_x, d_v))

        # MultiHeadAttention is defined separately (a minimal stand-in is sketched in the correct example below)
        self.mha = MultiHeadAttention(n_head=n_head, d_k_=d_k, d_v_=d_v, d_k=d_k, d_v=d_v, d_o=d_o)

        self.init_parameters()

    def init_parameters(self):
        for param in self.parameters():
            stdv = 1. / np.power(param.size(-1), 0.5)
            param.data.uniform_(-stdv, stdv)

    def forward(self, x, mask=None):
        # project the input into query / key / value spaces
        q = torch.matmul(x, self.wq)
        k = torch.matmul(x, self.wk)
        v = torch.matmul(x, self.wv)

        attn, output = self.mha(q, k, v, mask=mask)

        return attn, output


if __name__ == "__main__":
    n_x = 4
    d_x = 80
    batch = 2
    x = torch.randn(batch, n_x, d_x)
    mask = torch.zeros(batch, n_x, n_x).bool()

    selfattn = SelfAttention(n_head=8, d_k=128, d_v=64, d_x=80, d_o=80)
    attn, output = selfattn(x, mask=mask)

    print(attn.size())
    print(output.size())

(Screenshot in the original post: the AttributeError traceback raised by running the code above.)

Correct example

import numpy as np
import torch
import torch.nn as nn
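
# Note: the original post relies on a MultiHeadAttention class defined elsewhere and
# not shown here. The class below is only an assumed, minimal stand-in that matches
# the call signature used in this example (d_k_/d_v_ are taken to be the input
# feature sizes of q/k and of v) so that the script can run end to end.
class MultiHeadAttention(nn.Module):
    """ Minimal stand-in for the multi-head attention used below (assumed interface) """

    def __init__(self, n_head, d_k_, d_v_, d_k, d_v, d_o):
        super(MultiHeadAttention, self).__init__()
        self.n_head, self.d_k, self.d_v = n_head, d_k, d_v
        self.fc_q = nn.Linear(d_k_, n_head * d_k)
        self.fc_k = nn.Linear(d_k_, n_head * d_k)
        self.fc_v = nn.Linear(d_v_, n_head * d_v)
        self.fc_o = nn.Linear(n_head * d_v, d_o)

    def forward(self, q, k, v, mask=None):
        batch, n_q, _ = q.size()
        n_k = k.size(1)
        # project and split into heads: (batch, n_head, seq_len, dim)
        q = self.fc_q(q).view(batch, n_q, self.n_head, self.d_k).transpose(1, 2)
        k = self.fc_k(k).view(batch, n_k, self.n_head, self.d_k).transpose(1, 2)
        v = self.fc_v(v).view(batch, n_k, self.n_head, self.d_v).transpose(1, 2)
        # scaled dot-product attention scores: (batch, n_head, n_q, n_k)
        scores = torch.matmul(q, k.transpose(-2, -1)) / self.d_k ** 0.5
        if mask is not None:
            # assumption: positions where mask is True are excluded from attention
            scores = scores.masked_fill(mask.unsqueeze(1), float('-inf'))
        attn = torch.softmax(scores, dim=-1)
        output = torch.matmul(attn, v)
        # merge heads back to (batch, n_q, n_head * d_v) and project to d_o
        output = self.fc_o(output.transpose(1, 2).contiguous().view(batch, n_q, -1))
        return attn, output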

class SelfAttention(nn.Module):
    """ Self-Attention """

    def __init__(self, n_head, d_k, d_v, d_x, d_o):
        super(SelfAttention, self).__init__()  # SelfAttention is the class name; this call must come before any parameter/submodule assignment
        self.wq = nn.Parameter(torch.Tensor(d_x, d_k))
        self.wk = nn.Parameter(torch.Tensor(d_x, d_k))
        self.wv = nn.Parameter(torch.Tensor(d_x, d_v))

        # the MultiHeadAttention defined above (stand-in for the author's implementation)
        self.mha = MultiHeadAttention(n_head=n_head, d_k_=d_k, d_v_=d_v, d_k=d_k, d_v=d_v, d_o=d_o)

        self.init_parameters()

    def init_parameters(self):
        for param in self.parameters():
            stdv = 1. / np.power(param.size(-1), 0.5)
            param.data.uniform_(-stdv, stdv)

    def forward(self, x, mask=None):
        # project the input into query / key / value spaces
        q = torch.matmul(x, self.wq)
        k = torch.matmul(x, self.wk)
        v = torch.matmul(x, self.wv)

        attn, output = self.mha(q, k, v, mask=mask)

        return attn, output


if __name__ == "__main__":
    n_x = 4
    d_x = 80
    batch = 2
    x = torch.randn(batch, n_x, d_x)
    mask = torch.zeros(batch, n_x, n_x).bool()

    selfattn = SelfAttention(n_head=8, d_k=128, d_v=64, d_x=80, d_o=80)
    attn, output = selfattn(x, mask=mask)

    print(attn.size())
    print(output.size())

(Screenshot in the original post: the printed sizes of attn and output.)
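On Python 3 the parameterless form super().__init__() is equivalent and avoids repeating the class name, so the fix can also be written as (sketch):

class SelfAttention(nn.Module):
    def __init__(self, n_head, d_k, d_v, d_x, d_o):
        super().__init__()  # same effect as super(SelfAttention, self).__init__() on Python 3
        ...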
