PVT: A Vision Transformer with a Pyramid Structure
Introduction
The original Vision Transformer has a columnar architecture, meaning the model can only output features at a single scale. PVT (Pyramid Vision Transformer) introduces a pyramid structure that produces feature maps at multiple scales, letting it handle high-resolution images more efficiently and plug seamlessly into all kinds of downstream tasks. In this notebook we implement the PVT model with Paddle and load the latest pretrained weights to verify that its accuracy matches the reference. The model has also been added to the Paddle-Image-Models collection, so you are welcome to load and use it through PPIM.
Related Resources
Paper: Pyramid Vision Transformer: A Versatile Backbone for Dense Prediction without Convolutions
Project: whai362/PVT
Explainer article: 大白话 Pyramid Vision Transformer
The Pyramid Structure
After years of development, CNN backbones in computer vision have converged on a few common design patterns.
The most typical of these is the pyramid structure.
In brief:
- The resolution of the feature maps gradually decreases as the network deepens.
- The channel count of the feature maps gradually increases as the network deepens.

For example, a typical ResNet-50 produces stage outputs of 56×56×256, 28×28×512, 14×14×1024, and 7×7×2048. A rough diagram of this structure is shown below:
The PVT Model
In short, the biggest change in PVT is that each stage uses a patch embedding to progressively reduce the input resolution.
The model architecture diagram is shown below:
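As a quick illustration (an added sketch, not from the original write-up): with the default 224×224 input, a patch size of 4 in stage 1, and stride-2 patch embeddings in stages 2 to 4, the per-stage resolutions follow the classic CNN pyramid:

# Per-stage feature-map side length for a 224x224 input (illustrative)
side = 224
for stride in [4, 2, 2, 2]:      # stage-1 patch size, then stride-2 embeddings
    side //= stride
    print(side, end=' ')         # 56 28 14 7

The channel widths grow across stages at the same time (64, 128, 320, 512 in the PVT variants defined below), mirroring the CNN pattern described above.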
In addition, to cut the computational cost while preserving feature-map resolution and a global receptive field, the model also modifies the attention mechanism:
the height and width of the key (K) and value (V) are each shrunk to 1/R_i of their original size.
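To see the saving concretely, here is a minimal sketch of the reduction step (an added example; the numbers assume stage 1 of the default configuration, with a 56×56 token grid, 64 channels, and sr_ratio = 8, matching the self.sr layer defined later):

import paddle
import paddle.nn as nn

# The 56*56 = 3136 key/value tokens shrink to 7*7 = 49, so the attention
# matrix is 64x smaller than with full self-attention.
sr = nn.Conv2D(64, 64, kernel_size=8, stride=8)
x = paddle.randn((1, 64, 56, 56))
print(sr(x).shape)  # [1, 64, 7, 7]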
The structure of this attention module is shown below:
The model's performance and accuracy table is shown below:
Building the Model
Installing Dependencies
This model is built on top of the ViT model, so it depends on the ViT implementation in PPIM.
In [1]
!pip install ppim==1.0.6
Modifying the Modules
Starting from the ViT model, we modify its Attention, Block, and PatchEmbed modules.
In [2]
import numpy as np

import paddle
import paddle.nn as nn

import ppim.models.vit as vit
from ppim.models.vit import trunc_normal_, zeros_, ones_


# Modified Attention: K and V are spatially reduced by sr_ratio
class Attention(nn.Layer):
    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None,
                 attn_drop=0., proj_drop=0., sr_ratio=1):
        super().__init__()
        assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}."

        self.dim = dim
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5

        self.q = nn.Linear(dim, dim, bias_attr=qkv_bias)
        self.kv = nn.Linear(dim, dim * 2, bias_attr=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

        self.sr_ratio = sr_ratio
        if sr_ratio > 1:
            self.sr = nn.Conv2D(
                dim, dim, kernel_size=sr_ratio, stride=sr_ratio)
            self.norm = nn.LayerNorm(dim)

    def forward(self, x, H, W):
        B, N, C = x.shape
        q = self.q(x).reshape((B, N, self.num_heads,
                               C // self.num_heads)).transpose((0, 2, 1, 3))

        if self.sr_ratio > 1:
            x_ = x.transpose((0, 2, 1)).reshape((B, C, H, W))
            x_ = self.sr(x_).reshape((B, C, -1)).transpose((0, 2, 1))
            x_ = self.norm(x_)
            kv = self.kv(x_).reshape((B, -1, 2, self.num_heads,
                                      C // self.num_heads)).transpose((2, 0, 3, 1, 4))
        else:
            kv = self.kv(x).reshape((B, -1, 2, self.num_heads,
                                     C // self.num_heads)).transpose((2, 0, 3, 1, 4))
        k, v = kv[0], kv[1]

        attn = (q.matmul(k.transpose((0, 1, 3, 2)))) * self.scale
        attn = nn.functional.softmax(attn, axis=-1)
        attn = self.attn_drop(attn)

        x = (attn.matmul(v)).transpose((0, 2, 1, 3)).reshape((-1, N, C))
        x = self.proj(x)
        x = self.proj_drop(x)

        return x


# Replace the Attention inside the ViT Block
class Block(vit.Block):
    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None,
                 drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU,
                 norm_layer=nn.LayerNorm, epsilon=1e-6, sr_ratio=1):
        super(Block, self).__init__(dim, num_heads, mlp_ratio, qkv_bias, qk_scale,
                                    drop, attn_drop, drop_path, act_layer,
                                    norm_layer, epsilon)
        self.attn = Attention(
            dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
            attn_drop=attn_drop, proj_drop=drop, sr_ratio=sr_ratio)

    def forward(self, x, H, W):
        x = x + self.drop_path(self.attn(self.norm1(x), H, W))
        x = x + self.drop_path(self.mlp(self.norm2(x)))
        return x


# Add an LN layer to the ViT PatchEmbed
class PatchEmbed(vit.PatchEmbed):
    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
        super(PatchEmbed, self).__init__(
            img_size, patch_size, in_chans, embed_dim)
        self.norm = nn.LayerNorm(embed_dim)

    def forward(self, x):
        B, C, H, W = x.shape
        x = self.proj(x).flatten(2).transpose((0, 2, 1))
        x = self.norm(x)
        H, W = H // self.patch_size[0], W // self.patch_size[1]
        return x, (H, W)
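Before assembling the full network, a quick shape-level smoke test of the two modified modules can be run (an added sketch; the 8×8 token grid and the dims are arbitrary illustrative values):

# Modified attention: 64 tokens on an 8x8 grid, K/V spatially reduced by 2
attn = Attention(dim=64, num_heads=8, sr_ratio=2)
tokens = paddle.randn((2, 64, 64))      # (batch, N = H*W, channels)
print(attn(tokens, 8, 8).shape)         # [2, 64, 64]

# Modified patch embedding: returns the token sequence plus the new (H, W)
pe = PatchEmbed(img_size=224, patch_size=4, in_chans=3, embed_dim=64)
feat, (H, W) = pe(paddle.randn((2, 3, 224, 224)))
print(feat.shape, H, W)                 # [2, 3136, 64] 56 56

Building the PVT Model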
In [3]
# Use the modified Block and PatchEmbed:
# a PatchEmbed layer goes in front of each stage
class PyramidVisionTransformer(nn.Layer):
    def __init__(self, img_size=224, patch_size=16, in_chans=3,
                 embed_dims=[64, 128, 256, 512], num_heads=[1, 2, 4, 8],
                 mlp_ratios=[4, 4, 4, 4], qkv_bias=False, qk_scale=None,
                 drop_rate=0., attn_drop_rate=0., drop_path_rate=0.,
                 norm_layer=nn.LayerNorm, epsilon=1e-6,
                 depths=[3, 4, 6, 3], sr_ratios=[8, 4, 2, 1], class_dim=1000):
        super().__init__()
        self.class_dim = class_dim
        self.depths = depths

        # patch_embed
        self.patch_embed1 = PatchEmbed(img_size=img_size, patch_size=patch_size,
                                       in_chans=in_chans, embed_dim=embed_dims[0])
        self.patch_embed2 = PatchEmbed(img_size=img_size // 4, patch_size=2,
                                       in_chans=embed_dims[0], embed_dim=embed_dims[1])
        self.patch_embed3 = PatchEmbed(img_size=img_size // 8, patch_size=2,
                                       in_chans=embed_dims[1], embed_dim=embed_dims[2])
        self.patch_embed4 = PatchEmbed(img_size=img_size // 16, patch_size=2,
                                       in_chans=embed_dims[2], embed_dim=embed_dims[3])

        # pos_embed
        self.pos_embed1 = self.create_parameter(
            shape=(1, self.patch_embed1.num_patches, embed_dims[0]),
            default_initializer=zeros_)
        self.add_parameter("pos_embed1", self.pos_embed1)
        self.pos_drop1 = nn.Dropout(p=drop_rate)
        self.pos_embed2 = self.create_parameter(
            shape=(1, self.patch_embed2.num_patches, embed_dims[1]),
            default_initializer=zeros_)
        self.add_parameter("pos_embed2", self.pos_embed2)
        self.pos_drop2 = nn.Dropout(p=drop_rate)
        self.pos_embed3 = self.create_parameter(
            shape=(1, self.patch_embed3.num_patches, embed_dims[2]),
            default_initializer=zeros_)
        self.add_parameter("pos_embed3", self.pos_embed3)
        self.pos_drop3 = nn.Dropout(p=drop_rate)
        self.pos_embed4 = self.create_parameter(
            shape=(1, self.patch_embed4.num_patches + 1, embed_dims[3]),
            default_initializer=zeros_)
        self.add_parameter("pos_embed4", self.pos_embed4)
        self.pos_drop4 = nn.Dropout(p=drop_rate)

        # transformer encoder
        dpr = np.linspace(0, drop_path_rate, sum(depths))
        cur = 0
        self.block1 = nn.LayerList([Block(
            dim=embed_dims[0], num_heads=num_heads[0], mlp_ratio=mlp_ratios[0],
            qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop_rate,
            attn_drop=attn_drop_rate, drop_path=dpr[cur + i],
            norm_layer=norm_layer, epsilon=epsilon, sr_ratio=sr_ratios[0])
            for i in range(depths[0])])
        cur += depths[0]
        self.block2 = nn.LayerList([Block(
            dim=embed_dims[1], num_heads=num_heads[1], mlp_ratio=mlp_ratios[1],
            qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop_rate,
            attn_drop=attn_drop_rate, drop_path=dpr[cur + i],
            norm_layer=norm_layer, epsilon=epsilon, sr_ratio=sr_ratios[1])
            for i in range(depths[1])])
        cur += depths[1]
        self.block3 = nn.LayerList([Block(
            dim=embed_dims[2], num_heads=num_heads[2], mlp_ratio=mlp_ratios[2],
            qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop_rate,
            attn_drop=attn_drop_rate, drop_path=dpr[cur + i],
            norm_layer=norm_layer, epsilon=epsilon, sr_ratio=sr_ratios[2])
            for i in range(depths[2])])
        cur += depths[2]
        self.block4 = nn.LayerList([Block(
            dim=embed_dims[3], num_heads=num_heads[3], mlp_ratio=mlp_ratios[3],
            qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop_rate,
            attn_drop=attn_drop_rate, drop_path=dpr[cur + i],
            norm_layer=norm_layer, epsilon=epsilon, sr_ratio=sr_ratios[3])
            for i in range(depths[3])])
        self.norm = norm_layer(embed_dims[3])

        # cls_token
        self.cls_token = self.create_parameter(
            shape=(1, 1, embed_dims[3]), default_initializer=zeros_)
        self.add_parameter("cls_token", self.cls_token)

        # classification head
        if class_dim > 0:
            self.head = nn.Linear(embed_dims[3], class_dim)

        # init weights
        trunc_normal_(self.pos_embed1)
        trunc_normal_(self.pos_embed2)
        trunc_normal_(self.pos_embed3)
        trunc_normal_(self.pos_embed4)
        trunc_normal_(self.cls_token)
        self.apply(self._init_weights)

    def reset_drop_path(self, drop_path_rate):
        dpr = np.linspace(0, drop_path_rate, sum(self.depths))
        cur = 0
        for i in range(self.depths[0]):
            self.block1[i].drop_path.drop_prob = dpr[cur + i]
        cur += self.depths[0]
        for i in range(self.depths[1]):
            self.block2[i].drop_path.drop_prob = dpr[cur + i]
        cur += self.depths[1]
        for i in range(self.depths[2]):
            self.block3[i].drop_path.drop_prob = dpr[cur + i]
        cur += self.depths[2]
        for i in range(self.depths[3]):
            self.block4[i].drop_path.drop_prob = dpr[cur + i]

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight)
            if isinstance(m, nn.Linear) and m.bias is not None:
                zeros_(m.bias)
        elif isinstance(m, nn.LayerNorm):
            zeros_(m.bias)
            ones_(m.weight)

    def forward_features(self, x):
        B = x.shape[0]

        # stage 1
        x, (H, W) = self.patch_embed1(x)
        x = x + self.pos_embed1
        x = self.pos_drop1(x)
        for blk in self.block1:
            x = blk(x, H, W)
        x = x.reshape((B, H, W, -1)).transpose((0, 3, 1, 2))

        # stage 2
        x, (H, W) = self.patch_embed2(x)
        x = x + self.pos_embed2
        x = self.pos_drop2(x)
        for blk in self.block2:
            x = blk(x, H, W)
        x = x.reshape((B, H, W, -1)).transpose((0, 3, 1, 2))

        # stage 3
        x, (H, W) = self.patch_embed3(x)
        x = x + self.pos_embed3
        x = self.pos_drop3(x)
        for blk in self.block3:
            x = blk(x, H, W)
        x = x.reshape((B, H, W, -1)).transpose((0, 3, 1, 2))

        # stage 4
        x, (H, W) = self.patch_embed4(x)
        cls_tokens = self.cls_token.expand((B, -1, -1))
        x = paddle.concat((cls_tokens, x), axis=1)
        x = x + self.pos_embed4
        x = self.pos_drop4(x)
        for blk in self.block4:
            x = blk(x, H, W)
        x = self.norm(x)

        return x[:, 0]

    def forward(self, x):
        x = self.forward_features(x)
        if self.class_dim > 0:
            x = self.head(x)
        return x


def pvt_ti(**kwargs):
    model = PyramidVisionTransformer(
        patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8],
        mlp_ratios=[8, 8, 4, 4], qkv_bias=True, norm_layer=nn.LayerNorm,
        depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], **kwargs)
    return model


def pvt_s(**kwargs):
    model = PyramidVisionTransformer(
        patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8],
        mlp_ratios=[8, 8, 4, 4], qkv_bias=True, norm_layer=nn.LayerNorm,
        depths=[3, 4, 6, 3], sr_ratios=[8, 4, 2, 1], **kwargs)
    return model


def pvt_m(**kwargs):
    model = PyramidVisionTransformer(
        patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8],
        mlp_ratios=[8, 8, 4, 4], qkv_bias=True, norm_layer=nn.LayerNorm,
        depths=[3, 4, 18, 3], sr_ratios=[8, 4, 2, 1], **kwargs)
    return model


def pvt_l(**kwargs):
    model = PyramidVisionTransformer(
        patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8],
        mlp_ratios=[8, 8, 4, 4], qkv_bias=True, norm_layer=nn.LayerNorm,
        depths=[3, 8, 27, 3], sr_ratios=[8, 4, 2, 1], **kwargs)
    return model

Model Testing
In [5]
# Instantiate the model
model = pvt_ti()
# Test the forward pass
out = model(paddle.randn((1, 3, 224, 224)))
# Print the output shape
print(out.shape)
[1, 1000]
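A rough size check can be added the same way (an added sketch; it reuses the model instance from the cell above):

# Count trainable parameters of the pvt_ti instance
import numpy as np
n_params = sum(np.prod(p.shape) for p in model.parameters())
print(f"{n_params / 1e6:.1f}M parameters")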
Verifying Model Accuracy
Extracting the Dataset
In [ ]
# Extract the dataset
!mkdir ~/data/ILSVRC2012
!tar -xf ~/data/data68594/ILSVRC2012_img_val.tar -C ~/data/ILSVRC2012
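As an optional sanity check (an added step; the ILSVRC2012 validation split contains 50,000 images, so the count below should print 50000 if the tar extracted flat into the target directory):

!ls ~/data/ILSVRC2012 | wc -l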
Validating the Model
In [2]
import os

import cv2
import numpy as np

import paddle
import paddle.vision.transforms as T

from ppim import pvt_l
from PIL import Image


# Build the dataset
# backend: 'cv2' or 'pil'
class ILSVRC2012(paddle.io.Dataset):
    def __init__(self, root, label_list, transform, backend='pil'):
        self.transform = transform
        self.root = root
        self.label_list = label_list
        self.backend = backend
        self.load_datas()

    def load_datas(self):
        self.imgs = []
        self.labels = []
        with open(self.label_list, 'r') as f:
            for line in f:
                img, label = line[:-1].split(' ')
                self.imgs.append(os.path.join(self.root, img))
                self.labels.append(int(label))

    def __getitem__(self, idx):
        label = self.labels[idx]
        image = self.imgs[idx]
        if self.backend == 'cv2':
            image = cv2.imread(image)
        else:
            image = Image.open(image).convert('RGB')
        image = self.transform(image)
        return image.astype('float32'), np.array(label).astype('int64')

    def __len__(self):
        return len(self.imgs)


# Set up the model
model, val_transforms = pvt_l(pretrained=True)
model = paddle.Model(model)
model.prepare(metrics=paddle.metric.Accuracy(topk=(1, 5)))

# Set up the dataset
val_dataset = ILSVRC2012('data/ILSVRC2012', transform=val_transforms,
                         label_list='data/data68594/val_list.txt')

# Evaluate the model
model.evaluate(val_dataset, batch_size=128)

{'acc_top1': 0.8174, 'acc_top5': 0.95874}
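Single-image inference follows the same pattern (an added sketch; it reuses val_transforms from above, and the file name is the standard ILSVRC2012 validation naming, shown here purely for illustration):

# Predict the class index for one validation image
net, _ = pvt_l(pretrained=True)  # weights are cached after the first load
net.eval()

img = Image.open('data/ILSVRC2012/ILSVRC2012_val_00000001.JPEG').convert('RGB')
x = paddle.to_tensor(val_transforms(img).astype('float32')).unsqueeze(0)
print(paddle.argmax(net(x), axis=-1).numpy())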