Python Day 50

Homework:

1. Thoroughly understand the ResNet18 model architecture (see the inspection sketch right after this list).

2. Try out a staged fine-tuning strategy on VGG16 + CBAM.
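For homework item 1, here is a minimal standalone sketch for inspecting ResNet18's structure. It only assumes torchvision is installed; `weights=None` avoids downloading pretrained weights, since the architecture is the same either way.

from torchvision import models

resnet = models.resnet18(weights=None)

# Top-level blocks: conv1, bn1, relu, maxpool, layer1-layer4, avgpool, fc
for name, module in resnet.named_children():
    print(name, '->', type(module).__name__)

# Each of layer1-layer4 holds two BasicBlocks; each BasicBlock has two 3x3
# convolutions plus an identity (or 1x1 downsample) shortcut connection
print(resnet.layer1[0])

Reading the printed BasicBlock alongside the forward pass is the quickest way to see where the residual addition happens.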

import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from torchvision import models
from torch.utils.data import DataLoader
import time
import copy

# Check for CUDA availability
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")# --- CBAM Module Implementation ---
class ChannelAttention(nn.Module):
    def __init__(self, in_planes, ratio=16):
        super(ChannelAttention, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.max_pool = nn.AdaptiveMaxPool2d(1)
        self.fc = nn.Sequential(
            nn.Conv2d(in_planes, in_planes // ratio, 1, bias=False),
            nn.ReLU(),
            nn.Conv2d(in_planes // ratio, in_planes, 1, bias=False)
        )
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        avg_out = self.fc(self.avg_pool(x))
        max_out = self.fc(self.max_pool(x))
        out = avg_out + max_out
        return self.sigmoid(out)

class SpatialAttention(nn.Module):
    def __init__(self, kernel_size=7):
        super(SpatialAttention, self).__init__()
        assert kernel_size in (3, 7), 'kernel size must be 3 or 7'
        padding = 3 if kernel_size == 7 else 1
        self.conv1 = nn.Conv2d(2, 1, kernel_size, padding=padding, bias=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        avg_out = torch.mean(x, dim=1, keepdim=True)
        max_out, _ = torch.max(x, dim=1, keepdim=True)
        x_cat = torch.cat([avg_out, max_out], dim=1)
        x_att = self.conv1(x_cat)
        return self.sigmoid(x_att)

class CBAM(nn.Module):
    def __init__(self, in_planes, ratio=16, kernel_size=7):
        super(CBAM, self).__init__()
        self.ca = ChannelAttention(in_planes, ratio)
        self.sa = SpatialAttention(kernel_size)

    def forward(self, x):
        x = x * self.ca(x)  # Apply channel attention
        x = x * self.sa(x)  # Apply spatial attention
        return x

# --- VGG16 with CBAM Model ---
class VGG16_CBAM(nn.Module):
    def __init__(self, num_classes=10, pretrained=True):
        super(VGG16_CBAM, self).__init__()
        vgg_base = models.vgg16(weights=models.VGG16_Weights.IMAGENET1K_V1 if pretrained else None)
        self.features = vgg_base.features

        # CBAM module after feature extraction; VGG16 features output 512 channels
        self.cbam = CBAM(in_planes=512)

        # Adaptive average pooling gives a fixed-size output.
        # VGG's original avgpool is AdaptiveAvgPool2d(output_size=(7, 7));
        # keeping it means the classifier input is 512 * 7 * 7 = 25088.
        self.avgpool = vgg_base.avgpool

        # Original VGG classifier:
        # (0): Linear(in_features=25088, out_features=4096, bias=True)
        # (1): ReLU(inplace=True)
        # (2): Dropout(p=0.5, inplace=False)
        # (3): Linear(in_features=4096, out_features=4096, bias=True)
        # (4): ReLU(inplace=True)
        # (5): Dropout(p=0.5, inplace=False)
        # (6): Linear(in_features=4096, out_features=1000, bias=True)

        # Option 1: keep the original avgpool, so the classifier input is 25088
        num_ftrs = vgg_base.classifier[0].in_features  # Should be 25088
        # Option 2: shrink the feature map instead:
        # self.avgpool = nn.AdaptiveAvgPool2d((1, 1))  # Output 512x1x1
        # num_ftrs = 512

        # Rebuild the classifier for the new number of classes (10 for CIFAR-10)
        self.classifier = nn.Sequential(
            nn.Linear(num_ftrs, 4096),
            nn.ReLU(True),
            nn.Dropout(p=0.5),
            nn.Linear(4096, 4096),
            nn.ReLU(True),
            nn.Dropout(p=0.5),
            nn.Linear(4096, num_classes)
        )

        # Initialize weights of the new classifier (optional, often helps)
        for m in self.classifier.modules():
            if isinstance(m, nn.Linear):
                nn.init.xavier_uniform_(m.weight)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)

    def forward(self, x):
        x = self.features(x)
        x = self.cbam(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.classifier(x)
        return x
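Before wiring the model into the training loop, a quick smoke test catches shape mistakes early. This is a sketch I am adding, not part of the pipeline; it uses pretrained=False so nothing is downloaded.

# Smoke test: one dummy batch through the model, on CPU
_model = VGG16_CBAM(num_classes=10, pretrained=False)
_dummy = torch.randn(2, 3, 224, 224)  # Batch of 2, matching VGG's 224x224 input
_out = _model(_dummy)
print(_out.shape)  # Expected: torch.Size([2, 10])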
# --- Data Preparation ---
# CIFAR-10 specific transforms
# VGG expects 224x224 images
transform_train = transforms.Compose([
    transforms.Resize(256),
    transforms.RandomResizedCrop(224),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),  # CIFAR-10 stats
])
transform_test = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train)
trainloader = DataLoader(trainset, batch_size=32, shuffle=True, num_workers=4, pin_memory=True) # Adjust batch_size based on GPU VRAM
testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)
testloader = DataLoader(testset, batch_size=64, shuffle=False, num_workers=4, pin_memory=True)
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
num_classes = len(classes)
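As a hedge against transform mistakes, peeking at one batch confirms the pipeline produces the 3x224x224 tensors VGG expects. An optional check, nothing more:

# Optional sanity check on the training loader
images, labels = next(iter(trainloader))
print(images.shape)  # Expected: torch.Size([32, 3, 224, 224])
print(labels[:8])    # First few class indices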
# --- Training and Evaluation Functions ---
def train_model_epochs(model, criterion, optimizer, dataloader, num_epochs=10, accumulation_steps=1):
    model.train()
    scaler = torch.cuda.amp.GradScaler(enabled=torch.cuda.is_available())  # For mixed precision
    for epoch in range(num_epochs):
        running_loss = 0.0
        running_corrects = 0
        total_samples = 0
        optimizer.zero_grad()  # Zero out gradients before starting accumulation for an effective batch
        for i, (inputs, labels) in enumerate(dataloader):
            inputs, labels = inputs.to(device), labels.to(device)
            with torch.cuda.amp.autocast(enabled=torch.cuda.is_available()):  # Mixed precision context
                outputs = model(inputs)
                loss = criterion(outputs, labels)
            loss = loss / accumulation_steps  # Scale loss for accumulation
            scaler.scale(loss).backward()  # Scale loss and call backward
            if (i + 1) % accumulation_steps == 0 or (i + 1) == len(dataloader):
                scaler.step(optimizer)  # Perform optimizer step
                scaler.update()  # Update scaler
                optimizer.zero_grad()  # Zero out gradients for the next effective batch
            _, preds = torch.max(outputs, 1)
            running_loss += loss.item() * inputs.size(0) * accumulation_steps  # Unscale loss for logging
            running_corrects += torch.sum(preds == labels.data)
            total_samples += inputs.size(0)
            if (i + 1) % 100 == 0:  # Log every 100 mini-batches
                print(f'Epoch [{epoch+1}/{num_epochs}], Batch [{i+1}/{len(dataloader)}], Loss: {loss.item()*accumulation_steps:.4f}')
        epoch_loss = running_loss / total_samples
        epoch_acc = running_corrects.double() / total_samples
        print(f'Epoch {epoch+1}/{num_epochs} - Train Loss: {epoch_loss:.4f}, Acc: {epoch_acc:.4f}')
    return model
def evaluate_model(model, criterion, dataloader):
    model.eval()
    running_loss = 0.0
    running_corrects = 0
    total_samples = 0
    with torch.no_grad():
        for inputs, labels in dataloader:
            inputs, labels = inputs.to(device), labels.to(device)
            with torch.cuda.amp.autocast(enabled=torch.cuda.is_available()):
                outputs = model(inputs)
                loss = criterion(outputs, labels)
            _, preds = torch.max(outputs, 1)
            running_loss += loss.item() * inputs.size(0)
            running_corrects += torch.sum(preds == labels.data)
            total_samples += inputs.size(0)
    epoch_loss = running_loss / total_samples
    epoch_acc = running_corrects.double() / total_samples
    print(f'Test Loss: {epoch_loss:.4f}, Acc: {epoch_acc:.4f}\n')
    return epoch_acc
# --- Fine-tuning Strategy Implementation ---
model_vgg_cbam = VGG16_CBAM(num_classes=num_classes, pretrained=True).to(device)
criterion = nn.CrossEntropyLoss()
accumulation_steps = 2 # Simulate larger batch size: 32*2 = 64 effective
# **Phase 1: Train only the CBAM and the new classifier**
print("--- Phase 1: Training CBAM and Classifier ---")
# Freeze all layers in features
for param in model_vgg_cbam.features.parameters():
    param.requires_grad = False
# Ensure CBAM and classifier parameters are trainable
for param in model_vgg_cbam.cbam.parameters():
    param.requires_grad = True
for param in model_vgg_cbam.classifier.parameters():
    param.requires_grad = True
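To confirm the freeze actually took effect before building the optimizer, a small helper can be used (a hypothetical utility I am adding, not part of the original script):

def count_trainable(model):
    # Returns (trainable, total) parameter counts
    trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
    total = sum(p.numel() for p in model.parameters())
    return trainable, total

trainable, total = count_trainable(model_vgg_cbam)
print(f"Phase 1 trainable params: {trainable:,} / {total:,}")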
# Collect parameters to optimize for phase 1
params_to_optimize_phase1 = []
for name, param in model_vgg_cbam.named_parameters():
    if param.requires_grad:
        params_to_optimize_phase1.append(param)
        print(f"Phase 1 optimizing: {name}")
optimizer_phase1 = optim.AdamW(params_to_optimize_phase1, lr=1e-3, weight_decay=1e-4)
# scheduler_phase1 = optim.lr_scheduler.StepLR(optimizer_phase1, step_size=5, gamma=0.1) # Optional scheduler
# Reduced epochs for quicker demonstration, increase for better results
epochs_phase1 = 10 # e.g., 10-15 epochs
model_vgg_cbam = train_model_epochs(model_vgg_cbam, criterion, optimizer_phase1, trainloader, num_epochs=epochs_phase1, accumulation_steps=accumulation_steps)
evaluate_model(model_vgg_cbam, criterion, testloader)
# **Phase 2: Unfreeze some later layers of the backbone (e.g., last VGG block) and train with a smaller LR**
print("\n--- Phase 2: Fine-tuning later backbone layers, CBAM, and Classifier ---")
# VGG16 features layers: total 31 layers (conv, relu, pool)
# Last block (conv5_1, relu, conv5_2, relu, conv5_3, relu, pool5) starts around index 24
# Unfreeze layers from index 24 onwards (last conv block)
# Note: VGG feature blocks end at indices: 4 (block1), 9 (block2), 16 (block3), 23 (block4), 30 (block5)
# Let's unfreeze block 5 (layers 24-30) and block 4 (layers 17-23)
unfreeze_from_layer_idx = 17 # Start of block4
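The block boundary indices quoted above are easy to verify by printing the feature layers; a minimal sketch (the MaxPool2d entries at indices 4, 9, 16, 23, and 30 mark the ends of blocks 1-5):

# Optional: confirm where block 4 starts before unfreezing
for i, layer in enumerate(model_vgg_cbam.features):
    print(i, type(layer).__name__)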
for i, child in enumerate(model_vgg_cbam.features.children()):
    if i >= unfreeze_from_layer_idx:
        print(f"Phase 2 Unfreezing feature layer: {i} - {type(child)}")
        for param in child.parameters():
            param.requires_grad = True
    # else: keep earlier layers frozen -- already done in Phase 1,
    # but good to be explicit if starting from scratch

# Differential learning rates
# Backbone layers (newly unfrozen) get a smaller LR
# CBAM and classifier get a slightly larger LR (or same as backbone if preferred)
lr_backbone_phase2 = 1e-5
lr_head_phase2 = 5e-5  # CBAM and classifier

params_group_phase2 = [
    # Later backbone layers are appended to this group below
    {'params': [], 'lr': lr_backbone_phase2, 'name': 'fine_tune_features'},
    # Wrap parameters() in list() so the groups can be iterated more than once
    {'params': list(model_vgg_cbam.cbam.parameters()), 'lr': lr_head_phase2, 'name': 'cbam'},
    {'params': list(model_vgg_cbam.classifier.parameters()), 'lr': lr_head_phase2, 'name': 'classifier'}
]

# Add only the newly unfrozen feature layers to the optimizer group
for i, child in enumerate(model_vgg_cbam.features.children()):
    if i >= unfreeze_from_layer_idx:
        params_group_phase2[0]['params'].extend(list(child.parameters()))
        print(f"Phase 2 optimizing feature layer: {i} with lr {lr_backbone_phase2}")
# Early backbone layers stay out of the optimizer: the optimizer itself does not
# filter by requires_grad, so the list comprehensions below do it explicitly.
optimizer_phase2 = optim.AdamW([
    {'params': [p for p in params_group_phase2[0]['params'] if p.requires_grad], 'lr': lr_backbone_phase2},
    {'params': [p for p in params_group_phase2[1]['params'] if p.requires_grad], 'lr': lr_head_phase2},
    {'params': [p for p in params_group_phase2[2]['params'] if p.requires_grad], 'lr': lr_head_phase2}
], weight_decay=1e-4)

epochs_phase2 = 15  # e.g., 15-20 epochs
model_vgg_cbam = train_model_epochs(model_vgg_cbam, criterion, optimizer_phase2, trainloader, num_epochs=epochs_phase2, accumulation_steps=accumulation_steps)
evaluate_model(model_vgg_cbam, criterion, testloader)

# **Phase 3: Unfreeze all layers and train with a very small learning rate**
print("\n--- Phase 3: Fine-tuning all layers with very small LR ---")
for param in model_vgg_cbam.features.parameters():
    param.requires_grad = True  # Unfreeze all feature layers

# Single very small learning rate for all parameters, or differential rates below
lr_phase3 = 2e-6 # Or differential (earlier layers even smaller)
params_group_phase3 = [
    {'params': list(model_vgg_cbam.features[:unfreeze_from_layer_idx].parameters()), 'lr': lr_phase3 * 0.1, 'name': 'early_features'},  # Earlier backbone layers
    {'params': list(model_vgg_cbam.features[unfreeze_from_layer_idx:].parameters()), 'lr': lr_phase3, 'name': 'later_features'},  # Later backbone layers
    {'params': model_vgg_cbam.cbam.parameters(), 'lr': lr_phase3 * 2, 'name': 'cbam'},  # CBAM slightly higher
    {'params': model_vgg_cbam.classifier.parameters(), 'lr': lr_phase3 * 2, 'name': 'classifier'}  # Classifier slightly higher
]
optimizer_phase3 = optim.AdamW(params_group_phase3, weight_decay=1e-5)  # Per-group LRs; the default LR is not used here
# Simpler alternative: one LR for everything
# optimizer_phase3 = optim.AdamW(model_vgg_cbam.parameters(), lr=lr_phase3, weight_decay=1e-5)

epochs_phase3 = 15  # e.g., 15-20 epochs
model_vgg_cbam = train_model_epochs(model_vgg_cbam, criterion, optimizer_phase3, trainloader, num_epochs=epochs_phase3, accumulation_steps=accumulation_steps)
evaluate_model(model_vgg_cbam, criterion, testloader)

print("Fine-tuning complete!")

# Save the final model (optional)
# torch.save(model_vgg_cbam.state_dict(), 'vgg16_cbam_cifar10_final.pth')
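If the save line above is uncommented, the weights can be reloaded later with a matching sketch (the filename is the one from the comment; this assumes the checkpoint exists):

# Reload the fine-tuned weights for inference
model_loaded = VGG16_CBAM(num_classes=num_classes, pretrained=False).to(device)
state_dict = torch.load('vgg16_cbam_cifar10_final.pth', map_location=device)
model_loaded.load_state_dict(state_dict)
model_loaded.eval()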

@浙大疏锦行

