In industrial production, print quality directly affects a product's appearance and the user experience. Print defect inspection is a key step in quality control, yet traditional approaches rely on manual visual inspection or simple image processing, which are slow and of limited accuracy. With the development of deep learning, methods based on convolutional neural networks (CNNs) have gradually become mainstream. This article walks through, with code, a print defect detection technique based on an enhanced U-Net that combines an attention mechanism with a hybrid loss function to achieve high-precision defect segmentation.
1. Model Architecture
(1) Enhanced U-Net
U-Net is well known for its success in medical image segmentation. The enhanced U-Net proposed here builds on that design. The encoder extracts multi-scale features through repeated convolution and pooling, while the decoder progressively restores the spatial resolution of the image via transposed convolutions. Each decoder block uses skip connections to fuse encoder feature maps with decoder feature maps, which mitigates the vanishing-gradient problem while preserving fine image detail.
import torch
import torch.nn as nn


class EnhancedUNet(nn.Module):
    def __init__(self):
        super().__init__()
        # Config holds project hyperparameters (channels, feature widths) defined elsewhere
        features = Config.init_features
        self._initialize = nn.init.kaiming_normal_
        # Encoder: four conv blocks, each followed by 2x2 max pooling
        self.encoder1 = self._build_block(Config.in_channels, features)
        self.pool1 = nn.MaxPool2d(2)
        self.encoder2 = self._build_block(features, features * 2)
        self.pool2 = nn.MaxPool2d(2)
        self.encoder3 = self._build_block(features * 2, features * 4)
        self.pool3 = nn.MaxPool2d(2)
        self.encoder4 = self._build_block(features * 4, features * 8)
        self.pool4 = nn.MaxPool2d(2)
        # Bottleneck
        self.bottleneck = self._build_block(features * 8, features * 16)
        # Decoder: transposed convolutions upsample, conv blocks fuse the skip connections
        self.upconv4 = nn.ConvTranspose2d(features * 16, features * 8, 2, 2)
        self.decoder4 = self._build_block(features * 16, features * 8)
        self.upconv3 = nn.ConvTranspose2d(features * 8, features * 4, 2, 2)
        self.decoder3 = self._build_block(features * 8, features * 4)
        self.upconv2 = nn.ConvTranspose2d(features * 4, features * 2, 2, 2)
        self.decoder2 = self._build_block(features * 4, features * 2)
        self.upconv1 = nn.ConvTranspose2d(features * 2, features, 2, 2)
        self.decoder1 = self._build_block(features * 2, features)
        # Attention blocks applied at each decoder stage
        self.attention4 = AttentionBlock(features * 8, features * 8)
        self.attention3 = AttentionBlock(features * 4, features * 4)
        self.attention2 = AttentionBlock(features * 2, features * 2)
        self.attention1 = AttentionBlock(features * 1, features * 1)
        self.final_conv = nn.Conv2d(features, Config.out_channels, 1)
        self._initialize_weights()

    def _build_block(self, in_ch, out_ch):
        # Two 3x3 convolutions, each followed by batch norm and ReLU
        return nn.Sequential(
            nn.Conv2d(in_ch, out_ch, 3, padding=1),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_ch, out_ch, 3, padding=1),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(inplace=True),
        )
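The constructor calls _initialize_weights(), whose body is not shown above. A minimal sketch of such a method inside EnhancedUNet, assuming Kaiming initialization for the convolutional layers (consistent with self._initialize = nn.init.kaiming_normal_), might look like this:

    def _initialize_weights(self):
        # Hypothetical sketch: Kaiming-normal init for conv layers, constants for batch norm
        for m in self.modules():
            if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
                self._initialize(m.weight, nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)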
(2) Attention Mechanism
To further focus the model on defect regions, an attention mechanism is introduced. In each decoder block, an attention module weights the feature map: it learns a per-position importance weight through convolutions and an activation, then multiplies the weights with the feature map so the model attends more strongly to defect-related features. This both increases the model's sensitivity to defects and improves the accuracy of the segmentation result.
class AttentionBlock(nn.Module):
    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, 1)
        self.conv2 = nn.Conv2d(out_channels, out_channels, 3, padding=1)
        self.conv3 = nn.Conv2d(out_channels, out_channels, 3, padding=1)
        self.relu = nn.ReLU(inplace=True)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # A 1x1 conv followed by two 3x3 convs; the sigmoid maps the result to [0, 1] attention weights
        x = self.conv1(x)
        x = self.relu(x)
        x = self.conv2(x)
        x = self.relu(x)
        x = self.conv3(x)
        x = self.sigmoid(x)
        return x
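The forward pass of EnhancedUNet is not included in the excerpt above. A minimal sketch of how the pieces could be wired together, assuming each attention block gates its encoder feature map before it is concatenated with the upsampled decoder feature map, is given below; the exact wiring in the original code may differ.

    def forward(self, x):
        # Encoder path
        enc1 = self.encoder1(x)
        enc2 = self.encoder2(self.pool1(enc1))
        enc3 = self.encoder3(self.pool2(enc2))
        enc4 = self.encoder4(self.pool3(enc3))
        # Bottleneck
        bottleneck = self.bottleneck(self.pool4(enc4))
        # Decoder path: upsample, gate the skip connection with attention, concatenate, convolve
        dec4 = self.upconv4(bottleneck)
        dec4 = self.decoder4(torch.cat([dec4, enc4 * self.attention4(enc4)], dim=1))
        dec3 = self.upconv3(dec4)
        dec3 = self.decoder3(torch.cat([dec3, enc3 * self.attention3(enc3)], dim=1))
        dec2 = self.upconv2(dec3)
        dec2 = self.decoder2(torch.cat([dec2, enc2 * self.attention2(enc2)], dim=1))
        dec1 = self.upconv1(dec2)
        dec1 = self.decoder1(torch.cat([dec1, enc1 * self.attention1(enc1)], dim=1))
        # 1x1 conv producing raw logits; sigmoid is applied in the loss and at inference time
        return self.final_conv(dec1)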
2. Data Handling and Augmentation
(1) Dataset
The experiments use a dataset in COCO format containing images of printed products and their corresponding annotations. The dataset is split into a training set and a validation set, used for model training and performance evaluation respectively.
import os
import json
from collections import defaultdict

import torch
from torch.utils.data import Dataset


class PrintDefectDataset(Dataset):
    def __init__(self, image_dir, annotation_path, transform=None, mode='train'):
        self.image_dir = image_dir
        self.transform = transform
        self.mode = mode
        self.target_size = (Config.img_size, Config.img_size)
        if not os.path.isdir(image_dir):
            raise ValueError(f"Image directory not found: {image_dir}")
        if not os.path.isfile(annotation_path):
            raise ValueError(f"Annotation file not found: {annotation_path}")
        with open(annotation_path) as f:
            data = json.load(f)
        self._validate_data(data)
        # Index COCO images by id and group annotations per image
        self.images = {img['id']: img for img in data['images']}
        self.annotations = defaultdict(list)
        for ann in data['annotations']:
            img_id = ann['image_id']
            self.annotations[img_id].append(ann)
        # Keep only images that actually have annotations
        self.ids = [img_id for img_id in self.images.keys() if img_id in self.annotations]
        self._analyze_dataset(data)
        # _validate_data, _analyze_dataset, _load_image, _create_mask and _validate_sample
        # are helper methods defined elsewhere in the project

    def __len__(self):
        return len(self.ids)

    def __getitem__(self, idx):
        img_id = self.ids[idx]
        img_info = self.images[img_id]
        img_path = os.path.join(self.image_dir, img_info['file_name'])
        image = self._load_image(img_path)
        mask = self._create_mask(img_id, img_info)
        # Augmentation is applied only in training mode
        if self.transform and self.mode == 'train':
            transformed = self.transform(image=image, mask=mask)
            image, mask = transformed['image'], transformed['mask']
        self._validate_sample(image, mask)
        # Add a channel dimension: (H, W) -> (1, H, W)
        return (
            torch.from_numpy(image).unsqueeze(0).float(),
            torch.from_numpy(mask).unsqueeze(0).float()
        )
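The mask-building helper _create_mask is referenced but not shown. A minimal sketch, assuming the COCO annotations store polygon segmentations and a single binary foreground mask is wanted, could look like the following; the actual helper may handle RLE masks, class ids, and resizing differently.

    import numpy as np
    import cv2

    def _create_mask(self, img_id, img_info):
        # Hypothetical sketch: rasterize COCO polygon segmentations into one binary mask
        mask = np.zeros((img_info['height'], img_info['width']), dtype=np.uint8)
        for ann in self.annotations[img_id]:
            for polygon in ann.get('segmentation', []):
                pts = np.array(polygon, dtype=np.int32).reshape(-1, 2)
                cv2.fillPoly(mask, [pts], 1)
        # Resize with nearest-neighbour interpolation to keep the labels binary
        mask = cv2.resize(mask, self.target_size, interpolation=cv2.INTER_NEAREST)
        return mask.astype(np.float32)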
(2) Data Augmentation
To increase data diversity and improve the model's generalization, several augmentation methods are applied: elastic transform, grid distortion, shift/scale/rotate, random brightness and contrast adjustment, Gaussian blur, horizontal flip, vertical flip, rotation, coarse dropout, and Gaussian noise. These augmentations are applied randomly during training so the model learns image features under varied conditions, which strengthens its robustness.
import cv2
import albumentations as A

train_transform = A.Compose([
    A.Resize(img_size, img_size, interpolation=cv2.INTER_LANCZOS4),
    # Apply at most one of the two geometric distortions
    A.OneOf([
        A.ElasticTransform(alpha=120, sigma=120 * 0.05, alpha_affine=120 * 0.03, interpolation=cv2.INTER_NEAREST, p=0.5),
        A.GridDistortion(num_steps=5, distort_limit=0.3, interpolation=cv2.INTER_NEAREST, p=0.5)
    ], p=0.7),
    A.ShiftScaleRotate(shift_limit=0.1, scale_limit=0.1, rotate_limit=30, interpolation=cv2.INTER_NEAREST, border_mode=cv2.BORDER_CONSTANT, p=0.5),
    A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2, p=0.5),
    A.GaussianBlur(blur_limit=3, p=0.2),
    A.HorizontalFlip(p=0.5),
    A.VerticalFlip(p=0.5),
    A.Rotate(limit=30, interpolation=cv2.INTER_NEAREST, p=0.5),
    A.CoarseDropout(max_holes=8, max_height=32, max_width=32, p=0.3),
    A.GaussNoise(var_limit=(0.001, 0.01), mean=0, per_channel=True, p=0.2),
], additional_targets={'mask': 'mask'})
3. Training Strategy
(1) Loss Function
A hybrid loss function is used that combines Focal Loss and Dice Loss. Focal Loss addresses class imbalance by focusing training on hard-to-classify samples, while Dice Loss directly optimizes the Dice coefficient of the segmentation, improving the connectivity and accuracy of the result. Together they let the model balance the loss across different classes and regions during training and improve overall performance.
import torch
import torch.nn as nn


class HybridLoss(nn.Module):
    def __init__(self, alpha=0.75, gamma=2.0):
        super().__init__()
        self.alpha = alpha
        self.gamma = gamma
        self.bce = nn.BCEWithLogitsLoss(reduction='none')

    def forward(self, pred, target):
        # Focal Loss: down-weight easy examples via the modulating factor (1 - pt) ** gamma
        bce_loss = self.bce(pred, target)
        pt = torch.exp(-bce_loss)
        focal_loss = self.alpha * (1 - pt) ** self.gamma * bce_loss
        focal_loss = focal_loss.mean()
        # Dice Loss: computed per sample on the sigmoid probabilities, then averaged
        pred_sigmoid = torch.sigmoid(pred)
        smooth = 1e-7
        intersection = 2.0 * (pred_sigmoid * target).sum(dim=(1, 2, 3))
        union = pred_sigmoid.sum(dim=(1, 2, 3)) + target.sum(dim=(1, 2, 3))
        dice_loss = 1 - (intersection + smooth) / (union + smooth)
        dice_loss = dice_loss.mean()
        return focal_loss + dice_loss
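Written out, the loss implemented above is the sum of the two terms, with $p_t = e^{-\mathrm{BCE}(p, t)}$ the probability assigned to the true class, $p_i$ and $t_i$ the per-pixel predicted probabilities and labels, and $\epsilon$ the smoothing constant:

$$\mathcal{L}_{\text{focal}} = \alpha\,(1 - p_t)^{\gamma}\,\mathrm{BCE}(p, t), \qquad \mathcal{L}_{\text{dice}} = 1 - \frac{2\sum_i p_i t_i + \epsilon}{\sum_i p_i + \sum_i t_i + \epsilon}, \qquad \mathcal{L} = \mathcal{L}_{\text{focal}} + \mathcal{L}_{\text{dice}}$$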
(2) Optimizer and Learning-Rate Schedule
AdamW is chosen as the optimizer; its handling of weight decay helps prevent overfitting. A cosine annealing learning-rate schedule gradually adjusts the learning rate during training so the model converges more quickly toward a good solution.
# Inside the trainer's __init__ (optim from torch.optim, CosineAnnealingLR from torch.optim.lr_scheduler)
self.optimizer = optim.AdamW(self.model.parameters(), lr=config.lr, weight_decay=1e-5)
self.scheduler = CosineAnnealingLR(self.optimizer, T_max=config.epochs, eta_min=1e-7)
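For reference, PyTorch's CosineAnnealingLR follows the standard cosine schedule, so with the settings above the learning rate at epoch $t$ is approximately

$$\eta_t = \eta_{\min} + \tfrac{1}{2}\,(\eta_0 - \eta_{\min})\left(1 + \cos\frac{t\,\pi}{T_{\max}}\right)$$

with $\eta_0 = $ config.lr, $\eta_{\min} = 10^{-7}$, and $T_{\max} = $ config.epochs.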
(3) Early Stopping
To guard against overfitting, early stopping is used: if the model's performance on the validation set does not improve for a given number of consecutive epochs (e.g. 15), training stops early and the best model found so far is kept.
if self.early_stop_counter >= self.config.early_stop:
    print("Early stopping triggered!")
    break
4. Experimental Results and Analysis
(1) Training Process
During training, the training loss and validation loss decrease steadily while the validation F1 score and IoU (intersection over union) rise. This indicates that the model progressively learns the image features and defect patterns, and that its performance keeps improving.
# Trainer method; requires `import time` and the project's `visual` plotting module
def train(self, train_loader, val_loader):
    print("Start training...")
    metrics = {
        'train_loss': [],
        'val_loss': [],
        'val_f1': [],
        'val_iou': []
    }
    for epoch in range(1, self.config.epochs + 1):
        epoch_start = time.time()
        train_loss = self.train_epoch(train_loader)
        val_loss, val_f1, val_iou = self.evaluate(val_loader)
        epoch_time = time.time() - epoch_start
        print(f"\nEpoch {epoch:03d}/{self.config.epochs} | Time: {epoch_time:.1f}s")
        print(f"Train Loss: {train_loss:.4f} | Val Loss: {val_loss:.4f} | Val F1: {val_f1:.4f} | Val IoU: {val_iou:.4f}")
        metrics['train_loss'].append(train_loss)
        metrics['val_loss'].append(val_loss)
        metrics['val_f1'].append(val_f1)
        metrics['val_iou'].append(val_iou)
        # Keep the checkpoint with the best validation F1 (ties broken by IoU)
        if val_f1 > self.best_f1 or (val_f1 == self.best_f1 and val_iou > self.best_iou):
            self.best_f1 = val_f1
            self.best_iou = val_iou
            self.early_stop_counter = 0
            self.save_model()
            self.visualize_predictions(val_loader)
            visual.save_metrics(metrics, self.config.output_dir)
        else:
            self.early_stop_counter += 1
            print(f"No improvement for {self.early_stop_counter} epochs")
            if self.early_stop_counter >= self.config.early_stop:
                print("Early stopping triggered!")
                break
        self.scheduler.step()
    visual.plot_metrics(metrics, self.config.output_dir)
(2) Visualization of Results
Visualizing predictions on the validation set shows that the model segments the print defect regions accurately. Comparing the predictions against the ground truth shows close agreement, indicating that the model has good segmentation ability.
# Trainer method; requires `import matplotlib.pyplot as plt` at module level
def visualize_predictions(self, loader, num_samples=3):
    self.model.eval()
    with torch.no_grad():
        for idx, (images, masks) in enumerate(loader):
            if idx >= num_samples:
                break
            images = images.to(self.config.device)
            outputs = self.model(images)
            preds = torch.sigmoid(outputs).cpu().numpy()
            # Plot the original image, ground-truth mask, and predicted probability map side by side
            plt.figure(figsize=(18, 6))
            plt.subplot(1, 3, 1)
            plt.imshow(images[0].cpu().squeeze().numpy(), cmap='gray')
            plt.title('Original Image')
            plt.subplot(1, 3, 2)
            plt.imshow(masks[0].cpu().squeeze().numpy(), cmap='jet')
            plt.title('Ground Truth')
            plt.subplot(1, 3, 3)
            plt.imshow(preds[0].squeeze(), cmap='jet', vmin=0, vmax=1)
            plt.title(f'Prediction (F1: {self.calculate_f1(preds[0][0], masks[0].cpu().numpy().squeeze()):.3f}, IoU: {self.calculate_iou(preds[0][0], masks[0].cpu().numpy().squeeze()):.3f})')
            plt.show()
(3) Performance Metrics
On the validation set the model achieves high F1 and IoU values. The F1 score balances the model's precision and recall, while IoU measures the overlap between the predicted segmentation and the ground truth. The high values of these metrics indicate that the model is both accurate and robust for the defect detection task.
# Trainer method; requires `import numpy as np` and `from tqdm import tqdm` at module level
def evaluate(self, loader):
    self.model.eval()
    total_loss = 0
    all_f1 = []
    all_iou = []
    progress = tqdm(loader, desc="Evaluating", ncols=100)
    with torch.no_grad():
        for images, masks in progress:
            images = images.to(self.config.device)
            masks = masks.to(self.config.device)
            outputs = self.model(images)
            loss = self.criterion(outputs, masks)
            total_loss += loss.item()
            # Per-sample F1 / IoU computed on the sigmoid probability maps
            preds = torch.sigmoid(outputs).cpu().numpy()
            targets = masks.cpu().numpy().astype(np.uint8)
            batch_f1 = [self.calculate_f1(p[0], t[0]) for p, t in zip(preds, targets)]
            batch_iou = [self.calculate_iou(p[0], t[0]) for p, t in zip(preds, targets)]
            all_f1.extend(batch_f1)
            all_iou.extend(batch_iou)
            progress.set_postfix(f1=f"{np.mean(batch_f1):.4f}", iou=f"{np.mean(batch_iou):.4f}")
    return total_loss / len(loader), np.mean(all_f1), np.mean(all_iou)
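The metric helpers calculate_f1 and calculate_iou are referenced above but not shown. A minimal sketch using a 0.5 threshold on the probability map (the actual implementation may differ) could be:

    def calculate_f1(self, pred, target, threshold=0.5, eps=1e-7):
        # Binarize the probability map, then compute F1 = 2TP / (2TP + FP + FN)
        pred_bin = (pred > threshold).astype(np.uint8)
        target_bin = (target > 0.5).astype(np.uint8)
        tp = np.sum(pred_bin * target_bin)
        fp = np.sum(pred_bin * (1 - target_bin))
        fn = np.sum((1 - pred_bin) * target_bin)
        return (2 * tp + eps) / (2 * tp + fp + fn + eps)

    def calculate_iou(self, pred, target, threshold=0.5, eps=1e-7):
        # IoU = |intersection| / |union| on the binarized masks
        pred_bin = (pred > threshold).astype(np.uint8)
        target_bin = (target > 0.5).astype(np.uint8)
        intersection = np.sum(pred_bin * target_bin)
        union = np.sum(np.clip(pred_bin + target_bin, 0, 1))
        return (intersection + eps) / (union + eps)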
The optimized version of the code will be covered in the next post.
5. Summary and Outlook
This article presented a print defect detection technique based on an enhanced U-Net. By introducing an attention mechanism and a hybrid loss function, the model segments print defects effectively, improving detection accuracy and robustness. The experimental results show that the method performs well on print defect detection tasks.
Future work could further optimize the model architecture and explore more effective attention mechanisms and loss functions. The technique could also be applied to other kinds of industrial defect inspection, such as electronic component inspection or textile defect detection, providing stronger support for industrial quality control.