AI-CNN-验证码识别
1 需求
GitHub - xhh890921/cnn-captcha-pytorch: 小黑黑讲AI,AI实战项目《验证码识别》
2 接口
3 示例
config.json
{"train_data_path": "./data/train-digit/","test_data_path": "./data/test-digit/","train_num": 2000,"test_num": 1000,"characters": "0123456789","digit_num": 1,"img_width": 200,"img_height": 100,"resize_width": 128,"resize_height": 128,"batch_size": 128,"epoch_num": 200,"learning_rate": 0.0001,"model_save_path": "./model/","model_name": "captcha.1digit.2k","test_model_path": "./model/captcha.1digit.2k"
}
generate.py
# 导入验证码模块ImageCaptcha和随机数模块random
from captcha.image import ImageCaptcha
import random# 定义函数generate_data,用于生成验证码图片
# num是需要生成的验证码图片数量
# count是验证码图中包含的字符数量
# chars保存验证码中包含的字符
# path是图片结果的保存路径
# width是height是图片的宽和高
def generate_data(num, count, chars, path, width, height):
    """Generate `num` captcha images and save them as JPEG files under `path`.

    Each image contains `count` characters drawn at random from `chars`.
    The file name encodes the label as "<captcha-text>_<index>.jpg", so the
    label can later be recovered from the file name alone.
    """
    for i in range(num):
        print("generate %d" % (i))
        # Fresh captcha generator for the requested image size.
        generator = ImageCaptcha(width=width, height=height)
        # Randomly pick `count` characters for the captcha text.
        random_str = "".join(random.choice(chars) for _ in range(count))
        img = generator.generate_image(random_str)
        # Add noise dots and a noise curve to make recognition harder.
        generator.create_noise_dots(img, '#000000', 4, 40)
        generator.create_noise_curve(img, '#000000')
        # File name convention: "<text>_<index>.jpg".
        file_name = path + random_str + '_' + str(i) + '.jpg'
        img.save(file_name)


import json
import os


if __name__ == '__main__':
    # Load all generation parameters from the JSON config file.
    with open("config.json", "r") as f:
        config = json.load(f)

    train_data_path = config["train_data_path"]  # where training images go
    test_data_path = config["test_data_path"]    # where test images go
    train_num = config["train_num"]              # number of training samples
    test_num = config["test_num"]                # number of test samples
    characters = config["characters"]            # captcha character set
    digit_num = config["digit_num"]              # characters per captcha
    img_width = config["img_width"]              # generated image width
    img_height = config["img_height"]            # generated image height

    # Make sure the output folders exist before writing any images.
    for folder in (train_data_path, test_data_path):
        if not os.path.exists(folder):
            os.makedirs(folder)

    # Produce the training set, then the test set.
    generate_data(train_num, digit_num, characters,
                  train_data_path, img_width, img_height)
    generate_data(test_num, digit_num, characters,
                  test_data_path, img_width, img_height)
dataset.py
from torch.utils.data import Dataset
from PIL import Image
import torch
import os# 设置CaptchaDataset继承Dataset,用于读取验证码数据
class CaptchaDataset(Dataset):
    """Dataset of captcha images whose label is encoded in the file name.

    Every file under ``data_dir`` is named "<text>_<index>.<ext>"; the
    characters before the first underscore are the captcha label.
    """

    def __init__(self, data_dir, transform, characters):
        # Collect the full path of every file in the data directory.
        self.file_list = [os.path.join(data_dir, name)
                          for name in os.listdir(data_dir)]
        # Transform pipeline applied to each image (e.g. Resize + ToTensor).
        self.transform = transform
        # Map each character of the captcha alphabet to its class index.
        self.char2int = {char: idx for idx, char in enumerate(characters)}

    def __len__(self):
        # Number of samples; enables the len(dataset) idiom.
        return len(self.file_list)

    def __getitem__(self, index):
        # Return the (image tensor, label tensor) pair for sample `index`.
        file_path = self.file_list[index]
        # Grayscale is enough to recognize character shapes and makes the
        # model robust to color variations.
        image = Image.open(file_path).convert('L')
        image = self.transform(image)
        # The label is the part of the file name before the first "_".
        label_char = os.path.basename(file_path).split('_')[0]
        # Convert every labelled character to its class index.
        label = torch.tensor([self.char2int[ch] for ch in label_char],
                             dtype=torch.long)
        return image, label


from torch.utils.data import DataLoader
from torchvision import transforms
import json


if __name__ == '__main__':
    # Read preprocessing and loader parameters from the config file.
    with open("config.json", "r") as f:
        config = json.load(f)
    height = config["resize_height"]  # target image height
    width = config["resize_width"]    # target image width

    # Preprocessing pipeline: resize the image, then convert it to a tensor.
    transform = transforms.Compose([
        transforms.Resize((height, width)),
        transforms.ToTensor()])

    data_path = config["train_data_path"]  # training data directory
    characters = config["characters"]      # captcha character set
    batch_size = config["batch_size"]
    epoch_num = config["epoch_num"]

    # Build the dataset and a shuffling mini-batch loader over it.
    dataset = CaptchaDataset(data_path, transform, characters)
    data_load = DataLoader(dataset,
                           batch_size=batch_size,
                           shuffle=True)

    # Simulate the data access pattern of mini-batch training:
    # the outer loop walks epochs, the inner loop walks batches.
    for epoch in range(epoch_num):
        print("epoch = %d" % (epoch))
        for batch_idx, (data, label) in enumerate(data_load):
            print("batch_idx = %d label = %s" % (batch_idx, label))
model.py
import torch.nn as nn# 设置类CNNModel,它继承了torch.nn中的Module模块
class CNNModel(nn.Module):
    """CNN that predicts every character of a captcha image at once.

    Takes a grayscale batch of shape (n, 1, height, width) and returns a
    tensor of shape (n, digit_num, class_num // digit_num): one score
    vector per character position.
    """

    def __init__(self, height, width, digit_num, class_num):
        super(CNNModel, self).__init__()
        # Number of characters in the captcha; used to reshape the output.
        self.digit_num = digit_num

        # Three conv groups, each: Conv2d with 'same' padding (spatial size
        # preserved) -> ReLU -> 2x2 max pool (halves height and width)
        # -> Dropout for regularization.
        self.conv1 = nn.Sequential(
            nn.Conv2d(1, 32, kernel_size=3, padding='same'),
            nn.ReLU(),
            nn.MaxPool2d(2),
            nn.Dropout(0.25))
        self.conv2 = nn.Sequential(
            nn.Conv2d(32, 64, kernel_size=3, padding='same'),
            nn.ReLU(),
            nn.MaxPool2d(2),
            nn.Dropout(0.25))
        self.conv3 = nn.Sequential(
            nn.Conv2d(64, 64, kernel_size=3, padding='same'),
            nn.ReLU(),
            nn.MaxPool2d(2),
            nn.Dropout(0.25))

        # After three 2x2 poolings the feature map is (height/8, width/8)
        # with 64 channels; that fixes the flattened size fed to fc1.
        input_num = (height // 8) * (width // 8) * 64
        self.fc1 = nn.Sequential(
            nn.Linear(input_num, 1024),
            nn.ReLU(),
            nn.Dropout(0.25))
        # Output layer with class_num units. No explicit softmax here:
        # training uses CrossEntropyLoss, which applies it internally.
        self.fc2 = nn.Sequential(
            nn.Linear(1024, class_num),)

    def forward(self, x):
        """Forward pass over a batch x of shape (n, 1, height, width)."""
        # Run the three conv groups back to back.
        out = self.conv3(self.conv2(self.conv1(x)))
        # Flatten all feature maps into one vector per sample.
        out = out.view(out.size(0), -1)
        out = self.fc2(self.fc1(out))
        # Split the flat output into one score vector per character:
        # (n, digit_num, classes-per-digit).
        return out.view(out.size(0), self.digit_num, -1)


import json
if __name__ == '__main__':
    # Load the model hyper-parameters from the config file.
    with open("config.json", "r") as f:
        config = json.load(f)
    height = config["resize_height"]   # input image height
    width = config["resize_width"]     # input image width
    characters = config["characters"]  # captcha character set
    digit_num = config["digit_num"]    # characters per captcha
    # Total number of output units: one score per (position, character).
    class_num = len(characters) * digit_num

    # Build the model and print it to inspect the layer structure.
    model = CNNModel(height, width, digit_num, class_num)
    print(model)
    print("")
train.py
# 直接导入dataset.py中的CaptchaDataset类
from dataset import CaptchaDataset
# 直接导入model.py中的CNNModel类
from model import CNNModelimport torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torchvision import transforms
from torch import optim
import json
import os


if __name__ == '__main__':
    # Load all training hyper-parameters from the config file.
    with open("config.json", "r") as f:
        config = json.load(f)

    height = config["resize_height"]  # model input height
    width = config["resize_width"]    # model input width

    # Preprocessing pipeline: a small random rotation augments the data,
    # then the image is resized and converted to a tensor.
    transform = transforms.Compose([
        transforms.RandomRotation(10),
        transforms.Resize((height, width)),
        transforms.ToTensor()])

    train_data_path = config["train_data_path"]  # training data directory
    characters = config["characters"]            # captcha character set
    batch_size = config["batch_size"]            # mini-batch size
    epoch_num = config["epoch_num"]              # number of training epochs
    digit_num = config["digit_num"]              # characters per captcha
    learning_rate = config["learning_rate"]      # optimizer learning rate
    # One output unit per (position, character) pair.
    class_num = len(characters) * digit_num

    model_save_path = config["model_save_path"]  # model output directory
    model_name = config["model_name"]            # final model file name
    # os.path.join avoids the duplicated separator that plain string
    # concatenation produced (the log showed "./model//captcha.1digit.2k").
    model_save_name = os.path.join(model_save_path, model_name)

    # Make sure the model directory exists before saving anything.
    if not os.path.exists(model_save_path):
        os.makedirs(model_save_path)

    print("resize_height = %d" % (height))
    print("resize_width = %d" % (width))
    print("train_data_path = %s" % (train_data_path))
    print("characters = %s" % (characters))
    print("batch_size = %d" % (batch_size))
    print("epoch_num = %d" % (epoch_num))
    print("digit_num = %d" % (digit_num))
    print("class_num = %d" % (class_num))
    print("learning_rate = %lf" % (learning_rate))
    print("model_save_name = %s" % (model_save_name))
    print("")

    # Dataset and shuffling mini-batch loader.
    train_data = CaptchaDataset(train_data_path, transform, characters)
    train_load = DataLoader(train_data,
                            batch_size=batch_size,
                            shuffle=True)

    # Use the GPU when available, otherwise fall back to the CPU.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = CNNModel(height, width,
                     digit_num, class_num).to(device)
    model.train()
    # A small learning rate keeps the optimization stable on this data.
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)
    criterion = nn.CrossEntropyLoss()

    print("Begin training:")
    for epoch in range(epoch_num):
        for batch_idx, (data, label) in enumerate(train_load):
            data, label = data.to(device), label.to(device)
            output = model(data)
            # Sum the cross-entropy loss over every character position.
            loss = torch.tensor(0.0).to(device)
            for i in range(digit_num):
                loss += criterion(output[:, i, :], label[:, i])
            loss.backward()       # back-propagate the summed loss
            optimizer.step()      # update the parameters
            optimizer.zero_grad() # clear gradients for the next batch
            # Batch accuracy: a sample counts as correct only when every
            # character position is predicted correctly.
            predicted = torch.argmax(output, dim=2)
            correct = (predicted == label).all(dim=1).sum().item()
            acc = correct / data.size(0)
            # Report progress every 10 batches.
            if batch_idx % 10 == 0:
                print(f"Epoch {epoch + 1}/{epoch_num} "
                      f"| Batch {batch_idx}/{len(train_load)} "
                      f"| Loss: {loss.item():.4f} "
                      f"| accuracy {correct}/{data.size(0)}={acc:.3f}")
        # Save a checkpoint every 10 epochs for debugging/resuming.
        if (epoch + 1) % 10 == 0:
            checkpoint = os.path.join(model_save_path,
                                      "check.epoch" + str(epoch + 1))
            torch.save(model.state_dict(), checkpoint)
            print("checkpoint saved : %s" % (checkpoint))

    # Save the final trained weights to the configured path.
    torch.save(model.state_dict(), model_save_name)
    print("model saved : %s" % (model_save_name))
test.py
from dataset import CaptchaDataset
from model import CNNModelimport torch
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
import json


if __name__ == '__main__':
    # Load the evaluation parameters from the config file.
    with open("config.json", "r") as f:
        config = json.load(f)
    height = config["resize_height"]  # model input height
    width = config["resize_width"]    # model input width

    # Same deterministic preprocessing as training, without augmentation.
    transform = transforms.Compose([
        transforms.Resize((height, width)),
        transforms.ToTensor()])

    test_data_path = config["test_data_path"]    # test data directory
    characters = config["characters"]            # captcha character set
    digit_num = config["digit_num"]              # characters per captcha
    class_num = len(characters) * digit_num
    test_model_path = config["test_model_path"]  # trained weights to load

    print("resize_height = %d" % (height))
    print("resize_width = %d" % (width))
    print("test_data_path = %s" % (test_data_path))
    print("characters = %s" % (characters))
    print("digit_num = %d" % (digit_num))
    print("class_num = %d" % (class_num))
    print("test_model_path = %s" % (test_model_path))
    print("")

    test_data = CaptchaDataset(test_data_path, transform, characters)
    # No batch_size/shuffle: iterate the samples one at a time, in order.
    test_loader = DataLoader(test_data)

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model = CNNModel(height, width, digit_num, class_num).to(device)
    model.eval()
    # map_location lets weights trained on a GPU load on a CPU-only host;
    # without it torch.load raises when CUDA is unavailable.
    model.load_state_dict(torch.load(test_model_path, map_location=device))

    right = 0  # samples predicted completely correctly
    total = 0  # total number of samples evaluated ("all" shadowed a builtin)
    # no_grad: inference only, so skip building the autograd graph.
    with torch.no_grad():
        for (x, y) in test_loader:
            x, y = x.to(device), y.to(device)  # move the sample to the device
            pred = model(x)
            # argmax over the class dimension gives one predicted class per
            # character; the sample is right only if every position matches.
            if torch.equal(pred.argmax(dim=2).squeeze(0), y.squeeze(0)):
                right += 1
            total += 1

    # Overall accuracy over the whole test set.
    acc = right * 1.0 / total
    print("test accuracy = %d / %d = %.3lf" % (right, total, acc))
D:\Python310\python.exe D:/project/PycharmProjects/CNN/train.py
resize_height = 128
resize_width = 128
train_data_path = ./data/train-digit/
characters = 0123456789
batch_size = 128
epoch_num = 200
digit_num = 1
class_num = 10
learning_rate = 0.000100
model_save_name = ./model//captcha.1digit.2kBegin training:
Epoch 1/200 | Batch 0/16 | Loss: 2.3091 | accuracy 15/128=0.117
Epoch 1/200 | Batch 10/16 | Loss: 2.3238 | accuracy 10/128=0.078
Epoch 2/200 | Batch 0/16 | Loss: 2.3016 | accuracy 14/128=0.109
Epoch 2/200 | Batch 10/16 | Loss: 2.3000 | accuracy 15/128=0.117
Epoch 3/200 | Batch 0/16 | Loss: 2.3062 | accuracy 13/128=0.102
Epoch 3/200 | Batch 10/16 | Loss: 2.3053 | accuracy 12/128=0.094
Epoch 4/200 | Batch 0/16 | Loss: 2.3071 | accuracy 15/128=0.117
Epoch 4/200 | Batch 10/16 | Loss: 2.3018 | accuracy 18/128=0.141
Epoch 5/200 | Batch 0/16 | Loss: 2.2999 | accuracy 14/128=0.109
Epoch 5/200 | Batch 10/16 | Loss: 2.3003 | accuracy 17/128=0.133
Epoch 6/200 | Batch 0/16 | Loss: 2.3056 | accuracy 10/128=0.078
Epoch 6/200 | Batch 10/16 | Loss: 2.3008 | accuracy 17/128=0.133
Epoch 7/200 | Batch 0/16 | Loss: 2.3007 | accuracy 10/128=0.078
Epoch 7/200 | Batch 10/16 | Loss: 2.3061 | accuracy 10/128=0.078
Epoch 8/200 | Batch 0/16 | Loss: 2.3027 | accuracy 16/128=0.125
Epoch 8/200 | Batch 10/16 | Loss: 2.3041 | accuracy 11/128=0.086
Epoch 9/200 | Batch 0/16 | Loss: 2.3063 | accuracy 14/128=0.109
Epoch 9/200 | Batch 10/16 | Loss: 2.3000 | accuracy 12/128=0.094
Epoch 10/200 | Batch 0/16 | Loss: 2.2981 | accuracy 17/128=0.133
Epoch 10/200 | Batch 10/16 | Loss: 2.3018 | accuracy 17/128=0.133
checkpoint saved : ./model//check.epoch10
Epoch 11/200 | Batch 0/16 | Loss: 2.3048 | accuracy 13/128=0.102
Epoch 11/200 | Batch 10/16 | Loss: 2.3009 | accuracy 18/128=0.141
Epoch 12/200 | Batch 0/16 | Loss: 2.3007 | accuracy 5/128=0.039
Epoch 12/200 | Batch 10/16 | Loss: 2.3052 | accuracy 13/128=0.102
Epoch 13/200 | Batch 0/16 | Loss: 2.3016 | accuracy 15/128=0.117
Epoch 13/200 | Batch 10/16 | Loss: 2.2970 | accuracy 16/128=0.125
Epoch 14/200 | Batch 0/16 | Loss: 2.2986 | accuracy 19/128=0.148
Epoch 14/200 | Batch 10/16 | Loss: 2.3021 | accuracy 14/128=0.109
Epoch 15/200 | Batch 0/16 | Loss: 2.2987 | accuracy 17/128=0.133
Epoch 15/200 | Batch 10/16 | Loss: 2.3041 | accuracy 14/128=0.109
Epoch 16/200 | Batch 0/16 | Loss: 2.2994 | accuracy 16/128=0.125
Epoch 16/200 | Batch 10/16 | Loss: 2.3019 | accuracy 16/128=0.125
Epoch 17/200 | Batch 0/16 | Loss: 2.2933 | accuracy 14/128=0.109
Epoch 17/200 | Batch 10/16 | Loss: 2.2991 | accuracy 12/128=0.094
Epoch 18/200 | Batch 0/16 | Loss: 2.3012 | accuracy 16/128=0.125
Epoch 18/200 | Batch 10/16 | Loss: 2.3045 | accuracy 13/128=0.102
Epoch 19/200 | Batch 0/16 | Loss: 2.2907 | accuracy 25/128=0.195
Epoch 19/200 | Batch 10/16 | Loss: 2.3016 | accuracy 10/128=0.078
Epoch 20/200 | Batch 0/16 | Loss: 2.3050 | accuracy 13/128=0.102
Epoch 20/200 | Batch 10/16 | Loss: 2.2988 | accuracy 14/128=0.109
checkpoint saved : ./model//check.epoch20
Epoch 21/200 | Batch 0/16 | Loss: 2.2999 | accuracy 17/128=0.133
Epoch 21/200 | Batch 10/16 | Loss: 2.2937 | accuracy 15/128=0.117
Epoch 22/200 | Batch 0/16 | Loss: 2.3047 | accuracy 16/128=0.125
Epoch 22/200 | Batch 10/16 | Loss: 2.2853 | accuracy 18/128=0.141
Epoch 23/200 | Batch 0/16 | Loss: 2.2850 | accuracy 19/128=0.148
Epoch 23/200 | Batch 10/16 | Loss: 2.2959 | accuracy 13/128=0.102
Epoch 24/200 | Batch 0/16 | Loss: 2.2884 | accuracy 18/128=0.141
Epoch 24/200 | Batch 10/16 | Loss: 2.2940 | accuracy 18/128=0.141
Epoch 25/200 | Batch 0/16 | Loss: 2.2775 | accuracy 18/128=0.141
Epoch 25/200 | Batch 10/16 | Loss: 2.2858 | accuracy 15/128=0.117
Epoch 26/200 | Batch 0/16 | Loss: 2.2522 | accuracy 27/128=0.211
Epoch 26/200 | Batch 10/16 | Loss: 2.3032 | accuracy 16/128=0.125
Epoch 27/200 | Batch 0/16 | Loss: 2.2583 | accuracy 24/128=0.188
Epoch 27/200 | Batch 10/16 | Loss: 2.2422 | accuracy 28/128=0.219
Epoch 28/200 | Batch 0/16 | Loss: 2.2255 | accuracy 29/128=0.227
Epoch 28/200 | Batch 10/16 | Loss: 2.2325 | accuracy 16/128=0.125
Epoch 29/200 | Batch 0/16 | Loss: 2.1752 | accuracy 28/128=0.219
Epoch 29/200 | Batch 10/16 | Loss: 2.2192 | accuracy 23/128=0.180
Epoch 30/200 | Batch 0/16 | Loss: 2.2291 | accuracy 18/128=0.141
Epoch 30/200 | Batch 10/16 | Loss: 2.1861 | accuracy 25/128=0.195
checkpoint saved : ./model//check.epoch30
Epoch 31/200 | Batch 0/16 | Loss: 2.1700 | accuracy 35/128=0.273
Epoch 31/200 | Batch 10/16 | Loss: 2.0598 | accuracy 33/128=0.258
Epoch 32/200 | Batch 0/16 | Loss: 2.1042 | accuracy 29/128=0.227
Epoch 32/200 | Batch 10/16 | Loss: 2.0796 | accuracy 27/128=0.211
Epoch 33/200 | Batch 0/16 | Loss: 2.1144 | accuracy 23/128=0.180
Epoch 33/200 | Batch 10/16 | Loss: 2.1632 | accuracy 26/128=0.203
Epoch 34/200 | Batch 0/16 | Loss: 2.0593 | accuracy 38/128=0.297
Epoch 34/200 | Batch 10/16 | Loss: 2.0564 | accuracy 37/128=0.289
Epoch 35/200 | Batch 0/16 | Loss: 1.9282 | accuracy 42/128=0.328
Epoch 35/200 | Batch 10/16 | Loss: 2.0059 | accuracy 36/128=0.281
Epoch 36/200 | Batch 0/16 | Loss: 2.0065 | accuracy 35/128=0.273
Epoch 36/200 | Batch 10/16 | Loss: 1.9090 | accuracy 42/128=0.328
Epoch 37/200 | Batch 0/16 | Loss: 1.9358 | accuracy 39/128=0.305
Epoch 37/200 | Batch 10/16 | Loss: 1.9197 | accuracy 45/128=0.352
Epoch 38/200 | Batch 0/16 | Loss: 1.9248 | accuracy 42/128=0.328
Epoch 38/200 | Batch 10/16 | Loss: 1.9072 | accuracy 40/128=0.312
Epoch 39/200 | Batch 0/16 | Loss: 1.9429 | accuracy 41/128=0.320
Epoch 39/200 | Batch 10/16 | Loss: 1.9401 | accuracy 39/128=0.305
Epoch 40/200 | Batch 0/16 | Loss: 1.8600 | accuracy 44/128=0.344
Epoch 40/200 | Batch 10/16 | Loss: 1.8164 | accuracy 46/128=0.359
checkpoint saved : ./model//check.epoch40
Epoch 41/200 | Batch 0/16 | Loss: 1.8458 | accuracy 48/128=0.375
Epoch 41/200 | Batch 10/16 | Loss: 1.7130 | accuracy 54/128=0.422
Epoch 42/200 | Batch 0/16 | Loss: 1.6807 | accuracy 53/128=0.414
Epoch 42/200 | Batch 10/16 | Loss: 1.8174 | accuracy 41/128=0.320
Epoch 43/200 | Batch 0/16 | Loss: 1.8646 | accuracy 40/128=0.312
Epoch 43/200 | Batch 10/16 | Loss: 1.6046 | accuracy 54/128=0.422
Epoch 44/200 | Batch 0/16 | Loss: 1.7627 | accuracy 43/128=0.336
Epoch 44/200 | Batch 10/16 | Loss: 1.7279 | accuracy 48/128=0.375
Epoch 45/200 | Batch 0/16 | Loss: 1.6728 | accuracy 50/128=0.391
Epoch 45/200 | Batch 10/16 | Loss: 1.6171 | accuracy 53/128=0.414
Epoch 46/200 | Batch 0/16 | Loss: 1.6969 | accuracy 51/128=0.398
Epoch 46/200 | Batch 10/16 | Loss: 1.6196 | accuracy 48/128=0.375
Epoch 47/200 | Batch 0/16 | Loss: 1.6617 | accuracy 56/128=0.438
Epoch 47/200 | Batch 10/16 | Loss: 1.5410 | accuracy 67/128=0.523
Epoch 48/200 | Batch 0/16 | Loss: 1.6146 | accuracy 55/128=0.430
Epoch 48/200 | Batch 10/16 | Loss: 1.7213 | accuracy 44/128=0.344
Epoch 49/200 | Batch 0/16 | Loss: 1.5919 | accuracy 61/128=0.477
Epoch 49/200 | Batch 10/16 | Loss: 1.5982 | accuracy 51/128=0.398
Epoch 50/200 | Batch 0/16 | Loss: 1.6092 | accuracy 59/128=0.461
Epoch 50/200 | Batch 10/16 | Loss: 1.4322 | accuracy 65/128=0.508
checkpoint saved : ./model//check.epoch50
Epoch 51/200 | Batch 0/16 | Loss: 1.5115 | accuracy 65/128=0.508
Epoch 51/200 | Batch 10/16 | Loss: 1.5191 | accuracy 58/128=0.453
Epoch 52/200 | Batch 0/16 | Loss: 1.5553 | accuracy 64/128=0.500
Epoch 52/200 | Batch 10/16 | Loss: 1.5587 | accuracy 60/128=0.469
Epoch 53/200 | Batch 0/16 | Loss: 1.5137 | accuracy 61/128=0.477
Epoch 53/200 | Batch 10/16 | Loss: 1.3685 | accuracy 67/128=0.523
Epoch 54/200 | Batch 0/16 | Loss: 1.6554 | accuracy 50/128=0.391
Epoch 54/200 | Batch 10/16 | Loss: 1.4803 | accuracy 59/128=0.461
Epoch 55/200 | Batch 0/16 | Loss: 1.3825 | accuracy 66/128=0.516
Epoch 55/200 | Batch 10/16 | Loss: 1.4612 | accuracy 62/128=0.484
Epoch 56/200 | Batch 0/16 | Loss: 1.3605 | accuracy 73/128=0.570
Epoch 56/200 | Batch 10/16 | Loss: 1.4856 | accuracy 66/128=0.516
Epoch 57/200 | Batch 0/16 | Loss: 1.5354 | accuracy 51/128=0.398
Epoch 57/200 | Batch 10/16 | Loss: 1.4573 | accuracy 59/128=0.461
Epoch 58/200 | Batch 0/16 | Loss: 1.3566 | accuracy 61/128=0.477
Epoch 58/200 | Batch 10/16 | Loss: 1.3901 | accuracy 63/128=0.492
Epoch 59/200 | Batch 0/16 | Loss: 1.3130 | accuracy 70/128=0.547
Epoch 59/200 | Batch 10/16 | Loss: 1.1667 | accuracy 76/128=0.594
Epoch 60/200 | Batch 0/16 | Loss: 1.3881 | accuracy 70/128=0.547
Epoch 60/200 | Batch 10/16 | Loss: 1.2703 | accuracy 68/128=0.531
checkpoint saved : ./model//check.epoch60
Epoch 61/200 | Batch 0/16 | Loss: 1.4010 | accuracy 62/128=0.484
Epoch 61/200 | Batch 10/16 | Loss: 1.3181 | accuracy 72/128=0.562
Epoch 62/200 | Batch 0/16 | Loss: 1.2716 | accuracy 69/128=0.539
Epoch 62/200 | Batch 10/16 | Loss: 1.3523 | accuracy 62/128=0.484
Epoch 63/200 | Batch 0/16 | Loss: 1.2137 | accuracy 78/128=0.609
Epoch 63/200 | Batch 10/16 | Loss: 1.2490 | accuracy 75/128=0.586
Epoch 64/200 | Batch 0/16 | Loss: 1.2601 | accuracy 77/128=0.602
Epoch 64/200 | Batch 10/16 | Loss: 1.2207 | accuracy 72/128=0.562
Epoch 65/200 | Batch 0/16 | Loss: 1.1812 | accuracy 73/128=0.570
Epoch 65/200 | Batch 10/16 | Loss: 1.2019 | accuracy 74/128=0.578
Epoch 66/200 | Batch 0/16 | Loss: 1.0996 | accuracy 77/128=0.602
Epoch 66/200 | Batch 10/16 | Loss: 1.1076 | accuracy 72/128=0.562
Epoch 67/200 | Batch 0/16 | Loss: 1.2806 | accuracy 71/128=0.555
Epoch 67/200 | Batch 10/16 | Loss: 1.2237 | accuracy 74/128=0.578
Epoch 68/200 | Batch 0/16 | Loss: 1.1196 | accuracy 81/128=0.633
Epoch 68/200 | Batch 10/16 | Loss: 1.1982 | accuracy 78/128=0.609
Epoch 69/200 | Batch 0/16 | Loss: 1.0038 | accuracy 93/128=0.727
Epoch 69/200 | Batch 10/16 | Loss: 1.2466 | accuracy 72/128=0.562
Epoch 70/200 | Batch 0/16 | Loss: 1.0274 | accuracy 79/128=0.617
Epoch 70/200 | Batch 10/16 | Loss: 1.0536 | accuracy 82/128=0.641
checkpoint saved : ./model//check.epoch70
Epoch 71/200 | Batch 0/16 | Loss: 1.1594 | accuracy 79/128=0.617
Epoch 71/200 | Batch 10/16 | Loss: 1.0447 | accuracy 80/128=0.625
Epoch 72/200 | Batch 0/16 | Loss: 1.2550 | accuracy 68/128=0.531
Epoch 72/200 | Batch 10/16 | Loss: 1.1217 | accuracy 79/128=0.617
Epoch 73/200 | Batch 0/16 | Loss: 1.0504 | accuracy 78/128=0.609
Epoch 73/200 | Batch 10/16 | Loss: 1.2043 | accuracy 77/128=0.602
Epoch 74/200 | Batch 0/16 | Loss: 1.0929 | accuracy 74/128=0.578
Epoch 74/200 | Batch 10/16 | Loss: 1.0416 | accuracy 82/128=0.641
Epoch 75/200 | Batch 0/16 | Loss: 0.9702 | accuracy 89/128=0.695
Epoch 75/200 | Batch 10/16 | Loss: 0.9303 | accuracy 95/128=0.742
Epoch 76/200 | Batch 0/16 | Loss: 0.8531 | accuracy 93/128=0.727
Epoch 76/200 | Batch 10/16 | Loss: 1.0092 | accuracy 87/128=0.680
Epoch 77/200 | Batch 0/16 | Loss: 1.0739 | accuracy 78/128=0.609
Epoch 77/200 | Batch 10/16 | Loss: 1.0276 | accuracy 81/128=0.633
Epoch 78/200 | Batch 0/16 | Loss: 0.9078 | accuracy 91/128=0.711
Epoch 78/200 | Batch 10/16 | Loss: 0.9602 | accuracy 80/128=0.625
Epoch 79/200 | Batch 0/16 | Loss: 0.9347 | accuracy 85/128=0.664
Epoch 79/200 | Batch 10/16 | Loss: 0.9257 | accuracy 87/128=0.680
Epoch 80/200 | Batch 0/16 | Loss: 1.0276 | accuracy 84/128=0.656
Epoch 80/200 | Batch 10/16 | Loss: 0.8795 | accuracy 88/128=0.688
checkpoint saved : ./model//check.epoch80
Epoch 81/200 | Batch 0/16 | Loss: 0.7719 | accuracy 96/128=0.750
Epoch 81/200 | Batch 10/16 | Loss: 0.9031 | accuracy 90/128=0.703
Epoch 82/200 | Batch 0/16 | Loss: 0.8802 | accuracy 91/128=0.711
Epoch 82/200 | Batch 10/16 | Loss: 0.8708 | accuracy 88/128=0.688
Epoch 83/200 | Batch 0/16 | Loss: 0.8398 | accuracy 91/128=0.711
Epoch 83/200 | Batch 10/16 | Loss: 0.7149 | accuracy 99/128=0.773
Epoch 84/200 | Batch 0/16 | Loss: 0.7306 | accuracy 101/128=0.789
Epoch 84/200 | Batch 10/16 | Loss: 0.8610 | accuracy 92/128=0.719
Epoch 85/200 | Batch 0/16 | Loss: 0.8118 | accuracy 92/128=0.719
Epoch 85/200 | Batch 10/16 | Loss: 0.8698 | accuracy 94/128=0.734
Epoch 86/200 | Batch 0/16 | Loss: 0.7987 | accuracy 93/128=0.727
Epoch 86/200 | Batch 10/16 | Loss: 0.7173 | accuracy 101/128=0.789
Epoch 87/200 | Batch 0/16 | Loss: 0.7868 | accuracy 93/128=0.727
Epoch 87/200 | Batch 10/16 | Loss: 0.9372 | accuracy 80/128=0.625
Epoch 88/200 | Batch 0/16 | Loss: 0.8355 | accuracy 91/128=0.711
Epoch 88/200 | Batch 10/16 | Loss: 0.7740 | accuracy 93/128=0.727
Epoch 89/200 | Batch 0/16 | Loss: 0.8853 | accuracy 86/128=0.672
Epoch 89/200 | Batch 10/16 | Loss: 0.7612 | accuracy 91/128=0.711
Epoch 90/200 | Batch 0/16 | Loss: 0.6926 | accuracy 99/128=0.773
Epoch 90/200 | Batch 10/16 | Loss: 0.6736 | accuracy 97/128=0.758
checkpoint saved : ./model//check.epoch90
Epoch 91/200 | Batch 0/16 | Loss: 0.7096 | accuracy 95/128=0.742
Epoch 91/200 | Batch 10/16 | Loss: 0.7188 | accuracy 103/128=0.805
Epoch 92/200 | Batch 0/16 | Loss: 0.7054 | accuracy 96/128=0.750
Epoch 92/200 | Batch 10/16 | Loss: 0.6021 | accuracy 110/128=0.859
Epoch 93/200 | Batch 0/16 | Loss: 0.7780 | accuracy 96/128=0.750
Epoch 93/200 | Batch 10/16 | Loss: 0.7090 | accuracy 103/128=0.805
Epoch 94/200 | Batch 0/16 | Loss: 0.6440 | accuracy 102/128=0.797
Epoch 94/200 | Batch 10/16 | Loss: 0.8302 | accuracy 88/128=0.688
Epoch 95/200 | Batch 0/16 | Loss: 0.7757 | accuracy 96/128=0.750
Epoch 95/200 | Batch 10/16 | Loss: 0.6106 | accuracy 104/128=0.812
Epoch 96/200 | Batch 0/16 | Loss: 0.6474 | accuracy 96/128=0.750
Epoch 96/200 | Batch 10/16 | Loss: 0.6675 | accuracy 102/128=0.797
Epoch 97/200 | Batch 0/16 | Loss: 0.5350 | accuracy 106/128=0.828
Epoch 97/200 | Batch 10/16 | Loss: 0.8105 | accuracy 93/128=0.727
Epoch 98/200 | Batch 0/16 | Loss: 0.7731 | accuracy 87/128=0.680
Epoch 98/200 | Batch 10/16 | Loss: 0.6888 | accuracy 96/128=0.750
Epoch 99/200 | Batch 0/16 | Loss: 0.6044 | accuracy 106/128=0.828
Epoch 99/200 | Batch 10/16 | Loss: 0.5313 | accuracy 101/128=0.789
Epoch 100/200 | Batch 0/16 | Loss: 0.7274 | accuracy 96/128=0.750
Epoch 100/200 | Batch 10/16 | Loss: 0.6472 | accuracy 100/128=0.781
checkpoint saved : ./model//check.epoch100
Epoch 101/200 | Batch 0/16 | Loss: 0.6915 | accuracy 98/128=0.766
Epoch 101/200 | Batch 10/16 | Loss: 0.5370 | accuracy 109/128=0.852
Epoch 102/200 | Batch 0/16 | Loss: 0.5760 | accuracy 104/128=0.812
Epoch 102/200 | Batch 10/16 | Loss: 0.7622 | accuracy 93/128=0.727
Epoch 103/200 | Batch 0/16 | Loss: 0.5385 | accuracy 102/128=0.797
Epoch 103/200 | Batch 10/16 | Loss: 0.6802 | accuracy 103/128=0.805
Epoch 104/200 | Batch 0/16 | Loss: 0.5285 | accuracy 110/128=0.859
Epoch 104/200 | Batch 10/16 | Loss: 0.5555 | accuracy 110/128=0.859
Epoch 105/200 | Batch 0/16 | Loss: 0.6075 | accuracy 102/128=0.797
Epoch 105/200 | Batch 10/16 | Loss: 0.5659 | accuracy 101/128=0.789
Epoch 106/200 | Batch 0/16 | Loss: 0.4936 | accuracy 108/128=0.844
Epoch 106/200 | Batch 10/16 | Loss: 0.6707 | accuracy 102/128=0.797
Epoch 107/200 | Batch 0/16 | Loss: 0.5391 | accuracy 105/128=0.820
Epoch 107/200 | Batch 10/16 | Loss: 0.4698 | accuracy 105/128=0.820
Epoch 108/200 | Batch 0/16 | Loss: 0.4267 | accuracy 108/128=0.844
Epoch 108/200 | Batch 10/16 | Loss: 0.5509 | accuracy 102/128=0.797
Epoch 109/200 | Batch 0/16 | Loss: 0.4462 | accuracy 107/128=0.836
Epoch 109/200 | Batch 10/16 | Loss: 0.5380 | accuracy 105/128=0.820
Epoch 110/200 | Batch 0/16 | Loss: 0.4637 | accuracy 110/128=0.859
Epoch 110/200 | Batch 10/16 | Loss: 0.4375 | accuracy 109/128=0.852
checkpoint saved : ./model//check.epoch110
Epoch 111/200 | Batch 0/16 | Loss: 0.5567 | accuracy 105/128=0.820
Epoch 111/200 | Batch 10/16 | Loss: 0.4808 | accuracy 108/128=0.844
Epoch 112/200 | Batch 0/16 | Loss: 0.4961 | accuracy 109/128=0.852
Epoch 112/200 | Batch 10/16 | Loss: 0.5008 | accuracy 104/128=0.812
Epoch 113/200 | Batch 0/16 | Loss: 0.4603 | accuracy 112/128=0.875
Epoch 113/200 | Batch 10/16 | Loss: 0.4817 | accuracy 108/128=0.844
Epoch 114/200 | Batch 0/16 | Loss: 0.3971 | accuracy 111/128=0.867
Epoch 114/200 | Batch 10/16 | Loss: 0.4703 | accuracy 105/128=0.820
Epoch 115/200 | Batch 0/16 | Loss: 0.5089 | accuracy 102/128=0.797
Epoch 115/200 | Batch 10/16 | Loss: 0.4242 | accuracy 112/128=0.875
Epoch 116/200 | Batch 0/16 | Loss: 0.5037 | accuracy 103/128=0.805
Epoch 116/200 | Batch 10/16 | Loss: 0.4972 | accuracy 102/128=0.797
Epoch 117/200 | Batch 0/16 | Loss: 0.4382 | accuracy 109/128=0.852
Epoch 117/200 | Batch 10/16 | Loss: 0.3487 | accuracy 116/128=0.906
Epoch 118/200 | Batch 0/16 | Loss: 0.3746 | accuracy 112/128=0.875
Epoch 118/200 | Batch 10/16 | Loss: 0.3572 | accuracy 114/128=0.891
Epoch 119/200 | Batch 0/16 | Loss: 0.3941 | accuracy 110/128=0.859
Epoch 119/200 | Batch 10/16 | Loss: 0.4587 | accuracy 110/128=0.859
Epoch 120/200 | Batch 0/16 | Loss: 0.3700 | accuracy 114/128=0.891
Epoch 120/200 | Batch 10/16 | Loss: 0.3846 | accuracy 112/128=0.875
checkpoint saved : ./model//check.epoch120
Epoch 121/200 | Batch 0/16 | Loss: 0.4735 | accuracy 110/128=0.859
Epoch 121/200 | Batch 10/16 | Loss: 0.5561 | accuracy 104/128=0.812
Epoch 122/200 | Batch 0/16 | Loss: 0.3554 | accuracy 115/128=0.898
Epoch 122/200 | Batch 10/16 | Loss: 0.4541 | accuracy 113/128=0.883
Epoch 123/200 | Batch 0/16 | Loss: 0.4274 | accuracy 110/128=0.859
Epoch 123/200 | Batch 10/16 | Loss: 0.3901 | accuracy 112/128=0.875
Epoch 124/200 | Batch 0/16 | Loss: 0.3440 | accuracy 118/128=0.922
Epoch 124/200 | Batch 10/16 | Loss: 0.3341 | accuracy 113/128=0.883
Epoch 125/200 | Batch 0/16 | Loss: 0.3978 | accuracy 111/128=0.867
Epoch 125/200 | Batch 10/16 | Loss: 0.4012 | accuracy 113/128=0.883
Epoch 126/200 | Batch 0/16 | Loss: 0.3910 | accuracy 114/128=0.891
Epoch 126/200 | Batch 10/16 | Loss: 0.4164 | accuracy 113/128=0.883
Epoch 127/200 | Batch 0/16 | Loss: 0.3342 | accuracy 114/128=0.891
Epoch 127/200 | Batch 10/16 | Loss: 0.3473 | accuracy 120/128=0.938
Epoch 128/200 | Batch 0/16 | Loss: 0.3794 | accuracy 111/128=0.867
Epoch 128/200 | Batch 10/16 | Loss: 0.4186 | accuracy 110/128=0.859
Epoch 129/200 | Batch 0/16 | Loss: 0.3165 | accuracy 117/128=0.914
Epoch 129/200 | Batch 10/16 | Loss: 0.3586 | accuracy 112/128=0.875
Epoch 130/200 | Batch 0/16 | Loss: 0.3648 | accuracy 113/128=0.883
Epoch 130/200 | Batch 10/16 | Loss: 0.4095 | accuracy 115/128=0.898
checkpoint saved : ./model//check.epoch130
Epoch 131/200 | Batch 0/16 | Loss: 0.3751 | accuracy 114/128=0.891
Epoch 131/200 | Batch 10/16 | Loss: 0.2695 | accuracy 122/128=0.953
Epoch 132/200 | Batch 0/16 | Loss: 0.3491 | accuracy 115/128=0.898
Epoch 132/200 | Batch 10/16 | Loss: 0.2876 | accuracy 118/128=0.922
Epoch 133/200 | Batch 0/16 | Loss: 0.3161 | accuracy 116/128=0.906
Epoch 133/200 | Batch 10/16 | Loss: 0.3067 | accuracy 115/128=0.898
Epoch 134/200 | Batch 0/16 | Loss: 0.3532 | accuracy 117/128=0.914
Epoch 134/200 | Batch 10/16 | Loss: 0.3171 | accuracy 116/128=0.906
Epoch 135/200 | Batch 0/16 | Loss: 0.3430 | accuracy 113/128=0.883
Epoch 135/200 | Batch 10/16 | Loss: 0.3494 | accuracy 116/128=0.906
Epoch 136/200 | Batch 0/16 | Loss: 0.3088 | accuracy 116/128=0.906
Epoch 136/200 | Batch 10/16 | Loss: 0.3662 | accuracy 115/128=0.898
Epoch 137/200 | Batch 0/16 | Loss: 0.3178 | accuracy 117/128=0.914
Epoch 137/200 | Batch 10/16 | Loss: 0.4010 | accuracy 112/128=0.875
Epoch 138/200 | Batch 0/16 | Loss: 0.3349 | accuracy 114/128=0.891
Epoch 138/200 | Batch 10/16 | Loss: 0.3311 | accuracy 114/128=0.891
Epoch 139/200 | Batch 0/16 | Loss: 0.3263 | accuracy 115/128=0.898
Epoch 139/200 | Batch 10/16 | Loss: 0.3045 | accuracy 117/128=0.914
Epoch 140/200 | Batch 0/16 | Loss: 0.2755 | accuracy 117/128=0.914
Epoch 140/200 | Batch 10/16 | Loss: 0.2942 | accuracy 116/128=0.906
checkpoint saved : ./model//check.epoch140
Epoch 141/200 | Batch 0/16 | Loss: 0.2904 | accuracy 115/128=0.898
Epoch 141/200 | Batch 10/16 | Loss: 0.2317 | accuracy 121/128=0.945
Epoch 142/200 | Batch 0/16 | Loss: 0.4009 | accuracy 112/128=0.875
Epoch 142/200 | Batch 10/16 | Loss: 0.2950 | accuracy 117/128=0.914
Epoch 143/200 | Batch 0/16 | Loss: 0.2833 | accuracy 114/128=0.891
Epoch 143/200 | Batch 10/16 | Loss: 0.2006 | accuracy 121/128=0.945
Epoch 144/200 | Batch 0/16 | Loss: 0.3718 | accuracy 117/128=0.914
Epoch 144/200 | Batch 10/16 | Loss: 0.4305 | accuracy 106/128=0.828
Epoch 145/200 | Batch 0/16 | Loss: 0.2323 | accuracy 118/128=0.922
Epoch 145/200 | Batch 10/16 | Loss: 0.2974 | accuracy 120/128=0.938
Epoch 146/200 | Batch 0/16 | Loss: 0.2393 | accuracy 120/128=0.938
Epoch 146/200 | Batch 10/16 | Loss: 0.2414 | accuracy 120/128=0.938
Epoch 147/200 | Batch 0/16 | Loss: 0.2520 | accuracy 117/128=0.914
Epoch 147/200 | Batch 10/16 | Loss: 0.1956 | accuracy 123/128=0.961
Epoch 148/200 | Batch 0/16 | Loss: 0.3122 | accuracy 112/128=0.875
Epoch 148/200 | Batch 10/16 | Loss: 0.2806 | accuracy 119/128=0.930
Epoch 149/200 | Batch 0/16 | Loss: 0.2155 | accuracy 120/128=0.938
Epoch 149/200 | Batch 10/16 | Loss: 0.2039 | accuracy 119/128=0.930
Epoch 150/200 | Batch 0/16 | Loss: 0.2909 | accuracy 115/128=0.898
Epoch 150/200 | Batch 10/16 | Loss: 0.2923 | accuracy 119/128=0.930
checkpoint saved : ./model//check.epoch150
Epoch 151/200 | Batch 0/16 | Loss: 0.2236 | accuracy 119/128=0.930
Epoch 151/200 | Batch 10/16 | Loss: 0.2395 | accuracy 116/128=0.906
Epoch 152/200 | Batch 0/16 | Loss: 0.2158 | accuracy 122/128=0.953
Epoch 152/200 | Batch 10/16 | Loss: 0.3395 | accuracy 115/128=0.898
Epoch 153/200 | Batch 0/16 | Loss: 0.1672 | accuracy 122/128=0.953
Epoch 153/200 | Batch 10/16 | Loss: 0.2050 | accuracy 122/128=0.953
Epoch 154/200 | Batch 0/16 | Loss: 0.1663 | accuracy 123/128=0.961
Epoch 154/200 | Batch 10/16 | Loss: 0.3110 | accuracy 115/128=0.898
Epoch 155/200 | Batch 0/16 | Loss: 0.2082 | accuracy 121/128=0.945
Epoch 155/200 | Batch 10/16 | Loss: 0.1615 | accuracy 126/128=0.984
Epoch 156/200 | Batch 0/16 | Loss: 0.1987 | accuracy 120/128=0.938
Epoch 156/200 | Batch 10/16 | Loss: 0.2378 | accuracy 120/128=0.938
Epoch 157/200 | Batch 0/16 | Loss: 0.2627 | accuracy 119/128=0.930
Epoch 157/200 | Batch 10/16 | Loss: 0.2107 | accuracy 119/128=0.930
Epoch 158/200 | Batch 0/16 | Loss: 0.2405 | accuracy 117/128=0.914
Epoch 158/200 | Batch 10/16 | Loss: 0.1911 | accuracy 121/128=0.945
Epoch 159/200 | Batch 0/16 | Loss: 0.2335 | accuracy 116/128=0.906
Epoch 159/200 | Batch 10/16 | Loss: 0.1842 | accuracy 124/128=0.969
Epoch 160/200 | Batch 0/16 | Loss: 0.1570 | accuracy 122/128=0.953
Epoch 160/200 | Batch 10/16 | Loss: 0.2303 | accuracy 118/128=0.922
checkpoint saved : ./model//check.epoch160
Epoch 161/200 | Batch 0/16 | Loss: 0.1888 | accuracy 122/128=0.953
Epoch 161/200 | Batch 10/16 | Loss: 0.1389 | accuracy 123/128=0.961
Epoch 162/200 | Batch 0/16 | Loss: 0.2047 | accuracy 121/128=0.945
Epoch 162/200 | Batch 10/16 | Loss: 0.1748 | accuracy 120/128=0.938
Epoch 163/200 | Batch 0/16 | Loss: 0.1451 | accuracy 124/128=0.969
Epoch 163/200 | Batch 10/16 | Loss: 0.1395 | accuracy 124/128=0.969
Epoch 164/200 | Batch 0/16 | Loss: 0.1824 | accuracy 120/128=0.938
Epoch 164/200 | Batch 10/16 | Loss: 0.1795 | accuracy 120/128=0.938
Epoch 165/200 | Batch 0/16 | Loss: 0.1478 | accuracy 123/128=0.961
Epoch 165/200 | Batch 10/16 | Loss: 0.1997 | accuracy 123/128=0.961
Epoch 166/200 | Batch 0/16 | Loss: 0.1808 | accuracy 120/128=0.938
Epoch 166/200 | Batch 10/16 | Loss: 0.1875 | accuracy 119/128=0.930
Epoch 167/200 | Batch 0/16 | Loss: 0.1764 | accuracy 118/128=0.922
Epoch 167/200 | Batch 10/16 | Loss: 0.1592 | accuracy 124/128=0.969
Epoch 168/200 | Batch 0/16 | Loss: 0.2030 | accuracy 118/128=0.922
Epoch 168/200 | Batch 10/16 | Loss: 0.1260 | accuracy 123/128=0.961
Epoch 169/200 | Batch 0/16 | Loss: 0.1836 | accuracy 119/128=0.930
Epoch 169/200 | Batch 10/16 | Loss: 0.2194 | accuracy 120/128=0.938
Epoch 170/200 | Batch 0/16 | Loss: 0.2251 | accuracy 120/128=0.938
Epoch 170/200 | Batch 10/16 | Loss: 0.1552 | accuracy 123/128=0.961
checkpoint saved : ./model//check.epoch170
Epoch 171/200 | Batch 0/16 | Loss: 0.0859 | accuracy 127/128=0.992
Epoch 171/200 | Batch 10/16 | Loss: 0.1966 | accuracy 121/128=0.945
Epoch 172/200 | Batch 0/16 | Loss: 0.1674 | accuracy 120/128=0.938
Epoch 172/200 | Batch 10/16 | Loss: 0.1515 | accuracy 124/128=0.969
Epoch 173/200 | Batch 0/16 | Loss: 0.1992 | accuracy 115/128=0.898
Epoch 173/200 | Batch 10/16 | Loss: 0.1338 | accuracy 123/128=0.961
Epoch 174/200 | Batch 0/16 | Loss: 0.1419 | accuracy 124/128=0.969
Epoch 174/200 | Batch 10/16 | Loss: 0.1699 | accuracy 121/128=0.945
Epoch 175/200 | Batch 0/16 | Loss: 0.2120 | accuracy 120/128=0.938
Epoch 175/200 | Batch 10/16 | Loss: 0.2010 | accuracy 119/128=0.930
Epoch 176/200 | Batch 0/16 | Loss: 0.2256 | accuracy 120/128=0.938
Epoch 176/200 | Batch 10/16 | Loss: 0.1252 | accuracy 122/128=0.953
Epoch 177/200 | Batch 0/16 | Loss: 0.1566 | accuracy 123/128=0.961
Epoch 177/200 | Batch 10/16 | Loss: 0.1291 | accuracy 122/128=0.953
Epoch 178/200 | Batch 0/16 | Loss: 0.1606 | accuracy 120/128=0.938
Epoch 178/200 | Batch 10/16 | Loss: 0.1472 | accuracy 125/128=0.977
Epoch 179/200 | Batch 0/16 | Loss: 0.1642 | accuracy 121/128=0.945
Epoch 179/200 | Batch 10/16 | Loss: 0.1051 | accuracy 125/128=0.977
Epoch 180/200 | Batch 0/16 | Loss: 0.2038 | accuracy 121/128=0.945
Epoch 180/200 | Batch 10/16 | Loss: 0.1333 | accuracy 122/128=0.953
checkpoint saved : ./model//check.epoch180
Epoch 181/200 | Batch 0/16 | Loss: 0.2143 | accuracy 120/128=0.938
Epoch 181/200 | Batch 10/16 | Loss: 0.1642 | accuracy 121/128=0.945
Epoch 182/200 | Batch 0/16 | Loss: 0.1173 | accuracy 123/128=0.961
Epoch 182/200 | Batch 10/16 | Loss: 0.1296 | accuracy 125/128=0.977
Epoch 183/200 | Batch 0/16 | Loss: 0.1144 | accuracy 126/128=0.984
Epoch 183/200 | Batch 10/16 | Loss: 0.1317 | accuracy 124/128=0.969
Epoch 184/200 | Batch 0/16 | Loss: 0.1667 | accuracy 124/128=0.969
Epoch 184/200 | Batch 10/16 | Loss: 0.0716 | accuracy 126/128=0.984
Epoch 185/200 | Batch 0/16 | Loss: 0.1296 | accuracy 122/128=0.953
Epoch 185/200 | Batch 10/16 | Loss: 0.1412 | accuracy 124/128=0.969
Epoch 186/200 | Batch 0/16 | Loss: 0.1750 | accuracy 121/128=0.945
Epoch 186/200 | Batch 10/16 | Loss: 0.1369 | accuracy 121/128=0.945
Epoch 187/200 | Batch 0/16 | Loss: 0.2256 | accuracy 121/128=0.945
Epoch 187/200 | Batch 10/16 | Loss: 0.1291 | accuracy 122/128=0.953
Epoch 188/200 | Batch 0/16 | Loss: 0.1657 | accuracy 120/128=0.938
Epoch 188/200 | Batch 10/16 | Loss: 0.0768 | accuracy 126/128=0.984
Epoch 189/200 | Batch 0/16 | Loss: 0.1616 | accuracy 122/128=0.953
Epoch 189/200 | Batch 10/16 | Loss: 0.1312 | accuracy 121/128=0.945
Epoch 190/200 | Batch 0/16 | Loss: 0.1196 | accuracy 126/128=0.984
Epoch 190/200 | Batch 10/16 | Loss: 0.0910 | accuracy 128/128=1.000
checkpoint saved : ./model//check.epoch190
Epoch 191/200 | Batch 0/16 | Loss: 0.1195 | accuracy 123/128=0.961
Epoch 191/200 | Batch 10/16 | Loss: 0.1772 | accuracy 121/128=0.945
Epoch 192/200 | Batch 0/16 | Loss: 0.1274 | accuracy 124/128=0.969
Epoch 192/200 | Batch 10/16 | Loss: 0.1134 | accuracy 123/128=0.961
Epoch 193/200 | Batch 0/16 | Loss: 0.1581 | accuracy 123/128=0.961
Epoch 193/200 | Batch 10/16 | Loss: 0.0965 | accuracy 126/128=0.984
Epoch 194/200 | Batch 0/16 | Loss: 0.1425 | accuracy 123/128=0.961
Epoch 194/200 | Batch 10/16 | Loss: 0.1087 | accuracy 124/128=0.969
Epoch 195/200 | Batch 0/16 | Loss: 0.1437 | accuracy 122/128=0.953
Epoch 195/200 | Batch 10/16 | Loss: 0.1568 | accuracy 123/128=0.961
Epoch 196/200 | Batch 0/16 | Loss: 0.0746 | accuracy 127/128=0.992
Epoch 196/200 | Batch 10/16 | Loss: 0.1321 | accuracy 124/128=0.969
Epoch 197/200 | Batch 0/16 | Loss: 0.1514 | accuracy 121/128=0.945
Epoch 197/200 | Batch 10/16 | Loss: 0.1016 | accuracy 126/128=0.984
Epoch 198/200 | Batch 0/16 | Loss: 0.1348 | accuracy 123/128=0.961
Epoch 198/200 | Batch 10/16 | Loss: 0.1297 | accuracy 123/128=0.961
Epoch 199/200 | Batch 0/16 | Loss: 0.1765 | accuracy 121/128=0.945
Epoch 199/200 | Batch 10/16 | Loss: 0.1166 | accuracy 122/128=0.953
Epoch 200/200 | Batch 0/16 | Loss: 0.0859 | accuracy 126/128=0.984
Epoch 200/200 | Batch 10/16 | Loss: 0.1667 | accuracy 121/128=0.945
checkpoint saved : ./model//check.epoch200
model saved : ./model//captcha.1digit.2k

Process finished with exit code 0
D:\Python310\python.exe D:/project/PycharmProjects/CNN/test.py
resize_height = 128
resize_width = 128
test_data_path = ./data/test-digit/
characters = 0123456789
digit_num = 1
class_num = 10
test_model_path = ./model/captcha.1digit.2k

test accuracy = 859 / 1000 = 0.859
4 参考资料
相关文章:
AI-CNN-验证码识别
1 需求 GitHub - xhh890921/cnn-captcha-pytorch: 小黑黑讲AI,AI实战项目《验证码识别》 2 接口 3 示例 config.json {"train_data_path": "./data/train-digit/","test_data_path": "./data/test-digit/","train_…...

React 前端框架全面教程:从入门到进阶
React 前端框架全面教程:从入门到进阶 引言 在现代前端开发中,React 作为一款流行的 JavaScript 库,以其组件化、声明式的特性和强大的生态系统,成为了开发者的首选。无论是构建单页应用(SPA)还是复杂的用…...

重拾CSS,前端样式精读-布局(弹性盒)
前言 本文收录于CSS系列文章中,欢迎阅读指正 接着上篇布局文章继续介绍当前流行的布局方式 Flexbox布局 长久以来,CSS 布局中唯一可靠且跨浏览器兼容的创建工具只有floats和positioning。这两个工具大部分情况下都很好使,但是在某些方面它…...

Python 使用 LSTM 进行情感分析:处理文本序列数据的指南
使用 LSTM 进行情感分析:处理文本序列数据的指南 长短期记忆网络(LSTM)是一种适合处理序列数据的深度学习模型,广泛应用于情感分析、语音识别、文本生成等领域。它通过在训练过程中“记住”过去的数据特征来理解和预测序列数据的…...
MySQL:INSERT IGNORE 语句的用法
INSERT IGNORE 语句 在MySQL中,INSERT IGNORE 语句用于尝试向表中插入一行数据,但如果插入操作会导致表中唯一索引或主键的冲突,MySQL将忽略该操作并继续执行,而不会引发错误。这意味着,如果表中已经存在具有相同唯一…...

java模拟进程调度
先来先服务优先级调度短作业优先调度响应比优先调度 代码 import java.util.ArrayList; import java.util.Comparator; import java.util.List; import java.util.Scanner;class Main {static class tasks{int id;//序号char jinchengname;//进程名int jinchengId;//double a…...
大模型AI在教育领域有哪些创业机会?
大模型AI在教育领域有很多创业机会,尤其是在个性化学习、教学辅助、教育资源优化等方面。以下是一些潜在的创业机会: 个性化学习平台 学习路径定制:根据学生的学习数据与兴趣,为他们设计个性化的学习路径,提供适合的课…...

网页上视频没有提供下载权限怎么办?
以腾讯会议录屏没有提供下载权限为例,该怎么办呢? 最好的办法就是找到管理员,开启下载权限。如果找不到呢,那就用这个办法下载。 1.打开Microsoft Edge浏览器的扩展 2.搜索“视频下载”,选择“视频下载Pro” 3.点击“…...
【去哪里找开源商城项目】
有很多途径可以找到开源项目,以下是一些常用的方法: 开源代码托管平台:许多开源项目都托管在平台上,例如GitHub、GitLab和Bitbucket。你可以在这些平台上浏览项目,搜索关键词,查看项目的星级和贡献者数量等…...

ei会议检索:第二届网络、通信与智能计算国际会议(NCIC 2024)
第二届网络、通信与智能计算国际会议(NCIC 2024)将于2024年11月22-25日在北京信息科技大学召开,聚焦网络、通信与智能计算,欢迎国内外学者投稿交流,录用文章将在Springer出版,并提交EI等检索。 NCIC 2024&a…...
vue添加省市区
主要参考“element”框架:Element - The worlds most popular Vue UI framework <div class"block"><span class"demonstration">默认 click 触发子菜单</span><el-cascaderv-model"value":options"optio…...
运维监控丨16条常用的Kafka看板监控配置与告警规则
本期我们针对企业运维监控的场景,介绍一些监控配置和告警规则。可以根据Kafka集群和业务的具体要求,灵活调整和扩展这些监控配置及告警规则。在实际应用场景中,需要综合运用多种监控工具(例如Prometheus、Grafana、Zabbix等&#…...

ECharts饼图,配置标注示例
const color ["#00FFB4", "#5498FD", "#6F54FD", "#FD5454", "#FDA354",]const datas [{ value: 100, name: "一年级" },{ value: 70, name: "二年级" },{ value: 184, name: "三年级" },{…...

【大象数据集】大象图像识别 目标检测 机器视觉(含数据集)
一、背景意义 在信息时代,数据的收集和分析技术得到了飞速发展。深度学习算法的出现,为处理和分析这些复杂的鱼类数据集提供了强大的工具。深度学习具有强大的模式识别和特征提取能力,能够从海量的数据中自动学习和发现规律,为鱼…...

LN 在 LLMs 中的不同位置 有什么区别么
Layer Normalization(LN)是一种在深度学习中用于稳定和加速神经网络训练的归一化技术。它通过对单个样本的所有激活进行归一化来工作,与Batch Normalization(BN)不同,BN是对一个mini-batch中的所有样本的激…...
【代码随想录Day57】图论Part08
拓扑排序精讲 题目链接/文章讲解:代码随想录 import java.util.*;public class Main {public static void main(String[] args) {Scanner scanner new Scanner(System.in);// 读取文件数量 n 和依赖关系数量 mint n scanner.nextInt();int m scanner.nextInt()…...

记录一次mmpretrain训练数据并转onnx推理
目录 1.前言 2.代码 3.数据形态【分类用】 4.配置文件 5.训练 6.测试-分析-混淆矩阵等等,测试图片效果等 7.导出onnx 8.onnx推理 9.docker环境简单补充 1.前言 好久没有做图像分类了,于是想用商汤的mmclassification快速搞一波,发现已…...

shodan5,参数使用,批量查找Mongodb未授权登录,jenkins批量挖掘
查找美国安全局漏洞 nww.nsa.gov(美国安全局官方网站) net参数使用 搜索指定的ip网段 shodan search --limit 10 --fields ip_str,port net:208.88.84.0/24 (老美国家安全局的一个网段)可能直接访问不太行,可以使用host参数,得到域名再去…...

telnet 密码模式 访问路由器
telnet 密码访问华为路由器 模拟被访问路由 sy [Huawei]int g0/0/0 //选中 g0/0/0端口 [Huawei-GigabitEthernet0/0/0]ip add 192.168.1.1 24 //设置端口ip [Huawei]user-interface vty 0 4 //配置vty [Huawei-ui-vty0-4]set authentication password cipher huawei123 //设置…...

文心一言 VS 讯飞星火 VS chatgpt (380)-- 算法导论24.4 12题
十二、给出一个有效算法来解决 A x ⩽ b Ax⩽b Ax⩽b 的差分约束系统,这里 b b b 的所有元素为实数,而变量 x i x_i xi 中某个给定的子集是整数。如果要写代码,请用go语言。 文心一言: 差分约束系统问题通常用于解决带有约…...
【根据当天日期输出明天的日期(需对闰年做判定)。】2022-5-15
缘由根据当天日期输出明天的日期(需对闰年做判定)。日期类型结构体如下: struct data{ int year; int month; int day;};-编程语言-CSDN问答 struct mdata{ int year; int month; int day; }mdata; int 天数(int year, int month) {switch (month){case 1: case 3:…...
FFmpeg 低延迟同屏方案
引言 在实时互动需求激增的当下,无论是在线教育中的师生同屏演示、远程办公的屏幕共享协作,还是游戏直播的画面实时传输,低延迟同屏已成为保障用户体验的核心指标。FFmpeg 作为一款功能强大的多媒体框架,凭借其灵活的编解码、数据…...

蓝桥杯3498 01串的熵
问题描述 对于一个长度为 23333333的 01 串, 如果其信息熵为 11625907.5798, 且 0 出现次数比 1 少, 那么这个 01 串中 0 出现了多少次? #include<iostream> #include<cmath> using namespace std;int n 23333333;int main() {//枚举 0 出现的次数//因…...
LeetCode - 199. 二叉树的右视图
题目 199. 二叉树的右视图 - 力扣(LeetCode) 思路 右视图是指从树的右侧看,对于每一层,只能看到该层最右边的节点。实现思路是: 使用深度优先搜索(DFS)按照"根-右-左"的顺序遍历树记录每个节点的深度对于…...
LangChain知识库管理后端接口:数据库操作详解—— 构建本地知识库系统的基础《二》
这段 Python 代码是一个完整的 知识库数据库操作模块,用于对本地知识库系统中的知识库进行增删改查(CRUD)操作。它基于 SQLAlchemy ORM 框架 和一个自定义的装饰器 with_session 实现数据库会话管理。 📘 一、整体功能概述 该模块…...
WebRTC从入门到实践 - 零基础教程
WebRTC从入门到实践 - 零基础教程 目录 WebRTC简介 基础概念 工作原理 开发环境搭建 基础实践 三个实战案例 常见问题解答 1. WebRTC简介 1.1 什么是WebRTC? WebRTC(Web Real-Time Communication)是一个支持网页浏览器进行实时语音…...

【Linux】Linux安装并配置RabbitMQ
目录 1. 安装 Erlang 2. 安装 RabbitMQ 2.1.添加 RabbitMQ 仓库 2.2.安装 RabbitMQ 3.配置 3.1.启动和管理服务 4. 访问管理界面 5.安装问题 6.修改密码 7.修改端口 7.1.找到文件 7.2.修改文件 1. 安装 Erlang 由于 RabbitMQ 是用 Erlang 编写的,需要先安…...
智能职业发展系统:AI驱动的职业规划平台技术解析
智能职业发展系统:AI驱动的职业规划平台技术解析 引言:数字时代的职业革命 在当今瞬息万变的就业市场中,传统的职业规划方法已无法满足个人和企业的需求。据统计,全球每年有超过2亿人面临职业转型困境,而企业也因此遭…...

动态规划-1035.不相交的线-力扣(LeetCode)
一、题目解析 光看题目要求和例图,感觉这题好麻烦,直线不能相交啊,每个数字只属于一条连线啊等等,但我们结合题目所给的信息和例图的内容,这不就是最长公共子序列吗?,我们把最长公共子序列连线起…...
手动给中文分词和 直接用神经网络RNN做有什么区别
手动分词和基于神经网络(如 RNN)的自动分词在原理、实现方式和效果上有显著差异,以下是核心对比: 1. 实现原理对比 对比维度手动分词(规则 / 词典驱动)神经网络 RNN 分词(数据驱动)…...