使用PyTorch高效读取二进制数据集进行训练

使用pickle制作类cifar10二进制格式的数据集

使用pytorch框架来训练(以猫狗大战数据集为例)

此方法是为了实现阿里云PAI studio上可视化训练模型时使用的数据格式。

一、制作类cifar10二进制格式数据

# Step 1: build the CIFAR10-style binary dataset.
# Reads "image_name label" lines from file_list, loads each image from
# data_path, and serializes everything into pickle batch files under save_path.
import os, cv2
from pickled import *
from load_data import *

data_path = './data_n/test'      # folder holding the raw images
file_list = './data_n/test.txt'  # text file: one "image_name label" per line
save_path = './bin'              # output folder for the pickled batch files

if __name__ == '__main__':
  # shape=128: every image is resized to 128x128 before packing
  data, label, lst = read_data(file_list, data_path, shape=128)
  # bin_num=1: write all samples into a single batch file
  pickled(save_path, data, label, lst, bin_num=1)

read_data模块

import cv2
import os
import numpy as np

DATA_LEN = 49152
CHANNEL_LEN = 16384
SHAPE = 128


def imread(im_path, shape=None, color="RGB", mode=cv2.IMREAD_UNCHANGED):
  """Read an image from disk, optionally convert BGR->RGB and resize.

     im_path (str): path to the image file
     shape (int or None): if given, resize to a (shape, shape) square
     color (str): "RGB" converts OpenCV's native BGR order to RGB
     mode (int): OpenCV imread flag
     return (numpy): HxWxC uint8 image array
  """
  # BUG FIX: honour the caller-supplied read flag; the old code accepted
  # `mode` but always passed cv2.IMREAD_UNCHANGED.
  im = cv2.imread(im_path, mode)
  if im is None:
    # cv2.imread silently returns None on a missing/corrupt file; fail loudly.
    raise FileNotFoundError("cannot read image: %s" % im_path)
  if color == "RGB":
    im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
  if shape is not None:
    assert isinstance(shape, int)
    im = cv2.resize(im, (shape, shape))
  return im


def read_data(filename, data_path, shape=None, color='RGB'):
  """Read all images listed in a manifest file into a flat uint8 array.

     filename (str): manifest file; each line is "image_name label"
     data_path (str): image data folder
     shape (int or None): square side to resize images to (defaults to SHAPE)
     color (str): colour conversion mode, forwarded to imread()
     return: (data, label, lst) where data is (n, 3*shape*shape) uint8 with
             channel-planar rows (all R bytes, then G, then B), label is a
             list of ints and lst is the list of image names
  """
  # BUG FIX: the old code tested os.path.isdir, printed a message and fell
  # through to return None, crashing the caller later. Fail fast instead.
  if not os.path.isfile(filename):
    raise FileNotFoundError("data list file not found: %s" % filename)
  with open(filename) as f:
    lines = f.read().splitlines()
  count = len(lines)

  # BUG FIX: honour the `shape` argument; previously the module constant
  # SHAPE was always used no matter what the caller passed.
  s = shape if shape is not None else SHAPE
  c = s * s  # bytes per colour plane

  data = np.zeros((count, 3 * c), dtype=np.uint8)
  lst = [ln.split(' ')[0] for ln in lines]
  label = [int(ln.split(' ')[1]) for ln in lines]

  for idx, ln in enumerate(lines):
    fname, lab = ln.split(' ')
    im = imread(os.path.join(data_path, fname), shape=s, color=color)
    # Store channel-planar, CIFAR-style: R plane, then G, then B.
    data[idx, :c] = np.reshape(im[:, :, 0], c)
    data[idx, c:2*c] = np.reshape(im[:, :, 1], c)
    data[idx, 2*c:] = np.reshape(im[:, :, 2], c)
    label[idx] = int(lab)

  return data, label, lst


pickled模块

import os
import pickle

BIN_COUNTS = 5  # default number of batch files to split a dataset into


def pickled(savepath, data, label, fnames, bin_num=BIN_COUNTS, mode="train"):
  '''Serialize image data into CIFAR-style pickle batch files.

    savepath (str): existing directory to write batch files into
    data (array): image data, an n x (3*H*W) array
    label (list): image labels, a list with length n
    fnames (str list): image names, a list with length n
    bin_num (int): split the data across this many files
    mode (str): {'train', 'test'} -- only affects the batch_label text
  '''
  # Validate with real exceptions; `assert` disappears under `python -O`.
  if not os.path.isdir(savepath):
    raise NotADirectoryError("savepath must be an existing directory: %s" % savepath)
  total_num = len(fnames)
  samples_per_bin = total_num / bin_num
  if samples_per_bin <= 0:
    raise ValueError("more bins (%d) than samples (%d)" % (bin_num, total_num))

  for idx in range(bin_num):
    start = int(idx * samples_per_bin)
    # BUG FIX: the last bin always absorbs the rounding remainder; previously
    # int() truncation could silently drop trailing samples when bin_num did
    # not divide total_num evenly.
    end = total_num if idx == bin_num - 1 else int((idx + 1) * samples_per_bin)

    # Renamed from `dict`, which shadowed the builtin.
    batch = {'data': data[start:end, :],
             'labels': label[start:end],
             'filenames': fnames[start:end]}
    if mode == "train":
      batch['batch_label'] = "training batch {} of {}".format(idx, bin_num)
    else:
      batch['batch_label'] = "testing batch {} of {}".format(idx, bin_num)

    with open(os.path.join(savepath, 'batch_tt' + str(idx)), 'wb') as fi:
      pickle.dump(batch, fi)


def unpickled(filename):
  """Inverse of pickled(): read one batch file back into a dict."""
  assert os.path.isfile(filename)
  with open(filename, 'rb') as handle:
    contents = pickle.load(handle)
  return contents


测试生成的二进制数据


import os
import pickle
import numpy as np
import cv2

def load_batch(fpath):
    """Read one pickled batch file and return its image rows and labels."""
    with open(fpath, 'rb') as handle:
        batch = pickle.load(handle)
    return batch["data"], batch["labels"]


def load_data(dirname, one_hot=False):
    """Smoke-test the generated binary batches by displaying one image.

    Reads 'batch_tt0' from dirname, reassembles the channel-planar rows into
    128x128x3 images and shows one with OpenCV. Returns nothing.
    one_hot is accepted but unused.
    """
    X_train = []
    Y_train = []

    # NOTE(review): range(0) never iterates, so this training-batch loading
    # loop is effectively disabled -- presumably left over from debugging.
    for i in range(0):
        fpath = os.path.join(dirname, 'data_batch_' + str(i))
        print(fpath)
        data, labels = load_batch(fpath)
        if i == 0:
            X_train = data
            Y_train = labels
        else:
            X_train = np.concatenate([X_train, data], axis=0)
            Y_train = np.concatenate([Y_train, labels], axis=0)

    # 'batch_tt0' is the file name written by pickled().
    ftpath = os.path.join(dirname, 'batch_tt0')
    X_test, Y_test = load_batch(ftpath)

    # Rows are channel-planar (16384 = 128*128 bytes per plane): stack the
    # R/G/B planes back together, then restore the (n, 128, 128, 3) shape.
    X_test = np.dstack((X_test[:, :16384], X_test[:, 16384:32768],
                             X_test[:, 32768:]))
    X_test = np.reshape(X_test, [-1, 128, 128, 3])
    print(X_test.shape)
    # NCHW view is only printed for comparison; not used further.
    xx_test = np.transpose(X_test,(0, 3, 1, 2))
    print(xx_test.shape)
#    print(X_test[2])
    imgs = X_test[2:4]
    img = imgs[1]
    print(img.shape)
    # Stored data is RGB; OpenCV displays BGR, hence the conversion back.
    img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    cv2.imshow('img', img)
    cv2.waitKey(0)


if __name__ == '__main__':
    # Visually verify the batch files written into the 'test' directory.
    dirname = 'test'
    load_data(dirname)

二、使用制作好的数据训练

import torch
import os
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from PIL import Image
import pickle
import numpy as np

#device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

def load_batch(fpath):
    """Unpickle one batch file; return (data rows, labels)."""
    with open(fpath, 'rb') as src:
        contents = pickle.load(src)
    return contents["data"], contents["labels"]

def load_data(dirname, one_hot=False, train=False):
    """Load pickled image batches and reshape the rows into HWC images.

    dirname (str): folder containing the batch files
    one_hot (bool): accepted for API compatibility; currently unused
    train (bool): True -> read 'data_batch_<i>' files, False -> 'test_batch_0'
    return: (images, labels) where images is a (n, 128, 128, 3) uint8 array
    """
    print(dirname)
    if train:
        X_train = []
        Y_train = []
        # Only one training bin is read; raise the range bound if more
        # data_batch_<i> files are generated.
        for i in range(1):
            fpath = os.path.join(dirname, 'data_batch_' + str(i))
            print(fpath)
            data, labels = load_batch(fpath)
            if i == 0:
                X_train = data
                Y_train = labels
            else:
                X_train = np.concatenate([X_train, data], axis=0)
                Y_train = np.concatenate([Y_train, labels], axis=0)
        return _rows_to_images(X_train), Y_train
    else:
        # NOTE(review): pickled() writes files named 'batch_tt<i>'; confirm
        # the test batch really is renamed to 'test_batch_0' on disk.
        ftpath = os.path.join(dirname, 'test_batch_0')
        print(ftpath)
        X_test, Y_test = load_batch(ftpath)
        # Keep HWC layout here; transforms.ToTensor() performs the
        # HWC -> CHW conversion later in the pipeline.
        return _rows_to_images(X_test), Y_test


def _rows_to_images(rows, side=128):
    """Convert channel-planar rows (n, 3*side*side) into (n, side, side, 3).

    Extracted to remove the duplicated dstack/reshape code (and its
    hard-coded 16384 offsets) from both branches of load_data().
    """
    c = side * side  # bytes per colour plane (128*128 = 16384)
    stacked = np.dstack((rows[:, :c], rows[:, c:2 * c], rows[:, 2 * c:]))
    return np.reshape(stacked, [-1, side, side, 3])


class MyDataset(torch.utils.data.Dataset):
    """Dataset backed by the CIFAR-style pickle batches in `namedir`.

    Loads all images into memory once at construction via load_data().
    """

    def __init__(self, namedir, transform=None, train=False):
        super().__init__()
        self.namedir = namedir
        self.transform = transform
        self.train = train
        self.datas, self.labels = load_data(self.namedir, train=self.train)

    def __getitem__(self, index):
        sample = self.datas[index]
        target = int(self.labels[index])
        if self.transform is not None:
            sample = self.transform(sample)
        return sample, target

    def __len__(self):
        return len(self.labels)


class MyDataset_s(torch.utils.data.Dataset):
    """Dataset reading "image_path label" lines from a text file.

    Images are loaded lazily from disk in __getitem__, unlike MyDataset
    which preloads everything from pickle batches.
    """

    def __init__(self, datatxt, transform=None):
        super().__init__()
        imgs = []
        # BUG FIX: the list file was opened but never closed; use a context
        # manager so the handle is released deterministically.
        with open(datatxt, 'r') as fh:
            for line in fh:
                words = line.rstrip().split()
                imgs.append((words[0], int(words[1])))
        self.imgs = imgs
        self.transform = transform

    def __getitem__(self, index):
        fn, label = self.imgs[index]
        img = Image.open(fn).convert('RGB')
        if self.transform is not None:
            img = self.transform(img)
        return img, label

    def __len__(self):
        return len(self.imgs)

# Normalization statistics applied after ToTensor().
# NOTE(review): these look like commonly published CIFAR statistics; ideally
# recompute mean/std on the cat-vs-dog data itself -- confirm.
mean = [0.5071, 0.4867, 0.4408]
stdv = [0.2675, 0.2565, 0.2761]

transform = transforms.Compose([

#    transforms.Resize([224, 224]),

#    transforms.RandomHorizontalFlip(),

    # Converts the HWC uint8 numpy image to a CHW float tensor in [0, 1].
    transforms.ToTensor(),

    transforms.Normalize(mean=mean, std=stdv)

])

# Datasets read the pickled batches from data\train\ and data\val\ (Windows paths).
train_data = MyDataset(namedir='data\\train\\', transform=transform, train=True)
trainloader = torch.utils.data.DataLoader(dataset=train_data, batch_size=4, shuffle=True)
test_data = MyDataset(namedir='data\\val\\', transform=transform, train=False)
testloader = torch.utils.data.DataLoader(dataset=test_data, batch_size=4, shuffle=True)

# Class index 0 = cat, 1 = dog.
classes = ('cat', 'dog')

class Net(nn.Module):
    """Small 4-conv CNN for 128x128 RGB cat/dog classification (2 logits)."""

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(16, 32, kernel_size=3, stride=1, padding=1)
        self.conv3 = nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1)
        self.conv4 = nn.Conv2d(64, 32, kernel_size=3, stride=1, padding=1)
        self.fc1 = nn.Linear(32 * 8 * 8, 256)
        self.fc2 = nn.Linear(256, 64)
        self.fc3 = nn.Linear(64, 2)

    def forward(self, x):
        # Each conv/relu/pool stage halves the spatial size:
        # 128 -> 64 -> 32 -> 16 -> 8, ending at (n, 32, 8, 8).
        for stage in (self.conv1, self.conv2, self.conv3, self.conv4):
            x = self.pool(F.relu(stage(x)))
        x = x.view(-1, 32 * 8 * 8)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return self.fc3(x)


class VGG16(nn.Module):
    """VGG16-style classifier for 3x224x224 inputs with 2 output classes.

    NOTE: the first conv of each stage has no padding here, so the spatial
    sizes annotated below differ slightly from the canonical VGG16.
    """

    def __init__(self):
        super(VGG16, self).__init__()
        # input: 3 * 224 * 224
        self.conv1_1 = nn.Conv2d(3, 64, 3)  # 64 * 222 * 222
        self.conv1_2 = nn.Conv2d(64, 64, 3, padding=(1, 1))  # 64 * 222 * 222
        self.maxpool1 = nn.MaxPool2d((2, 2), padding=(1, 1))  # pooling 64 * 112 * 112
        self.conv2_1 = nn.Conv2d(64, 128, 3)  # 128 * 110 * 110
        self.conv2_2 = nn.Conv2d(128, 128, 3, padding=(1, 1))  # 128 * 110 * 110
        self.maxpool2 = nn.MaxPool2d((2, 2), padding=(1, 1))  # pooling 128 * 56 * 56
        self.conv3_1 = nn.Conv2d(128, 256, 3)  # 256 * 54 * 54
        # BUG FIX: kernel size was 2 (a typo) -- every sibling conv is 3x3,
        # and the size annotations only hold for a 3x3 kernel with padding 1.
        self.conv3_2 = nn.Conv2d(256, 256, 3, padding=(1, 1))  # 256 * 54 * 54
        self.conv3_3 = nn.Conv2d(256, 256, 3, padding=(1, 1))  # 256 * 54 * 54
        self.maxpool3 = nn.MaxPool2d((2, 2), padding=(1, 1))  # 256 * 28 * 28
        self.conv4_1 = nn.Conv2d(256, 512, 3)  # 512 * 26 * 26
        self.conv4_2 = nn.Conv2d(512, 512, 3, padding=(1, 1))  # 512 * 26 * 26
        self.conv4_3 = nn.Conv2d(512, 512, 3, padding=(1, 1))  # 512 * 26 * 26
        self.maxpool4 = nn.MaxPool2d((2, 2), padding=(1, 1))  # pooling 512 * 14 * 14
        self.conv5_1 = nn.Conv2d(512, 512, 3)  # 512 * 12 * 12
        self.conv5_2 = nn.Conv2d(512, 512, 3, padding=(1, 1))  # 512 * 12 * 12
        self.conv5_3 = nn.Conv2d(512, 512, 3, padding=(1, 1))  # 512 * 12 * 12
        self.maxpool5 = nn.MaxPool2d((2, 2), padding=(1, 1))  # pooling 512 * 7 * 7

        self.fc1 = nn.Linear(512 * 7 * 7, 512)
        self.fc2 = nn.Linear(512, 64)
        self.fc3 = nn.Linear(64, 2)

    def forward(self, x):
        # x.size(0) is the batch size.
        in_size = x.size(0)
        out = F.relu(self.conv1_1(x))
        out = F.relu(self.conv1_2(out))
        out = self.maxpool1(out)
        out = F.relu(self.conv2_1(out))
        out = F.relu(self.conv2_2(out))
        out = self.maxpool2(out)
        out = F.relu(self.conv3_1(out))
        out = F.relu(self.conv3_2(out))
        out = F.relu(self.conv3_3(out))
        out = self.maxpool3(out)
        out = F.relu(self.conv4_1(out))
        out = F.relu(self.conv4_2(out))
        out = F.relu(self.conv4_3(out))
        out = self.maxpool4(out)
        out = F.relu(self.conv5_1(out))
        out = F.relu(self.conv5_2(out))
        out = F.relu(self.conv5_3(out))
        out = self.maxpool5(out)

        # Flatten to (batch, 512*7*7) and classify.
        out = out.view(in_size, -1)
        out = F.relu(self.fc1(out))
        out = F.relu(self.fc2(out))
        out = self.fc3(out)
        return out


class VGG8(nn.Module):
    """VGG-style network with a single conv per stage; expects 3x224x224 input."""

    def __init__(self):
        super(VGG8, self).__init__()
        # input: 3 * 224 * 224
        self.conv1_1 = nn.Conv2d(3, 64, 3)  # 64 * 222 * 222
        self.maxpool1 = nn.MaxPool2d((2, 2), padding=(1, 1))  # 64 * 112 * 112
        self.conv2_1 = nn.Conv2d(64, 128, 3)  # 128 * 110 * 110
        self.maxpool2 = nn.MaxPool2d((2, 2), padding=(1, 1))  # 128 * 56 * 56
        self.conv3_1 = nn.Conv2d(128, 256, 3)  # 256 * 54 * 54
        self.maxpool3 = nn.MaxPool2d((2, 2), padding=(1, 1))  # 256 * 28 * 28
        self.conv4_1 = nn.Conv2d(256, 512, 3)  # 512 * 26 * 26
        self.maxpool4 = nn.MaxPool2d((2, 2), padding=(1, 1))  # 512 * 14 * 14
        self.conv5_1 = nn.Conv2d(512, 512, 3)  # 512 * 12 * 12
        self.maxpool5 = nn.MaxPool2d((2, 2), padding=(1, 1))  # 512 * 7 * 7

        self.fc1 = nn.Linear(512 * 7 * 7, 512)
        self.fc2 = nn.Linear(512, 64)
        self.fc3 = nn.Linear(64, 2)

    def forward(self, x):
        batch = x.size(0)
        stages = ((self.conv1_1, self.maxpool1),
                  (self.conv2_1, self.maxpool2),
                  (self.conv3_1, self.maxpool3),
                  (self.conv4_1, self.maxpool4),
                  (self.conv5_1, self.maxpool5))
        out = x
        # Each stage: conv -> relu -> padded 2x2 max-pool.
        for conv, pool in stages:
            out = pool(F.relu(conv(out)))

        # Flatten to (batch, 512*7*7) and classify.
        out = out.view(batch, -1)
        out = F.relu(self.fc1(out))
        out = F.relu(self.fc2(out))
        return self.fc3(out)


# Model, loss and optimizer. Net expects 3x128x128 inputs (see Net.forward).
net = Net()

#net.to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.005, momentum=0.9)

if __name__ == '__main__':
    # Train for 11 epochs; evaluate on the validation loader every 2nd epoch.
    for epoch in range(11):
        running_loss = 0.0
        for i, data in enumerate(trainloader, 0):
            inputs, labels = data
#            inputs, labels = inputs.to(device), labels.to(device)
            optimizer.zero_grad()
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
            # Report the average loss over each window of 100 mini-batches.
            if i % 100 == 99:
                print('[%d, %5d] loss: %.3f' % (epoch + 1, i + 1, running_loss / 100))
                running_loss = 0.0
        if epoch % 2 == 0:
            correct = 0
            total = 0
            with torch.no_grad():
                for data in testloader:
                    images, labels = data

#                    images, labels = images.to(device), labels.to(device)
                    outputs = net(images)
                    # Predicted class = argmax over the 2 output logits.
                    _, predicted = torch.max(outputs.data, 1)
                    total += labels.size(0)
                    correct += (predicted == labels).sum().item()
                print('Accuracy of the network on the 1000 test images: %d %%' % (100 * correct / total))

print('finished !!!')



本文来自互联网用户投稿,该文观点仅代表作者本人,不代表本站立场。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如若转载,请注明出处:http://www.mfbz.cn/a/755985.html

如若内容造成侵权/违法违规/事实不符,请联系我们进行投诉反馈qq邮箱809451989@qq.com,一经查实,立即删除!

相关文章

Webpack: 底层配置逻辑

概述 Webpack 5 提供了非常强大、灵活的模块打包功能，配合其成熟生态下数量庞大的插件、Loader 资源，已经能够满足大多数前端项目的工程化需求，但代价则是日益复杂、晦涩的使用方法，开发者通常需要根据项目环境、资源类型、编译目…

在运行中遇到扫描包问题

问题描述：当我们看到这个上面一行代码时就代表我们有个包没有被当前的Spring容器给扫描到，关于这个问题我们有两个：第一把整个包导进来，第二用哪个导哪个

人脑网络的多层建模与分析

摘要 了解人类大脑的结构及其与功能的关系，对于各种应用至关重要，包括但不限于预防、处理和治疗脑部疾病(如阿尔茨海默病或帕金森病)，以及精神疾病(如精神分裂症)的新方法。结构和功能神经影像学方面的最新进展，以及计算机科学等…

Qt开发笔记:Qt3D三维开发笔记(一):Qt3D三维开发基础概念介绍

若该文为原创文章，转载请注明原文出处 本文章博客地址：https://blog.csdn.net/qq21497936/article/details/140059315 长沙红胖子Qt（长沙创微智科）博文大全：开发技术集合（包含Qt实用技术、树莓派、三维、O…

AI绘画:P图如此丝滑,OpenAI上线ChatGPT图像编辑功能,DallE-3绘画如此简单

大家好我是极客菌，用ChatGPT的DallE-3进行AI绘画对很多人来说是一个门槛很低的选择，现在OpenAI又重磅上线了图像编辑器功能(DallE editor)，可以很方便的对图片的局部进行修改，而且支持中文，主打一个功能强大且好用…

Django —— 用户名和密码配置

创建项目ProjectA：django-admin startproject ProjectA cd进入ProjectA文件夹运行项目：python manage.py runserver 0.0.0.0:8000 Starting development server at http://0.0.0.0:8000/ Quit the server with CTRL-BREAK. 访问http://localhost:80…

SET加密:电子商务安全的基石

随着电子商务的飞速发展，如何确保在线交易的安全性和可信度已成为消费者、商家和金融机构共同关注的焦点。SET协议（Secure Electronic Transaction）作为一种安全电子交易的国际标准，凭借其卓越的安全性能和广泛的行业认可…

c语言中extern定义和引用其他文件的变量,(sublime text)单独一个文件编译不会成功

关键字extern的作用 这个很常见的都知道是定义一个外部变量或函数&#xff0c;但并不是简单的建立两个文件&#xff0c;然后在用extern 定义在另一个非最初定义变量的文件里 区分文件和编译运行的文件 例如&#xff0c;一个文件夹里有文件a.c和文件b.c,在sublime text中直接…

重庆高校大学智能制造实验室数字孪生可视化系统平台建设项目验收

随着科技的飞速发展&#xff0c;智能制造已成为推动产业升级和经济社会发展的重要力量。在这一背景下&#xff0c;重庆高校大学智能制造实验室积极推进数字孪生可视化系统平台建设项目&#xff0c;旨在通过先进的数字化技术&#xff0c;实现实验室资源的优化配置和高效利用。 …

java虚拟机栈帧操作

虚拟机栈(Virtual Machine Stack)是虚拟机(如JVM、Python VM等)用来管理方法调用和执行的栈结构。它主要用于存储方法调用的相关信息,包括局部变量、操作数栈、动态链接和方法返回地址等。 java虚拟机栈操作的基本元素就是栈帧,栈帧主要包含了局部变量表、操作数栈、动态…

数字签名解析

1. 概述 数字签名不是手写签名的数字图像&#xff1b; 数字签名是一种可以提供认证的加密形式&#xff0c;是转向完全无纸环境的一个途径&#xff1b; 数字签名机制用以解决伪造、抵赖、冒充和篡改、完整性保护等安全问题。 2. 公钥密码与数字签名的关系 要实现数字签名&#…

【04】从0到1构建AI生成思维导图应用 -- 创建 AI 工作流

【04】从0到1构建AI生成思维导图应用 – 创建 AI 工作流 大家好&#xff01;最近自己做了一个完全免费的AI生成思维导图的网站&#xff0c;支持下载&#xff0c;编辑和对接微信公众号&#xff0c;可以在这里体验&#xff1a;https://lt2mind.zeabur.app/ 上一章&#xff1a;h…

mybatis核心配置介绍

mybatis核心配置 【mybatis全局配置介绍】 ​ mybatis-config.xml&#xff0c;是MyBatis的全局配置文件&#xff0c;包含全局配置信息&#xff0c;如数据库连接参数、插件等。整个框架中只需要一个即可。 1、mybatis全局配置文件是mybatis框架的核心配置&#xff0c;整个框架…

2024高考录取分数线一览表(含一本线、二本线、专科线)

2024年全国各地的高考录取分数线已经全部公布&#xff0c;查大学网&#xff08;www.chadaxue.com&#xff09;为大家整理全国31个省市高考录取分数线汇总&#xff0c;包括本科批&#xff08;一本分数线线和二本分数线&#xff09;、专科批和特殊类招生控制分数线汇总&#xff0…

2.ROS串口安装和调试

首先安装串口依赖 sudo apt-get install ros-melodic-serial 其次安装串口调试助手 sudo apt-get install minicom 再赋予串口权限 sudo chmod 777 /dev/ttyTHS1 打开调试助手 sudo cutecom 硬件引脚图&#xff1a;

【计算机网络仿真】b站湖科大教书匠思科Packet Tracer——实验10 IPv4地址 — 构造超网(无分类编址)

一、实验目的 1.加深对构造超网的理解&#xff1b; 二、实验要求 1.使用Cisco Packet Tracer仿真平台&#xff1b; 2.观看B站湖科大教书匠仿真实验视频&#xff0c;完成对应实验。 三、实验内容 1.构建网络拓扑&#xff1b; 2.根据各网络所指定的地址块完成以下工作&#…

如何保护磁盘数据?电脑磁盘数据怎么保护?

电脑磁盘是存储数据的基础&#xff0c;可以将各种重要数据保存在其中。为了避免数据泄露&#xff0c;我们需要保护磁盘数据。那么&#xff0c;电脑磁盘数据怎么保护呢&#xff1f;下面我们就一起来了解一下吧。 文件夹加密超级大师 文件夹加密超级大师是一款优秀的电脑数据加密…

c语言入门

c语言入门 C语言一经出现就以其功能丰富、表达能力强、灵活方便、应用面广等特点迅速在全世界普及和推广。C语言不但执行效率高而且可移植性好&#xff0c;可以用来开发应用软件、驱动、操作系统等。C语言也是其它众多高级语言的鼻祖语言&#xff0c;所以说学习C语言是进入编程…

华为仓颉编程语言正式发布,仓颉编程教程

目录 前言 基本概念 标识符 变量 类型 基础数据类型 表达式 if 表达式 while 表达式 for-in 表达式 程序结构 函数 定义函数 调用函数 lambda表达式 应用实例&#xff08;遍历目录&#xff09; 枚举 定义与实例化 成员访问规则 match表达式 应用实例&…

Python现在可以在线编程了!

你好&#xff0c;我是郭震 1 在线编程 在线编程好处&#xff1a; 1 无需安装和配置环境: 在线编程平台不需要用户在本地安装任何软件或配置开发环境。这对初学者和那些希望快速上手进行编程的人非常有利。 2 跨平台兼容性: 这些平台可以在任何具有互联网连接的设备上使用&#…