当前位置: 首页 > article >正文

rknn部署rk3588进行yolov8n-seg分割检测

1、pt->onnx:首先根据官方源代码导出onnx模型(过程略)

2、onnx->rknn:分为如下两步执行,其中环境要求安装rknn。

step1:

```python
import os, glob, shutil
from rknn.api import RKNN

input_width = 640
input_height = 640
model_path = "./model"
dataset_path = "./dataset"
config_path = "./config"
dataset_file = "./dataset.txt"
# model name he onnx model dou yao gai(模型名和onnx模型都要改)
model_name = "best"
platform = "rk3588"
ONNX_MODEL = f"{model_path}/best-{input_height}-{input_width}.onnx"
OUT_NODE = ["output0", "output1"]


def get_dataset_txt(dataset_path, dataset_savefile):
    file_data = glob.glob(os.path.join(dataset_path, "*.png"))
    with open(dataset_savefile, "w") as f:
        for file in file_data:
            f.writelines(f"{file}\n")


def move_onnx_config():
    file_data = glob.glob("*.onnx")
    for file in file_data:
        shutil.move(file, f"{config_path}/{file}")


if __name__ == "__main__":
    isExist = os.path.exists(dataset_path)
    if not isExist:
        os.makedirs(dataset_path)
    isExist = os.path.exists(config_path)
    if not isExist:
        os.makedirs(config_path)

    # Prepare the dataset text file
    get_dataset_txt(dataset_path, dataset_file)

    # Create RKNN object
    rknn = RKNN(verbose=False)

    # pre-process config
    print("--> Config model")
    rknn.config(mean_values=[[0, 0, 0]], std_values=[[255, 255, 255]], target_platform=platform)
    print("done")

    # Load ONNX model
    print("--> Loading model")
    ret = rknn.load_onnx(model=ONNX_MODEL, outputs=OUT_NODE)
    if ret != 0:
        print("Load model failed!")
        exit(ret)
    print("done")

    # Build model
    print("--> hybrid_quantization_step1")
    ret = rknn.hybrid_quantization_step1(dataset=dataset_file, proposal=False)
    if ret != 0:
        print("hybrid_quantization_step1 failed!")
        exit(ret)
    print("done")

    rknn.release()

    print("--> Move hybrid quantization config into config folder")
    shutil.move(f"{model_name}-{input_height}-{input_width}.data",
                f"{config_path}/{model_name}-{input_height}-{input_width}.data")
    shutil.move(f"{model_name}-{input_height}-{input_width}.model",
                f"{config_path}/{model_name}-{input_height}-{input_width}.model")
    shutil.move(f"{model_name}-{input_height}-{input_width}.quantization.cfg",
                f"{config_path}/{model_name}-{input_height}-{input_width}.quantization.cfg")
    print("--> Move onnx config into config folder")
    move_onnx_config()
```

step2:

```python
import os, shutil, numpy as np, cv2
from utils import *
from rknn.api import RKNN

conf_thres = 0.65
iou_thres = 0.65
input_width = 640
input_height = 640
model_name = "best"
model_path = "./model"
config_path = "./config"
result_path = "./result"
image_path = "./dataset/1.png"
video_path = "test.mp4"
video_inference = False
RKNN_MODEL = f"best-{input_height}-{input_width}.rknn"
CLASSES = ["lane", "lane_line"]

if __name__ == "__main__":
    isExist = os.path.exists(result_path)
    if not isExist:
        os.makedirs(result_path)

    # Create RKNN object
    rknn = RKNN(verbose=False)

    # Build model
    print("--> hybrid_quantization_step2")
    ret = rknn.hybrid_quantization_step2(
        model_input=f"{config_path}/{model_name}-{input_height}-{input_width}.model",
        data_input=f"{config_path}/{model_name}-{input_height}-{input_width}.data",
        model_quantization_cfg=f"{config_path}/{model_name}-{input_height}-{input_width}.quantization.cfg")
    if ret != 0:
        print("hybrid_quantization_step2 failed!")
        exit(ret)
    print("done")

    # Export rknn model
    print("--> Export rknn model")
    ret = rknn.export_rknn(RKNN_MODEL)
    if ret != 0:
        print("Export rknn model failed!")
        exit(ret)
    print("done")

    print("--> Move RKNN file into model folder")
    shutil.move(RKNN_MODEL, f"{model_path}/{RKNN_MODEL}")

    # Init runtime environment
    print("--> Init runtime environment")
    ret = rknn.init_runtime()
    if ret != 0:
        print("Init runtime environment failed!")
        exit(ret)
    print("done")

    if video_inference == True:
        cap = cv2.VideoCapture(video_path)
        while (True):
            ret, image_3c = cap.read()
            if not ret:
                break
            image_4c, image_3c = preprocess(image_3c, input_height, input_width)
            print("--> Running model for video inference")
            outputs = rknn.inference(inputs=[image_3c])
            colorlist = gen_color(len(CLASSES))
            results = postprocess(outputs, image_4c, image_3c, conf_thres, iou_thres, classes=len(CLASSES))  ## [box, mask, shape]
            results = results[0]  ## batch=1
            boxes, masks, shape = results
            if isinstance(masks, np.ndarray):
                mask_img, vis_img = vis_result(image_3c, results, colorlist, CLASSES, result_path)
                cv2.imshow("mask_img", mask_img)
                cv2.imshow("vis_img", vis_img)
            else:
                print("No segmentation result")
            cv2.waitKey(10)
    else:
        # Preprocess input image
        image_3c = cv2.imread(image_path)
        image_4c, image_3c = preprocess(image_3c, input_height, input_width)
        print("--> Running model for image inference")
        print(image_3c.shape)
        outputs = rknn.inference(inputs=[image_3c])
        colorlist = gen_color(len(CLASSES))
        results = postprocess(outputs, image_4c, image_3c, conf_thres, iou_thres, classes=len(CLASSES))  ## [box, mask, shape]
        results = results[0]  ## batch=1
        boxes, masks, shape = results
        if isinstance(masks, np.ndarray):
            mask_img, vis_img = vis_result(image_3c, results, colorlist, CLASSES, result_path)
            print("--> Save inference result")
        else:
            print("No segmentation result")
    print("RKNN inference finish")
    rknn.release()
    cv2.destroyAllWindows()
```

得到转换后的模型 .rknn

3、终端安装rknnlite,推理代码详细如下

rknnlite_inference.py:

```python
import os, cv2, time, numpy as np
from utils import *
from rknnlite.api import RKNNLite

conf_thres = 0.65
iou_thres = 0.95
input_width = 640
input_height = 640
model_name = "green"
model_path = "./model"
config_path = "./config"
result_path = "./result"
image_path = "./dataset/0000.png"
video_path = "3.mp4"
video_inference = False
RKNN_MODEL = f"./model/green-640-640.rknn"
CLASSES = ["road", "lane_line"]

if __name__ == "__main__":
    isExist = os.path.exists(result_path)
    if not isExist:
        os.makedirs(result_path)
    rknn_lite = RKNNLite(verbose=False)
    ret = rknn_lite.load_rknn(RKNN_MODEL)
    ret = rknn_lite.init_runtime(core_mask=RKNNLite.NPU_CORE_AUTO)
    if video_inference == True:
        cap = cv2.VideoCapture(video_path)
        while (cap.isOpened()):
            ret, image_3c = cap.read()
            if not ret:
                break
            print("--> Running model for video inference")
            image_4c, image_3c = preprocess(image_3c, input_height, input_width)
            # ret = rknn_lite.init_runtime()
            start = time.time()
            image_3C = image_3c[np.newaxis, :]
            # print(111111, image_3C.shape)
            outputs = rknn_lite.inference(inputs=[image_3C])
            stop = time.time()
            fps = round(1 / (stop - start), 2)
            outputs[0] = np.squeeze(outputs[0])
            outputs[0] = np.expand_dims(outputs[0], axis=0)
            colorlist = gen_color(len(CLASSES))
            results = postprocess(outputs, image_4c, image_3c, conf_thres, iou_thres, classes=len(CLASSES))  ## [box, mask, shape]
            results = results[0]  ## batch=1
            boxes, masks, shape = results
            if type(masks) != list and masks.ndim == 3:
                mask_img, vis_img = vis_result(image_3c, results, colorlist, CLASSES, result_path)
                # cv2.imshow("mask_img", mask_img)
                cv2.putText(vis_img, str(fps), (1, 571), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 255, 255))
                cv2.imshow("vis_img", vis_img)
            else:
                print("-------------No segmentation result-------------")
            # img5 = image_3c[185:455, :]
            # img2 = np.zeros_like(img5)
            # cv2.imshow("1", image_3c)
            cv2.waitKey(1)
    else:
        image_3c = cv2.imread(image_path)  # (640, 640, 3)
        image_4c, image_3c = preprocess(image_3c, input_height, input_width)
        print("--> Running model for image inference")
        # ret = rknn_lite.init_runtime()
        start = time.time()
        image_3C2 = image_3c[np.newaxis, :]  # (1, 640, 640, 3)
        outputs = rknn_lite.inference(inputs=[image_3C2])  # len(outputs) == 2
        stop = time.time()
        fps = round(1 / (stop - start), 2)
        outputs[0] = np.squeeze(outputs[0])
        outputs[0] = np.expand_dims(outputs[0], axis=0)
        colorlist = [(255, 255, 255), (0, 0, 0)]
        # colorlist = gen_color(len(CLASSES))
        results = postprocess(outputs, image_4c, image_3c, conf_thres, iou_thres, classes=len(CLASSES))  ## [box, mask, shape]
        results = results[0]  ## batch=1
        boxes, masks, shape = results
        if masks.ndim == 2:
            masks = np.expand_dims(masks, axis=0).astype(np.float32)
        if type(masks) != list and masks.ndim == 3:
            mask_img, vis_img = vis_result(image_3c, results, colorlist, CLASSES, result_path)
            print("--> Save inference result")
        else:
            print("-------------No segmentation result-------------")
    print("RKNNLite inference finish")
    rknn_lite.release()
    cv2.destroyAllWindows()
```

utils.py:

```python
import cv2
import time
import numpy as np


def xywh2xyxy(x):
    y = np.copy(x)
    y[..., 0] = x[..., 0] - x[..., 2] / 2  # top left x
    y[..., 1] = x[..., 1] - x[..., 3] / 2  # top left y
    y[..., 2] = x[..., 0] + x[..., 2] / 2  # bottom right x
    y[..., 3] = x[..., 1] + x[..., 3] / 2  # bottom right y
    return y


def clip_boxes(boxes, shape):
    boxes[..., [0, 2]] = boxes[..., [0, 2]].clip(0, shape[1])  # x1, x2
    boxes[..., [1, 3]] = boxes[..., [1, 3]].clip(0, shape[0])  # y1, y2


def scale_boxes(img1_shape, boxes, img0_shape, ratio_pad=None):
    if ratio_pad is None:  # calculate from img0_shape
        gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # gain = old / new
        pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2  # wh padding
    else:
        gain = ratio_pad[0][0]
        pad = ratio_pad[1]
    boxes[..., [0, 2]] -= pad[0]  # x padding
    boxes[..., [1, 3]] -= pad[1]  # y padding
    boxes[..., :4] /= gain
    clip_boxes(boxes, img0_shape)
    return boxes


def crop_mask(masks, boxes):
    n, h, w = masks.shape
    x1, y1, x2, y2 = np.split(boxes[:, :, None], 4, axis=1)
    r = np.arange(w, dtype=np.float32)[None, None, :]  # rows shape(1,1,w)
    c = np.arange(h, dtype=np.float32)[None, :, None]  # cols shape(1,h,1)
    return masks * ((r >= x1) * (r < x2) * (c >= y1) * (c < y2))


def sigmoid(x):
    return 1.0 / (1 + np.exp(-x))


def process_mask(protos, masks_in, bboxes, shape):
    c, mh, mw = protos.shape  # CHW
    ih, iw = shape
    masks = sigmoid(masks_in @ protos.reshape(c, -1)).reshape(-1, mh, mw)  # CHW 【lulu】
    downsampled_bboxes = bboxes.copy()
    downsampled_bboxes[:, 0] *= mw / iw
    downsampled_bboxes[:, 2] *= mw / iw
    downsampled_bboxes[:, 3] *= mh / ih
    downsampled_bboxes[:, 1] *= mh / ih
    masks = crop_mask(masks, downsampled_bboxes)  # CHW
    masks = np.transpose(masks, [1, 2, 0])
    # masks = cv2.resize(masks, (shape[1], shape[0]), interpolation=cv2.INTER_NEAREST)
    masks = cv2.resize(masks, (shape[1], shape[0]), interpolation=cv2.INTER_LINEAR)
    if masks.ndim == 3:
        masks = np.transpose(masks, [2, 0, 1])
    return np.where(masks > 0.5, masks, 0)


def nms(bboxes, scores, threshold=0.5):
    x1 = bboxes[:, 0]
    y1 = bboxes[:, 1]
    x2 = bboxes[:, 2]
    y2 = bboxes[:, 3]
    areas = (x2 - x1) * (y2 - y1)
    order = scores.argsort()[::-1]
    keep = []
    while order.size > 0:
        i = order[0]
        # print("i:", i)
        keep.append(i)
        if order.size == 1:
            break
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])
        w = np.maximum(0.0, (xx2 - xx1))
        h = np.maximum(0.0, (yy2 - yy1))
        inter = w * h
        iou = inter / (areas[i] + areas[order[1:]] - inter)
        ids = np.where(iou <= threshold)[0]
        order = order[ids + 1]
    return keep


def non_max_suppression(
        prediction,
        conf_thres=0.25,
        iou_thres=0.45,
        classes=None,
        agnostic=False,
        multi_label=False,
        labels=(),
        max_det=300,
        nc=0,  # number of classes (optional)
):
    # Checks
    assert 0 <= conf_thres <= 1, f"Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0"
    assert 0 <= iou_thres <= 1, f"Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0"
    # 【lulu】prediction.shape[1] = box + cls + num_masks
    bs = prediction.shape[0]  # batch size
    nc = nc or (prediction.shape[1] - 4)  # number of classes
    nm = prediction.shape[1] - nc - 4  # num_masks
    mi = 4 + nc  # mask start index
    xc = np.max(prediction[:, 4:mi], axis=1) > conf_thres  ## 【lulu】

    # Settings
    # min_wh = 2  # (pixels) minimum box width and height
    max_wh = 7680  # (pixels) maximum box width and height
    max_nms = 30000  # maximum number of boxes into torchvision.ops.nms()
    time_limit = 0.5 + 0.05 * bs  # seconds to quit after
    redundant = True  # require redundant detections
    multi_label &= nc > 1  # multiple labels per box (adds 0.5ms/img)
    merge = False  # use merge-NMS

    t = time.time()
    output = [np.zeros((0, 6 + nm))] * bs  ## 【lulu】
    for xi, x in enumerate(prediction):  # image_3c index, image_3c inference
        # Apply constraints
        # x[((x[:, 2:4] < min_wh) | (x[:, 2:4] > max_wh)).any(1), 4] = 0  # width-height
        x = np.transpose(x, [1, 0])[xc[xi]]  ## 【lulu】# x.shape[0] ----- 10

        # If none remain process next image_3c
        if not x.shape[0]:
            continue

        # Detections matrix nx6 (xyxy, conf, cls)
        box, cls, mask = np.split(x, [4, 4 + nc], axis=1)  ## 【lulu】
        box = xywh2xyxy(box)  # (center_x, center_y, width, height) to (x1, y1, x2, y2)
        j = np.argmax(cls, axis=1)  ## 【lulu】
        conf = cls[np.array(range(j.shape[0])), j].reshape(-1, 1)
        x = np.concatenate([box, conf, j.reshape(-1, 1), mask], axis=1)[conf.reshape(-1, ) > conf_thres]

        # Check shape
        n = x.shape[0]  # number of boxes
        if not n:
            continue
        x = x[np.argsort(x[:, 4])[::-1][:max_nms]]  # sort by confidence and remove excess boxes 【lulu】

        # Batched NMS
        c = x[:, 5:6] * max_wh  # classes
        boxes, scores = x[:, :4] + c, x[:, 4]  # boxes (offset by class), scores
        i = nms(boxes, scores, iou_thres)  ## 【lulu】
        i = i[:max_det]  # limit detections

        output[xi] = x[i]
        if (time.time() - t) > time_limit:
            # LOGGER.warning(f"WARNING ⚠️ NMS time limit {time_limit:.3f}s exceeded")
            break  # time limit exceeded
    return output


def make_anchors(feats_shape, strides, grid_cell_offset=0.5):
    """Generate anchors from features."""
    anchor_points, stride_tensor = [], []
    assert feats_shape is not None
    dtype_ = np.float32
    for i, stride in enumerate(strides):
        _, _, h, w = feats_shape[i]
        sx = np.arange(w, dtype=dtype_) + grid_cell_offset  # shift x
        sy = np.arange(h, dtype=dtype_) + grid_cell_offset  # shift y
        sy, sx = np.meshgrid(sy, sx, indexing="ij")
        anchor_points.append(np.stack((sx, sy), -1).reshape(-1, 2))
        stride_tensor.append(np.full((h * w, 1), stride, dtype=dtype_))
    return np.concatenate(anchor_points), np.concatenate(stride_tensor)


def dist2bbox(distance, anchor_points, xywh=True, dim=-1):
    """Transform distance(ltrb) to box(xywh or xyxy)."""
    lt, rb = np.split(distance, 2, dim)
    x1y1 = anchor_points - lt
    x2y2 = anchor_points + rb
    if xywh:
        c_xy = (x1y1 + x2y2) / 2
        wh = x2y2 - x1y1
        return np.concatenate((c_xy, wh), dim)  # xywh bbox
    return np.concatenate((x1y1, x2y2), dim)  # xyxy bbox


def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32):
    # Resize and pad image while meeting stride-multiple constraints
    shape = im.shape[:2]  # current shape [height, width]
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)

    # Scale ratio (new / old)
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    if not scaleup:  # only scale down, do not scale up (for better val mAP)
        r = min(r, 1.0)

    # Compute padding
    ratio = r, r  # width, height ratios
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
    if auto:  # minimum rectangle
        dw, dh = np.mod(dw, stride), np.mod(dh, stride)  # wh padding
    elif scaleFill:  # stretch
        dw, dh = 0.0, 0.0
        new_unpad = (new_shape[1], new_shape[0])
        ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]  # width, height ratios

    dw /= 2  # divide padding into 2 sides
    dh /= 2

    if shape[::-1] != new_unpad:  # resize
        im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR)
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
    return im, ratio, (dw, dh)


def preprocess(image, input_height, input_width):
    image_3c = image
    # Convert the image_3c color space from BGR to RGB
    image_3c = cv2.cvtColor(image_3c, cv2.COLOR_BGR2RGB)
    # Resize the image_3c to match the input shape
    image_3c, ratio, dwdh = letterbox(image_3c, new_shape=[input_height, input_width], auto=False)
    # Normalize the image_3c data by dividing it by 255.0
    image_4c = np.array(image_3c) / 255.0
    # Transpose the image_3c to have the channel dimension as the first dimension
    image_4c = np.transpose(image_4c, (2, 0, 1))  # Channel first
    # Expand the dimensions of the image_3c data to match the expected input shape
    image_4c = np.expand_dims(image_4c, axis=0).astype(np.float32)
    image_4c = np.ascontiguousarray(image_4c)  # contiguous
    # Return the preprocessed image_3c data
    return image_4c, image_3c


def postprocess(preds, img, orig_img, OBJ_THRESH, NMS_THRESH, classes=None):
    p = non_max_suppression(preds[0], OBJ_THRESH, NMS_THRESH, agnostic=False, max_det=300, nc=classes, classes=None)
    results = []
    proto = preds[1]
    for i, pred in enumerate(p):
        shape = orig_img.shape
        if not len(pred):
            results.append([[], [], []])  # save empty boxes
            continue
        masks = process_mask(proto[i], pred[:, 6:], pred[:, :4], img.shape[2:])  # HWC
        pred[:, :4] = scale_boxes(img.shape[2:], pred[:, :4], shape).round()
        results.append([pred[:, :6], masks, shape[:2]])
    return results


def gen_color(class_num):
    color_list = []
    np.random.seed(1)
    while 1:
        a = list(map(int, np.random.choice(range(255), 3)))
        if np.sum(a) == 0:
            continue
        color_list.append(a)
        if len(color_list) == class_num:
            break
    return color_list


def vis_result(image_3c, results, colorlist, CLASSES, result_path):
    boxes, masks, shape = results
    # if masks.ndim == 2:
    #     masks = np.expand_dims(masks, axis=0).astype(np.float32)
    # Convert the image_3c color space from BGR to RGB
    image_3c = cv2.cvtColor(image_3c, cv2.COLOR_RGB2BGR)
    vis_img = image_3c.copy()
    mask_img = np.zeros_like(image_3c)
    cls_list = []
    center_list = []
    # print(boxes.shape, masks.shape, len(boxes), masks.shape)
    for box, mask in zip(boxes, masks):
        cls = int(box[-1])
        cls_list.append(cls)
        # print(box.shape, mask.shape, cls.shape, box.shape, mask.shape, cls)
        dummy_img = np.zeros_like(image_3c)
        dummy_img[mask != 0] = colorlist[int(box[-1])]  # (640, 640, 3)
        mask_img[mask != 0] = colorlist[int(box[-1])]  # (640, 640, 3)
        centroid = np.mean(np.argwhere(dummy_img), axis=0)  # (x, y)
        if np.isnan(centroid).all() == False:
            centroid_x, centroid_y = int(centroid[1]), int(centroid[0])
            center_list.append([centroid_x, centroid_y])
    # print("center_list", center_list)
    vis_img = cv2.addWeighted(vis_img, 0.5, mask_img, 0.5, 0)
    # print("vis_img", vis_img.shape)
    for i, box in enumerate(boxes):
        cls = int(box[-1])
        cv2.rectangle(vis_img, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (0, 0, 255), 3, 4)
        cv2.putText(vis_img, f"{CLASSES[cls]}:{round(box[4], 2)}", (int(box[0]), int(box[1])),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
    for j in range(len(center_list)):
        cv2.circle(vis_img, (center_list[j][0], center_list[j][1]), radius=5, color=(0, 0, 255), thickness=-1)
    vis_img = np.concatenate([image_3c, mask_img, vis_img], axis=1)
    for i in range(len(CLASSES)):
        num = cls_list.count(i)
        if num != 0:
            print(f"Found {num} {CLASSES[i]}")
    cv2.imwrite(f"./{result_path}/origin_image.jpg", image_3c)
    cv2.imwrite(f"./{result_path}/mask_image.jpg", mask_img)
    cv2.imwrite(f"./{result_path}/visual_image.jpg", vis_img)
    return mask_img, vis_img
```

相关文章:

rknn部署rk3588进行yolov8n-seg分割检测

1、pt->onnx:首先根据官方源代码导出onnx模型(过程略) 2、onnx->rknn:分为如下两步执行 其中环境要求安装rknn。 step1:import os, glob, shutil from rknn.api import RKNNinput_width 640 input_height 640 model_path "./mo…...

从零开始:Qwen3-0.6B-FP8在Windows系统的本地部署指南

从零开始:Qwen3-0.6B-FP8在Windows系统的本地部署指南 想在自己的Windows电脑上跑一个属于自己的大语言模型吗?听起来可能有点复杂,但跟着这篇指南走,你会发现其实没那么难。今天我们就来聊聊,如何在你的Windows 11系…...

平台收到TRO后,为何总是先冻结再通知?

SellerAegis卖家守护视角下跨境电商法律逻辑解析在跨境电商运营中,TRO(Temporary Restraining Order,临时限制令)成为卖家最敏感的法律风险之一。很多卖家在账户被冻结、商品下架或资金受限后,才收到平台通知……

3步终结3D打印材料参数调试难题:OrcaSlicer全材料工艺优化指南

3步终结3D打印材料参数调试难题:OrcaSlicer全材料工艺优化指南 【免费下载链接】OrcaSlicer G-code generator for 3D printers (Bambu, Prusa, Voron, VzBot, RatRig, Creality, etc.) 项目地址: https://gitcode.com/GitHub_Trending/orc/OrcaSlicer 3D打印…...

构建自动化Kubernetes集群健康检查的终极工作流:Popeye与CI/CD的完美集成指南

构建自动化Kubernetes集群健康检查的终极工作流:Popeye与CI/CD的完美集成指南 【免费下载链接】popeye 👀 A Kubernetes cluster resource sanitizer 项目地址: https://gitcode.com/gh_mirrors/po/popeye Popeye是一款强大的Kubernetes集群资源清…...

超越regress:用MATLAB轻松搞定带二次项和交互项的多元回归模型

超越regress:用MATLAB轻松搞定带二次项和交互项的多元回归模型 在数据分析的实际应用中,我们常常会遇到变量间关系并非简单线性的情况。比如在市场营销分析中,广告投入对销售额的影响可能呈现边际效应递减;在工程优化中……

Llama-3.2V-11B-cot惊艳案例:从历史照片推理服饰/建筑年代一致性

Llama-3.2V-11B-cot惊艳案例:从历史照片推理服饰/建筑年代一致性 1. 项目简介 Llama-3.2V-11B-cot是一款基于Meta Llama-3.2V-11B-cot多模态大模型开发的高性能视觉推理工具。它针对双卡4090环境进行了深度优化,特别适合需要进行复杂视觉推理的场景。工…...

Llama-3.2V-11B-cot镜像免配置教程:改路径即启,5分钟完成部署

Llama-3.2V-11B-cot镜像免配置教程:改路径即启,5分钟完成部署 1. 项目简介 Llama-3.2V-11B-cot是一款基于Meta Llama-3.2V-11B-cot多模态大模型开发的高性能视觉推理工具。它针对双卡4090环境进行了深度优化,特别适合想要快速体验多模态大模…...

xmake项目迁移终极指南:从Makefile、CMake平滑过渡到现代化构建工具

xmake项目迁移终极指南:从Makefile、CMake平滑过渡到现代化构建工具 你是否厌倦了复杂的Makefile语法和冗长的CMake配置?想要一个更简单、更高效的构建工具?xmake正是你需要的现代化构建工具!xmake是一个基于Lua的跨平台构建工具……

从零理解AUTOSAR BswM:用DaVinci Configurator配置ECU基础软件管理器的完整流程

深入掌握AUTOSAR BswM:DaVinci Configurator实战配置指南 在汽车电子控制单元(ECU)开发领域,AUTOSAR架构已成为行业标准。作为基础软件管理核心模块,BswM(Basic Software Manager)承担着协调各模…...

3种方案解决TranslucentTB启动失败问题:从诊断到预防的完整指南

3种方案解决TranslucentTB启动失败问题:从诊断到预防的完整指南 【免费下载链接】TranslucentTB 项目地址: https://gitcode.com/gh_mirrors/tra/TranslucentTB TranslucentTB是一款备受欢迎的任务栏美化工具,能够实现Windows任务栏的透明效果……

Phi-3-vision-128k-instruct 数据库课程设计助手:ER 图生成与 SQL 优化

Phi-3-vision-128k-instruct 数据库课程设计助手:ER 图生成与 SQL 优化 1. 数据库课程设计的痛点与挑战 每到学期中段,计算机专业的学生们总会面临一个共同的难题——数据库课程设计。这个看似简单的任务,往往让许多同学熬夜到凌晨。从需求…...

【差分隐私核心参数权威指南】:ε、δ、敏感度如何精准配置?20年实战经验总结的5大避坑法则

第一章:差分隐私核心参数的数学本质与哲学内涵差分隐私并非一种具体算法,而是一套形式化约束框架,其力量源于对“隐私损失”这一抽象概念的可量化建模。其中,ε(epsilon)与δ(delta)…...

Openclaw龙虾全维度安全实战指南

扫描下载文档详情页: https://www.didaidea.com/wenku/16651.html...

VibeVoice Pro开源可部署价值:替代商业TTS降低企业AI语音成本70%

VibeVoice Pro开源可部署价值:替代商业TTS降低企业AI语音成本70% 1. 引言:企业语音成本之痛与开源破局 如果你正在为企业寻找AI语音解决方案,大概率会遇到一个两难选择:要么忍受高昂的商业TTS(文本转语音)……

还在为找不到官方macOS安装文件而烦恼?这个开源工具3分钟帮你搞定!

还在为找不到官方macOS安装文件而烦恼?这个开源工具3分钟帮你搞定! 【免费下载链接】gibMacOS Py2/py3 script that can download macOS components direct from Apple 项目地址: https://gitcode.com/gh_mirrors/gi/gibMacOS 你是否曾经遇到过这…...

SystemVerilog数组+有符号数+log2+流操作+邮箱+assert+interface+class+time

文章目录logic类型双状态类型合并(压缩、打包、packed)数组 bit [3:0][7:0] Arr;非合并(非压缩、非打包、unpacked)数组 bit Arr [3:0][7:0] ;数组的维度和引用关系定宽数组常数数组动态数组队列队列拼接……

Faktory生产环境监控终极指南:指标收集、告警设置和故障排查

Faktory生产环境监控终极指南:指标收集、告警设置和故障排查 【免费下载链接】faktory Language-agnostic persistent background job server 项目地址: https://gitcode.com/gh_mirrors/fa/faktory Faktory作为一款语言无关的持久化后台任务服务器……

target(塔吉特)采购技术体系:硬件、IP、账号下单闭环管理

塔吉特(Target)采购下单技术是一种通过模拟真实用户行为、构建独立运营环境并规避平台风控检测的技术手段,旨在提高采购下单的成功率,尤其适用于跨境电商卖家。以下是该技术的核心要点和实施策略:一、技术核心要点1.硬…...

postgres_exporter部署最佳实践:Docker、Kubernetes和系统服务完整教程

postgres_exporter部署最佳实践:Docker、Kubernetes和系统服务完整教程 【免费下载链接】postgres_exporter 项目地址: https://gitcode.com/gh_mirrors/pos/postgres_exporter postgres_exporter是一款功能强大的开源工具,能够帮助用户轻松监控…...

ERPNext在Ubuntu 22.04上的保姆级安装指南:从零配置到邮件服务设置

ERPNext在Ubuntu 22.04上的深度部署实战:从系统调优到高可用配置 对于技术团队而言,企业级开源ERP系统的自主部署不仅是成本控制的手段,更是掌握核心技术栈的重要途径。作为Frappe框架的旗舰产品,ERPNext在制造业、零售业和项目管…...

告别过曝欠曝!用NestFuse深度学习模型搞定极端曝光图像融合(附PyTorch代码)

深度学习实战:用NestFuse模型实现极端曝光图像完美融合 逆光拍摄时,要么天空惨白一片,要么地面漆黑一团——这是摄影爱好者和计算机视觉工程师经常遇到的难题。传统HDR技术需要多张不同曝光度的照片,而现实中我们往往只有过曝和欠…...

Flask-Admin权限管理终极指南:如何实现精细化用户角色和访问控制

Flask-Admin权限管理终极指南:如何实现精细化用户角色和访问控制 【免费下载链接】flask-admin Simple and extensible administrative interface framework for Flask 项目地址: https://gitcode.com/gh_mirrors/fla/flask-admin Flask-Admin权限管理是构建…...

Verge:轻量级前端视口与DOM操作工具库全解析

Verge:轻量级前端视口与DOM操作工具库全解析 【免费下载链接】verge get viewport dimensions...detect elements in the viewport...trust in 项目地址: https://gitcode.com/gh_mirrors/ver/verge 项目定位:现代前端开发的轻量解决方案 在前端…...

终极指南:Neumorphism.io代码架构解析与React实战

终极指南:Neumorphism.io代码架构解析与React实战 【免费下载链接】neumorphism 🎉 Generate CSS for your Neumorphism/Soft UI design 项目地址: https://gitcode.com/gh_mirrors/ne/neumorphism Neumorphism.io是一个基于React.js构建的现代化…...

如何构建可扩展的WordPress应用:AWS架构演进终极指南

如何构建可扩展的WordPress应用:AWS架构演进终极指南 【免费下载链接】learn-cantrill-io-labs Standard and Advanced Demos for learn.cantrill.io courses 项目地址: https://gitcode.com/gh_mirrors/le/learn-cantrill-io-labs 在当今数字化时代……

Spark Standalone集群搭建避坑指南:从环境变量配置到Web UI访问全流程

Spark Standalone集群搭建实战:从零到高可用的避坑手册 当你第一次尝试搭建Spark Standalone集群时,是否遇到过环境变量不生效、节点无法通信或是Web UI打不开的困扰?作为大数据处理领域的瑞士军刀,Spark的Standalone模式虽然被官…...

Vue 3项目实战:i18n国际化从单文件到多文件管理的完整升级指南

Vue 3项目国际化架构升级:从单文件到模块化管理的工程化实践 当Vue 3项目发展到一定规模后,国际化方案往往会面临新的挑战。初期简单的单文件语言包结构逐渐暴露出维护困难、协作效率低等问题。本文将分享如何将现有单文件国际化方案升级为模块化管理体系…...

告别臃肿:优化jpackage打包的Java应用体积,从100M+瘦身到几十兆的配置技巧

深度优化jpackage打包体积:从百兆到几十兆的实战指南 Java开发者常面临一个尴尬的现实——用jpackage打包的应用程序体积动辄超过100MB,尤其是包含JavaFX的GUI应用。这种"臃肿"不仅影响分发效率,还会拖慢启动速度。本文将揭示jpack…...

大模型落地药企难题?真实项目复盘,这5点才是AI赋能研发的破局关键!

引言 在大模型技术全面渗透产业的今天,医药研发领域正迎来一场深刻的数字化变革。临床试验文档作为药品研发全流程中专业性最强、合规要求最高、工作量最密集的环节之一,成为AI落地的重要场景。越来越多的创新药企、CRO机构开始引入大模型能力……