Adding wake-up stack printing for IO-wait states and analyzing iowait caused by page faults
1. Background
In an earlier post, 在计算进程D状态持续时间及等IO的时间遇到的一处问题-CSDN博客, we fixed a time-calculation bug in the monitoring program that captures the stacks of tasks in the D state and in the IO-wait state. In this post we extend that program: when a task that is accruing iodelay is woken up, we additionally record who ultimately woke the task that was in the in_iowait state, which helps determine why the task entered in_iowait in the first place. For the concepts of iodelay and iowait, see the earlier posts 获取进程或线程级别的iodelay的方法-CSDN博客 and cpu的iowait指标解释及示例-CSDN博客. Chapter 2 presents the modified source code with some explanation; Chapter 3 then uses the modified program to analyze a case in which a page fault causes iowait and walks through the underlying mechanism.
One more point worth stressing: not every task waiting for IO is in the D state. Besides TASK_UNINTERRUPTIBLE, a task waiting for IO may also be in the TASK_KILLABLE state; we explain this in sections 2.3 and 3.3.
2. Adding wake-up stack printing for IO-wait states
2.1 Complete modified source code
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/proc_fs.h>
#include <linux/ctype.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/types.h>
#include <linux/ioctl.h>
#include <linux/errno.h>
#include <linux/stddef.h>
#include <linux/lockdep.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/init.h>
#include <asm/atomic.h>
#include <trace/events/workqueue.h>
#include <linux/sched/clock.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/tracepoint.h>
#include <trace/events/osmonitor.h>
#include <trace/events/sched.h>
#include <trace/events/irq.h>
#include <trace/events/kmem.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <linux/sched/task_stack.h>
#include <linux/nmi.h>
#include <asm/apic.h>
#include <linux/version.h>
#include <linux/sched/mm.h>
#include <asm/irq_regs.h>
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/stop_machine.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("zhaoxin");
MODULE_DESCRIPTION("Module for monitor D tasks.");
MODULE_VERSION("1.0");#define IODELAY_TRACEPOINT_ENABLE#define TEST_STACK_TRACE_ENTRIES 32typedef unsigned int (*stack_trace_save_tsk_func)(struct task_struct *task,unsigned long *store, unsigned int size,unsigned int skipnr);
stack_trace_save_tsk_func _stack_trace_save_tsk;typedef int (*get_cmdline_func)(struct task_struct *task, char *buffer, int buflen);
get_cmdline_func _get_cmdline_func;#define TESTDIOMONITOR_SAMPLEDESC_SWDSTART "swDstart"
#define TESTDIOMONITOR_SAMPLEDESC_WADSTOP "waDstop"
#define TESTDIOMONITOR_SAMPLEDESC_SWDIOSTART "swDiostart"
#define TESTDIOMONITOR_SAMPLEDESC_WADIOSTOP "waDiostop"
#define TESTDIOMONITOR_SAMPLEDESC_DEXCEED "Dexceed"
#define TESTDIOMONITOR_SAMPLEDESC_DIOEXCEED "Dioexceed"
#define TESTDIOMONITOR_SAMPLEDESC_IOEXCEED "Ioexceed"

#define TESTDIOMONITOR_SIMPLE

#ifdef TESTDIOMONITOR_SIMPLE
#define TESTDIOMONITOR_SIMPLE_THRESHOLDNS 0ull//5000000ull
#endif// 1ms
//#define TESTDIOMONITOR_DEXCEED_THRESHOLD 1000ull//1000000ullstruct uclamp_bucket {unsigned long value : bits_per(SCHED_CAPACITY_SCALE);unsigned long tasks : BITS_PER_LONG - bits_per(SCHED_CAPACITY_SCALE);
};struct uclamp_rq {unsigned int value;struct uclamp_bucket bucket[UCLAMP_BUCKETS];
};/* CFS-related fields in a runqueue */
struct cfs_rq {struct load_weight load;unsigned int nr_running;unsigned int h_nr_running; /* SCHED_{NORMAL,BATCH,IDLE} */unsigned int idle_nr_running; /* SCHED_IDLE */unsigned int idle_h_nr_running; /* SCHED_IDLE */u64 exec_clock;u64 min_vruntime;
#ifdef CONFIG_SCHED_COREunsigned int forceidle_seq;u64 min_vruntime_fi;
#endif#ifndef CONFIG_64BITu64 min_vruntime_copy;
#endifstruct rb_root_cached tasks_timeline;/** 'curr' points to currently running entity on this cfs_rq.* It is set to NULL otherwise (i.e when none are currently running).*/struct sched_entity *curr;struct sched_entity *next;struct sched_entity *last;struct sched_entity *skip;#ifdef CONFIG_SCHED_DEBUGunsigned int nr_spread_over;
#endif#ifdef CONFIG_SMP/** CFS load tracking*/struct sched_avg avg;
#ifndef CONFIG_64BITu64 last_update_time_copy;
#endifstruct {raw_spinlock_t lock ____cacheline_aligned;int nr;unsigned long load_avg;unsigned long util_avg;unsigned long runnable_avg;} removed;#ifdef CONFIG_FAIR_GROUP_SCHEDunsigned long tg_load_avg_contrib;long propagate;long prop_runnable_sum;/** h_load = weight * f(tg)** Where f(tg) is the recursive weight fraction assigned to* this group.*/unsigned long h_load;u64 last_h_load_update;struct sched_entity *h_load_next;
#endif /* CONFIG_FAIR_GROUP_SCHED */
#endif /* CONFIG_SMP */#ifdef CONFIG_FAIR_GROUP_SCHEDstruct rq *rq; /* CPU runqueue to which this cfs_rq is attached *//** leaf cfs_rqs are those that hold tasks (lowest schedulable entity in* a hierarchy). Non-leaf lrqs hold other higher schedulable entities* (like users, containers etc.)** leaf_cfs_rq_list ties together list of leaf cfs_rq's in a CPU.* This list is used during load balance.*/int on_list;struct list_head leaf_cfs_rq_list;struct task_group *tg; /* group that "owns" this runqueue *//* Locally cached copy of our task_group's idle value */int idle;#ifdef CONFIG_CFS_BANDWIDTHint runtime_enabled;s64 runtime_remaining;u64 throttled_pelt_idle;
#ifndef CONFIG_64BITu64 throttled_pelt_idle_copy;
#endifu64 throttled_clock;u64 throttled_clock_pelt;u64 throttled_clock_pelt_time;int throttled;int throttle_count;struct list_head throttled_list;
#ifdef CONFIG_SMPstruct list_head throttled_csd_list;
#endif
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */
};struct rt_prio_array {DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */struct list_head queue[MAX_RT_PRIO];
};/* Real-Time classes' related field in a runqueue: */
struct rt_rq {struct rt_prio_array active;unsigned int rt_nr_running;unsigned int rr_nr_running;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHEDstruct {int curr; /* highest queued rt task prio */
#ifdef CONFIG_SMPint next; /* next highest */
#endif} highest_prio;
#endif
#ifdef CONFIG_SMPunsigned int rt_nr_migratory;unsigned int rt_nr_total;int overloaded;struct plist_head pushable_tasks;#endif /* CONFIG_SMP */int rt_queued;int rt_throttled;u64 rt_time;u64 rt_runtime;/* Nests inside the rq lock: */raw_spinlock_t rt_runtime_lock;#ifdef CONFIG_RT_GROUP_SCHEDunsigned int rt_nr_boosted;struct rq *rq;struct task_group *tg;
#endif
};/* Deadline class' related fields in a runqueue */
struct dl_rq {/* runqueue is an rbtree, ordered by deadline */struct rb_root_cached root;unsigned int dl_nr_running;#ifdef CONFIG_SMP/** Deadline values of the currently executing and the* earliest ready task on this rq. Caching these facilitates* the decision whether or not a ready but not running task* should migrate somewhere else.*/struct {u64 curr;u64 next;} earliest_dl;unsigned int dl_nr_migratory;int overloaded;/** Tasks on this rq that can be pushed away. They are kept in* an rb-tree, ordered by tasks' deadlines, with caching* of the leftmost (earliest deadline) element.*/struct rb_root_cached pushable_dl_tasks_root;
#elsestruct dl_bw dl_bw;
#endif/** "Active utilization" for this runqueue: increased when a* task wakes up (becomes TASK_RUNNING) and decreased when a* task blocks*/u64 running_bw;/** Utilization of the tasks "assigned" to this runqueue (including* the tasks that are in runqueue and the tasks that executed on this* CPU and blocked). Increased when a task moves to this runqueue, and* decreased when the task moves away (migrates, changes scheduling* policy, or terminates).* This is needed to compute the "inactive utilization" for the* runqueue (inactive utilization = this_bw - running_bw).*/u64 this_bw;u64 extra_bw;/** Maximum available bandwidth for reclaiming by SCHED_FLAG_RECLAIM* tasks of this rq. Used in calculation of reclaimable bandwidth(GRUB).*/u64 max_bw;/** Inverse of the fraction of CPU utilization that can be reclaimed* by the GRUB algorithm.*/u64 bw_ratio;
};struct rq {/* runqueue lock: */raw_spinlock_t __lock;/** nr_running and cpu_load should be in the same cacheline because* remote CPUs use both these fields when doing load calculation.*/unsigned int nr_running;
#ifdef CONFIG_NUMA_BALANCINGunsigned int nr_numa_running;unsigned int nr_preferred_running;unsigned int numa_migrate_on;
#endif
#ifdef CONFIG_NO_HZ_COMMON
#ifdef CONFIG_SMPunsigned long last_blocked_load_update_tick;unsigned int has_blocked_load;call_single_data_t nohz_csd;
#endif /* CONFIG_SMP */unsigned int nohz_tick_stopped;atomic_t nohz_flags;
#endif /* CONFIG_NO_HZ_COMMON */#ifdef CONFIG_SMPunsigned int ttwu_pending;
#endifu64 nr_switches;#ifdef CONFIG_UCLAMP_TASK/* Utilization clamp values based on CPU's RUNNABLE tasks */struct uclamp_rq uclamp[UCLAMP_CNT] ____cacheline_aligned;unsigned int uclamp_flags;
#define UCLAMP_FLAG_IDLE 0x01
#endifstruct cfs_rq cfs;struct rt_rq rt;struct dl_rq dl;#ifdef CONFIG_FAIR_GROUP_SCHED/* list of leaf cfs_rq on this CPU: */struct list_head leaf_cfs_rq_list;struct list_head *tmp_alone_branch;
#endif /* CONFIG_FAIR_GROUP_SCHED *//** This is part of a global counter where only the total sum* over all CPUs matters. A task can increase this counter on* one CPU and if it got migrated afterwards it may decrease* it on another CPU. Always updated under the runqueue lock:*/unsigned int nr_uninterruptible;struct task_struct __rcu *curr;struct task_struct *idle;struct task_struct *stop;unsigned long next_balance;struct mm_struct *prev_mm;unsigned int clock_update_flags;u64 clock;/* Ensure that all clocks are in the same cache line */u64 clock_task ____cacheline_aligned;u64 clock_pelt;unsigned long lost_idle_time;atomic_t nr_iowait;#ifdef CONFIG_SCHED_DEBUGu64 last_seen_need_resched_ns;int ticks_without_resched;
#endif#ifdef CONFIG_MEMBARRIERint membarrier_state;
#endif#ifdef CONFIG_SMPstruct root_domain *rd;struct sched_domain __rcu *sd;unsigned long cpu_capacity;unsigned long cpu_capacity_orig;struct callback_head *balance_callback;unsigned char nohz_idle_balance;unsigned char idle_balance;unsigned long misfit_task_load;/* For active balancing */int active_balance;int push_cpu;struct cpu_stop_work active_balance_work;/* CPU of this runqueue: */int cpu;int online;struct list_head cfs_tasks;struct sched_avg avg_rt;struct sched_avg avg_dl;
#ifdef CONFIG_HAVE_SCHED_AVG_IRQstruct sched_avg avg_irq;
#endif
#ifdef CONFIG_SCHED_THERMAL_PRESSUREstruct sched_avg avg_thermal;
#endifu64 idle_stamp;u64 avg_idle;unsigned long wake_stamp;u64 wake_avg_idle;/* This is used to determine avg_idle's max value */u64 max_idle_balance_cost;#ifdef CONFIG_HOTPLUG_CPUstruct rcuwait hotplug_wait;
#endif
#endif /* CONFIG_SMP */#ifdef CONFIG_IRQ_TIME_ACCOUNTINGu64 prev_irq_time;
#endif
#ifdef CONFIG_PARAVIRTu64 prev_steal_time;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTINGu64 prev_steal_time_rq;
#endif/* calc_load related fields */unsigned long calc_load_update;long calc_load_active;#ifdef CONFIG_SCHED_HRTICK
#ifdef CONFIG_SMPcall_single_data_t hrtick_csd;
#endifstruct hrtimer hrtick_timer;ktime_t hrtick_time;
#endif#ifdef CONFIG_SCHEDSTATS/* latency stats */struct sched_info rq_sched_info;unsigned long long rq_cpu_time;/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? *//* sys_sched_yield() stats */unsigned int yld_count;/* schedule() stats */unsigned int sched_count;unsigned int sched_goidle;/* try_to_wake_up() stats */unsigned int ttwu_count;unsigned int ttwu_local;
#endif#ifdef CONFIG_CPU_IDLE/* Must be inspected within a rcu lock section */struct cpuidle_state *idle_state;
#endif#ifdef CONFIG_SMPunsigned int nr_pinned;
#endifunsigned int push_busy;struct cpu_stop_work push_work;#ifdef CONFIG_SCHED_CORE/* per rq */struct rq *core;struct task_struct *core_pick;unsigned int core_enabled;unsigned int core_sched_seq;struct rb_root core_tree;/* shared state -- careful with sched_core_cpu_deactivate() */unsigned int core_task_seq;unsigned int core_pick_seq;unsigned long core_cookie;unsigned int core_forceidle_count;unsigned int core_forceidle_seq;unsigned int core_forceidle_occupation;u64 core_forceidle_start;
#endif
};// runqueues (not export symbol)
struct rq* _prq = NULL;struct rq* my_cpu_rq(int i_cpu)
{return per_cpu_ptr(_prq, i_cpu);
}u64 my_rq_clock_task(void)
{struct rq* prq = my_cpu_rq(smp_processor_id());return prq->clock_task;
}

#define TESTDIOMONITOR_FILE_MAXLEN 1024

typedef struct testdiomonitor_sample {
	struct timespec64 time;
	int cpu;
	int pid;
	int tgid;
	int ppid;
	char comm[TASK_COMM_LEN];
	char ppidcomm[TASK_COMM_LEN];
	// 0 or 1
	int bin_iowait;
	/*
	 * "swDstart"    // set in sched_switch
	 * "waDstop"     // set in sched_waking
	 * "swDiostart"  // set in sched_switch
	 * "waDiostop"   // set in sched_waking
	 * "Dexceed"     // threshold exceeded, not iowait
	 * "Dioexceed"   // threshold exceeded, iowait
	 */
	const char* desc;
	u64 dtimens;      // duration of the D state, in nanoseconds
	u64 iowaittimens; // time spent waiting for IO, in nanoseconds
	int stackn;
	void* parray_stack[TEST_STACK_TRACE_ENTRIES];
	int wakercpu;
	int wakerpid;
	int wakertgid;
	int wakerppid;
	char wakercomm[TASK_COMM_LEN];
	char wakerppidcomm[TASK_COMM_LEN];
	int wakerstackn;
	void* parray_wakerstack[TEST_STACK_TRACE_ENTRIES];
	u32 writedone; // 0 or 1
} testdiomonitor_sample;

#define TESTDIOMONITOR_SAMPLE_RINGBUFF_MAXCOUNT 8192

typedef struct testdiomonitor_sample_ringbuff {
	testdiomonitor_sample* parray_sample;
	volatile u64 wp; // Index is wp & (TESTDIOMONITOR_SAMPLE_RINGBUFF_MAXCOUNT - 1).
	volatile u64 rp; // Index is rp & (TESTDIOMONITOR_SAMPLE_RINGBUFF_MAXCOUNT - 1).
	u32 skipcount; // 0 means no abnormal event was skipped
} testdiomonitor_sample_ringbuff;#define TESTDIOMONITOR_LINEBUFF 1024typedef struct testdiomonitor_env {struct file* file;char file_linebuff[TESTDIOMONITOR_LINEBUFF];int headoffset;loff_t file_pos;testdiomonitor_sample_ringbuff ringbuff;
} testdiomonitor_env;static testdiomonitor_env _env;static struct delayed_work work_write_file;
static struct workqueue_struct *wq_write_file;#define FILENAME "test_new.txt"void init_file(void)
{_env.file = filp_open(FILENAME, O_WRONLY | O_CREAT | O_TRUNC, 0644);if (IS_ERR(_env.file)) {_env.file = NULL;}
}void exit_file(void)
{if (_env.file) {filp_close(_env.file, NULL);}
}void testdiomonitor_write_file(char* i_pchar, int i_size)
{if (_env.file) {kernel_write(_env.file, i_pchar, i_size, &_env.file_pos);}
}void testdiomonitor_write_file_emptyline(void)
{testdiomonitor_write_file("\n", strlen("\n"));
}void testdiomonitor_file_oneline(const char* i_format, ...)
{char* pcontent = &_env.file_linebuff[_env.headoffset];va_list args;va_start(args, i_format);vsnprintf(pcontent, TESTDIOMONITOR_LINEBUFF - _env.headoffset, i_format, args);va_end(args);testdiomonitor_write_file(_env.file_linebuff, strlen(_env.file_linebuff));
}void testdiomonitor_replace_null_with_space(char *str, int n) {for (int i = 0; i < n - 1; i++) {if (str[i] == '\0') {str[i] = ' ';}}
}void testdiomonitor_set_cmdline(char* i_pbuff, int i_buffsize, struct task_struct* i_ptask)
{int ret = _get_cmdline_func(i_ptask, i_pbuff, i_buffsize);if (ret <= 0) {i_pbuff[0] = '\0';return;}testdiomonitor_replace_null_with_space(i_pbuff, ret);i_pbuff[ret - 1] = '\0';
}void testdiomonitor_checkget_parentinfo_and_cmdline(testdiomonitor_sample* io_psample, struct task_struct* i_ptask)
{struct task_struct* parent;rcu_read_lock();parent = rcu_dereference(i_ptask->real_parent);io_psample->ppid = parent->pid;strlcpy(io_psample->ppidcomm, parent->comm, TASK_COMM_LEN);rcu_read_unlock();
}void testdiomonitor_checkget_parentinfo_and_cmdline_waker(testdiomonitor_sample* io_psample, struct task_struct* i_ptask)
{struct task_struct* parent;rcu_read_lock();parent = rcu_dereference(i_ptask->real_parent);io_psample->wakerppid = parent->pid;strlcpy(io_psample->wakerppidcomm, parent->comm, TASK_COMM_LEN);rcu_read_unlock();
}#define TESTDIOMONITOR_COMMANDLINE_MAX 128static void write_file(struct work_struct *w)
{ssize_t ret;u32 index;testdiomonitor_sample* psample;struct tm t;char timestr[64];char exceedstr[64];char temp_commandline[TESTDIOMONITOR_COMMANDLINE_MAX];struct pid* pid_struct;struct task_struct* ptask;int stacki;while (_env.ringbuff.rp != _env.ringbuff.wp) {index = (_env.ringbuff.rp & (TESTDIOMONITOR_SAMPLE_RINGBUFF_MAXCOUNT - 1));psample = &_env.ringbuff.parray_sample[index];if (psample->writedone != 1) {break;}testdiomonitor_write_file_emptyline();_env.headoffset = sprintf(_env.file_linebuff, "[%llu][%s] ", _env.ringbuff.rp, psample->desc);time64_to_tm(psample->time.tv_sec + 8 * 60 * 60, 0, &t);snprintf(timestr, 64, "%04ld-%02d-%02d-%02d_%02d_%02d.%09ld",1900 + t.tm_year, t.tm_mon + 1, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec, psample->time.tv_nsec);if (psample->desc == TESTDIOMONITOR_SAMPLEDESC_DEXCEED) {snprintf(exceedstr, 64, "dtimens[%llu]", psample->dtimens);}else if (psample->desc == TESTDIOMONITOR_SAMPLEDESC_DIOEXCEED) {snprintf(exceedstr, 64, "iowaittimens[%llu]", psample->iowaittimens);}else if (psample->desc == TESTDIOMONITOR_SAMPLEDESC_IOEXCEED) {snprintf(exceedstr, 64, "delayacct_iowaittimens[%llu]", psample->iowaittimens);}else {exceedstr[0] = '\0';}if (psample->desc == TESTDIOMONITOR_SAMPLEDESC_DIOEXCEED) {testdiomonitor_file_oneline("begin...time[%s]wakercpu[%d]desc[%s]%s\n", timestr, psample->wakercpu, psample->desc, "wakerDioexceed");testdiomonitor_file_oneline("wakertgid[%d]wakerpid[%d]wakercomm[%s]wakerppid[%d]wakerppidcomm[%s]\n",psample->wakertgid, psample->wakerpid, psample->wakercomm, psample->wakerppid, psample->wakerppidcomm);pid_struct = find_get_pid(psample->wakerpid);if (pid_struct) {ptask = get_pid_task(pid_struct, PIDTYPE_PID);if (ptask) {testdiomonitor_set_cmdline(temp_commandline, TESTDIOMONITOR_COMMANDLINE_MAX, ptask);put_task_struct(ptask);}else {temp_commandline[0] = '\0';}put_pid(pid_struct);}else {temp_commandline[0] = '\0';}testdiomonitor_file_oneline("wakercommandline[%s]\n", temp_commandline);pid_struct = find_get_pid(psample->wakerppid);if (pid_struct) {ptask = get_pid_task(pid_struct, PIDTYPE_PID);if (ptask) {testdiomonitor_set_cmdline(temp_commandline, TESTDIOMONITOR_COMMANDLINE_MAX, ptask);put_task_struct(ptask);}else {temp_commandline[0] = '\0';}put_pid(pid_struct);}else {temp_commandline[0] = '\0';}testdiomonitor_file_oneline("wakerppid_commandline[%s]\n", temp_commandline);testdiomonitor_file_oneline("stack[%d]:\n", psample->wakerstackn);for (stacki = 0; stacki < psample->wakerstackn; stacki++) {testdiomonitor_file_oneline("%*c%pS\n", 5, ' ', (void *)psample->parray_wakerstack[stacki]);}testdiomonitor_file_oneline("cpu[%d]desc[%s]%s\n", psample->cpu, psample->desc, exceedstr);}else {testdiomonitor_file_oneline("begin...time[%s]cpu[%d]desc[%s]%s\n", timestr, psample->cpu, psample->desc, exceedstr);}testdiomonitor_file_oneline("tgid[%d]pid[%d]comm[%s]ppid[%d]ppidcomm[%s]\n",psample->tgid, psample->pid, psample->ppidcomm, psample->ppid, psample->ppidcomm);pid_struct = find_get_pid(psample->pid);if (pid_struct) {ptask = get_pid_task(pid_struct, PIDTYPE_PID);if (ptask) {testdiomonitor_set_cmdline(temp_commandline, TESTDIOMONITOR_COMMANDLINE_MAX, ptask);put_task_struct(ptask);}else {temp_commandline[0] = '\0';}put_pid(pid_struct);}else {temp_commandline[0] = '\0';}testdiomonitor_file_oneline("commandline[%s]\n", temp_commandline);pid_struct = find_get_pid(psample->ppid);if (pid_struct) {ptask = get_pid_task(pid_struct, PIDTYPE_PID);if (ptask) {testdiomonitor_set_cmdline(temp_commandline, TESTDIOMONITOR_COMMANDLINE_MAX, 
ptask);put_task_struct(ptask);}else {temp_commandline[0] = '\0';}put_pid(pid_struct);}else {temp_commandline[0] = '\0';}testdiomonitor_file_oneline("ppid_commandline[%s]\n", temp_commandline);testdiomonitor_file_oneline("stack[%d]:\n", psample->stackn);for (stacki = 0; stacki < psample->stackn; stacki++) {testdiomonitor_file_oneline("%*c%pS\n", 5, ' ', (void *)psample->parray_stack[stacki]);}testdiomonitor_write_file_emptyline();psample->writedone = 0;_env.ringbuff.rp ++;}queue_delayed_work_on(nr_cpu_ids - 1, wq_write_file,&work_write_file, 1);
}static void init_write_file(void)
{init_file();wq_write_file = alloc_workqueue("testdiomonitor_write_file", WQ_MEM_RECLAIM, 0);INIT_DELAYED_WORK(&work_write_file, write_file);queue_delayed_work_on(nr_cpu_ids - 1, wq_write_file,&work_write_file, 3);
}static void exit_write_file(void)
{cancel_delayed_work_sync(&work_write_file);destroy_workqueue(wq_write_file);exit_file();
}void init_testdiomonitor_sample_ringbuff(void)
{testdiomonitor_sample* psample;_env.ringbuff.parray_sample = kvzalloc(sizeof(testdiomonitor_sample) * TESTDIOMONITOR_SAMPLE_RINGBUFF_MAXCOUNT, GFP_KERNEL);
}void exit_testdiomonitor_sample_ringbuff(void)
{kvfree(_env.ringbuff.parray_sample);
}testdiomonitor_sample* testdiomonitor_get_psample(void)
{u64 windex_raw, windex_raw_old;u32 windex;while (1) {windex_raw = _env.ringbuff.wp;if (windex_raw - _env.ringbuff.rp >= (u64)(TESTDIOMONITOR_SAMPLE_RINGBUFF_MAXCOUNT)) {_env.ringbuff.skipcount ++;return NULL;}// atomic_cmpxchg return old valuewindex_raw_old = atomic64_cmpxchg((atomic64_t*)&_env.ringbuff.wp,windex_raw, windex_raw + 1);if (windex_raw_old == windex_raw) {break;}}windex = (u32)(windex_raw & (u64)(TESTDIOMONITOR_SAMPLE_RINGBUFF_MAXCOUNT - 1));return &_env.ringbuff.parray_sample[windex];
}void testdiomonitor_add_sample(const char* i_desc, struct task_struct* i_task, u64 i_timens)
{testdiomonitor_sample* psample = testdiomonitor_get_psample();if (!psample) {return;}ktime_get_real_ts64(&psample->time);psample->cpu = task_cpu(i_task);psample->pid = i_task->pid;psample->tgid = i_task->tgid;strlcpy(psample->comm, i_task->comm, TASK_COMM_LEN);testdiomonitor_checkget_parentinfo_and_cmdline(psample, i_task);psample->bin_iowait = i_task->in_iowait;psample->desc = i_desc;if (i_desc == TESTDIOMONITOR_SAMPLEDESC_DEXCEED) {psample->dtimens = i_timens;}else if (i_desc == TESTDIOMONITOR_SAMPLEDESC_DIOEXCEED || i_desc == TESTDIOMONITOR_SAMPLEDESC_IOEXCEED) {psample->iowaittimens = i_timens;}psample->stackn = _stack_trace_save_tsk(i_task, (unsigned long*)psample->parray_stack, TEST_STACK_TRACE_ENTRIES, 0);if (i_desc == TESTDIOMONITOR_SAMPLEDESC_DIOEXCEED) {psample->wakercpu = smp_processor_id();psample->wakerpid = current->pid;psample->wakertgid = current->tgid;strlcpy(psample->wakercomm, current->comm, TASK_COMM_LEN);testdiomonitor_checkget_parentinfo_and_cmdline_waker(psample, current);psample->wakerstackn = _stack_trace_save_tsk(current, (unsigned long*)psample->parray_wakerstack, TEST_STACK_TRACE_ENTRIES, 0);}psample->writedone = 1;
}static void cb_sched_switch(void *i_data, bool i_preempt,struct task_struct *i_prev,struct task_struct *i_next,unsigned int i_prev_state)
{
#ifndef TESTDIOMONITOR_SIMPLEvoid* parray_stack[TEST_STACK_TRACE_ENTRIES];int num_stack;int stacki;if (i_prev_state == TASK_UNINTERRUPTIBLE) {if (i_prev->in_iowait) {testdiomonitor_add_sample(TESTDIOMONITOR_SAMPLEDESC_SWDIOSTART, i_prev, 0);}else {testdiomonitor_add_sample(TESTDIOMONITOR_SAMPLEDESC_SWDSTART, i_prev, 0);}}else if (i_prev->in_iowait) {testdiomonitor_add_sample(TESTDIOMONITOR_SAMPLEDESC_SWDIOSTART, i_prev, 0);}
#endif
}const char* getstatstr_bystate(u32 i_state) {switch (i_state) {case TASK_RUNNING:return "TASK_RUNNING";case TASK_INTERRUPTIBLE:return "TASK_INTERRUPTIBLE";case TASK_UNINTERRUPTIBLE:return "TASK_UNINTERRUPTIBLE";default:return "other";}
}static void cb_sched_waking(void *i_data, struct task_struct *i_p) {if (i_p->__state == TASK_UNINTERRUPTIBLE) {//u64 currns = my_rq_clock_task();struct rq* prq = my_cpu_rq(task_cpu(i_p));u64 currns = prq->clock_task;u64 local_c = local_clock();int cpuid = smp_processor_id();if (i_p->in_iowait) {
#ifndef TESTDIOMONITOR_SIMPLEtestdiomonitor_add_sample(TESTDIOMONITOR_SAMPLEDESC_WADIOSTOP, i_p, 0);
#endif
#ifdef TESTDIOMONITOR_SIMPLEif (currns - i_p->se.exec_start >= TESTDIOMONITOR_SIMPLE_THRESHOLDNS)
#endiftestdiomonitor_add_sample(TESTDIOMONITOR_SAMPLEDESC_DIOEXCEED, i_p, currns - i_p->se.exec_start);
#ifndef TESTDIOMONITOR_SIMPLEif (i_p->se.exec_start > currns) {//if (task_cpu(i_p) == cpuid) {printk("comm[%s]pid[%d]exec_start[%llu]currns[%llu]local_clock[%llu]last_cpu[%d]cpuid[%d]\n", i_p->comm, i_p->pid, i_p->se.exec_start, currns, local_c, task_cpu(i_p), cpuid);}}// if (printk_ratelimit()) {// printk("waking dump_stack[D]:\n");// dump_stack();// }
#endif}
#ifndef TESTDIOMONITOR_SIMPLEelse {testdiomonitor_add_sample(TESTDIOMONITOR_SAMPLEDESC_WADSTOP, i_p, 0);testdiomonitor_add_sample(TESTDIOMONITOR_SAMPLEDESC_DEXCEED, i_p, my_rq_clock_task() - i_p->se.exec_start);if (i_p->se.exec_start > currns) {//if (task_cpu(i_p) == cpuid) {printk("comm[%s]pid[%d]exec_start[%llu]currns[%llu]local_clock[%llu]last_cpu[%d]cpuid[%d]\n", i_p->comm, i_p->pid, i_p->se.exec_start, currns, local_c, task_cpu(i_p), cpuid);}}}
#endif}else if (i_p->in_iowait) {struct rq* prq = my_cpu_rq(task_cpu(i_p));u64 currns = prq->clock_task;u64 local_c = local_clock();int cpuid = smp_processor_id();//if (printk_ratelimit()) // {// printk("i_p->__state=[%u][%s]\n", i_p->__state, getstatstr_bystate(i_p->__state));// printk("waking dump_stack[K]:\n");// dump_stack();// }
#ifndef TESTDIOMONITOR_SIMPLEtestdiomonitor_add_sample(TESTDIOMONITOR_SAMPLEDESC_WADIOSTOP, i_p, 0);
#endif
#ifdef TESTDIOMONITOR_SIMPLEif (currns - i_p->se.exec_start >= TESTDIOMONITOR_SIMPLE_THRESHOLDNS)
#endiftestdiomonitor_add_sample(TESTDIOMONITOR_SAMPLEDESC_DIOEXCEED, i_p, currns - i_p->se.exec_start);}
}static void cb_iodelay_account(void *i_data, struct task_struct *i_curr,unsigned long long i_delta)
{
#ifdef TESTDIOMONITOR_SIMPLEif (i_delta >= TESTDIOMONITOR_SIMPLE_THRESHOLDNS)
#endiftestdiomonitor_add_sample(TESTDIOMONITOR_SAMPLEDESC_IOEXCEED, i_curr, i_delta);
}struct kern_tracepoint {void *callback;struct tracepoint *ptr;bool bregister;
};
static void clear_kern_tracepoint(struct kern_tracepoint *tp)
{if (tp->bregister) {tracepoint_probe_unregister(tp->ptr, tp->callback, NULL);}
}#define INIT_KERN_TRACEPOINT(tracepoint_name) \static struct kern_tracepoint mykern_##tracepoint_name = {.callback = NULL, .ptr = NULL, .bregister = false};#define TRACEPOINT_CHECK_AND_SET(tracepoint_name) \static void tracepoint_name##_tracepoint_check_and_set(struct tracepoint *tp, void *priv) \{ \if (!strcmp(#tracepoint_name, tp->name)) \{ \((struct kern_tracepoint *)priv)->ptr = tp; \return; \} \}INIT_KERN_TRACEPOINT(sched_switch)
TRACEPOINT_CHECK_AND_SET(sched_switch)
INIT_KERN_TRACEPOINT(sched_waking)
TRACEPOINT_CHECK_AND_SET(sched_waking)
#ifdef IODELAY_TRACEPOINT_ENABLE
INIT_KERN_TRACEPOINT(iodelay_account)
TRACEPOINT_CHECK_AND_SET(iodelay_account)
#endiftypedef unsigned long (*kallsyms_lookup_name_func)(const char *name);
kallsyms_lookup_name_func _kallsyms_lookup_name_func;void* get_func_by_symbol_name_kallsyms_lookup_name(void)
{
	int ret;
	void* pfunc = NULL;
	struct kprobe kp;
	memset(&kp, 0, sizeof(kp));
	kp.symbol_name = "kallsyms_lookup_name";
	kp.pre_handler = NULL;
	kp.addr = NULL; // deliberately left NULL to emphasize that symbol_name drives the lookup
	ret = register_kprobe(&kp);
	if (ret < 0) {
		printk("register_kprobe fail!\n");
		return NULL;
	}
	printk("register_kprobe succeed!\n");
	pfunc = (void*)kp.addr;
	unregister_kprobe(&kp);
	return pfunc;
}void* get_func_by_symbol_name(const char* i_symbol)
{if (_kallsyms_lookup_name_func == NULL) {return NULL;}return _kallsyms_lookup_name_func(i_symbol);
}static int __init testdiomonitor_init(void)
{//printk("offset of mmap_lock in mm_struct [%d]\n", offsetof(struct mm_struct, mmap_lock));_kallsyms_lookup_name_func = get_func_by_symbol_name_kallsyms_lookup_name();_prq = get_func_by_symbol_name("runqueues");if (_prq == NULL) {printk(KERN_ERR "get_func_by_symbol_name runqueues failed!\n");return -1;}init_testdiomonitor_sample_ringbuff();init_write_file();_stack_trace_save_tsk = get_func_by_symbol_name("stack_trace_save_tsk");if (_stack_trace_save_tsk == NULL) {printk(KERN_ERR "get_func_by_symbol_name stack_trace_save_tsk failed!\n");return -1;}_get_cmdline_func = get_func_by_symbol_name("get_cmdline");if (_get_cmdline_func == NULL) {printk(KERN_ERR "get_func_by_symbol_name get_cmdline failed!\n");return -1;}mykern_sched_switch.callback = cb_sched_switch;for_each_kernel_tracepoint(sched_switch_tracepoint_check_and_set, &mykern_sched_switch);if (!mykern_sched_switch.ptr) {printk(KERN_ERR "mykern_sched_switch register failed!\n");return -1;}else {printk(KERN_INFO "mykern_sched_switch register succeeded!\n");}tracepoint_probe_register(mykern_sched_switch.ptr, mykern_sched_switch.callback, NULL);mykern_sched_switch.bregister = 1;mykern_sched_waking.callback = cb_sched_waking;for_each_kernel_tracepoint(sched_waking_tracepoint_check_and_set, &mykern_sched_waking);if (!mykern_sched_waking.ptr) {printk(KERN_ERR "mykern_sched_waking register failed!\n");return -1;}else {printk(KERN_INFO "mykern_sched_waking register succeeded!\n");}tracepoint_probe_register(mykern_sched_waking.ptr, mykern_sched_waking.callback, NULL);mykern_sched_waking.bregister = 1;#ifdef IODELAY_TRACEPOINT_ENABLEmykern_iodelay_account.callback = cb_iodelay_account;for_each_kernel_tracepoint(iodelay_account_tracepoint_check_and_set, &mykern_iodelay_account);if (!mykern_iodelay_account.ptr) {printk(KERN_ERR "mykern_iodelay_account register failed!\n");return -1;}else {printk(KERN_INFO "mykern_iodelay_account register succeeded!\n");}tracepoint_probe_register(mykern_iodelay_account.ptr, mykern_iodelay_account.callback, NULL);mykern_iodelay_account.bregister = 1;
#endifreturn 0;
}static void __exit testdiomonitor_exit(void)
{clear_kern_tracepoint(&mykern_sched_switch);clear_kern_tracepoint(&mykern_sched_waking);
#ifdef IODELAY_TRACEPOINT_ENABLEclear_kern_tracepoint(&mykern_iodelay_account);
#endiftracepoint_synchronize_unregister();exit_write_file();exit_testdiomonitor_sample_ringbuff();
}module_init(testdiomonitor_init);
module_exit(testdiomonitor_exit);
2.2 Notes on the waker-related changes
This change covers the scenario where a task that accrued iodelay is woken up: in addition to printing the stack of the task that experienced the iodelay, we now also record and print the waker's stack, which shows exactly which piece of logic performed the wake-up.
First, add the waker-related fields to the testdiomonitor_sample struct:
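The fields below are excerpted from the testdiomonitor_sample definition in the full source in 2.1, shown here in place of the original screenshot:

	int wakercpu;
	int wakerpid;
	int wakertgid;
	int wakerppid;
	char wakercomm[TASK_COMM_LEN];
	char wakerppidcomm[TASK_COMM_LEN];
	int wakerstackn;
	void* parray_wakerstack[TEST_STACK_TRACE_ENTRIES];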

Next, add the corresponding waker-related helper function:
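This helper, excerpted from the full source in 2.1, records the waker's parent pid and comm under RCU protection:

void testdiomonitor_checkget_parentinfo_and_cmdline_waker(testdiomonitor_sample* io_psample, struct task_struct* i_ptask)
{
	struct task_struct* parent;
	rcu_read_lock();
	parent = rcu_dereference(i_ptask->real_parent);
	io_psample->wakerppid = parent->pid;
	strlcpy(io_psample->wakerppidcomm, parent->comm, TASK_COMM_LEN);
	rcu_read_unlock();
}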

When writing samples to the file, if the sample is of type TESTDIOMONITOR_SAMPLEDESC_DIOEXCEED, i.e. an iodelay wake-up record, we additionally print the waker's stack:
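The stack-dumping part of write_file() from 2.1, shown in place of the original screenshot:

	testdiomonitor_file_oneline("stack[%d]:\n", psample->wakerstackn);
	for (stacki = 0; stacki < psample->wakerstackn; stacki++) {
		testdiomonitor_file_oneline("%*c%pS\n", 5, ' ', (void *)psample->parray_wakerstack[stacki]);
	}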

We also print the waker's command line and the waker's parent's command line:
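The command-line lookup part of write_file() from 2.1; the same pattern is then repeated for the waker's parent (wakerppid):

	pid_struct = find_get_pid(psample->wakerpid);
	if (pid_struct) {
		ptask = get_pid_task(pid_struct, PIDTYPE_PID);
		if (ptask) {
			testdiomonitor_set_cmdline(temp_commandline, TESTDIOMONITOR_COMMANDLINE_MAX, ptask);
			put_task_struct(ptask);
		} else {
			temp_commandline[0] = '\0';
		}
		put_pid(pid_struct);
	} else {
		temp_commandline[0] = '\0';
	}
	testdiomonitor_file_oneline("wakercommandline[%s]\n", temp_commandline);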

Then, when testdiomonitor_add_sample records a sample, it checks for the TESTDIOMONITOR_SAMPLEDESC_DIOEXCEED case and records the waker information:
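The waker-recording branch in testdiomonitor_add_sample() from 2.1; since this runs in the sched_waking callback, current is the waker:

	if (i_desc == TESTDIOMONITOR_SAMPLEDESC_DIOEXCEED) {
		psample->wakercpu = smp_processor_id();
		psample->wakerpid = current->pid;
		psample->wakertgid = current->tgid;
		strlcpy(psample->wakercomm, current->comm, TASK_COMM_LEN);
		testdiomonitor_checkget_parentinfo_and_cmdline_waker(psample, current);
		psample->wakerstackn = _stack_trace_save_tsk(current, (unsigned long*)psample->parray_wakerstack, TEST_STACK_TRACE_ENTRIES, 0);
	}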

2.3 Handling in_iowait tasks whose state is not TASK_UNINTERRUPTIBLE
During debugging we found that if we only record waking samples for tasks in the TASK_UNINTERRUPTIBLE state, we miss the samples from the filemap page-fault scenario: in that scenario the task's state is not plain TASK_UNINTERRUPTIBLE but has one extra bit set, making it TASK_KILLABLE.
Here is the definition of TASK_KILLABLE:
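In include/linux/sched.h of recent mainline kernels (the exact numeric values can differ between versions), TASK_KILLABLE is TASK_UNINTERRUPTIBLE with the TASK_WAKEKILL bit added:

#define TASK_UNINTERRUPTIBLE		0x00000002
#define TASK_WAKEKILL			0x00000100
#define TASK_KILLABLE			(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)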

So we modified the logic in cb_sched_waking and added an else branch, as shown below:
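The added else branch, excerpted from cb_sched_waking() in 2.1 with the commented-out debug prints omitted; it catches tasks that have in_iowait set but whose __state is not plain TASK_UNINTERRUPTIBLE (e.g. TASK_KILLABLE):

	else if (i_p->in_iowait) {
		struct rq* prq = my_cpu_rq(task_cpu(i_p));
		u64 currns = prq->clock_task;
#ifdef TESTDIOMONITOR_SIMPLE
		if (currns - i_p->se.exec_start >= TESTDIOMONITOR_SIMPLE_THRESHOLDNS)
#endif
			testdiomonitor_add_sample(TESTDIOMONITOR_SAMPLEDESC_DIOEXCEED, i_p, currns - i_p->se.exec_start);
	}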

2.4 Results
After insmod-ing the module, here is a captured example showing the waker and wakee stacks for one task woken out of iowait:

Because iowaittimens above is computed from rq_clock, it may carry a small error, but it at least guarantees monotonicity of the time axis, i.e. the computed iowaittimens can never be negative; see the earlier post 在计算进程D状态持续时间及等IO的时间遇到的一处问题-CSDN博客 for details.
The capture above is an instance of iowait caused by a page fault; Chapter 3 below analyzes the mechanism in detail.
3. How a page fault leads to iowait
Let's return to the waker/wakee stack capture shown in 2.4 above:

To spell it out: process 231797, the bash /usr/local/sunlogin/etc/watch.sh script process, spawned grep -v /usr/local/sunlogin/etc/watch.sh (process 231801). While running, 231801 read a file through the page cache, took a page fault, and was waiting for the IO that loads the corresponding page to complete (the IO reads the file from disk back into the page cache). Meanwhile, the same bash /usr/local/sunlogin/etc/watch.sh script process 231797 also spawned grep sunloginclient --mod=service (process 231799). When process 231799 read a file, it found that the folio backing that file's page-cache page had PG_locked set, meaning a disk read was still in flight, so process 231799 went to sleep in the D state with in_iowait set. Later, once process 231801 was notified that the read had completed, it also woke up the other tasks waiting to read that folio, and the task woken here was process 231799.
Note that the disk read may be for a data file, for the program binary itself, for an .so library, and so on.
3.1 Every captured filemap_fault sample is at offset filemap_fault+0xe8
Looking at all the captured filemap_fault samples, there are 88 of them in this run:

Each one is at offset 0xe8 within filemap_fault, and each then reaches offset 0x124 within folio_wait_bit_common, again 88 times:

Every sample lands at offset 0x124 of folio_wait_bit_common:

3.2 Determining the call chain
Before looking up the corresponding source in vmlinux, let's first confirm that the vmlinux we are using matches the running kernel.
In that vmlinux, the start address of filemap_fault is ffffffff8137ac20; adding 0xab0 to it:


lands exactly at the start of the next function:

So the vmlinux matches.
Now let's locate offset 0xe8 within filemap_fault:
ffffffff8137ac20 plus 0xe8 is ffffffff8137ad08:

Searching for the source code that corresponds to this call instruction:
return folio_wait_bit_common(folio, PG_locked, TASK_KILLABLE,
There is exactly one such place in the code:
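That place is __folio_lock_killable() in mm/filemap.c, reproduced here from the mainline source for reference (minor differences are possible depending on the kernel version):

int __folio_lock_killable(struct folio *folio)
{
	return folio_wait_bit_common(folio, PG_locked, TASK_KILLABLE,
					EXCLUSIVE);
}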

Since __folio_lock_killable does not appear in the stack, it must have been inlined at its call site. __folio_lock_killable has only two callers: one is __folio_lock_or_retry, as shown below:

The other caller is lock_folio_maybe_drop_mmap:
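A simplified excerpt of lock_folio_maybe_drop_mmap() from mm/filemap.c, reconstructed from the mainline source as a reference (details differ across kernel versions): when the folio is already locked and the fault is killable, it releases the mmap lock and sleeps in __folio_lock_killable():

static bool lock_folio_maybe_drop_mmap(struct vm_fault *vmf, struct folio *folio,
				       struct file **fpin)
{
	if (folio_trylock(folio))
		return true;

	if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
		return false;

	*fpin = maybe_unlock_mmap_for_io(vmf, *fpin);
	if (vmf->flags & FAULT_FLAG_KILLABLE) {
		if (__folio_lock_killable(folio)) {
			/* interrupted by a fatal signal while waiting for PG_locked */
			return false;
		}
	} else {
		__folio_lock(folio);
	}
	return true;
}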

A quick comparison shows that it is the second one, lock_folio_maybe_drop_mmap, that invoked __folio_lock_killable here.
So the call chain for this iowait-related page fault is:
filemap_fault->lock_folio_maybe_drop_mmap->__folio_lock_killable->folio_wait_bit_common
3.3 About the PG_locked flag
With the call chain established, let's come back to the __folio_lock_killable function:

It waits for the folio's PG_locked flag to be released; while waiting, the task is put into the TASK_KILLABLE state, which, as noted in 2.3 above, is TASK_UNINTERRUPTIBLE with the extra TASK_WAKEKILL bit.
Why does the kernel have this design: set PG_locked while the IO is in flight, clear it only when the IO completes, and make everyone else wait in the meantime?
It is easy to understand: the first task that faults on the file issues the read IO and waits for it to complete. If other page faults on the same page were allowed to proceed before that IO finished, they would read wrong or incomplete data.