struct rseq {
        __u32                      cpu_id_start;         /*     0     4 */
        __u32                      cpu_id;               /*     4     4 */
        __u64                      rseq_cs;              /*     8     8 */
        __u32                      flags;                /*    16     4 */
        __u32                      node_id;              /*    20     4 */
        __u32                      mm_cid;               /*    24     4 */
        char                       end[];                /*    28     0 */

        /* size: 32, cachelines: 1, members: 7 */
        /* padding: 4 */
        /* last cacheline: 32 bytes */
};
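/*
 * The layouts in this section are pahole output (typically produced with
 * something along the lines of pahole -C <struct_name> vmlinux): each
 * trailing comment gives the member's byte offset and size, "XXX ... hole"
 * marks an alignment gap between members, and "padding" is unused space at
 * the end of the struct.  The sketch below is a self-contained userspace
 * illustration of those ideas, assuming a typical LP64 target; rseq_mirror
 * and u32_tail are hypothetical local mirrors of the layouts shown here,
 * not the kernel's own definitions.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

/* Mirror of the struct rseq layout printed above, using <stdint.h> types
 * in place of the kernel's __u32/__u64. */
struct rseq_mirror {
        uint32_t cpu_id_start;          /*  0  4 */
        uint32_t cpu_id;                /*  4  4 */
        uint64_t rseq_cs;               /*  8  8 */
        uint32_t flags;                 /* 16  4 */
        uint32_t node_id;               /* 20  4 */
        uint32_t mm_cid;                /* 24  4 */
        char     end[];                 /* 28  0 */
};

/* The offsets and total size must agree with the "offset size" comments
 * above: 28 bytes of members round up to sizeof == 32 (4 bytes of tail
 * padding, as pahole reports). */
_Static_assert(offsetof(struct rseq_mirror, rseq_cs) == 8,  "layout drift");
_Static_assert(offsetof(struct rseq_mirror, mm_cid) == 24,  "layout drift");
_Static_assert(sizeof(struct rseq_mirror) == 32,            "layout drift");

/* "XXX 4 bytes hole, try to pack" is this effect in miniature: a 4-byte
 * member followed by an 8-byte-aligned member leaves a gap that member
 * reordering reclaims. */
struct with_hole { uint32_t a; uint64_t b; uint32_t c; };   /* 4 + hole + 8 + 4 + pad */
struct reordered { uint64_t b; uint32_t a; uint32_t c; };   /* 8 + 4 + 4, no waste */
_Static_assert(sizeof(struct with_hole) == 24, "expect a 4-byte hole plus tail padding");
_Static_assert(sizeof(struct reordered) == 16, "hole removed by member reordering");

/* Most structs in this dump end in a zero-sized flexible array member
 * (char end[], char __data[], u32 priomap[], ...); storage for the
 * trailing elements is allocated past the fixed part of the layout. */
struct u32_tail {
        uint32_t len;
        uint32_t data[];
};

static struct u32_tail *u32_tail_alloc(uint32_t n)
{
        struct u32_tail *t = malloc(offsetof(struct u32_tail, data) + n * sizeof(uint32_t));

        if (t)
                t->len = n;
        return t;
}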
struct mem_cgroup {
        struct cgroup_subsys_state css;                  /*     0   200 */
        /* XXX last struct has 1 hole */

        /* --- cacheline 3 boundary (192 bytes) was 8 bytes ago --- */
        struct mem_cgroup_id       id;                   /*   200     8 */

        /* XXX 48 bytes hole, try to pack */

        /* --- cacheline 4 boundary (256 bytes) --- */
        struct page_counter        memory __attribute__((__aligned__(64))); /*   256   192 */
        /* XXX last struct has 24 bytes of padding, 1 hole */

        /* --- cacheline 7 boundary (448 bytes) --- */
        union {
                struct page_counter swap;                /*   448   192 */
                struct page_counter memsw;               /*   448   192 */
        };                                               /*   448   192 */

        /* --- cacheline 10 boundary (640 bytes) --- */
        struct page_counter        kmem;                 /*   640   192 */
        /* XXX last struct has 24 bytes of padding, 1 hole */

        /* --- cacheline 13 boundary (832 bytes) --- */
        struct page_counter        tcpmem;               /*   832   192 */
        /* XXX last struct has 24 bytes of padding, 1 hole */

        /* --- cacheline 16 boundary (1024 bytes) --- */
        struct work_struct         high_work;            /*  1024    32 */
        long unsigned int          zswap_max;            /*  1056     8 */
        bool                       zswap_writeback;      /*  1064     1 */

        /* XXX 7 bytes hole, try to pack */

        long unsigned int          soft_limit;           /*  1072     8 */
        struct vmpressure          vmpressure;           /*  1080   120 */
        /* XXX last struct has 1 hole */

        /* --- cacheline 18 boundary (1152 bytes) was 48 bytes ago --- */
        bool                       oom_group;            /*  1200     1 */
        bool                       oom_lock;             /*  1201     1 */

        /* XXX 2 bytes hole, try to pack */

        int                        under_oom;            /*  1204     4 */
        int                        swappiness;           /*  1208     4 */
        int                        oom_kill_disable;     /*  1212     4 */

        /* --- cacheline 19 boundary (1216 bytes) --- */
        struct cgroup_file         events_file;          /*  1216    56 */
        struct cgroup_file         events_local_file;    /*  1272    56 */

        /* --- cacheline 20 boundary (1280 bytes) was 48 bytes ago --- */
        struct cgroup_file         swap_events_file;     /*  1328    56 */

        /* --- cacheline 21 boundary (1344 bytes) was 40 bytes ago --- */
        struct mutex               thresholds_lock;      /*  1384    32 */

        /* --- cacheline 22 boundary (1408 bytes) was 8 bytes ago --- */
        struct mem_cgroup_thresholds thresholds;         /*  1416    16 */
        struct mem_cgroup_thresholds memsw_thresholds;   /*  1432    16 */
        struct list_head           oom_notify;           /*  1448    16 */
        long unsigned int          move_charge_at_immigrate; /*  1464     8 */

        /* --- cacheline 23 boundary (1472 bytes) --- */
        spinlock_t                 move_lock;            /*  1472     4 */

        /* XXX 4 bytes hole, try to pack */

        long unsigned int          move_lock_flags;      /*  1480     8 */

        /* XXX 48 bytes hole, try to pack */

        /* --- cacheline 24 boundary (1536 bytes) --- */
        struct cacheline_padding   _pad1_;               /*  1536     0 */
        struct memcg_vmstats *     vmstats __attribute__((__aligned__(64))); /*  1536     8 */
        atomic_long_t              memory_events[9];     /*  1544    72 */

        /* --- cacheline 25 boundary (1600 bytes) was 16 bytes ago --- */
        atomic_long_t              memory_events_local[9]; /*  1616    72 */

        /* --- cacheline 26 boundary (1664 bytes) was 24 bytes ago --- */
        long unsigned int          socket_pressure;      /*  1688     8 */
        bool                       tcpmem_active;        /*  1696     1 */

        /* XXX 3 bytes hole, try to pack */

        int                        tcpmem_pressure;      /*  1700     4 */
        int                        kmemcg_id;            /*  1704     4 */

        /* XXX 4 bytes hole, try to pack */

        struct obj_cgroup *        objcg;                /*  1712     8 */
        struct obj_cgroup *        orig_objcg;           /*  1720     8 */

        /* --- cacheline 27 boundary (1728 bytes) --- */
        struct list_head           objcg_list;           /*  1728    16 */

        /* XXX 48 bytes hole, try to pack */

        /* --- cacheline 28 boundary (1792 bytes) --- */
        struct cacheline_padding   _pad2_;               /*  1792     0 */
        atomic_t                   moving_account __attribute__((__aligned__(64))); /*  1792     4 */

        /* XXX 4 bytes hole, try to pack */

        struct task_struct *       move_lock_task;       /*  1800     8 */
        struct memcg_vmstats_percpu * vmstats_percpu;    /*  1808     8 */
        struct list_head           cgwb_list;            /*  1816    16 */
        struct wb_domain           cgwb_domain;          /*  1832   120 */
        /* XXX last struct has 1 hole */

        /* --- cacheline 30 boundary (1920 bytes) was 32 bytes ago --- */
        struct memcg_cgwb_frn      cgwb_frn[4];          /*  1952   160 */

        /* --- cacheline 33 boundary (2112 bytes) --- */
        struct list_head           event_list;           /*  2112    16 */
        spinlock_t                 event_list_lock;      /*  2128     4 */

        /* XXX 4 bytes hole, try to pack */

        struct deferred_split      deferred_split_queue; /*  2136    32 */
        /* XXX last struct has 1 hole */

        struct lru_gen_mm_list     mm_list;              /*  2168    24 */
        /* XXX last struct has 4 bytes of padding */

        /* --- cacheline 34 boundary (2176 bytes) was 16 bytes ago --- */
        struct mem_cgroup_per_node * nodeinfo[];         /*  2192     0 */

        /* size: 2240, cachelines: 35, members: 49 */
        /* sum members: 2020, holes: 10, sum holes: 172 */
        /* padding: 48 */
        /* member types with holes: 7, total: 7 */
        /* paddings: 4, sum paddings: 76 */
        /* forced alignments: 3, forced holes: 1, sum forced holes: 48 */
} __attribute__((__aligned__(64)));

struct trace_event_raw_xen_mc_extend_args {
        struct trace_entry         ent;                  /*     0     8 */
        unsigned int               op;                   /*     8     4 */

        /* XXX 4 bytes hole, try to pack */

        size_t                     args;                 /*    16     8 */
        enum xen_mc_extend_args    res;                  /*    24     4 */
        char                       __data[];             /*    28     0 */

        /* size: 32, cachelines: 1, members: 5 */
        /* sum members: 24, holes: 1, sum holes: 4 */
        /* padding: 4 */
        /* last cacheline: 32 bytes */
};

struct trace_event_raw_xen_mmu_alloc_ptpage {
        struct trace_entry         ent;                  /*     0     8 */
        struct mm_struct *         mm;                   /*     8     8 */
        long unsigned int          pfn;                  /*    16     8 */
        unsigned int               level;                /*    24     4 */
        bool                       pinned;               /*    28     1 */
        char                       __data[];             /*    29     0 */

        /* size: 32, cachelines: 1, members: 6 */
        /* padding: 3 */
        /* last cacheline: 32 bytes */
};

struct trace_event_raw_xen_mmu_release_ptpage {
        struct trace_entry         ent;                  /*     0     8 */
        long unsigned int          pfn;                  /*     8     8 */
        unsigned int               level;                /*    16     4 */
        bool                       pinned;               /*    20     1 */
        char                       __data[];             /*    21     0 */

        /* size: 24, cachelines: 1, members: 5 */
        /* padding: 3 */
        /* last cacheline: 24 bytes */
};

struct trace_event_raw_xen_cpu_write_idt_entry {
        struct trace_entry         ent;                  /*     0     8 */
        gate_desc *                dt;                   /*     8     8 */
        int                        entrynum;             /*    16     4 */
        char                       __data[];             /*    20     0 */

        /* size: 24, cachelines: 1, members: 4 */
        /* padding: 4 */
        /* last cacheline: 24 bytes */
};

struct trace_event_raw_xen_cpu_set_ldt {
        struct trace_entry         ent;                  /*     0     8 */
        const void *               addr;                 /*     8     8 */
        unsigned int               entries;              /*    16     4 */
        char                       __data[];             /*    20     0 */

        /* size: 24, cachelines: 1, members: 4 */
        /* padding: 4 */
        /* last cacheline: 24 bytes */
};

struct netprio_map {
        struct callback_head       rcu;                  /*     0    16 */
        u32                        priomap_len;          /*    16     4 */
        u32                        priomap[];            /*    20     0 */

        /* size: 24, cachelines: 1, members: 3 */
        /* padding: 4 */
        /* last cacheline: 24 bytes */
};

struct qdisc_size_table {
        struct callback_head       rcu;                  /*     0    16 */
        struct list_head           list;                 /*    16    16 */
        struct tc_sizespec         szopts;               /*    32    24 */
        int                        refcnt;               /*    56     4 */
        u16                        data[];               /*    60     0 */

        /* size: 64, cachelines: 1, members: 5 */
        /* padding: 4 */
};

struct sysinfo {
        __kernel_long_t            uptime;               /*     0     8 */
        __kernel_ulong_t           loads[3];             /*     8    24 */
        __kernel_ulong_t           totalram;             /*    32     8 */
        __kernel_ulong_t           freeram;              /*    40     8 */
        __kernel_ulong_t           sharedram;            /*    48     8 */
        __kernel_ulong_t           bufferram;            /*    56     8 */
        /* --- cacheline 1 boundary (64 bytes) --- */
        __kernel_ulong_t           totalswap;            /*    64     8 */
        __kernel_ulong_t           freeswap;             /*    72     8 */
        __u16                      procs;                /*    80     2 */
        __u16                      pad;                  /*    82     2 */

        /* XXX 4
bytes hole, try to pack */ __kernel_ulong_t totalhigh; /* 88 8 */ __kernel_ulong_t freehigh; /* 96 8 */ __u32 mem_unit; /* 104 4 */ char _f[]; /* 108 0 */ /* size: 112, cachelines: 2, members: 14 */ /* sum members: 104, holes: 1, sum holes: 4 */ /* padding: 4 */ /* last cacheline: 48 bytes */ }; struct trace_event_raw_xhci_log_free_virt_dev { struct trace_entry ent; /* 0 8 */ void * vdev; /* 8 8 */ long long unsigned int out_ctx; /* 16 8 */ long long unsigned int in_ctx; /* 24 8 */ int slot_id; /* 32 4 */ u16 current_mel; /* 36 2 */ char __data[]; /* 38 0 */ /* size: 40, cachelines: 1, members: 7 */ /* padding: 2 */ /* last cacheline: 40 bytes */ }; struct trace_event_raw_xhci_log_virt_dev { struct trace_entry ent; /* 0 8 */ void * vdev; /* 8 8 */ long long unsigned int out_ctx; /* 16 8 */ long long unsigned int in_ctx; /* 24 8 */ int devnum; /* 32 4 */ int state; /* 36 4 */ int speed; /* 40 4 */ u8 portnum; /* 44 1 */ u8 level; /* 45 1 */ /* XXX 2 bytes hole, try to pack */ int slot_id; /* 48 4 */ char __data[]; /* 52 0 */ /* size: 56, cachelines: 1, members: 11 */ /* sum members: 50, holes: 1, sum holes: 2 */ /* padding: 4 */ /* last cacheline: 56 bytes */ }; struct trace_event_raw_xhci_log_ep_ctx { struct trace_entry ent; /* 0 8 */ u32 info; /* 8 4 */ u32 info2; /* 12 4 */ u64 deq; /* 16 8 */ u32 tx_info; /* 24 4 */ char __data[]; /* 28 0 */ /* size: 32, cachelines: 1, members: 6 */ /* padding: 4 */ /* last cacheline: 32 bytes */ }; struct ethtool_rxnfc { __u32 cmd; /* 0 4 */ __u32 flow_type; /* 4 4 */ __u64 data; /* 8 8 */ struct ethtool_rx_flow_spec fs; /* 16 168 */ /* XXX last struct has 4 bytes of padding, 1 hole */ /* --- cacheline 2 boundary (128 bytes) was 56 bytes ago --- */ union { __u32 rule_cnt; /* 184 4 */ __u32 rss_context; /* 184 4 */ }; /* 184 4 */ __u32 rule_locs[]; /* 188 0 */ /* size: 192, cachelines: 3, members: 6 */ /* padding: 4 */ /* member types with holes: 1, total: 1 */ /* paddings: 1, sum paddings: 4 */ }; struct ioam6_schema { struct rhash_head head; /* 0 8 */ struct callback_head rcu; /* 8 16 */ struct ioam6_namespace * ns; /* 24 8 */ u32 id; /* 32 4 */ int len; /* 36 4 */ __be32 hdr; /* 40 4 */ u8 data[]; /* 44 0 */ /* size: 48, cachelines: 1, members: 7 */ /* padding: 4 */ /* last cacheline: 48 bytes */ }; struct trace_event_raw_initcall_finish { struct trace_entry ent; /* 0 8 */ initcall_t func; /* 8 8 */ int ret; /* 16 4 */ char __data[]; /* 20 0 */ /* size: 24, cachelines: 1, members: 4 */ /* padding: 4 */ /* last cacheline: 24 bytes */ }; struct poll_list { struct poll_list * next; /* 0 8 */ unsigned int len; /* 8 4 */ struct pollfd entries[]; /* 12 0 */ /* size: 16, cachelines: 1, members: 3 */ /* padding: 4 */ /* last cacheline: 16 bytes */ }; struct posix_acl { refcount_t a_refcount; /* 0 4 */ /* XXX 4 bytes hole, try to pack */ struct callback_head a_rcu; /* 8 16 */ unsigned int a_count; /* 24 4 */ struct posix_acl_entry a_entries[]; /* 28 0 */ /* size: 32, cachelines: 1, members: 4 */ /* sum members: 24, holes: 1, sum holes: 4 */ /* padding: 4 */ /* last cacheline: 32 bytes */ }; struct trace_event_raw_rtc_time_alarm_class { struct trace_entry ent; /* 0 8 */ time64_t secs; /* 8 8 */ int err; /* 16 4 */ char __data[]; /* 20 0 */ /* size: 24, cachelines: 1, members: 4 */ /* padding: 4 */ /* last cacheline: 24 bytes */ }; struct trace_event_raw_rtc_offset_class { struct trace_entry ent; /* 0 8 */ long int offset; /* 8 8 */ int err; /* 16 4 */ char __data[]; /* 20 0 */ /* size: 24, cachelines: 1, members: 4 */ /* padding: 4 */ /* last cacheline: 24 
bytes */ }; struct proc_dir_entry { atomic_t in_use; /* 0 4 */ refcount_t refcnt; /* 4 4 */ struct list_head pde_openers; /* 8 16 */ spinlock_t pde_unload_lock; /* 24 4 */ /* XXX 4 bytes hole, try to pack */ struct completion * pde_unload_completion; /* 32 8 */ const struct inode_operations * proc_iops; /* 40 8 */ union { const struct proc_ops * proc_ops; /* 48 8 */ const struct file_operations * proc_dir_ops; /* 48 8 */ }; /* 48 8 */ const struct dentry_operations * proc_dops; /* 56 8 */ /* --- cacheline 1 boundary (64 bytes) --- */ union { const struct seq_operations * seq_ops; /* 64 8 */ int (*single_show)(struct seq_file *, void *); /* 64 8 */ }; /* 64 8 */ proc_write_t write; /* 72 8 */ void * data; /* 80 8 */ unsigned int state_size; /* 88 4 */ unsigned int low_ino; /* 92 4 */ nlink_t nlink; /* 96 4 */ kuid_t uid; /* 100 4 */ kgid_t gid; /* 104 4 */ /* XXX 4 bytes hole, try to pack */ loff_t size; /* 112 8 */ struct proc_dir_entry * parent; /* 120 8 */ /* --- cacheline 2 boundary (128 bytes) --- */ struct rb_root subdir; /* 128 8 */ struct rb_node subdir_node; /* 136 24 */ char * name; /* 160 8 */ umode_t mode; /* 168 2 */ u8 flags; /* 170 1 */ u8 namelen; /* 171 1 */ char inline_name[]; /* 172 0 */ /* size: 176, cachelines: 3, members: 25 */ /* sum members: 164, holes: 2, sum holes: 8 */ /* padding: 4 */ /* last cacheline: 48 bytes */ }; struct scsi_vpd { struct callback_head rcu; /* 0 16 */ int len; /* 16 4 */ unsigned char data[]; /* 20 0 */ /* size: 24, cachelines: 1, members: 3 */ /* padding: 4 */ /* last cacheline: 24 bytes */ }; struct trace_event_raw_ata_tf_load { struct trace_entry ent; /* 0 8 */ unsigned int ata_port; /* 8 4 */ unsigned char cmd; /* 12 1 */ unsigned char dev; /* 13 1 */ unsigned char lbal; /* 14 1 */ unsigned char lbam; /* 15 1 */ unsigned char lbah; /* 16 1 */ unsigned char nsect; /* 17 1 */ unsigned char feature; /* 18 1 */ unsigned char hob_lbal; /* 19 1 */ unsigned char hob_lbam; /* 20 1 */ unsigned char hob_lbah; /* 21 1 */ unsigned char hob_nsect; /* 22 1 */ unsigned char hob_feature; /* 23 1 */ unsigned char proto; /* 24 1 */ char __data[]; /* 25 0 */ /* size: 28, cachelines: 1, members: 16 */ /* padding: 3 */ /* last cacheline: 28 bytes */ }; struct trace_event_raw_ata_bmdma_status { struct trace_entry ent; /* 0 8 */ unsigned int ata_port; /* 8 4 */ unsigned int tag; /* 12 4 */ unsigned char host_stat; /* 16 1 */ char __data[]; /* 17 0 */ /* size: 20, cachelines: 1, members: 5 */ /* padding: 3 */ /* last cacheline: 20 bytes */ }; struct trace_event_raw_ata_sff_hsm_template { struct trace_entry ent; /* 0 8 */ unsigned int ata_port; /* 8 4 */ unsigned int ata_dev; /* 12 4 */ unsigned int tag; /* 16 4 */ unsigned int qc_flags; /* 20 4 */ unsigned int protocol; /* 24 4 */ unsigned int hsm_state; /* 28 4 */ unsigned char dev_state; /* 32 1 */ char __data[]; /* 33 0 */ /* size: 36, cachelines: 1, members: 9 */ /* padding: 3 */ /* last cacheline: 36 bytes */ }; struct trace_event_raw_ata_sff_template { struct trace_entry ent; /* 0 8 */ unsigned int ata_port; /* 8 4 */ unsigned char hsm_state; /* 12 1 */ char __data[]; /* 13 0 */ /* size: 16, cachelines: 1, members: 4 */ /* padding: 3 */ /* last cacheline: 16 bytes */ }; struct trace_event_raw_btrfs_add_block_group { struct trace_entry ent; /* 0 8 */ u8 fsid[16]; /* 8 16 */ u64 offset; /* 24 8 */ u64 size; /* 32 8 */ u64 flags; /* 40 8 */ u64 bytes_used; /* 48 8 */ u64 bytes_super; /* 56 8 */ /* --- cacheline 1 boundary (64 bytes) --- */ int create; /* 64 4 */ char __data[]; /* 68 0 */ /* size: 72, 
cachelines: 2, members: 9 */ /* padding: 4 */ /* last cacheline: 8 bytes */ }; struct trace_event_raw_btrfs_space_reservation { struct trace_entry ent; /* 0 8 */ u8 fsid[16]; /* 8 16 */ u32 __data_loc_type; /* 24 4 */ /* XXX 4 bytes hole, try to pack */ u64 val; /* 32 8 */ u64 bytes; /* 40 8 */ int reserve; /* 48 4 */ char __data[]; /* 52 0 */ /* size: 56, cachelines: 1, members: 7 */ /* sum members: 48, holes: 1, sum holes: 4 */ /* padding: 4 */ /* last cacheline: 56 bytes */ }; struct trace_event_raw_btrfs_flush_space { struct trace_entry ent; /* 0 8 */ u8 fsid[16]; /* 8 16 */ u64 flags; /* 24 8 */ u64 num_bytes; /* 32 8 */ int state; /* 40 4 */ int ret; /* 44 4 */ bool for_preempt; /* 48 1 */ char __data[]; /* 49 0 */ /* size: 56, cachelines: 1, members: 8 */ /* padding: 7 */ /* last cacheline: 56 bytes */ }; struct trace_event_raw_btrfs_setup_cluster { struct trace_entry ent; /* 0 8 */ u8 fsid[16]; /* 8 16 */ u64 bg_objectid; /* 24 8 */ u64 flags; /* 32 8 */ u64 start; /* 40 8 */ u64 max_size; /* 48 8 */ u64 size; /* 56 8 */ /* --- cacheline 1 boundary (64 bytes) --- */ int bitmap; /* 64 4 */ char __data[]; /* 68 0 */ /* size: 72, cachelines: 2, members: 9 */ /* padding: 4 */ /* last cacheline: 8 bytes */ }; struct trace_event_raw_btrfs_workqueue { struct trace_entry ent; /* 0 8 */ u8 fsid[16]; /* 8 16 */ const void * wq; /* 24 8 */ u32 __data_loc_name; /* 32 4 */ char __data[]; /* 36 0 */ /* size: 40, cachelines: 1, members: 5 */ /* padding: 4 */ /* last cacheline: 40 bytes */ }; struct trace_event_raw_btrfs__qgroup_rsv_data { struct trace_entry ent; /* 0 8 */ u8 fsid[16]; /* 8 16 */ u64 rootid; /* 24 8 */ u64 ino; /* 32 8 */ u64 start; /* 40 8 */ u64 len; /* 48 8 */ u64 reserved; /* 56 8 */ /* --- cacheline 1 boundary (64 bytes) --- */ int op; /* 64 4 */ char __data[]; /* 68 0 */ /* size: 72, cachelines: 2, members: 9 */ /* padding: 4 */ /* last cacheline: 8 bytes */ }; struct trace_event_raw_qgroup_update_reserve { struct trace_entry ent; /* 0 8 */ u8 fsid[16]; /* 8 16 */ u64 qgid; /* 24 8 */ u64 cur_reserved; /* 32 8 */ s64 diff; /* 40 8 */ int type; /* 48 4 */ char __data[]; /* 52 0 */ /* size: 56, cachelines: 1, members: 7 */ /* padding: 4 */ /* last cacheline: 56 bytes */ }; struct trace_event_raw_qgroup_meta_reserve { struct trace_entry ent; /* 0 8 */ u8 fsid[16]; /* 8 16 */ u64 refroot; /* 24 8 */ s64 diff; /* 32 8 */ int type; /* 40 4 */ char __data[]; /* 44 0 */ /* size: 48, cachelines: 1, members: 6 */ /* padding: 4 */ /* last cacheline: 48 bytes */ }; struct trace_event_raw_qgroup_meta_free_all_pertrans { struct trace_entry ent; /* 0 8 */ u8 fsid[16]; /* 8 16 */ u64 refroot; /* 24 8 */ s64 diff; /* 32 8 */ int type; /* 40 4 */ char __data[]; /* 44 0 */ /* size: 48, cachelines: 1, members: 6 */ /* padding: 4 */ /* last cacheline: 48 bytes */ }; struct trace_event_raw_btrfs_set_extent_bit { struct trace_entry ent; /* 0 8 */ u8 fsid[16]; /* 8 16 */ unsigned int owner; /* 24 4 */ /* XXX 4 bytes hole, try to pack */ u64 ino; /* 32 8 */ u64 rootid; /* 40 8 */ u64 start; /* 48 8 */ u64 len; /* 56 8 */ /* --- cacheline 1 boundary (64 bytes) --- */ unsigned int set_bits; /* 64 4 */ char __data[]; /* 68 0 */ /* size: 72, cachelines: 2, members: 9 */ /* sum members: 64, holes: 1, sum holes: 4 */ /* padding: 4 */ /* last cacheline: 8 bytes */ }; struct trace_event_raw_btrfs_clear_extent_bit { struct trace_entry ent; /* 0 8 */ u8 fsid[16]; /* 8 16 */ unsigned int owner; /* 24 4 */ /* XXX 4 bytes hole, try to pack */ u64 ino; /* 32 8 */ u64 rootid; /* 40 8 */ u64 start; /* 48 8 */ u64 
len; /* 56 8 */ /* --- cacheline 1 boundary (64 bytes) --- */ unsigned int clear_bits; /* 64 4 */ char __data[]; /* 68 0 */ /* size: 72, cachelines: 2, members: 9 */ /* sum members: 64, holes: 1, sum holes: 4 */ /* padding: 4 */ /* last cacheline: 8 bytes */ }; struct trace_event_raw_btrfs_sleep_tree_lock { struct trace_entry ent; /* 0 8 */ u8 fsid[16]; /* 8 16 */ u64 block; /* 24 8 */ u64 generation; /* 32 8 */ u64 start_ns; /* 40 8 */ u64 end_ns; /* 48 8 */ u64 diff_ns; /* 56 8 */ /* --- cacheline 1 boundary (64 bytes) --- */ u64 owner; /* 64 8 */ int is_log_tree; /* 72 4 */ char __data[]; /* 76 0 */ /* size: 80, cachelines: 2, members: 10 */ /* padding: 4 */ /* last cacheline: 16 bytes */ }; struct trace_event_raw_btrfs_locking_events { struct trace_entry ent; /* 0 8 */ u8 fsid[16]; /* 8 16 */ u64 block; /* 24 8 */ u64 generation; /* 32 8 */ u64 owner; /* 40 8 */ int is_log_tree; /* 48 4 */ char __data[]; /* 52 0 */ /* size: 56, cachelines: 1, members: 7 */ /* padding: 4 */ /* last cacheline: 56 bytes */ }; struct trace_event_raw_btrfs_raid56_bio { struct trace_entry ent; /* 0 8 */ u8 fsid[16]; /* 8 16 */ u64 full_stripe; /* 24 8 */ u64 physical; /* 32 8 */ u64 devid; /* 40 8 */ u32 offset; /* 48 4 */ u32 len; /* 52 4 */ u8 opf; /* 56 1 */ u8 total_stripes; /* 57 1 */ u8 real_stripes; /* 58 1 */ u8 nr_data; /* 59 1 */ u8 stripe_nr; /* 60 1 */ char __data[]; /* 61 0 */ /* size: 64, cachelines: 1, members: 13 */ /* padding: 3 */ }; struct trace_event_raw_btrfs_insert_one_raid_extent { struct trace_entry ent; /* 0 8 */ u8 fsid[16]; /* 8 16 */ u64 logical; /* 24 8 */ u64 length; /* 32 8 */ int num_stripes; /* 40 4 */ char __data[]; /* 44 0 */ /* size: 48, cachelines: 1, members: 6 */ /* padding: 4 */ /* last cacheline: 48 bytes */ }; struct trace_event_raw_btrfs_extent_map_shrinker_remove_em { struct trace_entry ent; /* 0 8 */ u8 fsid[16]; /* 8 16 */ u64 ino; /* 24 8 */ u64 root_id; /* 32 8 */ u64 start; /* 40 8 */ u64 len; /* 48 8 */ u64 block_start; /* 56 8 */ /* --- cacheline 1 boundary (64 bytes) --- */ u32 flags; /* 64 4 */ char __data[]; /* 68 0 */ /* size: 72, cachelines: 2, members: 9 */ /* padding: 4 */ /* last cacheline: 8 bytes */ }; struct frag { struct list_head list; /* 0 16 */ u32 group; /* 16 4 */ u8 num; /* 20 1 */ u8 rec; /* 21 1 */ u8 map; /* 22 1 */ u8 data[]; /* 23 0 */ /* size: 24, cachelines: 1, members: 6 */ /* padding: 1 */ /* last cacheline: 24 bytes */ }; struct trace_event_raw_thread_noise { struct trace_entry ent; /* 0 8 */ char comm[16]; /* 8 16 */ u64 start; /* 24 8 */ u64 duration; /* 32 8 */ pid_t pid; /* 40 4 */ char __data[]; /* 44 0 */ /* size: 48, cachelines: 1, members: 6 */ /* padding: 4 */ /* last cacheline: 48 bytes */ }; struct trace_event_raw_softirq_noise { struct trace_entry ent; /* 0 8 */ u64 start; /* 8 8 */ u64 duration; /* 16 8 */ int vector; /* 24 4 */ char __data[]; /* 28 0 */ /* size: 32, cachelines: 1, members: 5 */ /* padding: 4 */ /* last cacheline: 32 bytes */ }; struct xsk_buff_pool { struct device * dev; /* 0 8 */ struct net_device * netdev; /* 8 8 */ struct list_head xsk_tx_list; /* 16 16 */ spinlock_t xsk_tx_list_lock; /* 32 4 */ refcount_t users; /* 36 4 */ struct xdp_umem * umem; /* 40 8 */ struct work_struct work; /* 48 32 */ /* --- cacheline 1 boundary (64 bytes) was 16 bytes ago --- */ struct list_head free_list; /* 80 16 */ struct list_head xskb_list; /* 96 16 */ u32 heads_cnt; /* 112 4 */ u16 queue_id; /* 116 2 */ /* XXX 10 bytes hole, try to pack */ /* --- cacheline 2 boundary (128 bytes) --- */ struct xsk_queue * fq 
__attribute__((__aligned__(64))); /* 128 8 */ struct xsk_queue * cq; /* 136 8 */ dma_addr_t * dma_pages; /* 144 8 */ struct xdp_buff_xsk * heads; /* 152 8 */ struct xdp_desc * tx_descs; /* 160 8 */ u64 chunk_mask; /* 168 8 */ u64 addrs_cnt; /* 176 8 */ u32 free_list_cnt; /* 184 4 */ u32 dma_pages_cnt; /* 188 4 */ /* --- cacheline 3 boundary (192 bytes) --- */ u32 free_heads_cnt; /* 192 4 */ u32 headroom; /* 196 4 */ u32 chunk_size; /* 200 4 */ u32 chunk_shift; /* 204 4 */ u32 frame_len; /* 208 4 */ u8 tx_metadata_len; /* 212 1 */ u8 cached_need_wakeup; /* 213 1 */ bool uses_need_wakeup; /* 214 1 */ bool unaligned; /* 215 1 */ bool tx_sw_csum; /* 216 1 */ /* XXX 7 bytes hole, try to pack */ void * addrs; /* 224 8 */ spinlock_t cq_lock; /* 232 4 */ /* XXX 4 bytes hole, try to pack */ struct xdp_buff_xsk * free_heads[]; /* 240 0 */ /* size: 256, cachelines: 4, members: 33 */ /* sum members: 219, holes: 3, sum holes: 21 */ /* padding: 16 */ /* forced alignments: 1, forced holes: 1, sum forced holes: 10 */ } __attribute__((__aligned__(64))); struct ieee80211_regdomain { struct callback_head callback_head; /* 0 16 */ u32 n_reg_rules; /* 16 4 */ char alpha2[3]; /* 20 3 */ /* XXX 1 byte hole, try to pack */ enum nl80211_dfs_regions dfs_region; /* 24 4 */ struct ieee80211_reg_rule reg_rules[]; /* 28 0 */ /* size: 32, cachelines: 1, members: 5 */ /* sum members: 27, holes: 1, sum holes: 1 */ /* padding: 4 */ /* last cacheline: 32 bytes */ }; struct trace_event_raw_handshake_event_class { struct trace_entry ent; /* 0 8 */ const void * req; /* 8 8 */ const void * sk; /* 16 8 */ unsigned int netns_ino; /* 24 4 */ char __data[]; /* 28 0 */ /* size: 32, cachelines: 1, members: 5 */ /* padding: 4 */ /* last cacheline: 32 bytes */ }; struct watch_filter { union { struct callback_head rcu; /* 0 16 */ long unsigned int type_filter[1]; /* 0 8 */ }; /* 0 16 */ u32 nr_filters; /* 16 4 */ struct watch_type_filter filters[]; /* 20 0 */ /* size: 24, cachelines: 1, members: 3 */ /* padding: 4 */ /* last cacheline: 24 bytes */ }; struct landlock_rule { struct rb_node node; /* 0 24 */ union landlock_key key; /* 24 8 */ u32 num_layers; /* 32 4 */ struct landlock_layer layers[]; /* 36 0 */ /* size: 40, cachelines: 1, members: 4 */ /* padding: 4 */ /* last cacheline: 40 bytes */ }; struct ghes_vendor_record_entry { struct work_struct work; /* 0 32 */ int error_severity; /* 32 4 */ char vendor_record[]; /* 36 0 */ /* size: 40, cachelines: 1, members: 3 */ /* padding: 4 */ /* last cacheline: 40 bytes */ }; struct linux_efi_tpm_eventlog { u32 size; /* 0 4 */ u32 final_events_preboot_size; /* 4 4 */ u8 version; /* 8 1 */ u8 log[]; /* 9 0 */ /* size: 12, cachelines: 1, members: 4 */ /* padding: 3 */ /* last cacheline: 12 bytes */ }; struct trace_event_raw_icmp_send { struct trace_entry ent; /* 0 8 */ const void * skbaddr; /* 8 8 */ int type; /* 16 4 */ int code; /* 20 4 */ __u8 saddr[4]; /* 24 4 */ __u8 daddr[4]; /* 28 4 */ __u16 sport; /* 32 2 */ __u16 dport; /* 34 2 */ short unsigned int ulen; /* 36 2 */ char __data[]; /* 38 0 */ /* size: 40, cachelines: 1, members: 10 */ /* padding: 2 */ /* last cacheline: 40 bytes */ }; struct trace_event_raw_ksm_merge_one_page { struct trace_entry ent; /* 0 8 */ long unsigned int pfn; /* 8 8 */ void * rmap_item; /* 16 8 */ void * mm; /* 24 8 */ int err; /* 32 4 */ char __data[]; /* 36 0 */ /* size: 40, cachelines: 1, members: 6 */ /* padding: 4 */ /* last cacheline: 40 bytes */ }; struct trace_event_raw_ksm_merge_with_ksm_page { struct trace_entry ent; /* 0 8 */ void * ksm_page; /* 8 8 
*/ long unsigned int pfn; /* 16 8 */ void * rmap_item; /* 24 8 */ void * mm; /* 32 8 */ int err; /* 40 4 */ char __data[]; /* 44 0 */ /* size: 48, cachelines: 1, members: 7 */ /* padding: 4 */ /* last cacheline: 48 bytes */ }; struct trace_event_raw_ksm_advisor { struct trace_entry ent; /* 0 8 */ s64 scan_time; /* 8 8 */ long unsigned int pages_to_scan; /* 16 8 */ unsigned int cpu_percent; /* 24 4 */ char __data[]; /* 28 0 */ /* size: 32, cachelines: 1, members: 5 */ /* padding: 4 */ /* last cacheline: 32 bytes */ }; struct packet_fanout { possible_net_t net; /* 0 8 */ unsigned int num_members; /* 8 4 */ u32 max_num_members; /* 12 4 */ u16 id; /* 16 2 */ u8 type; /* 18 1 */ u8 flags; /* 19 1 */ /* XXX 4 bytes hole, try to pack */ union { atomic_t rr_cur; /* 24 4 */ struct bpf_prog * bpf_prog; /* 24 8 */ }; /* 24 8 */ struct list_head list; /* 32 16 */ spinlock_t lock; /* 48 4 */ refcount_t sk_ref; /* 52 4 */ /* XXX 8 bytes hole, try to pack */ /* --- cacheline 1 boundary (64 bytes) --- */ struct packet_type prot_hook __attribute__((__aligned__(64))); /* 64 72 */ /* XXX last struct has 1 hole */ /* --- cacheline 2 boundary (128 bytes) was 8 bytes ago --- */ struct sock * arr[]; /* 136 0 */ /* size: 192, cachelines: 3, members: 12 */ /* sum members: 124, holes: 2, sum holes: 12 */ /* padding: 56 */ /* member types with holes: 1, total: 1 */ /* forced alignments: 1, forced holes: 1, sum forced holes: 8 */ } __attribute__((__aligned__(64))); struct trace_event_raw_kmem_cache_alloc { struct trace_entry ent; /* 0 8 */ long unsigned int call_site; /* 8 8 */ const void * ptr; /* 16 8 */ size_t bytes_req; /* 24 8 */ size_t bytes_alloc; /* 32 8 */ long unsigned int gfp_flags; /* 40 8 */ int node; /* 48 4 */ bool accounted; /* 52 1 */ char __data[]; /* 53 0 */ /* size: 56, cachelines: 1, members: 9 */ /* padding: 3 */ /* last cacheline: 56 bytes */ }; struct trace_event_raw_kmalloc { struct trace_entry ent; /* 0 8 */ long unsigned int call_site; /* 8 8 */ const void * ptr; /* 16 8 */ size_t bytes_req; /* 24 8 */ size_t bytes_alloc; /* 32 8 */ long unsigned int gfp_flags; /* 40 8 */ int node; /* 48 4 */ char __data[]; /* 52 0 */ /* size: 56, cachelines: 1, members: 8 */ /* padding: 4 */ /* last cacheline: 56 bytes */ }; struct trace_event_raw_kmem_cache_free { struct trace_entry ent; /* 0 8 */ long unsigned int call_site; /* 8 8 */ const void * ptr; /* 16 8 */ u32 __data_loc_name; /* 24 4 */ char __data[]; /* 28 0 */ /* size: 32, cachelines: 1, members: 5 */ /* padding: 4 */ /* last cacheline: 32 bytes */ }; struct trace_event_raw_mm_page_free { struct trace_entry ent; /* 0 8 */ long unsigned int pfn; /* 8 8 */ unsigned int order; /* 16 4 */ char __data[]; /* 20 0 */ /* size: 24, cachelines: 1, members: 4 */ /* padding: 4 */ /* last cacheline: 24 bytes */ }; struct trace_event_raw_mm_page_alloc { struct trace_entry ent; /* 0 8 */ long unsigned int pfn; /* 8 8 */ unsigned int order; /* 16 4 */ /* XXX 4 bytes hole, try to pack */ long unsigned int gfp_flags; /* 24 8 */ int migratetype; /* 32 4 */ char __data[]; /* 36 0 */ /* size: 40, cachelines: 1, members: 6 */ /* sum members: 32, holes: 1, sum holes: 4 */ /* padding: 4 */ /* last cacheline: 40 bytes */ }; struct trace_event_raw_mm_page { struct trace_entry ent; /* 0 8 */ long unsigned int pfn; /* 8 8 */ unsigned int order; /* 16 4 */ int migratetype; /* 20 4 */ int percpu_refill; /* 24 4 */ char __data[]; /* 28 0 */ /* size: 32, cachelines: 1, members: 6 */ /* padding: 4 */ /* last cacheline: 32 bytes */ }; struct 
trace_event_raw_mm_page_alloc_extfrag { struct trace_entry ent; /* 0 8 */ long unsigned int pfn; /* 8 8 */ int alloc_order; /* 16 4 */ int fallback_order; /* 20 4 */ int alloc_migratetype; /* 24 4 */ int fallback_migratetype; /* 28 4 */ int change_ownership; /* 32 4 */ char __data[]; /* 36 0 */ /* size: 40, cachelines: 1, members: 8 */ /* padding: 4 */ /* last cacheline: 40 bytes */ }; struct trace_event_raw_mm_alloc_contig_migrate_range_info { struct trace_entry ent; /* 0 8 */ long unsigned int start; /* 8 8 */ long unsigned int end; /* 16 8 */ long unsigned int nr_migrated; /* 24 8 */ long unsigned int nr_reclaimed; /* 32 8 */ long unsigned int nr_mapped; /* 40 8 */ int migratetype; /* 48 4 */ char __data[]; /* 52 0 */ /* size: 56, cachelines: 1, members: 8 */ /* padding: 4 */ /* last cacheline: 56 bytes */ }; struct trace_event_raw_task_newtask { struct trace_entry ent; /* 0 8 */ pid_t pid; /* 8 4 */ char comm[16]; /* 12 16 */ /* XXX 4 bytes hole, try to pack */ long unsigned int clone_flags; /* 32 8 */ short int oom_score_adj; /* 40 2 */ char __data[]; /* 42 0 */ /* size: 48, cachelines: 1, members: 6 */ /* sum members: 38, holes: 1, sum holes: 4 */ /* padding: 6 */ /* last cacheline: 48 bytes */ }; struct trace_event_raw_task_rename { struct trace_entry ent; /* 0 8 */ pid_t pid; /* 8 4 */ char oldcomm[16]; /* 12 16 */ char newcomm[16]; /* 28 16 */ short int oom_score_adj; /* 44 2 */ char __data[]; /* 46 0 */ /* size: 48, cachelines: 1, members: 6 */ /* padding: 2 */ /* last cacheline: 48 bytes */ }; struct trace_event_raw_mm_khugepaged_scan_pmd { struct trace_entry ent; /* 0 8 */ struct mm_struct * mm; /* 8 8 */ long unsigned int pfn; /* 16 8 */ bool writable; /* 24 1 */ /* XXX 3 bytes hole, try to pack */ int referenced; /* 28 4 */ int none_or_zero; /* 32 4 */ int status; /* 36 4 */ int unmapped; /* 40 4 */ char __data[]; /* 44 0 */ /* size: 48, cachelines: 1, members: 9 */ /* sum members: 41, holes: 1, sum holes: 3 */ /* padding: 4 */ /* last cacheline: 48 bytes */ }; struct trace_event_raw_mm_collapse_huge_page_swapin { struct trace_entry ent; /* 0 8 */ struct mm_struct * mm; /* 8 8 */ int swapped_in; /* 16 4 */ int referenced; /* 20 4 */ int ret; /* 24 4 */ char __data[]; /* 28 0 */ /* size: 32, cachelines: 1, members: 6 */ /* padding: 4 */ /* last cacheline: 32 bytes */ }; struct trace_event_raw_alloc_vmap_area { struct trace_entry ent; /* 0 8 */ long unsigned int addr; /* 8 8 */ long unsigned int size; /* 16 8 */ long unsigned int align; /* 24 8 */ long unsigned int vstart; /* 32 8 */ long unsigned int vend; /* 40 8 */ int failed; /* 48 4 */ char __data[]; /* 52 0 */ /* size: 56, cachelines: 1, members: 8 */ /* padding: 4 */ /* last cacheline: 56 bytes */ }; struct trace_event_raw_purge_vmap_area_lazy { struct trace_entry ent; /* 0 8 */ long unsigned int start; /* 8 8 */ long unsigned int end; /* 16 8 */ unsigned int npurged; /* 24 4 */ char __data[]; /* 28 0 */ /* size: 32, cachelines: 1, members: 5 */ /* padding: 4 */ /* last cacheline: 32 bytes */ }; struct trace_event_raw_iomap_readpage_class { struct trace_entry ent; /* 0 8 */ dev_t dev; /* 8 4 */ /* XXX 4 bytes hole, try to pack */ u64 ino; /* 16 8 */ int nr_pages; /* 24 4 */ char __data[]; /* 28 0 */ /* size: 32, cachelines: 1, members: 5 */ /* sum members: 24, holes: 1, sum holes: 4 */ /* padding: 4 */ /* last cacheline: 32 bytes */ }; struct trace_event_raw_iomap_dio_rw_begin { struct trace_entry ent; /* 0 8 */ dev_t dev; /* 8 4 */ /* XXX 4 bytes hole, try to pack */ ino_t ino; /* 16 8 */ loff_t isize; /* 24 8 */ loff_t 
pos; /* 32 8 */ size_t count; /* 40 8 */ size_t done_before; /* 48 8 */ int ki_flags; /* 56 4 */ unsigned int dio_flags; /* 60 4 */ /* --- cacheline 1 boundary (64 bytes) --- */ bool aio; /* 64 1 */ char __data[]; /* 65 0 */ /* size: 72, cachelines: 2, members: 11 */ /* sum members: 61, holes: 1, sum holes: 4 */ /* padding: 7 */ /* last cacheline: 8 bytes */ }; struct trace_event_raw_binder_wait_for_work { struct trace_entry ent; /* 0 8 */ bool proc_work; /* 8 1 */ bool transaction_stack; /* 9 1 */ bool thread_todo; /* 10 1 */ char __data[]; /* 11 0 */ /* size: 12, cachelines: 1, members: 5 */ /* padding: 1 */ /* last cacheline: 12 bytes */ }; struct ecryptfs_private_key { u32 key_size; /* 0 4 */ u32 data_len; /* 4 4 */ u8 signature[17]; /* 8 17 */ char pki_type[17]; /* 25 17 */ u8 data[]; /* 42 0 */ /* size: 44, cachelines: 1, members: 5 */ /* padding: 2 */ /* last cacheline: 44 bytes */ }; struct trace_event_raw_scsi_cmd_done_timeout_template { struct trace_entry ent; /* 0 8 */ unsigned int host_no; /* 8 4 */ unsigned int channel; /* 12 4 */ unsigned int id; /* 16 4 */ unsigned int lun; /* 20 4 */ int result; /* 24 4 */ unsigned int opcode; /* 28 4 */ unsigned int cmd_len; /* 32 4 */ int driver_tag; /* 36 4 */ int scheduler_tag; /* 40 4 */ unsigned int data_sglen; /* 44 4 */ unsigned int prot_sglen; /* 48 4 */ unsigned char prot_op; /* 52 1 */ /* XXX 3 bytes hole, try to pack */ u32 __data_loc_cmnd; /* 56 4 */ u8 sense_key; /* 60 1 */ u8 asc; /* 61 1 */ u8 ascq; /* 62 1 */ char __data[]; /* 63 0 */ /* size: 64, cachelines: 1, members: 18 */ /* sum members: 60, holes: 1, sum holes: 3 */ /* padding: 1 */ }; struct trace_event_raw_cpu_idle_miss { struct trace_entry ent; /* 0 8 */ u32 cpu_id; /* 8 4 */ u32 state; /* 12 4 */ bool below; /* 16 1 */ char __data[]; /* 17 0 */ /* size: 20, cachelines: 1, members: 5 */ /* padding: 3 */ /* last cacheline: 20 bytes */ }; struct trace_event_raw_suspend_resume { struct trace_entry ent; /* 0 8 */ const char * action; /* 8 8 */ int val; /* 16 4 */ bool start; /* 20 1 */ char __data[]; /* 21 0 */ /* size: 24, cachelines: 1, members: 5 */ /* padding: 3 */ /* last cacheline: 24 bytes */ }; struct trace_event_raw_cma_alloc_start { struct trace_entry ent; /* 0 8 */ u32 __data_loc_name; /* 8 4 */ /* XXX 4 bytes hole, try to pack */ long unsigned int count; /* 16 8 */ unsigned int align; /* 24 4 */ char __data[]; /* 28 0 */ /* size: 32, cachelines: 1, members: 5 */ /* sum members: 24, holes: 1, sum holes: 4 */ /* padding: 4 */ /* last cacheline: 32 bytes */ }; struct trace_event_raw_cma_alloc_busy_retry { struct trace_entry ent; /* 0 8 */ u32 __data_loc_name; /* 8 4 */ /* XXX 4 bytes hole, try to pack */ long unsigned int pfn; /* 16 8 */ const struct page * page; /* 24 8 */ long unsigned int count; /* 32 8 */ unsigned int align; /* 40 4 */ char __data[]; /* 44 0 */ /* size: 48, cachelines: 1, members: 7 */ /* sum members: 40, holes: 1, sum holes: 4 */ /* padding: 4 */ /* last cacheline: 48 bytes */ }; struct trace_event_raw_scsi_prepare_zone_append { struct trace_entry ent; /* 0 8 */ unsigned int host_no; /* 8 4 */ unsigned int channel; /* 12 4 */ unsigned int id; /* 16 4 */ unsigned int lun; /* 20 4 */ sector_t lba; /* 24 8 */ unsigned int wp_offset; /* 32 4 */ char __data[]; /* 36 0 */ /* size: 40, cachelines: 1, members: 8 */ /* padding: 4 */ /* last cacheline: 40 bytes */ }; struct trace_event_raw_hyperv_nested_flush_guest_mapping { struct trace_entry ent; /* 0 8 */ u64 as; /* 8 8 */ int ret; /* 16 4 */ char __data[]; /* 20 0 */ /* size: 24, cachelines: 1, 
members: 4 */ /* padding: 4 */ /* last cacheline: 24 bytes */ }; struct trace_event_raw_hyperv_nested_flush_guest_mapping_range { struct trace_entry ent; /* 0 8 */ u64 as; /* 8 8 */ int ret; /* 16 4 */ char __data[]; /* 20 0 */ /* size: 24, cachelines: 1, members: 4 */ /* padding: 4 */ /* last cacheline: 24 bytes */ }; struct trace_event_raw_mm_filemap_op_page_cache { struct trace_entry ent; /* 0 8 */ long unsigned int pfn; /* 8 8 */ long unsigned int i_ino; /* 16 8 */ long unsigned int index; /* 24 8 */ dev_t s_dev; /* 32 4 */ unsigned char order; /* 36 1 */ char __data[]; /* 37 0 */ /* size: 40, cachelines: 1, members: 7 */ /* padding: 3 */ /* last cacheline: 40 bytes */ }; struct trace_event_raw_file_check_and_advance_wb_err { struct trace_entry ent; /* 0 8 */ struct file * file; /* 8 8 */ long unsigned int i_ino; /* 16 8 */ dev_t s_dev; /* 24 4 */ errseq_t old; /* 28 4 */ errseq_t new; /* 32 4 */ char __data[]; /* 36 0 */ /* size: 40, cachelines: 1, members: 7 */ /* padding: 4 */ /* last cacheline: 40 bytes */ }; struct trace_event_raw_damos_before_apply { struct trace_entry ent; /* 0 8 */ unsigned int context_idx; /* 8 4 */ unsigned int scheme_idx; /* 12 4 */ long unsigned int target_idx; /* 16 8 */ long unsigned int start; /* 24 8 */ long unsigned int end; /* 32 8 */ unsigned int nr_accesses; /* 40 4 */ unsigned int age; /* 44 4 */ unsigned int nr_regions; /* 48 4 */ char __data[]; /* 52 0 */ /* size: 56, cachelines: 1, members: 10 */ /* padding: 4 */ /* last cacheline: 56 bytes */ }; struct trace_event_raw_alarmtimer_suspend { struct trace_entry ent; /* 0 8 */ s64 expires; /* 8 8 */ unsigned char alarm_type; /* 16 1 */ char __data[]; /* 17 0 */ /* size: 24, cachelines: 1, members: 4 */ /* padding: 7 */ /* last cacheline: 24 bytes */ }; struct trace_event_raw_ext4_other_inode_update_time { struct trace_entry ent; /* 0 8 */ dev_t dev; /* 8 4 */ /* XXX 4 bytes hole, try to pack */ ino_t ino; /* 16 8 */ ino_t orig_ino; /* 24 8 */ uid_t uid; /* 32 4 */ gid_t gid; /* 36 4 */ __u16 mode; /* 40 2 */ char __data[]; /* 42 0 */ /* size: 48, cachelines: 1, members: 8 */ /* sum members: 38, holes: 1, sum holes: 4 */ /* padding: 6 */ /* last cacheline: 48 bytes */ }; struct trace_event_raw_ext4_free_inode { struct trace_entry ent; /* 0 8 */ dev_t dev; /* 8 4 */ /* XXX 4 bytes hole, try to pack */ ino_t ino; /* 16 8 */ uid_t uid; /* 24 4 */ gid_t gid; /* 28 4 */ __u64 blocks; /* 32 8 */ __u16 mode; /* 40 2 */ char __data[]; /* 42 0 */ /* size: 48, cachelines: 1, members: 8 */ /* sum members: 38, holes: 1, sum holes: 4 */ /* padding: 6 */ /* last cacheline: 48 bytes */ }; struct trace_event_raw_ext4_request_inode { struct trace_entry ent; /* 0 8 */ dev_t dev; /* 8 4 */ /* XXX 4 bytes hole, try to pack */ ino_t dir; /* 16 8 */ __u16 mode; /* 24 2 */ char __data[]; /* 26 0 */ /* size: 32, cachelines: 1, members: 5 */ /* sum members: 22, holes: 1, sum holes: 4 */ /* padding: 6 */ /* last cacheline: 32 bytes */ }; struct trace_event_raw_ext4_allocate_inode { struct trace_entry ent; /* 0 8 */ dev_t dev; /* 8 4 */ /* XXX 4 bytes hole, try to pack */ ino_t ino; /* 16 8 */ ino_t dir; /* 24 8 */ __u16 mode; /* 32 2 */ char __data[]; /* 34 0 */ /* size: 40, cachelines: 1, members: 6 */ /* sum members: 30, holes: 1, sum holes: 4 */ /* padding: 6 */ /* last cacheline: 40 bytes */ }; struct trace_event_raw_ext4_evict_inode { struct trace_entry ent; /* 0 8 */ dev_t dev; /* 8 4 */ /* XXX 4 bytes hole, try to pack */ ino_t ino; /* 16 8 */ int nlink; /* 24 4 */ char __data[]; /* 28 0 */ /* size: 32, cachelines: 1, 
members: 5 */ /* sum members: 24, holes: 1, sum holes: 4 */ /* padding: 4 */ /* last cacheline: 32 bytes */ }; struct trace_event_raw_ext4_drop_inode { struct trace_entry ent; /* 0 8 */ dev_t dev; /* 8 4 */ /* XXX 4 bytes hole, try to pack */ ino_t ino; /* 16 8 */ int drop; /* 24 4 */ char __data[]; /* 28 0 */ /* size: 32, cachelines: 1, members: 5 */ /* sum members: 24, holes: 1, sum holes: 4 */ /* padding: 4 */ /* last cacheline: 32 bytes */ }; struct trace_event_raw_ext4__write_begin { struct trace_entry ent; /* 0 8 */ dev_t dev; /* 8 4 */ /* XXX 4 bytes hole, try to pack */ ino_t ino; /* 16 8 */ loff_t pos; /* 24 8 */ unsigned int len; /* 32 4 */ char __data[]; /* 36 0 */ /* size: 40, cachelines: 1, members: 6 */ /* sum members: 32, holes: 1, sum holes: 4 */ /* padding: 4 */ /* last cacheline: 40 bytes */ }; struct trace_event_raw_ext4_writepages { struct trace_entry ent; /* 0 8 */ dev_t dev; /* 8 4 */ /* XXX 4 bytes hole, try to pack */ ino_t ino; /* 16 8 */ long int nr_to_write; /* 24 8 */ long int pages_skipped; /* 32 8 */ loff_t range_start; /* 40 8 */ loff_t range_end; /* 48 8 */ long unsigned int writeback_index; /* 56 8 */ /* --- cacheline 1 boundary (64 bytes) --- */ int sync_mode; /* 64 4 */ char for_kupdate; /* 68 1 */ char range_cyclic; /* 69 1 */ char __data[]; /* 70 0 */ /* size: 72, cachelines: 2, members: 12 */ /* sum members: 66, holes: 1, sum holes: 4 */ /* padding: 2 */ /* last cacheline: 8 bytes */ }; struct trace_event_raw_ext4_da_write_pages { struct trace_entry ent; /* 0 8 */ dev_t dev; /* 8 4 */ /* XXX 4 bytes hole, try to pack */ ino_t ino; /* 16 8 */ long unsigned int first_page; /* 24 8 */ long int nr_to_write; /* 32 8 */ int sync_mode; /* 40 4 */ char __data[]; /* 44 0 */ /* size: 48, cachelines: 1, members: 7 */ /* sum members: 40, holes: 1, sum holes: 4 */ /* padding: 4 */ /* last cacheline: 48 bytes */ }; struct trace_event_raw_ext4_writepages_result { struct trace_entry ent; /* 0 8 */ dev_t dev; /* 8 4 */ /* XXX 4 bytes hole, try to pack */ ino_t ino; /* 16 8 */ int ret; /* 24 4 */ int pages_written; /* 28 4 */ long int pages_skipped; /* 32 8 */ long unsigned int writeback_index; /* 40 8 */ int sync_mode; /* 48 4 */ char __data[]; /* 52 0 */ /* size: 56, cachelines: 1, members: 9 */ /* sum members: 48, holes: 1, sum holes: 4 */ /* padding: 4 */ /* last cacheline: 56 bytes */ }; struct trace_event_raw_ext4__mb_new_pa { struct trace_entry ent; /* 0 8 */ dev_t dev; /* 8 4 */ /* XXX 4 bytes hole, try to pack */ ino_t ino; /* 16 8 */ __u64 pa_pstart; /* 24 8 */ __u64 pa_lstart; /* 32 8 */ __u32 pa_len; /* 40 4 */ char __data[]; /* 44 0 */ /* size: 48, cachelines: 1, members: 7 */ /* sum members: 40, holes: 1, sum holes: 4 */ /* padding: 4 */ /* last cacheline: 48 bytes */ }; struct trace_event_raw_ext4_mb_release_inode_pa { struct trace_entry ent; /* 0 8 */ dev_t dev; /* 8 4 */ /* XXX 4 bytes hole, try to pack */ ino_t ino; /* 16 8 */ __u64 block; /* 24 8 */ __u32 count; /* 32 4 */ char __data[]; /* 36 0 */ /* size: 40, cachelines: 1, members: 6 */ /* sum members: 32, holes: 1, sum holes: 4 */ /* padding: 4 */ /* last cacheline: 40 bytes */ }; struct trace_event_raw_ext4_mb_release_group_pa { struct trace_entry ent; /* 0 8 */ dev_t dev; /* 8 4 */ /* XXX 4 bytes hole, try to pack */ __u64 pa_pstart; /* 16 8 */ __u32 pa_len; /* 24 4 */ char __data[]; /* 28 0 */ /* size: 32, cachelines: 1, members: 5 */ /* sum members: 24, holes: 1, sum holes: 4 */ /* padding: 4 */ /* last cacheline: 32 bytes */ }; struct trace_event_raw_ext4_discard_preallocations { struct 
trace_entry ent; /* 0 8 */ dev_t dev; /* 8 4 */ /* XXX 4 bytes hole, try to pack */ ino_t ino; /* 16 8 */ unsigned int len; /* 24 4 */ char __data[]; /* 28 0 */ /* size: 32, cachelines: 1, members: 5 */ /* sum members: 24, holes: 1, sum holes: 4 */ /* padding: 4 */ /* last cacheline: 32 bytes */ }; struct trace_event_raw_ext4_request_blocks { struct trace_entry ent; /* 0 8 */ dev_t dev; /* 8 4 */ /* XXX 4 bytes hole, try to pack */ ino_t ino; /* 16 8 */ unsigned int len; /* 24 4 */ __u32 logical; /* 28 4 */ __u32 lleft; /* 32 4 */ __u32 lright; /* 36 4 */ __u64 goal; /* 40 8 */ __u64 pleft; /* 48 8 */ __u64 pright; /* 56 8 */ /* --- cacheline 1 boundary (64 bytes) --- */ unsigned int flags; /* 64 4 */ char __data[]; /* 68 0 */ /* size: 72, cachelines: 2, members: 12 */ /* sum members: 64, holes: 1, sum holes: 4 */ /* padding: 4 */ /* last cacheline: 8 bytes */ }; struct trace_event_raw_ext4_allocate_blocks { struct trace_entry ent; /* 0 8 */ dev_t dev; /* 8 4 */ /* XXX 4 bytes hole, try to pack */ ino_t ino; /* 16 8 */ __u64 block; /* 24 8 */ unsigned int len; /* 32 4 */ __u32 logical; /* 36 4 */ __u32 lleft; /* 40 4 */ __u32 lright; /* 44 4 */ __u64 goal; /* 48 8 */ __u64 pleft; /* 56 8 */ /* --- cacheline 1 boundary (64 bytes) --- */ __u64 pright; /* 64 8 */ unsigned int flags; /* 72 4 */ char __data[]; /* 76 0 */ /* size: 80, cachelines: 2, members: 13 */ /* sum members: 72, holes: 1, sum holes: 4 */ /* padding: 4 */ /* last cacheline: 16 bytes */ }; struct trace_event_raw_ext4_free_blocks { struct trace_entry ent; /* 0 8 */ dev_t dev; /* 8 4 */ /* XXX 4 bytes hole, try to pack */ ino_t ino; /* 16 8 */ __u64 block; /* 24 8 */ long unsigned int count; /* 32 8 */ int flags; /* 40 4 */ __u16 mode; /* 44 2 */ char __data[]; /* 46 0 */ /* size: 48, cachelines: 1, members: 8 */ /* sum members: 42, holes: 1, sum holes: 4 */ /* padding: 2 */ /* last cacheline: 48 bytes */ }; struct trace_event_raw_ext4_sync_file_enter { struct trace_entry ent; /* 0 8 */ dev_t dev; /* 8 4 */ /* XXX 4 bytes hole, try to pack */ ino_t ino; /* 16 8 */ ino_t parent; /* 24 8 */ int datasync; /* 32 4 */ char __data[]; /* 36 0 */ /* size: 40, cachelines: 1, members: 6 */ /* sum members: 32, holes: 1, sum holes: 4 */ /* padding: 4 */ /* last cacheline: 40 bytes */ }; struct trace_event_raw_ext4_sync_file_exit { struct trace_entry ent; /* 0 8 */ dev_t dev; /* 8 4 */ /* XXX 4 bytes hole, try to pack */ ino_t ino; /* 16 8 */ int ret; /* 24 4 */ char __data[]; /* 28 0 */ /* size: 32, cachelines: 1, members: 5 */ /* sum members: 24, holes: 1, sum holes: 4 */ /* padding: 4 */ /* last cacheline: 32 bytes */ }; struct trace_event_raw_ext4_alloc_da_blocks { struct trace_entry ent; /* 0 8 */ dev_t dev; /* 8 4 */ /* XXX 4 bytes hole, try to pack */ ino_t ino; /* 16 8 */ unsigned int data_blocks; /* 24 4 */ char __data[]; /* 28 0 */ /* size: 32, cachelines: 1, members: 5 */ /* sum members: 24, holes: 1, sum holes: 4 */ /* padding: 4 */ /* last cacheline: 32 bytes */ }; struct trace_event_raw_ext4_mballoc_alloc { struct trace_entry ent; /* 0 8 */ dev_t dev; /* 8 4 */ /* XXX 4 bytes hole, try to pack */ ino_t ino; /* 16 8 */ __u32 orig_logical; /* 24 4 */ int orig_start; /* 28 4 */ __u32 orig_group; /* 32 4 */ int orig_len; /* 36 4 */ __u32 goal_logical; /* 40 4 */ int goal_start; /* 44 4 */ __u32 goal_group; /* 48 4 */ int goal_len; /* 52 4 */ __u32 result_logical; /* 56 4 */ int result_start; /* 60 4 */ /* --- cacheline 1 boundary (64 bytes) --- */ __u32 result_group; /* 64 4 */ int result_len; /* 68 4 */ __u16 found; /* 72 2 */ 
__u16 groups; /* 74 2 */ __u16 buddy; /* 76 2 */ __u16 flags; /* 78 2 */ __u16 tail; /* 80 2 */ __u8 cr; /* 82 1 */ char __data[]; /* 83 0 */ /* size: 88, cachelines: 2, members: 22 */ /* sum members: 79, holes: 1, sum holes: 4 */ /* padding: 5 */ /* last cacheline: 24 bytes */ }; struct trace_event_raw_ext4__mballoc { struct trace_entry ent; /* 0 8 */ dev_t dev; /* 8 4 */ /* XXX 4 bytes hole, try to pack */ ino_t ino; /* 16 8 */ int result_start; /* 24 4 */ __u32 result_group; /* 28 4 */ int result_len; /* 32 4 */ char __data[]; /* 36 0 */ /* size: 40, cachelines: 1, members: 7 */ /* sum members: 32, holes: 1, sum holes: 4 */ /* padding: 4 */ /* last cacheline: 40 bytes */ }; struct trace_event_raw_ext4_forget { struct trace_entry ent; /* 0 8 */ dev_t dev; /* 8 4 */ /* XXX 4 bytes hole, try to pack */ ino_t ino; /* 16 8 */ __u64 block; /* 24 8 */ int is_metadata; /* 32 4 */ __u16 mode; /* 36 2 */ char __data[]; /* 38 0 */ /* size: 40, cachelines: 1, members: 7 */ /* sum members: 34, holes: 1, sum holes: 4 */ /* padding: 2 */ /* last cacheline: 40 bytes */ }; struct trace_event_raw_ext4_da_update_reserve_space { struct trace_entry ent; /* 0 8 */ dev_t dev; /* 8 4 */ /* XXX 4 bytes hole, try to pack */ ino_t ino; /* 16 8 */ __u64 i_blocks; /* 24 8 */ int used_blocks; /* 32 4 */ int reserved_data_blocks; /* 36 4 */ int quota_claim; /* 40 4 */ __u16 mode; /* 44 2 */ char __data[]; /* 46 0 */ /* size: 48, cachelines: 1, members: 9 */ /* sum members: 42, holes: 1, sum holes: 4 */ /* padding: 2 */ /* last cacheline: 48 bytes */ }; struct trace_event_raw_ext4_da_reserve_space { struct trace_entry ent; /* 0 8 */ dev_t dev; /* 8 4 */ /* XXX 4 bytes hole, try to pack */ ino_t ino; /* 16 8 */ __u64 i_blocks; /* 24 8 */ int reserved_data_blocks; /* 32 4 */ __u16 mode; /* 36 2 */ char __data[]; /* 38 0 */ /* size: 40, cachelines: 1, members: 7 */ /* sum members: 34, holes: 1, sum holes: 4 */ /* padding: 2 */ /* last cacheline: 40 bytes */ }; struct trace_event_raw_ext4_da_release_space { struct trace_entry ent; /* 0 8 */ dev_t dev; /* 8 4 */ /* XXX 4 bytes hole, try to pack */ ino_t ino; /* 16 8 */ __u64 i_blocks; /* 24 8 */ int freed_blocks; /* 32 4 */ int reserved_data_blocks; /* 36 4 */ __u16 mode; /* 40 2 */ char __data[]; /* 42 0 */ /* size: 48, cachelines: 1, members: 8 */ /* sum members: 38, holes: 1, sum holes: 4 */ /* padding: 6 */ /* last cacheline: 48 bytes */ }; struct trace_event_raw_ext4_read_block_bitmap_load { struct trace_entry ent; /* 0 8 */ dev_t dev; /* 8 4 */ __u32 group; /* 12 4 */ bool prefetch; /* 16 1 */ char __data[]; /* 17 0 */ /* size: 20, cachelines: 1, members: 5 */ /* padding: 3 */ /* last cacheline: 20 bytes */ }; struct trace_event_raw_ext4__fallocate_mode { struct trace_entry ent; /* 0 8 */ dev_t dev; /* 8 4 */ /* XXX 4 bytes hole, try to pack */ ino_t ino; /* 16 8 */ loff_t offset; /* 24 8 */ loff_t len; /* 32 8 */ int mode; /* 40 4 */ char __data[]; /* 44 0 */ /* size: 48, cachelines: 1, members: 7 */ /* sum members: 40, holes: 1, sum holes: 4 */ /* padding: 4 */ /* last cacheline: 48 bytes */ }; struct trace_event_raw_ext4_unlink_exit { struct trace_entry ent; /* 0 8 */ dev_t dev; /* 8 4 */ /* XXX 4 bytes hole, try to pack */ ino_t ino; /* 16 8 */ int ret; /* 24 4 */ char __data[]; /* 28 0 */ /* size: 32, cachelines: 1, members: 5 */ /* sum members: 24, holes: 1, sum holes: 4 */ /* padding: 4 */ /* last cacheline: 32 bytes */ }; struct trace_event_raw_ext4__map_blocks_enter { struct trace_entry ent; /* 0 8 */ dev_t dev; /* 8 4 */ /* XXX 4 bytes hole, try to pack */ 
ino_t ino; /* 16 8 */ ext4_lblk_t lblk; /* 24 4 */ unsigned int len; /* 28 4 */ unsigned int flags; /* 32 4 */ char __data[]; /* 36 0 */ /* size: 40, cachelines: 1, members: 7 */ /* sum members: 32, holes: 1, sum holes: 4 */ /* padding: 4 */ /* last cacheline: 40 bytes */ }; struct trace_event_raw_ext4_ext_load_extent { struct trace_entry ent; /* 0 8 */ dev_t dev; /* 8 4 */ /* XXX 4 bytes hole, try to pack */ ino_t ino; /* 16 8 */ ext4_fsblk_t pblk; /* 24 8 */ ext4_lblk_t lblk; /* 32 4 */ char __data[]; /* 36 0 */ /* size: 40, cachelines: 1, members: 6 */ /* sum members: 32, holes: 1, sum holes: 4 */ /* padding: 4 */ /* last cacheline: 40 bytes */ }; struct trace_event_raw_ext4_journal_start_reserved { struct trace_entry ent; /* 0 8 */ dev_t dev; /* 8 4 */ /* XXX 4 bytes hole, try to pack */ long unsigned int ip; /* 16 8 */ int blocks; /* 24 4 */ char __data[]; /* 28 0 */ /* size: 32, cachelines: 1, members: 5 */ /* sum members: 24, holes: 1, sum holes: 4 */ /* padding: 4 */ /* last cacheline: 32 bytes */ }; struct trace_event_raw_ext4_ext_show_extent { struct trace_entry ent; /* 0 8 */ dev_t dev; /* 8 4 */ /* XXX 4 bytes hole, try to pack */ ino_t ino; /* 16 8 */ ext4_fsblk_t pblk; /* 24 8 */ ext4_lblk_t lblk; /* 32 4 */ short unsigned int len; /* 36 2 */ char __data[]; /* 38 0 */ /* size: 40, cachelines: 1, members: 7 */ /* sum members: 34, holes: 1, sum holes: 4 */ /* padding: 2 */ /* last cacheline: 40 bytes */ }; struct trace_event_raw_ext4_ext_remove_space { struct trace_entry ent; /* 0 8 */ dev_t dev; /* 8 4 */ /* XXX 4 bytes hole, try to pack */ ino_t ino; /* 16 8 */ ext4_lblk_t start; /* 24 4 */ ext4_lblk_t end; /* 28 4 */ int depth; /* 32 4 */ char __data[]; /* 36 0 */ /* size: 40, cachelines: 1, members: 7 */ /* sum members: 32, holes: 1, sum holes: 4 */ /* padding: 4 */ /* last cacheline: 40 bytes */ }; struct trace_event_raw_ext4_ext_remove_space_done { struct trace_entry ent; /* 0 8 */ dev_t dev; /* 8 4 */ /* XXX 4 bytes hole, try to pack */ ino_t ino; /* 16 8 */ ext4_lblk_t start; /* 24 4 */ ext4_lblk_t end; /* 28 4 */ int depth; /* 32 4 */ /* XXX 4 bytes hole, try to pack */ ext4_fsblk_t pc_pclu; /* 40 8 */ ext4_lblk_t pc_lblk; /* 48 4 */ int pc_state; /* 52 4 */ short unsigned int eh_entries; /* 56 2 */ char __data[]; /* 58 0 */ /* size: 64, cachelines: 1, members: 11 */ /* sum members: 50, holes: 2, sum holes: 8 */ /* padding: 6 */ }; struct trace_event_raw_ext4__es_extent { struct trace_entry ent; /* 0 8 */ dev_t dev; /* 8 4 */ /* XXX 4 bytes hole, try to pack */ ino_t ino; /* 16 8 */ ext4_lblk_t lblk; /* 24 4 */ ext4_lblk_t len; /* 28 4 */ ext4_fsblk_t pblk; /* 32 8 */ char status; /* 40 1 */ char __data[]; /* 41 0 */ /* size: 48, cachelines: 1, members: 8 */ /* sum members: 37, holes: 1, sum holes: 4 */ /* padding: 7 */ /* last cacheline: 48 bytes */ }; struct trace_event_raw_ext4_es_find_extent_range_enter { struct trace_entry ent; /* 0 8 */ dev_t dev; /* 8 4 */ /* XXX 4 bytes hole, try to pack */ ino_t ino; /* 16 8 */ ext4_lblk_t lblk; /* 24 4 */ char __data[]; /* 28 0 */ /* size: 32, cachelines: 1, members: 5 */ /* sum members: 24, holes: 1, sum holes: 4 */ /* padding: 4 */ /* last cacheline: 32 bytes */ }; struct trace_event_raw_ext4_es_find_extent_range_exit { struct trace_entry ent; /* 0 8 */ dev_t dev; /* 8 4 */ /* XXX 4 bytes hole, try to pack */ ino_t ino; /* 16 8 */ ext4_lblk_t lblk; /* 24 4 */ ext4_lblk_t len; /* 28 4 */ ext4_fsblk_t pblk; /* 32 8 */ char status; /* 40 1 */ char __data[]; /* 41 0 */ /* size: 48, cachelines: 1, members: 8 */ /* sum members: 
37, holes: 1, sum holes: 4 */ /* padding: 7 */ /* last cacheline: 48 bytes */ }; struct trace_event_raw_ext4_es_lookup_extent_enter { struct trace_entry ent; /* 0 8 */ dev_t dev; /* 8 4 */ /* XXX 4 bytes hole, try to pack */ ino_t ino; /* 16 8 */ ext4_lblk_t lblk; /* 24 4 */ char __data[]; /* 28 0 */ /* size: 32, cachelines: 1, members: 5 */ /* sum members: 24, holes: 1, sum holes: 4 */ /* padding: 4 */ /* last cacheline: 32 bytes */ }; struct trace_event_raw_ext4_es_insert_delayed_block { struct trace_entry ent; /* 0 8 */ dev_t dev; /* 8 4 */ /* XXX 4 bytes hole, try to pack */ ino_t ino; /* 16 8 */ ext4_lblk_t lblk; /* 24 4 */ ext4_lblk_t len; /* 28 4 */ ext4_fsblk_t pblk; /* 32 8 */ char status; /* 40 1 */ bool allocated; /* 41 1 */ char __data[]; /* 42 0 */ /* size: 48, cachelines: 1, members: 9 */ /* sum members: 38, holes: 1, sum holes: 4 */ /* padding: 6 */ /* last cacheline: 48 bytes */ }; struct trace_event_raw_ext4_error { struct trace_entry ent; /* 0 8 */ dev_t dev; /* 8 4 */ /* XXX 4 bytes hole, try to pack */ const char * function; /* 16 8 */ unsigned int line; /* 24 4 */ char __data[]; /* 28 0 */ /* size: 32, cachelines: 1, members: 5 */ /* sum members: 24, holes: 1, sum holes: 4 */ /* padding: 4 */ /* last cacheline: 32 bytes */ }; struct trace_event_raw_ext4_fc_track_range { struct trace_entry ent; /* 0 8 */ dev_t dev; /* 8 4 */ tid_t t_tid; /* 12 4 */ ino_t i_ino; /* 16 8 */ tid_t i_sync_tid; /* 24 4 */ /* XXX 4 bytes hole, try to pack */ long int start; /* 32 8 */ long int end; /* 40 8 */ int error; /* 48 4 */ char __data[]; /* 52 0 */ /* size: 56, cachelines: 1, members: 9 */ /* sum members: 48, holes: 1, sum holes: 4 */ /* padding: 4 */ /* last cacheline: 56 bytes */ }; struct trace_event_raw_ext4_update_sb { struct trace_entry ent; /* 0 8 */ dev_t dev; /* 8 4 */ /* XXX 4 bytes hole, try to pack */ ext4_fsblk_t fsblk; /* 16 8 */ unsigned int flags; /* 24 4 */ char __data[]; /* 28 0 */ /* size: 32, cachelines: 1, members: 5 */ /* sum members: 24, holes: 1, sum holes: 4 */ /* padding: 4 */ /* last cacheline: 32 bytes */ }; struct trace_event_raw_qi_submit { struct trace_entry ent; /* 0 8 */ u64 qw0; /* 8 8 */ u64 qw1; /* 16 8 */ u64 qw2; /* 24 8 */ u64 qw3; /* 32 8 */ u32 __data_loc_iommu; /* 40 4 */ char __data[]; /* 44 0 */ /* size: 48, cachelines: 1, members: 7 */ /* padding: 4 */ /* last cacheline: 48 bytes */ }; struct trace_event_raw_prq_report { struct trace_entry ent; /* 0 8 */ u64 dw0; /* 8 8 */ u64 dw1; /* 16 8 */ u64 dw2; /* 24 8 */ u64 dw3; /* 32 8 */ long unsigned int seq; /* 40 8 */ u32 __data_loc_iommu; /* 48 4 */ u32 __data_loc_dev; /* 52 4 */ u32 __data_loc_buff; /* 56 4 */ char __data[]; /* 60 0 */ /* size: 64, cachelines: 1, members: 10 */ /* padding: 4 */ }; struct trace_event_raw_mctp_key_acquire { struct trace_entry ent; /* 0 8 */ __u8 paddr; /* 8 1 */ __u8 laddr; /* 9 1 */ __u8 tag; /* 10 1 */ char __data[]; /* 11 0 */ /* size: 12, cachelines: 1, members: 5 */ /* padding: 1 */ /* last cacheline: 12 bytes */ }; struct name_cache_entry { struct btrfs_lru_cache_entry entry; /* 0 48 */ u64 parent_ino; /* 48 8 */ u64 parent_gen; /* 56 8 */ /* --- cacheline 1 boundary (64 bytes) --- */ int ret; /* 64 4 */ int need_later_update; /* 68 4 */ int name_len; /* 72 4 */ char name[]; /* 76 0 */ /* size: 80, cachelines: 2, members: 7 */ /* padding: 4 */ /* last cacheline: 16 bytes */ }; struct trace_event_raw_devlink_health_reporter_state_update { struct trace_entry ent; /* 0 8 */ u32 __data_loc_bus_name; /* 8 4 */ u32 __data_loc_dev_name; /* 12 4 */ u32 
__data_loc_driver_name; /* 16 4 */ u32 __data_loc_reporter_name; /* 20 4 */ u8 new_state; /* 24 1 */ char __data[]; /* 25 0 */ /* size: 28, cachelines: 1, members: 7 */ /* padding: 3 */ /* last cacheline: 28 bytes */ }; struct workqueue_struct { struct list_head pwqs; /* 0 16 */ struct list_head list; /* 16 16 */ struct mutex mutex; /* 32 32 */ /* --- cacheline 1 boundary (64 bytes) --- */ int work_color; /* 64 4 */ int flush_color; /* 68 4 */ atomic_t nr_pwqs_to_flush; /* 72 4 */ /* XXX 4 bytes hole, try to pack */ struct wq_flusher * first_flusher; /* 80 8 */ struct list_head flusher_queue; /* 88 16 */ struct list_head flusher_overflow; /* 104 16 */ struct list_head maydays; /* 120 16 */ /* --- cacheline 2 boundary (128 bytes) was 8 bytes ago --- */ struct worker * rescuer; /* 136 8 */ int nr_drainers; /* 144 4 */ int max_active; /* 148 4 */ int min_active; /* 152 4 */ int saved_max_active; /* 156 4 */ int saved_min_active; /* 160 4 */ /* XXX 4 bytes hole, try to pack */ struct workqueue_attrs * unbound_attrs; /* 168 8 */ struct pool_workqueue * dfl_pwq; /* 176 8 */ struct wq_device * wq_dev; /* 184 8 */ /* --- cacheline 3 boundary (192 bytes) --- */ char name[32]; /* 192 32 */ struct callback_head rcu; /* 224 16 */ /* XXX 16 bytes hole, try to pack */ /* --- cacheline 4 boundary (256 bytes) --- */ unsigned int flags __attribute__((__aligned__(64))); /* 256 4 */ /* XXX 4 bytes hole, try to pack */ struct pool_workqueue * * cpu_pwq; /* 264 8 */ struct wq_node_nr_active * node_nr_active[]; /* 272 0 */ /* size: 320, cachelines: 5, members: 24 */ /* sum members: 244, holes: 4, sum holes: 28 */ /* padding: 48 */ /* forced alignments: 1, forced holes: 1, sum forced holes: 16 */ } __attribute__((__aligned__(64))); struct trace_event_raw_workqueue_queue_work { struct trace_entry ent; /* 0 8 */ void * work; /* 8 8 */ void * function; /* 16 8 */ u32 __data_loc_workqueue; /* 24 4 */ int req_cpu; /* 28 4 */ int cpu; /* 32 4 */ char __data[]; /* 36 0 */ /* size: 40, cachelines: 1, members: 7 */ /* padding: 4 */ /* last cacheline: 40 bytes */ }; struct svc_deferred_req { u32 prot; /* 0 4 */ /* XXX 4 bytes hole, try to pack */ struct svc_xprt * xprt; /* 8 8 */ struct __kernel_sockaddr_storage addr; /* 16 128 */ /* --- cacheline 2 boundary (128 bytes) was 16 bytes ago --- */ size_t addrlen; /* 144 8 */ struct __kernel_sockaddr_storage daddr; /* 152 128 */ /* --- cacheline 4 boundary (256 bytes) was 24 bytes ago --- */ size_t daddrlen; /* 280 8 */ void * xprt_ctxt; /* 288 8 */ struct cache_deferred_req handle; /* 296 56 */ /* --- cacheline 5 boundary (320 bytes) was 32 bytes ago --- */ int argslen; /* 352 4 */ __be32 args[]; /* 356 0 */ /* size: 360, cachelines: 6, members: 10 */ /* sum members: 352, holes: 1, sum holes: 4 */ /* padding: 4 */ /* last cacheline: 40 bytes */ }; struct cis_cache_entry { struct list_head node; /* 0 16 */ unsigned int addr; /* 16 4 */ unsigned int len; /* 20 4 */ unsigned int attr; /* 24 4 */ unsigned char cache[]; /* 28 0 */ /* size: 32, cachelines: 1, members: 5 */ /* padding: 4 */ /* last cacheline: 32 bytes */ }; struct trace_event_raw_contention_begin { struct trace_entry ent; /* 0 8 */ void * lock_addr; /* 8 8 */ unsigned int flags; /* 16 4 */ char __data[]; /* 20 0 */ /* size: 24, cachelines: 1, members: 4 */ /* padding: 4 */ /* last cacheline: 24 bytes */ }; struct trace_event_raw_contention_end { struct trace_entry ent; /* 0 8 */ void * lock_addr; /* 8 8 */ int ret; /* 16 4 */ char __data[]; /* 20 0 */ /* size: 24, cachelines: 1, members: 4 */ /* padding: 4 */ /* 
last cacheline: 24 bytes */ }; struct trace_event_raw_nmi_handler { struct trace_entry ent; /* 0 8 */ void * handler; /* 8 8 */ s64 delta_ns; /* 16 8 */ int handled; /* 24 4 */ char __data[]; /* 28 0 */ /* size: 32, cachelines: 1, members: 5 */ /* padding: 4 */ /* last cacheline: 32 bytes */ }; struct sem_undo { struct list_head list_proc; /* 0 16 */ struct callback_head rcu; /* 16 16 */ struct sem_undo_list * ulp; /* 32 8 */ struct list_head list_id; /* 40 16 */ int semid; /* 56 4 */ short int semadj[]; /* 60 0 */ /* size: 64, cachelines: 1, members: 6 */ /* padding: 4 */ }; struct sidtab_str_cache { struct callback_head rcu_member; /* 0 16 */ struct list_head lru_member; /* 16 16 */ struct sidtab_entry * parent; /* 32 8 */ u32 len; /* 40 4 */ char str[]; /* 44 0 */ /* size: 48, cachelines: 1, members: 5 */ /* padding: 4 */ /* last cacheline: 48 bytes */ }; struct trace_event_raw_timer_start { struct trace_entry ent; /* 0 8 */ void * timer; /* 8 8 */ void * function; /* 16 8 */ long unsigned int expires; /* 24 8 */ long unsigned int bucket_expiry; /* 32 8 */ long unsigned int now; /* 40 8 */ unsigned int flags; /* 48 4 */ char __data[]; /* 52 0 */ /* size: 56, cachelines: 1, members: 8 */ /* padding: 4 */ /* last cacheline: 56 bytes */ }; struct trace_event_raw_hrtimer_start { struct trace_entry ent; /* 0 8 */ void * hrtimer; /* 8 8 */ void * function; /* 16 8 */ s64 expires; /* 24 8 */ s64 softexpires; /* 32 8 */ enum hrtimer_mode mode; /* 40 4 */ char __data[]; /* 44 0 */ /* size: 48, cachelines: 1, members: 7 */ /* padding: 4 */ /* last cacheline: 48 bytes */ }; struct trace_event_raw_mem_connect { struct trace_entry ent; /* 0 8 */ const struct xdp_mem_allocator * xa; /* 8 8 */ u32 mem_id; /* 16 4 */ u32 mem_type; /* 20 4 */ const void * allocator; /* 24 8 */ const struct xdp_rxq_info * rxq; /* 32 8 */ int ifindex; /* 40 4 */ char __data[]; /* 44 0 */ /* size: 48, cachelines: 1, members: 8 */ /* padding: 4 */ /* last cacheline: 48 bytes */ }; struct trace_event_raw_filelock_lock { struct trace_entry ent; /* 0 8 */ struct file_lock * fl; /* 8 8 */ long unsigned int i_ino; /* 16 8 */ dev_t s_dev; /* 24 4 */ /* XXX 4 bytes hole, try to pack */ struct file_lock_core * blocker; /* 32 8 */ fl_owner_t owner; /* 40 8 */ unsigned int pid; /* 48 4 */ unsigned int flags; /* 52 4 */ unsigned char type; /* 56 1 */ /* XXX 7 bytes hole, try to pack */ /* --- cacheline 1 boundary (64 bytes) --- */ loff_t fl_start; /* 64 8 */ loff_t fl_end; /* 72 8 */ int ret; /* 80 4 */ char __data[]; /* 84 0 */ /* size: 88, cachelines: 2, members: 13 */ /* sum members: 73, holes: 2, sum holes: 11 */ /* padding: 4 */ /* last cacheline: 24 bytes */ }; struct trace_event_raw_generic_add_lease { struct trace_entry ent; /* 0 8 */ long unsigned int i_ino; /* 8 8 */ int wcount; /* 16 4 */ int rcount; /* 20 4 */ int icount; /* 24 4 */ dev_t s_dev; /* 28 4 */ fl_owner_t owner; /* 32 8 */ unsigned int flags; /* 40 4 */ unsigned char type; /* 44 1 */ char __data[]; /* 45 0 */ /* size: 48, cachelines: 1, members: 10 */ /* padding: 3 */ /* last cacheline: 48 bytes */ }; struct trace_event_raw_leases_conflict { struct trace_entry ent; /* 0 8 */ void * lease; /* 8 8 */ void * breaker; /* 16 8 */ unsigned int l_fl_flags; /* 24 4 */ unsigned int b_fl_flags; /* 28 4 */ unsigned char l_fl_type; /* 32 1 */ unsigned char b_fl_type; /* 33 1 */ bool conflict; /* 34 1 */ char __data[]; /* 35 0 */ /* size: 40, cachelines: 1, members: 9 */ /* padding: 5 */ /* last cacheline: 40 bytes */ }; struct trace_event_raw_spi_setup { struct trace_entry 
ent; /* 0 8 */ int bus_num; /* 8 4 */ int chip_select; /* 12 4 */ long unsigned int mode; /* 16 8 */ unsigned int bits_per_word; /* 24 4 */ unsigned int max_speed_hz; /* 28 4 */ int status; /* 32 4 */ char __data[]; /* 36 0 */ /* size: 40, cachelines: 1, members: 8 */ /* padding: 4 */ /* last cacheline: 40 bytes */ }; struct trace_event_raw_spi_set_cs { struct trace_entry ent; /* 0 8 */ int bus_num; /* 8 4 */ int chip_select; /* 12 4 */ long unsigned int mode; /* 16 8 */ bool enable; /* 24 1 */ char __data[]; /* 25 0 */ /* size: 32, cachelines: 1, members: 6 */ /* padding: 7 */ /* last cacheline: 32 bytes */ }; struct trace_event_raw_spi_transfer { struct trace_entry ent; /* 0 8 */ int bus_num; /* 8 4 */ int chip_select; /* 12 4 */ struct spi_transfer * xfer; /* 16 8 */ int len; /* 24 4 */ u32 __data_loc_rx_buf; /* 28 4 */ u32 __data_loc_tx_buf; /* 32 4 */ char __data[]; /* 36 0 */ /* size: 40, cachelines: 1, members: 8 */ /* padding: 4 */ /* last cacheline: 40 bytes */ }; struct trace_event_raw_i2c_slave { struct trace_entry ent; /* 0 8 */ int adapter_nr; /* 8 4 */ int ret; /* 12 4 */ __u16 addr; /* 16 2 */ __u16 len; /* 18 2 */ enum i2c_slave_event event; /* 20 4 */ __u8 buf[1]; /* 24 1 */ char __data[]; /* 25 0 */ /* size: 28, cachelines: 1, members: 8 */ /* padding: 3 */ /* last cacheline: 28 bytes */ }; struct trace_event_raw_smbus_write { struct trace_entry ent; /* 0 8 */ int adapter_nr; /* 8 4 */ __u16 addr; /* 12 2 */ __u16 flags; /* 14 2 */ __u8 command; /* 16 1 */ __u8 len; /* 17 1 */ /* XXX 2 bytes hole, try to pack */ __u32 protocol; /* 20 4 */ __u8 buf[34]; /* 24 34 */ char __data[]; /* 58 0 */ /* size: 60, cachelines: 1, members: 9 */ /* sum members: 56, holes: 1, sum holes: 2 */ /* padding: 2 */ /* last cacheline: 60 bytes */ }; struct trace_event_raw_smbus_read { struct trace_entry ent; /* 0 8 */ int adapter_nr; /* 8 4 */ __u16 flags; /* 12 2 */ __u16 addr; /* 14 2 */ __u8 command; /* 16 1 */ /* XXX 3 bytes hole, try to pack */ __u32 protocol; /* 20 4 */ __u8 buf[34]; /* 24 34 */ char __data[]; /* 58 0 */ /* size: 60, cachelines: 1, members: 8 */ /* sum members: 55, holes: 1, sum holes: 3 */ /* padding: 2 */ /* last cacheline: 60 bytes */ }; struct trace_event_raw_smbus_reply { struct trace_entry ent; /* 0 8 */ int adapter_nr; /* 8 4 */ __u16 addr; /* 12 2 */ __u16 flags; /* 14 2 */ __u8 command; /* 16 1 */ __u8 len; /* 17 1 */ /* XXX 2 bytes hole, try to pack */ __u32 protocol; /* 20 4 */ __u8 buf[34]; /* 24 34 */ char __data[]; /* 58 0 */ /* size: 60, cachelines: 1, members: 9 */ /* sum members: 56, holes: 1, sum holes: 2 */ /* padding: 2 */ /* last cacheline: 60 bytes */ }; struct trace_event_raw_msr_trace_class { struct trace_entry ent; /* 0 8 */ unsigned int msr; /* 8 4 */ /* XXX 4 bytes hole, try to pack */ u64 val; /* 16 8 */ int failed; /* 24 4 */ char __data[]; /* 28 0 */ /* size: 32, cachelines: 1, members: 5 */ /* sum members: 24, holes: 1, sum holes: 4 */ /* padding: 4 */ /* last cacheline: 32 bytes */ }; struct pcpu_chunk { struct list_head list; /* 0 16 */ int free_bytes; /* 16 4 */ struct pcpu_block_md chunk_md; /* 20 32 */ /* XXX 4 bytes hole, try to pack */ long unsigned int * bound_map; /* 56 8 */ /* --- cacheline 1 boundary (64 bytes) --- */ void * base_addr; /* 64 8 */ long unsigned int * alloc_map; /* 72 8 */ struct pcpu_block_md * md_blocks; /* 80 8 */ void * data; /* 88 8 */ bool immutable; /* 96 1 */ bool isolated; /* 97 1 */ /* XXX 2 bytes hole, try to pack */ int start_offset; /* 100 4 */ int end_offset; /* 104 4 */ /* XXX 4 bytes hole, try to pack 
*/ struct pcpuobj_ext * obj_exts; /* 112 8 */ int nr_pages; /* 120 4 */ int nr_populated; /* 124 4 */ /* --- cacheline 2 boundary (128 bytes) --- */ int nr_empty_pop_pages; /* 128 4 */ /* XXX 4 bytes hole, try to pack */ long unsigned int populated[]; /* 136 0 */ /* size: 192, cachelines: 3, members: 17 */ /* sum members: 122, holes: 4, sum holes: 14 */ /* padding: 56 */ } __attribute__((__aligned__(64))); struct trace_event_raw_pwm { struct trace_entry ent; /* 0 8 */ struct pwm_device * pwm; /* 8 8 */ u64 period; /* 16 8 */ u64 duty_cycle; /* 24 8 */ enum pwm_polarity polarity; /* 32 4 */ bool enabled; /* 36 1 */ /* XXX 3 bytes hole, try to pack */ int err; /* 40 4 */ char __data[]; /* 44 0 */ /* size: 48, cachelines: 1, members: 8 */ /* sum members: 41, holes: 1, sum holes: 3 */ /* padding: 4 */ /* last cacheline: 48 bytes */ }; struct trace_event_raw_tmigr_connect_cpu_parent { struct trace_entry ent; /* 0 8 */ void * parent; /* 8 8 */ unsigned int cpu; /* 16 4 */ unsigned int lvl; /* 20 4 */ unsigned int numa_node; /* 24 4 */ unsigned int num_children; /* 28 4 */ u32 childmask; /* 32 4 */ char __data[]; /* 36 0 */ /* size: 40, cachelines: 1, members: 8 */ /* padding: 4 */ /* last cacheline: 40 bytes */ }; struct trace_event_raw_tmigr_group_and_cpu { struct trace_entry ent; /* 0 8 */ void * group; /* 8 8 */ void * parent; /* 16 8 */ unsigned int lvl; /* 24 4 */ unsigned int numa_node; /* 28 4 */ u32 childmask; /* 32 4 */ u8 active; /* 36 1 */ u8 migrator; /* 37 1 */ char __data[]; /* 38 0 */ /* size: 40, cachelines: 1, members: 9 */ /* padding: 2 */ /* last cacheline: 40 bytes */ }; struct trace_event_raw_tmigr_cpugroup { struct trace_entry ent; /* 0 8 */ u64 wakeup; /* 8 8 */ void * parent; /* 16 8 */ unsigned int cpu; /* 24 4 */ char __data[]; /* 28 0 */ /* size: 32, cachelines: 1, members: 5 */ /* padding: 4 */ /* last cacheline: 32 bytes */ }; struct trace_event_raw_tmigr_idle { struct trace_entry ent; /* 0 8 */ u64 nextevt; /* 8 8 */ u64 wakeup; /* 16 8 */ void * parent; /* 24 8 */ unsigned int cpu; /* 32 4 */ char __data[]; /* 36 0 */ /* size: 40, cachelines: 1, members: 6 */ /* padding: 4 */ /* last cacheline: 40 bytes */ }; struct trace_event_raw_tmigr_update_events { struct trace_entry ent; /* 0 8 */ void * child; /* 8 8 */ void * group; /* 16 8 */ u64 nextevt; /* 24 8 */ u64 group_next_expiry; /* 32 8 */ u64 child_evt_expiry; /* 40 8 */ unsigned int group_lvl; /* 48 4 */ unsigned int child_evtcpu; /* 52 4 */ u8 child_active; /* 56 1 */ u8 group_active; /* 57 1 */ char __data[]; /* 58 0 */ /* size: 64, cachelines: 1, members: 11 */ /* padding: 6 */ }; struct trace_event_raw_tmigr_handle_remote { struct trace_entry ent; /* 0 8 */ void * group; /* 8 8 */ unsigned int lvl; /* 16 4 */ char __data[]; /* 20 0 */ /* size: 24, cachelines: 1, members: 4 */ /* padding: 4 */ /* last cacheline: 24 bytes */ }; struct fname { __u32 hash; /* 0 4 */ __u32 minor_hash; /* 4 4 */ struct rb_node rb_hash; /* 8 24 */ struct fname * next; /* 32 8 */ __u32 inode; /* 40 4 */ __u8 name_len; /* 44 1 */ __u8 file_type; /* 45 1 */ char name[]; /* 46 0 */ /* size: 48, cachelines: 1, members: 8 */ /* padding: 2 */ /* last cacheline: 48 bytes */ }; struct pericom8250 { void * virt; /* 0 8 */ unsigned int nr; /* 8 4 */ int line[]; /* 12 0 */ /* size: 16, cachelines: 1, members: 3 */ /* padding: 4 */ /* last cacheline: 16 bytes */ }; struct trace_event_raw_dax_pmd_fault_class { struct trace_entry ent; /* 0 8 */ long unsigned int ino; /* 8 8 */ long unsigned int vm_start; /* 16 8 */ long unsigned int vm_end; /* 
24 8 */ long unsigned int vm_flags; /* 32 8 */ long unsigned int address; /* 40 8 */ long unsigned int pgoff; /* 48 8 */ long unsigned int max_pgoff; /* 56 8 */ /* --- cacheline 1 boundary (64 bytes) --- */ dev_t dev; /* 64 4 */ unsigned int flags; /* 68 4 */ int result; /* 72 4 */ char __data[]; /* 76 0 */ /* size: 80, cachelines: 2, members: 12 */ /* padding: 4 */ /* last cacheline: 16 bytes */ }; struct trace_event_raw_dax_pmd_load_hole_class { struct trace_entry ent; /* 0 8 */ long unsigned int ino; /* 8 8 */ long unsigned int vm_flags; /* 16 8 */ long unsigned int address; /* 24 8 */ struct folio * zero_folio; /* 32 8 */ void * radix_entry; /* 40 8 */ dev_t dev; /* 48 4 */ char __data[]; /* 52 0 */ /* size: 56, cachelines: 1, members: 8 */ /* padding: 4 */ /* last cacheline: 56 bytes */ }; struct trace_event_raw_dax_pte_fault_class { struct trace_entry ent; /* 0 8 */ long unsigned int ino; /* 8 8 */ long unsigned int vm_flags; /* 16 8 */ long unsigned int address; /* 24 8 */ long unsigned int pgoff; /* 32 8 */ dev_t dev; /* 40 4 */ unsigned int flags; /* 44 4 */ int result; /* 48 4 */ char __data[]; /* 52 0 */ /* size: 56, cachelines: 1, members: 9 */ /* padding: 4 */ /* last cacheline: 56 bytes */ }; struct trace_event_raw_dax_writeback_range_class { struct trace_entry ent; /* 0 8 */ long unsigned int ino; /* 8 8 */ long unsigned int start_index; /* 16 8 */ long unsigned int end_index; /* 24 8 */ dev_t dev; /* 32 4 */ char __data[]; /* 36 0 */ /* size: 40, cachelines: 1, members: 6 */ /* padding: 4 */ /* last cacheline: 40 bytes */ }; struct trace_event_raw_dax_writeback_one { struct trace_entry ent; /* 0 8 */ long unsigned int ino; /* 8 8 */ long unsigned int pgoff; /* 16 8 */ long unsigned int pglen; /* 24 8 */ dev_t dev; /* 32 4 */ char __data[]; /* 36 0 */ /* size: 40, cachelines: 1, members: 6 */ /* padding: 4 */ /* last cacheline: 40 bytes */ }; struct trace_event_raw_wbt_step { struct trace_entry ent; /* 0 8 */ char name[32]; /* 8 32 */ const char * msg; /* 40 8 */ int step; /* 48 4 */ /* XXX 4 bytes hole, try to pack */ long unsigned int window; /* 56 8 */ /* --- cacheline 1 boundary (64 bytes) --- */ unsigned int bg; /* 64 4 */ unsigned int normal; /* 68 4 */ unsigned int max; /* 72 4 */ char __data[]; /* 76 0 */ /* size: 80, cachelines: 2, members: 9 */ /* sum members: 72, holes: 1, sum holes: 4 */ /* padding: 4 */ /* last cacheline: 16 bytes */ }; struct trace_event_raw_iommu_error { struct trace_entry ent; /* 0 8 */ u32 __data_loc_device; /* 8 4 */ u32 __data_loc_driver; /* 12 4 */ u64 iova; /* 16 8 */ int flags; /* 24 4 */ char __data[]; /* 28 0 */ /* size: 32, cachelines: 1, members: 6 */ /* padding: 4 */ /* last cacheline: 32 bytes */ }; struct dm_name_list { __u64 dev; /* 0 8 */ __u32 next; /* 8 4 */ char name[]; /* 12 0 */ /* size: 16, cachelines: 1, members: 3 */ /* padding: 4 */ /* last cacheline: 16 bytes */ }; struct trace_event_raw_sched_skip_vma_numa { struct trace_entry ent; /* 0 8 */ long unsigned int numa_scan_offset; /* 8 8 */ long unsigned int vm_start; /* 16 8 */ long unsigned int vm_end; /* 24 8 */ enum numa_vmaskip_reason reason; /* 32 4 */ char __data[]; /* 36 0 */ /* size: 40, cachelines: 1, members: 6 */ /* padding: 4 */ /* last cacheline: 40 bytes */ }; struct trace_event_raw_oom_score_adj_update { struct trace_entry ent; /* 0 8 */ pid_t pid; /* 8 4 */ char comm[16]; /* 12 16 */ short int oom_score_adj; /* 28 2 */ char __data[]; /* 30 0 */ /* size: 32, cachelines: 1, members: 5 */ /* padding: 2 */ /* last cacheline: 32 bytes */ }; struct 
trace_event_raw_reclaim_retry_zone { struct trace_entry ent; /* 0 8 */ int node; /* 8 4 */ int zone_idx; /* 12 4 */ int order; /* 16 4 */ /* XXX 4 bytes hole, try to pack */ long unsigned int reclaimable; /* 24 8 */ long unsigned int available; /* 32 8 */ long unsigned int min_wmark; /* 40 8 */ int no_progress_loops; /* 48 4 */ bool wmark_check; /* 52 1 */ char __data[]; /* 53 0 */ /* size: 56, cachelines: 1, members: 10 */ /* sum members: 49, holes: 1, sum holes: 4 */ /* padding: 3 */ /* last cacheline: 56 bytes */ }; struct trace_event_raw_mark_victim { struct trace_entry ent; /* 0 8 */ int pid; /* 8 4 */ u32 __data_loc_comm; /* 12 4 */ long unsigned int total_vm; /* 16 8 */ long unsigned int anon_rss; /* 24 8 */ long unsigned int file_rss; /* 32 8 */ long unsigned int shmem_rss; /* 40 8 */ uid_t uid; /* 48 4 */ /* XXX 4 bytes hole, try to pack */ long unsigned int pgtables; /* 56 8 */ /* --- cacheline 1 boundary (64 bytes) --- */ short int oom_score_adj; /* 64 2 */ char __data[]; /* 66 0 */ /* size: 72, cachelines: 2, members: 11 */ /* sum members: 62, holes: 1, sum holes: 4 */ /* padding: 6 */ /* last cacheline: 8 bytes */ }; struct trace_event_raw_compact_retry { struct trace_entry ent; /* 0 8 */ int order; /* 8 4 */ int priority; /* 12 4 */ int result; /* 16 4 */ int retries; /* 20 4 */ int max_retries; /* 24 4 */ bool ret; /* 28 1 */ char __data[]; /* 29 0 */ /* size: 32, cachelines: 1, members: 8 */ /* padding: 3 */ /* last cacheline: 32 bytes */ }; struct trace_event_raw_event_da_monitor_id { struct trace_entry ent; /* 0 8 */ int id; /* 8 4 */ char state[24]; /* 12 24 */ char event[24]; /* 36 24 */ char next_state[24]; /* 60 24 */ /* --- cacheline 1 boundary (64 bytes) was 20 bytes ago --- */ bool final_state; /* 84 1 */ char __data[]; /* 85 0 */ /* size: 88, cachelines: 2, members: 7 */ /* padding: 3 */ /* last cacheline: 24 bytes */ }; struct trace_event_raw_mmap_lock { struct trace_entry ent; /* 0 8 */ struct mm_struct * mm; /* 8 8 */ u32 __data_loc_memcg_path; /* 16 4 */ bool write; /* 20 1 */ char __data[]; /* 21 0 */ /* size: 24, cachelines: 1, members: 5 */ /* padding: 3 */ /* last cacheline: 24 bytes */ }; struct trace_event_raw_mmap_lock_acquire_returned { struct trace_entry ent; /* 0 8 */ struct mm_struct * mm; /* 8 8 */ u32 __data_loc_memcg_path; /* 16 4 */ bool write; /* 20 1 */ bool success; /* 21 1 */ char __data[]; /* 22 0 */ /* size: 24, cachelines: 1, members: 6 */ /* padding: 2 */ /* last cacheline: 24 bytes */ }; struct trace_event_raw_amd_pstate_perf { struct trace_entry ent; /* 0 8 */ long unsigned int min_perf; /* 8 8 */ long unsigned int target_perf; /* 16 8 */ long unsigned int capacity; /* 24 8 */ long long unsigned int freq; /* 32 8 */ long long unsigned int mperf; /* 40 8 */ long long unsigned int aperf; /* 48 8 */ long long unsigned int tsc; /* 56 8 */ /* --- cacheline 1 boundary (64 bytes) --- */ unsigned int cpu_id; /* 64 4 */ bool changed; /* 68 1 */ bool fast_switch; /* 69 1 */ char __data[]; /* 70 0 */ /* size: 72, cachelines: 2, members: 12 */ /* padding: 2 */ /* last cacheline: 8 bytes */ }; struct trace_event_raw_mptcp_dump_mpext { struct trace_entry ent; /* 0 8 */ u64 data_ack; /* 8 8 */ u64 data_seq; /* 16 8 */ u32 subflow_seq; /* 24 4 */ u16 data_len; /* 28 2 */ u16 csum; /* 30 2 */ u8 use_map; /* 32 1 */ u8 dsn64; /* 33 1 */ u8 data_fin; /* 34 1 */ u8 use_ack; /* 35 1 */ u8 ack64; /* 36 1 */ u8 mpc_map; /* 37 1 */ u8 frozen; /* 38 1 */ u8 reset_transient; /* 39 1 */ u8 reset_reason; /* 40 1 */ u8 csum_reqd; /* 41 1 */ u8 infinite_map; /* 42 
1 */ char __data[]; /* 43 0 */ /* size: 48, cachelines: 1, members: 18 */ /* padding: 5 */ /* last cacheline: 48 bytes */ }; struct trace_event_raw_swiotlb_bounced { struct trace_entry ent; /* 0 8 */ u32 __data_loc_dev_name; /* 8 4 */ /* XXX 4 bytes hole, try to pack */ u64 dma_mask; /* 16 8 */ dma_addr_t dev_addr; /* 24 8 */ size_t size; /* 32 8 */ bool force; /* 40 1 */ char __data[]; /* 41 0 */ /* size: 48, cachelines: 1, members: 7 */ /* sum members: 37, holes: 1, sum holes: 4 */ /* padding: 7 */ /* last cacheline: 48 bytes */ }; struct trace_event_raw_skb_copy_datagram_iovec { struct trace_entry ent; /* 0 8 */ const void * skbaddr; /* 8 8 */ int len; /* 16 4 */ char __data[]; /* 20 0 */ /* size: 24, cachelines: 1, members: 4 */ /* padding: 4 */ /* last cacheline: 24 bytes */ }; struct trace_event_raw_net_dev_xmit { struct trace_entry ent; /* 0 8 */ void * skbaddr; /* 8 8 */ unsigned int len; /* 16 4 */ int rc; /* 20 4 */ u32 __data_loc_name; /* 24 4 */ char __data[]; /* 28 0 */ /* size: 32, cachelines: 1, members: 6 */ /* padding: 4 */ /* last cacheline: 32 bytes */ }; struct trace_event_raw_net_dev_rx_verbose_template { struct trace_entry ent; /* 0 8 */ u32 __data_loc_name; /* 8 4 */ unsigned int napi_id; /* 12 4 */ u16 queue_mapping; /* 16 2 */ /* XXX 6 bytes hole, try to pack */ const void * skbaddr; /* 24 8 */ bool vlan_tagged; /* 32 1 */ /* XXX 1 byte hole, try to pack */ u16 vlan_proto; /* 34 2 */ u16 vlan_tci; /* 36 2 */ u16 protocol; /* 38 2 */ u8 ip_summed; /* 40 1 */ /* XXX 3 bytes hole, try to pack */ u32 hash; /* 44 4 */ bool l4_hash; /* 48 1 */ /* XXX 3 bytes hole, try to pack */ unsigned int len; /* 52 4 */ unsigned int data_len; /* 56 4 */ unsigned int truesize; /* 60 4 */ /* --- cacheline 1 boundary (64 bytes) --- */ bool mac_header_valid; /* 64 1 */ /* XXX 3 bytes hole, try to pack */ int mac_header; /* 68 4 */ unsigned char nr_frags; /* 72 1 */ /* XXX 1 byte hole, try to pack */ u16 gso_size; /* 74 2 */ u16 gso_type; /* 76 2 */ char __data[]; /* 78 0 */ /* size: 80, cachelines: 2, members: 21 */ /* sum members: 61, holes: 6, sum holes: 17 */ /* padding: 2 */ /* last cacheline: 16 bytes */ }; struct trace_event_raw_napi_poll { struct trace_entry ent; /* 0 8 */ struct napi_struct * napi; /* 8 8 */ u32 __data_loc_dev_name; /* 16 4 */ int work; /* 20 4 */ int budget; /* 24 4 */ char __data[]; /* 28 0 */ /* size: 32, cachelines: 1, members: 6 */ /* padding: 4 */ /* last cacheline: 32 bytes */ }; struct trace_event_raw_sock_msg_length { struct trace_entry ent; /* 0 8 */ void * sk; /* 8 8 */ __u16 family; /* 16 2 */ __u16 protocol; /* 18 2 */ int ret; /* 20 4 */ int flags; /* 24 4 */ char __data[]; /* 28 0 */ /* size: 32, cachelines: 1, members: 7 */ /* padding: 4 */ /* last cacheline: 32 bytes */ }; struct trace_event_raw_udp_fail_queue_rcv_skb { struct trace_entry ent; /* 0 8 */ int rc; /* 8 4 */ __u16 sport; /* 12 2 */ __u16 dport; /* 14 2 */ __u16 family; /* 16 2 */ __u8 saddr[28]; /* 18 28 */ __u8 daddr[28]; /* 46 28 */ /* --- cacheline 1 boundary (64 bytes) was 10 bytes ago --- */ char __data[]; /* 74 0 */ /* size: 76, cachelines: 2, members: 8 */ /* padding: 2 */ /* last cacheline: 12 bytes */ }; struct trace_event_raw_tcp_event_sk_skb { struct trace_entry ent; /* 0 8 */ const void * skbaddr; /* 8 8 */ const void * skaddr; /* 16 8 */ int state; /* 24 4 */ __u16 sport; /* 28 2 */ __u16 dport; /* 30 2 */ __u16 family; /* 32 2 */ __u8 saddr[4]; /* 34 4 */ __u8 daddr[4]; /* 38 4 */ __u8 saddr_v6[16]; /* 42 16 */ __u8 daddr_v6[16]; /* 58 16 */ /* --- cacheline 1 boundary 
(64 bytes) was 10 bytes ago --- */ char __data[]; /* 74 0 */ /* size: 80, cachelines: 2, members: 12 */ /* padding: 6 */ /* last cacheline: 16 bytes */ }; struct trace_event_raw_tcp_retransmit_synack { struct trace_entry ent; /* 0 8 */ const void * skaddr; /* 8 8 */ const void * req; /* 16 8 */ __u16 sport; /* 24 2 */ __u16 dport; /* 26 2 */ __u16 family; /* 28 2 */ __u8 saddr[4]; /* 30 4 */ __u8 daddr[4]; /* 34 4 */ __u8 saddr_v6[16]; /* 38 16 */ __u8 daddr_v6[16]; /* 54 16 */ /* --- cacheline 1 boundary (64 bytes) was 6 bytes ago --- */ char __data[]; /* 70 0 */ /* size: 72, cachelines: 2, members: 11 */ /* padding: 2 */ /* last cacheline: 8 bytes */ }; struct trace_event_raw_tcp_cong_state_set { struct trace_entry ent; /* 0 8 */ const void * skaddr; /* 8 8 */ __u16 sport; /* 16 2 */ __u16 dport; /* 18 2 */ __u16 family; /* 20 2 */ __u8 saddr[4]; /* 22 4 */ __u8 daddr[4]; /* 26 4 */ __u8 saddr_v6[16]; /* 30 16 */ __u8 daddr_v6[16]; /* 46 16 */ __u8 cong_state; /* 62 1 */ char __data[]; /* 63 0 */ /* size: 64, cachelines: 1, members: 11 */ /* padding: 1 */ }; struct trace_event_raw_qdisc_enqueue { struct trace_entry ent; /* 0 8 */ struct Qdisc * qdisc; /* 8 8 */ const struct netdev_queue * txq; /* 16 8 */ void * skbaddr; /* 24 8 */ int ifindex; /* 32 4 */ u32 handle; /* 36 4 */ u32 parent; /* 40 4 */ char __data[]; /* 44 0 */ /* size: 48, cachelines: 1, members: 8 */ /* padding: 4 */ /* last cacheline: 48 bytes */ }; struct trace_event_raw_br_fdb_add { struct trace_entry ent; /* 0 8 */ u8 ndm_flags; /* 8 1 */ /* XXX 3 bytes hole, try to pack */ u32 __data_loc_dev; /* 12 4 */ unsigned char addr[6]; /* 16 6 */ u16 vid; /* 22 2 */ u16 nlh_flags; /* 24 2 */ char __data[]; /* 26 0 */ /* size: 28, cachelines: 1, members: 7 */ /* sum members: 23, holes: 1, sum holes: 3 */ /* padding: 2 */ /* last cacheline: 28 bytes */ }; struct trace_event_raw_neigh_create { struct trace_entry ent; /* 0 8 */ u32 family; /* 8 4 */ u32 __data_loc_dev; /* 12 4 */ int entries; /* 16 4 */ u8 created; /* 20 1 */ u8 gc_exempt; /* 21 1 */ u8 primary_key4[4]; /* 22 4 */ u8 primary_key6[16]; /* 26 16 */ char __data[]; /* 42 0 */ /* size: 44, cachelines: 1, members: 9 */ /* padding: 2 */ /* last cacheline: 44 bytes */ }; struct trace_event_raw_neigh_update { struct trace_entry ent; /* 0 8 */ u32 family; /* 8 4 */ u32 __data_loc_dev; /* 12 4 */ u8 lladdr[32]; /* 16 32 */ u8 lladdr_len; /* 48 1 */ u8 flags; /* 49 1 */ u8 nud_state; /* 50 1 */ u8 type; /* 51 1 */ u8 dead; /* 52 1 */ /* XXX 3 bytes hole, try to pack */ int refcnt; /* 56 4 */ __u8 primary_key4[4]; /* 60 4 */ /* --- cacheline 1 boundary (64 bytes) --- */ __u8 primary_key6[16]; /* 64 16 */ long unsigned int confirmed; /* 80 8 */ long unsigned int updated; /* 88 8 */ long unsigned int used; /* 96 8 */ u8 new_lladdr[32]; /* 104 32 */ /* --- cacheline 2 boundary (128 bytes) was 8 bytes ago --- */ u8 new_state; /* 136 1 */ /* XXX 3 bytes hole, try to pack */ u32 update_flags; /* 140 4 */ u32 pid; /* 144 4 */ char __data[]; /* 148 0 */ /* size: 152, cachelines: 3, members: 20 */ /* sum members: 142, holes: 2, sum holes: 6 */ /* padding: 4 */ /* last cacheline: 24 bytes */ }; struct trace_event_raw_neigh__update { struct trace_entry ent; /* 0 8 */ u32 family; /* 8 4 */ u32 __data_loc_dev; /* 12 4 */ u8 lladdr[32]; /* 16 32 */ u8 lladdr_len; /* 48 1 */ u8 flags; /* 49 1 */ u8 nud_state; /* 50 1 */ u8 type; /* 51 1 */ u8 dead; /* 52 1 */ /* XXX 3 bytes hole, try to pack */ int refcnt; /* 56 4 */ __u8 primary_key4[4]; /* 60 4 */ /* --- cacheline 1 boundary (64 bytes) --- 
*/ __u8 primary_key6[16]; /* 64 16 */ long unsigned int confirmed; /* 80 8 */ long unsigned int updated; /* 88 8 */ long unsigned int used; /* 96 8 */ u32 err; /* 104 4 */ char __data[]; /* 108 0 */ /* size: 112, cachelines: 2, members: 17 */ /* sum members: 105, holes: 1, sum holes: 3 */ /* padding: 4 */ /* last cacheline: 48 bytes */ }; struct trace_event_raw_vector_activate { struct trace_entry ent; /* 0 8 */ unsigned int irq; /* 8 4 */ bool is_managed; /* 12 1 */ bool can_reserve; /* 13 1 */ bool reserve; /* 14 1 */ char __data[]; /* 15 0 */ /* size: 16, cachelines: 1, members: 6 */ /* padding: 1 */ /* last cacheline: 16 bytes */ }; struct trace_event_raw_vector_teardown { struct trace_entry ent; /* 0 8 */ unsigned int irq; /* 8 4 */ bool is_managed; /* 12 1 */ bool has_reserved; /* 13 1 */ char __data[]; /* 14 0 */ /* size: 16, cachelines: 1, members: 5 */ /* padding: 2 */ /* last cacheline: 16 bytes */ }; struct trace_event_raw_vector_free_moved { struct trace_entry ent; /* 0 8 */ unsigned int irq; /* 8 4 */ unsigned int cpu; /* 12 4 */ unsigned int vector; /* 16 4 */ bool is_managed; /* 20 1 */ char __data[]; /* 21 0 */ /* size: 24, cachelines: 1, members: 6 */ /* padding: 3 */ /* last cacheline: 24 bytes */ }; struct trace_event_raw_rpm_return_int { struct trace_entry ent; /* 0 8 */ u32 __data_loc_name; /* 8 4 */ /* XXX 4 bytes hole, try to pack */ long unsigned int ip; /* 16 8 */ int ret; /* 24 4 */ char __data[]; /* 28 0 */ /* size: 32, cachelines: 1, members: 5 */ /* sum members: 24, holes: 1, sum holes: 4 */ /* padding: 4 */ /* last cacheline: 32 bytes */ }; struct trace_event_raw_migration_pte { struct trace_entry ent; /* 0 8 */ long unsigned int addr; /* 8 8 */ long unsigned int pte; /* 16 8 */ int order; /* 24 4 */ char __data[]; /* 28 0 */ /* size: 32, cachelines: 1, members: 5 */ /* padding: 4 */ /* last cacheline: 32 bytes */ }; struct trace_event_raw_io_uring_create { struct trace_entry ent; /* 0 8 */ int fd; /* 8 4 */ /* XXX 4 bytes hole, try to pack */ void * ctx; /* 16 8 */ u32 sq_entries; /* 24 4 */ u32 cq_entries; /* 28 4 */ u32 flags; /* 32 4 */ char __data[]; /* 36 0 */ /* size: 40, cachelines: 1, members: 7 */ /* sum members: 32, holes: 1, sum holes: 4 */ /* padding: 4 */ /* last cacheline: 40 bytes */ }; struct trace_event_raw_io_uring_file_get { struct trace_entry ent; /* 0 8 */ void * ctx; /* 8 8 */ void * req; /* 16 8 */ u64 user_data; /* 24 8 */ int fd; /* 32 4 */ char __data[]; /* 36 0 */ /* size: 40, cachelines: 1, members: 6 */ /* padding: 4 */ /* last cacheline: 40 bytes */ }; struct trace_event_raw_io_uring_cqring_wait { struct trace_entry ent; /* 0 8 */ void * ctx; /* 8 8 */ int min_events; /* 16 4 */ char __data[]; /* 20 0 */ /* size: 24, cachelines: 1, members: 4 */ /* padding: 4 */ /* last cacheline: 24 bytes */ }; struct trace_event_raw_io_uring_fail_link { struct trace_entry ent; /* 0 8 */ void * ctx; /* 8 8 */ void * req; /* 16 8 */ long long unsigned int user_data; /* 24 8 */ u8 opcode; /* 32 1 */ /* XXX 7 bytes hole, try to pack */ void * link; /* 40 8 */ u32 __data_loc_op_str; /* 48 4 */ char __data[]; /* 52 0 */ /* size: 56, cachelines: 1, members: 8 */ /* sum members: 45, holes: 1, sum holes: 7 */ /* padding: 4 */ /* last cacheline: 56 bytes */ }; struct trace_event_raw_io_uring_task_add { struct trace_entry ent; /* 0 8 */ void * ctx; /* 8 8 */ void * req; /* 16 8 */ long long unsigned int user_data; /* 24 8 */ u8 opcode; /* 32 1 */ /* XXX 3 bytes hole, try to pack */ int mask; /* 36 4 */ u32 __data_loc_op_str; /* 40 4 */ char __data[]; /* 
44 0 */ /* size: 48, cachelines: 1, members: 8 */ /* sum members: 41, holes: 1, sum holes: 3 */ /* padding: 4 */ /* last cacheline: 48 bytes */ }; struct trace_event_raw_io_uring_task_work_run { struct trace_entry ent; /* 0 8 */ void * tctx; /* 8 8 */ unsigned int count; /* 16 4 */ char __data[]; /* 20 0 */ /* size: 24, cachelines: 1, members: 4 */ /* padding: 4 */ /* last cacheline: 24 bytes */ }; struct trace_event_raw_cgroup { struct trace_entry ent; /* 0 8 */ int root; /* 8 4 */ int level; /* 12 4 */ u64 id; /* 16 8 */ u32 __data_loc_path; /* 24 4 */ char __data[]; /* 28 0 */ /* size: 32, cachelines: 1, members: 6 */ /* padding: 4 */ /* last cacheline: 32 bytes */ }; struct trace_event_raw_cgroup_migrate { struct trace_entry ent; /* 0 8 */ int dst_root; /* 8 4 */ int dst_level; /* 12 4 */ u64 dst_id; /* 16 8 */ int pid; /* 24 4 */ u32 __data_loc_dst_path; /* 28 4 */ u32 __data_loc_comm; /* 32 4 */ char __data[]; /* 36 0 */ /* size: 40, cachelines: 1, members: 8 */ /* padding: 4 */ /* last cacheline: 40 bytes */ }; struct trace_event_raw_cgroup_rstat { struct trace_entry ent; /* 0 8 */ int root; /* 8 4 */ int level; /* 12 4 */ u64 id; /* 16 8 */ int cpu; /* 24 4 */ bool contended; /* 28 1 */ char __data[]; /* 29 0 */ /* size: 32, cachelines: 1, members: 7 */ /* padding: 3 */ /* last cacheline: 32 bytes */ }; struct trace_event_raw_mm_shrink_slab_start { struct trace_entry ent; /* 0 8 */ struct shrinker * shr; /* 8 8 */ void * shrink; /* 16 8 */ int nid; /* 24 4 */ /* XXX 4 bytes hole, try to pack */ long int nr_objects_to_shrink; /* 32 8 */ long unsigned int gfp_flags; /* 40 8 */ long unsigned int cache_items; /* 48 8 */ long long unsigned int delta; /* 56 8 */ /* --- cacheline 1 boundary (64 bytes) --- */ long unsigned int total_scan; /* 64 8 */ int priority; /* 72 4 */ char __data[]; /* 76 0 */ /* size: 80, cachelines: 2, members: 11 */ /* sum members: 72, holes: 1, sum holes: 4 */ /* padding: 4 */ /* last cacheline: 16 bytes */ }; struct trace_event_raw_mm_vmscan_lru_isolate { struct trace_entry ent; /* 0 8 */ int highest_zoneidx; /* 8 4 */ int order; /* 12 4 */ long unsigned int nr_requested; /* 16 8 */ long unsigned int nr_scanned; /* 24 8 */ long unsigned int nr_skipped; /* 32 8 */ long unsigned int nr_taken; /* 40 8 */ int lru; /* 48 4 */ char __data[]; /* 52 0 */ /* size: 56, cachelines: 1, members: 9 */ /* padding: 4 */ /* last cacheline: 56 bytes */ }; struct trace_event_raw_mm_vmscan_write_folio { struct trace_entry ent; /* 0 8 */ long unsigned int pfn; /* 8 8 */ int reclaim_flags; /* 16 4 */ char __data[]; /* 20 0 */ /* size: 24, cachelines: 1, members: 4 */ /* padding: 4 */ /* last cacheline: 24 bytes */ }; struct trace_event_raw_fib6_table_lookup { struct trace_entry ent; /* 0 8 */ u32 tb_id; /* 8 4 */ int err; /* 12 4 */ int oif; /* 16 4 */ int iif; /* 20 4 */ __u8 tos; /* 24 1 */ __u8 scope; /* 25 1 */ __u8 flags; /* 26 1 */ __u8 src[16]; /* 27 16 */ __u8 dst[16]; /* 43 16 */ /* XXX 1 byte hole, try to pack */ u16 sport; /* 60 2 */ u16 dport; /* 62 2 */ /* --- cacheline 1 boundary (64 bytes) --- */ u8 proto; /* 64 1 */ u8 rt_type; /* 65 1 */ char name[16]; /* 66 16 */ __u8 gw[16]; /* 82 16 */ char __data[]; /* 98 0 */ /* size: 100, cachelines: 2, members: 17 */ /* sum members: 97, holes: 1, sum holes: 1 */ /* padding: 2 */ /* last cacheline: 36 bytes */ }; struct trace_event_raw_block_rq_completion { struct trace_entry ent; /* 0 8 */ dev_t dev; /* 8 4 */ /* XXX 4 bytes hole, try to pack */ sector_t sector; /* 16 8 */ unsigned int nr_sector; /* 24 4 */ int error; /* 28 4 
*/ char rwbs[8]; /* 32 8 */ u32 __data_loc_cmd; /* 40 4 */ char __data[]; /* 44 0 */ /* size: 48, cachelines: 1, members: 8 */ /* sum members: 40, holes: 1, sum holes: 4 */ /* padding: 4 */ /* last cacheline: 48 bytes */ }; struct trace_event_raw_block_rq { struct trace_entry ent; /* 0 8 */ dev_t dev; /* 8 4 */ /* XXX 4 bytes hole, try to pack */ sector_t sector; /* 16 8 */ unsigned int nr_sector; /* 24 4 */ unsigned int bytes; /* 28 4 */ char rwbs[8]; /* 32 8 */ char comm[16]; /* 40 16 */ u32 __data_loc_cmd; /* 56 4 */ char __data[]; /* 60 0 */ /* size: 64, cachelines: 1, members: 9 */ /* sum members: 56, holes: 1, sum holes: 4 */ /* padding: 4 */ }; struct trace_event_raw_block_bio { struct trace_entry ent; /* 0 8 */ dev_t dev; /* 8 4 */ /* XXX 4 bytes hole, try to pack */ sector_t sector; /* 16 8 */ unsigned int nr_sector; /* 24 4 */ char rwbs[8]; /* 28 8 */ char comm[16]; /* 36 16 */ char __data[]; /* 52 0 */ /* size: 56, cachelines: 1, members: 7 */ /* sum members: 48, holes: 1, sum holes: 4 */ /* padding: 4 */ /* last cacheline: 56 bytes */ }; struct trace_event_raw_block_rq_remap { struct trace_entry ent; /* 0 8 */ dev_t dev; /* 8 4 */ /* XXX 4 bytes hole, try to pack */ sector_t sector; /* 16 8 */ unsigned int nr_sector; /* 24 4 */ dev_t old_dev; /* 28 4 */ sector_t old_sector; /* 32 8 */ unsigned int nr_bios; /* 40 4 */ char rwbs[8]; /* 44 8 */ char __data[]; /* 52 0 */ /* size: 56, cachelines: 1, members: 9 */ /* sum members: 48, holes: 1, sum holes: 4 */ /* padding: 4 */ /* last cacheline: 56 bytes */ }; struct trace_event_raw_mm_compaction_begin { struct trace_entry ent; /* 0 8 */ long unsigned int zone_start; /* 8 8 */ long unsigned int migrate_pfn; /* 16 8 */ long unsigned int free_pfn; /* 24 8 */ long unsigned int zone_end; /* 32 8 */ bool sync; /* 40 1 */ char __data[]; /* 41 0 */ /* size: 48, cachelines: 1, members: 7 */ /* padding: 7 */ /* last cacheline: 48 bytes */ }; struct trace_event_raw_mm_compaction_try_to_compact_pages { struct trace_entry ent; /* 0 8 */ int order; /* 8 4 */ /* XXX 4 bytes hole, try to pack */ long unsigned int gfp_mask; /* 16 8 */ int prio; /* 24 4 */ char __data[]; /* 28 0 */ /* size: 32, cachelines: 1, members: 5 */ /* sum members: 24, holes: 1, sum holes: 4 */ /* padding: 4 */ /* last cacheline: 32 bytes */ }; struct trace_event_raw_inode_foreign_history { struct trace_entry ent; /* 0 8 */ char name[32]; /* 8 32 */ ino_t ino; /* 40 8 */ ino_t cgroup_ino; /* 48 8 */ unsigned int history; /* 56 4 */ char __data[]; /* 60 0 */ /* size: 64, cachelines: 1, members: 6 */ /* padding: 4 */ }; struct trace_event_raw_drm_vblank_event { struct trace_entry ent; /* 0 8 */ int crtc; /* 8 4 */ unsigned int seq; /* 12 4 */ ktime_t time; /* 16 8 */ bool high_prec; /* 24 1 */ char __data[]; /* 25 0 */ /* size: 32, cachelines: 1, members: 6 */ /* padding: 7 */ /* last cacheline: 32 bytes */ }; struct trace_event_raw_extlog_mem_event { struct trace_entry ent; /* 0 8 */ u32 err_seq; /* 8 4 */ u8 etype; /* 12 1 */ u8 sev; /* 13 1 */ /* XXX 2 bytes hole, try to pack */ u64 pa; /* 16 8 */ u8 pa_mask_lsb; /* 24 1 */ guid_t fru_id; /* 25 16 */ /* XXX 3 bytes hole, try to pack */ u32 __data_loc_fru_text; /* 44 4 */ struct cper_mem_err_compact data; /* 48 55 */ /* --- cacheline 1 boundary (64 bytes) was 39 bytes ago --- */ char __data[]; /* 103 0 */ /* size: 104, cachelines: 2, members: 10 */ /* sum members: 98, holes: 2, sum holes: 5 */ /* padding: 1 */ /* last cacheline: 40 bytes */ }; struct trace_event_raw_mc_event { struct trace_entry ent; /* 0 8 */ unsigned int 
error_type; /* 8 4 */ u32 __data_loc_msg; /* 12 4 */ u32 __data_loc_label; /* 16 4 */ u16 error_count; /* 20 2 */ u8 mc_index; /* 22 1 */ s8 top_layer; /* 23 1 */ s8 middle_layer; /* 24 1 */ s8 lower_layer; /* 25 1 */ /* XXX 6 bytes hole, try to pack */ long int address; /* 32 8 */ u8 grain_bits; /* 40 1 */ /* XXX 7 bytes hole, try to pack */ long int syndrome; /* 48 8 */ u32 __data_loc_driver_detail; /* 56 4 */ char __data[]; /* 60 0 */ /* size: 64, cachelines: 1, members: 14 */ /* sum members: 47, holes: 2, sum holes: 13 */ /* padding: 4 */ }; struct trace_event_raw_arm_event { struct trace_entry ent; /* 0 8 */ u64 mpidr; /* 8 8 */ u64 midr; /* 16 8 */ u32 running_state; /* 24 4 */ u32 psci_state; /* 28 4 */ u8 affinity; /* 32 1 */ char __data[]; /* 33 0 */ /* size: 40, cachelines: 1, members: 7 */ /* padding: 7 */ /* last cacheline: 40 bytes */ }; struct linux_dirent64 { u64 d_ino; /* 0 8 */ s64 d_off; /* 8 8 */ short unsigned int d_reclen; /* 16 2 */ unsigned char d_type; /* 18 1 */ char d_name[]; /* 19 0 */ /* size: 24, cachelines: 1, members: 5 */ /* padding: 5 */ /* last cacheline: 24 bytes */ }; struct old_linux_dirent { long unsigned int d_ino; /* 0 8 */ long unsigned int d_offset; /* 8 8 */ short unsigned int d_namlen; /* 16 2 */ char d_name[]; /* 18 0 */ /* size: 24, cachelines: 1, members: 4 */ /* padding: 6 */ /* last cacheline: 24 bytes */ }; struct linux_dirent { long unsigned int d_ino; /* 0 8 */ long unsigned int d_off; /* 8 8 */ short unsigned int d_reclen; /* 16 2 */ char d_name[]; /* 18 0 */ /* size: 24, cachelines: 1, members: 4 */ /* padding: 6 */ /* last cacheline: 24 bytes */ }; struct compat_old_linux_dirent { compat_ulong_t d_ino; /* 0 4 */ compat_ulong_t d_offset; /* 4 4 */ short unsigned int d_namlen; /* 8 2 */ char d_name[]; /* 10 0 */ /* size: 12, cachelines: 1, members: 4 */ /* padding: 2 */ /* last cacheline: 12 bytes */ }; struct compat_linux_dirent { compat_ulong_t d_ino; /* 0 4 */ compat_ulong_t d_off; /* 4 4 */ short unsigned int d_reclen; /* 8 2 */ char d_name[]; /* 10 0 */ /* size: 12, cachelines: 1, members: 4 */ /* padding: 2 */ /* last cacheline: 12 bytes */ }; struct trace_event_raw_jbd2_run_stats { struct trace_entry ent; /* 0 8 */ dev_t dev; /* 8 4 */ tid_t tid; /* 12 4 */ long unsigned int wait; /* 16 8 */ long unsigned int request_delay; /* 24 8 */ long unsigned int running; /* 32 8 */ long unsigned int locked; /* 40 8 */ long unsigned int flushing; /* 48 8 */ long unsigned int logging; /* 56 8 */ /* --- cacheline 1 boundary (64 bytes) --- */ __u32 handle_count; /* 64 4 */ __u32 blocks; /* 68 4 */ __u32 blocks_logged; /* 72 4 */ char __data[]; /* 76 0 */ /* size: 80, cachelines: 2, members: 13 */ /* padding: 4 */ /* last cacheline: 16 bytes */ }; struct trace_event_raw_jbd2_checkpoint_stats { struct trace_entry ent; /* 0 8 */ dev_t dev; /* 8 4 */ tid_t tid; /* 12 4 */ long unsigned int chp_time; /* 16 8 */ __u32 forced_to_close; /* 24 4 */ __u32 written; /* 28 4 */ __u32 dropped; /* 32 4 */ char __data[]; /* 36 0 */ /* size: 40, cachelines: 1, members: 8 */ /* padding: 4 */ /* last cacheline: 40 bytes */ }; struct trace_event_raw_jbd2_shrink_checkpoint_list { struct trace_entry ent; /* 0 8 */ dev_t dev; /* 8 4 */ tid_t first_tid; /* 12 4 */ tid_t tid; /* 16 4 */ tid_t last_tid; /* 20 4 */ long unsigned int nr_freed; /* 24 8 */ tid_t next_tid; /* 32 4 */ char __data[]; /* 36 0 */ /* size: 40, cachelines: 1, members: 8 */ /* padding: 4 */ /* last cacheline: 40 bytes */ };
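
Many of the trace-event layouts above show the same pattern that pahole flags with "XXX 4 bytes hole, try to pack": a 4-byte dev_t is immediately followed by an 8-byte member (ino_t, sector_t, a u64, or a pointer), so the compiler inserts 4 bytes of padding to satisfy the larger member's alignment. The sketch below is illustrative only, not a kernel definition and not a proposed change to any real trace event: struct unpacked, struct packed, and the *_ish typedefs are local stand-ins that mirror the reported layout of trace_event_raw_ext4_es_lookup_extent_enter (minus its flexible __data[] member) and show how moving the 8-byte member ahead of the 4-byte ones removes the hole.

/* Minimal sketch (not a kernel definition): how the recurring
 * "4 bytes hole" after a 4-byte dev_t arises, and how reordering
 * members packs it.  All type and struct names here are local
 * stand-ins for illustration. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t dev_t_ish;     /* stands in for dev_t (4 bytes)       */
typedef uint64_t ino_t_ish;     /* stands in for ino_t (8 bytes)       */
typedef uint32_t ext4_lblk_ish; /* stands in for ext4_lblk_t (4 bytes) */

/* Mirrors the reported layout: the 8-byte ino must start on an 8-byte
 * boundary, so 4 bytes of padding follow dev (offsets 8..11 used,
 * 12..15 padding, ino at 16). */
struct unpacked {
	char ent[8];        /* stands in for struct trace_entry */
	dev_t_ish dev;      /* offset 8, size 4 -> 4-byte hole  */
	ino_t_ish ino;      /* offset 16                        */
	ext4_lblk_ish lblk; /* offset 24                        */
};

/* Same members with the 8-byte field moved ahead of the 4-byte ones:
 * the hole disappears and sizeof() drops from 32 to 24.  This only
 * demonstrates the "try to pack" hint, it is not a suggested change
 * to the real event layout. */
struct packed {
	char ent[8];
	ino_t_ish ino;      /* offset 8  */
	dev_t_ish dev;      /* offset 16 */
	ext4_lblk_ish lblk; /* offset 20 */
};

int main(void)
{
	printf("unpacked: size=%zu, ino at offset %zu\n",
	       sizeof(struct unpacked), offsetof(struct unpacked, ino));
	printf("packed:   size=%zu, ino at offset %zu\n",
	       sizeof(struct packed), offsetof(struct packed, ino));
	return 0;
}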
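
A second recurring pattern above is forced cacheline alignment: struct workqueue_struct and struct pcpu_chunk carry __attribute__((__aligned__(64))) on a member and on the whole struct, which is what pahole reports as "forced alignments", "forced holes", and the large trailing padding that rounds the size up to a 64-byte multiple. The stand-in struct cacheline_aligned_demo below is a minimal sketch of that effect under common x86-64 assumptions (8-byte pointers, 64-byte cachelines); its member names are invented for illustration and do not correspond to any kernel struct.

/* Minimal sketch of forced 64-byte (cacheline) alignment, as seen on
 * struct workqueue_struct and struct pcpu_chunk above.  Members are
 * stand-ins; only the alignment behaviour is the point. */
#include <stddef.h>
#include <stdio.h>

struct cacheline_aligned_demo {
	char header[16];                          /* offsets 0..15 */
	/* forced hole: the aligned member is pushed to offset 64 */
	unsigned int flags __attribute__((__aligned__(64)));
	/* ordinary 4-byte hole follows flags, since the pointer
	 * needs 8-byte alignment (compare flags/cpu_pwq above) */
	void *table;                              /* offset 72 */
	/* trailing padding rounds sizeof() up to a 64-byte multiple */
} __attribute__((__aligned__(64)));

int main(void)
{
	printf("flags at %zu, table at %zu, size %zu\n",
	       offsetof(struct cacheline_aligned_demo, flags),
	       offsetof(struct cacheline_aligned_demo, table),
	       sizeof(struct cacheline_aligned_demo));
	/* expected on x86-64: flags at 64, table at 72, size 128 */
	return 0;
}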