// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"

struct xstats xfsstats;

static int counter_val(struct xfsstats __percpu *stats, int idx)
{
	int val = 0, cpu;

	for_each_possible_cpu(cpu)
		val += *(((__u32 *)per_cpu_ptr(stats, cpu) + idx));
	return val;
}

int xfs_stats_format(struct xfsstats __percpu *stats, char *buf)
{
	int		i, j;
	int		len = 0;
	uint64_t	xs_xstrat_bytes = 0;
	uint64_t	xs_write_bytes = 0;
	uint64_t	xs_read_bytes = 0;
	uint64_t	defer_relog = 0;

	static const struct xstats_entry {
		char	*desc;
		int	endpoint;
	} xstats[] = {
		{ "extent_alloc",	xfsstats_offset(xs_abt_lookup)	},
		{ "abt",		xfsstats_offset(xs_blk_mapr)	},
		{ "blk_map",		xfsstats_offset(xs_bmbt_lookup)	},
		{ "bmbt",		xfsstats_offset(xs_dir_lookup)	},
		{ "dir",		xfsstats_offset(xs_trans_sync)	},
		{ "trans",		xfsstats_offset(xs_ig_attempts)	},
		{ "ig",			xfsstats_offset(xs_log_writes)	},
		{ "log",		xfsstats_offset(xs_try_logspace)},
		{ "push_ail",		xfsstats_offset(xs_xstrat_quick)},
		{ "xstrat",		xfsstats_offset(xs_write_calls)	},
		{ "rw",			xfsstats_offset(xs_attr_get)	},
		{ "attr",		xfsstats_offset(xs_iflush_count)},
		{ "icluster",		xfsstats_offset(vn_active)	},
		{ "vnodes",		xfsstats_offset(xb_get)		},
		{ "buf",		xfsstats_offset(xs_abtb_2)	},
		{ "abtb2",		xfsstats_offset(xs_abtc_2)	},
		{ "abtc2",		xfsstats_offset(xs_bmbt_2)	},
		{ "bmbt2",		xfsstats_offset(xs_ibt_2)	},
		{ "ibt2",		xfsstats_offset(xs_fibt_2)	},
		{ "fibt2",		xfsstats_offset(xs_rmap_2)	},
		{ "rmapbt",		xfsstats_offset(xs_refcbt_2)	},
		{ "refcntbt",		xfsstats_offset(xs_rmap_mem_2)	},
		{ "rmapbt_mem",		xfsstats_offset(xs_rcbag_2)	},
		{ "rcbagbt",		xfsstats_offset(xs_qm_dqreclaims)},
		/* we print both series of quota information together */
		{ "qm",			xfsstats_offset(xs_xstrat_bytes)},
	};

	/* Loop over all stats groups */

	for (i = j = 0; i < ARRAY_SIZE(xstats); i++) {
		len += scnprintf(buf + len, PATH_MAX - len, "%s",
				 xstats[i].desc);
		/* inner loop does each group */
		for (; j < xstats[i].endpoint; j++)
			len += scnprintf(buf + len, PATH_MAX - len, " %u",
					 counter_val(stats, j));
		len += scnprintf(buf + len, PATH_MAX - len, "\n");
	}
	/* extra precision counters */
	for_each_possible_cpu(i) {
		xs_xstrat_bytes += per_cpu_ptr(stats, i)->s.xs_xstrat_bytes;
		xs_write_bytes += per_cpu_ptr(stats, i)->s.xs_write_bytes;
		xs_read_bytes += per_cpu_ptr(stats, i)->s.xs_read_bytes;
		defer_relog += per_cpu_ptr(stats, i)->s.defer_relog;
	}

	len += scnprintf(buf + len, PATH_MAX-len, "xpc %llu %llu %llu\n",
			 xs_xstrat_bytes, xs_write_bytes, xs_read_bytes);
	len += scnprintf(buf + len, PATH_MAX-len, "defer_relog %llu\n",
			 defer_relog);
	len += scnprintf(buf + len, PATH_MAX-len, "debug %u\n",
#if defined(DEBUG)
		1);
#else
		0);
#endif

	return len;
}

void xfs_stats_clearall(struct xfsstats __percpu *stats)
{
	int		c;
	uint32_t	vn_active;

	xfs_notice(NULL, "Clearing xfsstats");
	for_each_possible_cpu(c) {
		preempt_disable();
		/* save vn_active, it's a universal truth! */
		vn_active = per_cpu_ptr(stats, c)->s.vn_active;
		memset(per_cpu_ptr(stats, c), 0, sizeof(*stats));
		per_cpu_ptr(stats, c)->s.vn_active = vn_active;
		preempt_enable();
	}
}

#ifdef CONFIG_PROC_FS
/* legacy quota interfaces */
#ifdef CONFIG_XFS_QUOTA

#define XFSSTAT_START_XQMSTAT	xfsstats_offset(xs_qm_dqreclaims)
#define XFSSTAT_END_XQMSTAT	xfsstats_offset(xs_qm_dquot)

static int xqm_proc_show(struct seq_file *m, void *v)
{
	/* maximum; incore; ratio free to inuse; freelist */
	seq_printf(m, "%d\t%d\t%d\t%u\n",
		   0, counter_val(xfsstats.xs_stats, XFSSTAT_END_XQMSTAT),
		   0, counter_val(xfsstats.xs_stats, XFSSTAT_END_XQMSTAT + 1));
	return 0;
}

/* legacy quota stats interface no 2 */
static int xqmstat_proc_show(struct seq_file *m, void *v)
{
	int j;

	seq_puts(m, "qm");
	for (j = XFSSTAT_START_XQMSTAT; j < XFSSTAT_END_XQMSTAT; j++)
		seq_printf(m, " %u", counter_val(xfsstats.xs_stats, j));
	seq_putc(m, '\n');
	return 0;
}
#endif /* CONFIG_XFS_QUOTA */

int
xfs_init_procfs(void)
{
	if (!proc_mkdir("fs/xfs", NULL))
		return -ENOMEM;

	if (!proc_symlink("fs/xfs/stat", NULL,
			  "/sys/fs/xfs/stats/stats"))
		goto out;

#ifdef CONFIG_XFS_QUOTA
	if (!proc_create_single("fs/xfs/xqmstat", 0, NULL, xqmstat_proc_show))
		goto out;
	if (!proc_create_single("fs/xfs/xqm", 0, NULL, xqm_proc_show))
		goto out;
#endif
	return 0;

out:
	remove_proc_subtree("fs/xfs", NULL);
	return -ENOMEM;
}

void
xfs_cleanup_procfs(void)
{
	remove_proc_subtree("fs/xfs", NULL);
}
#endif /* CONFIG_PROC_FS */
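
For reference, here is a minimal user-space sketch of how the text produced by xfs_stats_format() can be read back through the /proc/fs/xfs/stat entry registered in xfs_init_procfs() above. It is an illustration only, not part of this file or of the kernel build; it assumes a kernel with CONFIG_PROC_FS and XFS loaded, and simply echoes each line, whose layout is a group name ("extent_alloc", "abt", ...) followed by that group's counters, then the "xpc", "defer_relog" and "debug" summary lines.

/* xfs_stat_dump.c - illustrative sketch, not part of the kernel tree. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	char line[1024];
	FILE *fp = fopen("/proc/fs/xfs/stat", "r");

	if (!fp) {
		perror("fopen /proc/fs/xfs/stat");
		return EXIT_FAILURE;
	}

	/* Each line is "<group> <ctr> <ctr> ...", as emitted by xfs_stats_format(). */
	while (fgets(line, sizeof(line), fp))
		fputs(line, stdout);

	fclose(fp);
	return EXIT_SUCCESS;
}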