// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ufs/cylinder.c
 *
 * Copyright (C) 1998
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 *
 * ext2 - inode (block) bitmap caching inspired
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/bitops.h>

#include <asm/byteorder.h>

#include "ufs_fs.h"
#include "ufs.h"
#include "swab.h"
#include "util.h"
28 */ 27 */ 29 static void ufs_read_cylinder (struct super_bl 28 static void ufs_read_cylinder (struct super_block * sb, 30 unsigned cgno, unsigned bitmap_nr) 29 unsigned cgno, unsigned bitmap_nr) 31 { 30 { 32 struct ufs_sb_info * sbi = UFS_SB(sb); 31 struct ufs_sb_info * sbi = UFS_SB(sb); 33 struct ufs_sb_private_info * uspi; 32 struct ufs_sb_private_info * uspi; 34 struct ufs_cg_private_info * ucpi; 33 struct ufs_cg_private_info * ucpi; 35 struct ufs_cylinder_group * ucg; 34 struct ufs_cylinder_group * ucg; 36 unsigned i, j; 35 unsigned i, j; 37 36 38 UFSD("ENTER, cgno %u, bitmap_nr %u\n", 37 UFSD("ENTER, cgno %u, bitmap_nr %u\n", cgno, bitmap_nr); 39 uspi = sbi->s_uspi; 38 uspi = sbi->s_uspi; 40 ucpi = sbi->s_ucpi[bitmap_nr]; 39 ucpi = sbi->s_ucpi[bitmap_nr]; 41 ucg = (struct ufs_cylinder_group *)sbi 40 ucg = (struct ufs_cylinder_group *)sbi->s_ucg[cgno]->b_data; 42 41 43 UCPI_UBH(ucpi)->fragment = ufs_cgcmin( 42 UCPI_UBH(ucpi)->fragment = ufs_cgcmin(cgno); 44 UCPI_UBH(ucpi)->count = uspi->s_cgsize 43 UCPI_UBH(ucpi)->count = uspi->s_cgsize >> sb->s_blocksize_bits; 45 /* 44 /* 46 * We have already the first fragment 45 * We have already the first fragment of cylinder group block in buffer 47 */ 46 */ 48 UCPI_UBH(ucpi)->bh[0] = sbi->s_ucg[cgn 47 UCPI_UBH(ucpi)->bh[0] = sbi->s_ucg[cgno]; 49 for (i = 1; i < UCPI_UBH(ucpi)->count; 48 for (i = 1; i < UCPI_UBH(ucpi)->count; i++) 50 if (!(UCPI_UBH(ucpi)->bh[i] = 49 if (!(UCPI_UBH(ucpi)->bh[i] = sb_bread(sb, UCPI_UBH(ucpi)->fragment + i))) 51 goto failed; 50 goto failed; 52 sbi->s_cgno[bitmap_nr] = cgno; 51 sbi->s_cgno[bitmap_nr] = cgno; 53 52 54 ucpi->c_cgx = fs32_to_cpu(sb, ucg- 53 ucpi->c_cgx = fs32_to_cpu(sb, ucg->cg_cgx); 55 ucpi->c_ncyl = fs16_to_cpu(sb, ucg- 54 ucpi->c_ncyl = fs16_to_cpu(sb, ucg->cg_ncyl); 56 ucpi->c_niblk = fs16_to_cpu(sb, ucg- 55 ucpi->c_niblk = fs16_to_cpu(sb, ucg->cg_niblk); 57 ucpi->c_ndblk = fs32_to_cpu(sb, ucg- 56 ucpi->c_ndblk = fs32_to_cpu(sb, ucg->cg_ndblk); 58 ucpi->c_rotor = 
fs32_to_cpu(sb, ucg- 57 ucpi->c_rotor = fs32_to_cpu(sb, ucg->cg_rotor); 59 ucpi->c_frotor = fs32_to_cpu(sb, ucg- 58 ucpi->c_frotor = fs32_to_cpu(sb, ucg->cg_frotor); 60 ucpi->c_irotor = fs32_to_cpu(sb, ucg- 59 ucpi->c_irotor = fs32_to_cpu(sb, ucg->cg_irotor); 61 ucpi->c_btotoff = fs32_to_cpu(sb, ucg- 60 ucpi->c_btotoff = fs32_to_cpu(sb, ucg->cg_btotoff); 62 ucpi->c_boff = fs32_to_cpu(sb, ucg- 61 ucpi->c_boff = fs32_to_cpu(sb, ucg->cg_boff); 63 ucpi->c_iusedoff = fs32_to_cpu(sb, ucg 62 ucpi->c_iusedoff = fs32_to_cpu(sb, ucg->cg_iusedoff); 64 ucpi->c_freeoff = fs32_to_cpu(sb, ucg- 63 ucpi->c_freeoff = fs32_to_cpu(sb, ucg->cg_freeoff); 65 ucpi->c_nextfreeoff = fs32_to_cpu(sb, 64 ucpi->c_nextfreeoff = fs32_to_cpu(sb, ucg->cg_nextfreeoff); 66 ucpi->c_clustersumoff = fs32_to_cpu(sb 65 ucpi->c_clustersumoff = fs32_to_cpu(sb, ucg->cg_u.cg_44.cg_clustersumoff); 67 ucpi->c_clusteroff = fs32_to_cpu(sb, u 66 ucpi->c_clusteroff = fs32_to_cpu(sb, ucg->cg_u.cg_44.cg_clusteroff); 68 ucpi->c_nclusterblks = fs32_to_cpu(sb, 67 ucpi->c_nclusterblks = fs32_to_cpu(sb, ucg->cg_u.cg_44.cg_nclusterblks); 69 UFSD("EXIT\n"); 68 UFSD("EXIT\n"); 70 return; 69 return; 71 70 72 failed: 71 failed: 73 for (j = 1; j < i; j++) 72 for (j = 1; j < i; j++) 74 brelse (sbi->s_ucg[j]); 73 brelse (sbi->s_ucg[j]); 75 sbi->s_cgno[bitmap_nr] = UFS_CGNO_EMPT 74 sbi->s_cgno[bitmap_nr] = UFS_CGNO_EMPTY; 76 ufs_error (sb, "ufs_read_cylinder", "c 75 ufs_error (sb, "ufs_read_cylinder", "can't read cylinder group block %u", cgno); 77 } 76 } 78 77 79 /* 78 /* 80 * Remove cylinder group from cache, doesn't r 79 * Remove cylinder group from cache, doesn't release memory 81 * allocated for cylinder group (this is done 80 * allocated for cylinder group (this is done at ufs_put_super only). 
82 */ 81 */ 83 void ufs_put_cylinder (struct super_block * sb 82 void ufs_put_cylinder (struct super_block * sb, unsigned bitmap_nr) 84 { 83 { 85 struct ufs_sb_info * sbi = UFS_SB(sb); 84 struct ufs_sb_info * sbi = UFS_SB(sb); 86 struct ufs_sb_private_info * uspi; 85 struct ufs_sb_private_info * uspi; 87 struct ufs_cg_private_info * ucpi; 86 struct ufs_cg_private_info * ucpi; 88 struct ufs_cylinder_group * ucg; 87 struct ufs_cylinder_group * ucg; 89 unsigned i; 88 unsigned i; 90 89 91 UFSD("ENTER, bitmap_nr %u\n", bitmap_n 90 UFSD("ENTER, bitmap_nr %u\n", bitmap_nr); 92 91 93 uspi = sbi->s_uspi; 92 uspi = sbi->s_uspi; 94 if (sbi->s_cgno[bitmap_nr] == UFS_CGNO 93 if (sbi->s_cgno[bitmap_nr] == UFS_CGNO_EMPTY) { 95 UFSD("EXIT\n"); 94 UFSD("EXIT\n"); 96 return; 95 return; 97 } 96 } 98 ucpi = sbi->s_ucpi[bitmap_nr]; 97 ucpi = sbi->s_ucpi[bitmap_nr]; 99 ucg = ubh_get_ucg(UCPI_UBH(ucpi)); 98 ucg = ubh_get_ucg(UCPI_UBH(ucpi)); 100 99 101 if (uspi->s_ncg > UFS_MAX_GROUP_LOADED 100 if (uspi->s_ncg > UFS_MAX_GROUP_LOADED && bitmap_nr >= sbi->s_cg_loaded) { 102 ufs_panic (sb, "ufs_put_cylind 101 ufs_panic (sb, "ufs_put_cylinder", "internal error"); 103 return; 102 return; 104 } 103 } 105 /* 104 /* 106 * rotor is not so important data, so 105 * rotor is not so important data, so we put it to disk 107 * at the end of working with cylinder 106 * at the end of working with cylinder 108 */ 107 */ 109 ucg->cg_rotor = cpu_to_fs32(sb, ucpi-> 108 ucg->cg_rotor = cpu_to_fs32(sb, ucpi->c_rotor); 110 ucg->cg_frotor = cpu_to_fs32(sb, ucpi- 109 ucg->cg_frotor = cpu_to_fs32(sb, ucpi->c_frotor); 111 ucg->cg_irotor = cpu_to_fs32(sb, ucpi- 110 ucg->cg_irotor = cpu_to_fs32(sb, ucpi->c_irotor); 112 ubh_mark_buffer_dirty (UCPI_UBH(ucpi)) 111 ubh_mark_buffer_dirty (UCPI_UBH(ucpi)); 113 for (i = 1; i < UCPI_UBH(ucpi)->count; 112 for (i = 1; i < UCPI_UBH(ucpi)->count; i++) { 114 brelse (UCPI_UBH(ucpi)->bh[i]) 113 brelse (UCPI_UBH(ucpi)->bh[i]); 115 } 114 } 116 115 117 sbi->s_cgno[bitmap_nr] = 
UFS_CGNO_EMPT 116 sbi->s_cgno[bitmap_nr] = UFS_CGNO_EMPTY; 118 UFSD("EXIT\n"); 117 UFSD("EXIT\n"); 119 } 118 } 120 119 121 /* 120 /* 122 * Find cylinder group in cache and return it 121 * Find cylinder group in cache and return it as pointer. 123 * If cylinder group is not in cache, we will 122 * If cylinder group is not in cache, we will load it from disk. 124 * 123 * 125 * The cache is managed by LRU algorithm. 124 * The cache is managed by LRU algorithm. 126 */ 125 */ 127 struct ufs_cg_private_info * ufs_load_cylinder 126 struct ufs_cg_private_info * ufs_load_cylinder ( 128 struct super_block * sb, unsigned cgno 127 struct super_block * sb, unsigned cgno) 129 { 128 { 130 struct ufs_sb_info * sbi = UFS_SB(sb); 129 struct ufs_sb_info * sbi = UFS_SB(sb); 131 struct ufs_sb_private_info * uspi; 130 struct ufs_sb_private_info * uspi; 132 struct ufs_cg_private_info * ucpi; 131 struct ufs_cg_private_info * ucpi; 133 unsigned cg, i, j; 132 unsigned cg, i, j; 134 133 135 UFSD("ENTER, cgno %u\n", cgno); 134 UFSD("ENTER, cgno %u\n", cgno); 136 135 137 uspi = sbi->s_uspi; 136 uspi = sbi->s_uspi; 138 if (cgno >= uspi->s_ncg) { 137 if (cgno >= uspi->s_ncg) { 139 ufs_panic (sb, "ufs_load_cylin 138 ufs_panic (sb, "ufs_load_cylinder", "internal error, high number of cg"); 140 return NULL; 139 return NULL; 141 } 140 } 142 /* 141 /* 143 * Cylinder group number cg it in cach 142 * Cylinder group number cg it in cache and it was last used 144 */ 143 */ 145 if (sbi->s_cgno[0] == cgno) { 144 if (sbi->s_cgno[0] == cgno) { 146 UFSD("EXIT\n"); 145 UFSD("EXIT\n"); 147 return sbi->s_ucpi[0]; 146 return sbi->s_ucpi[0]; 148 } 147 } 149 /* 148 /* 150 * Number of cylinder groups is not hi 149 * Number of cylinder groups is not higher than UFS_MAX_GROUP_LOADED 151 */ 150 */ 152 if (uspi->s_ncg <= UFS_MAX_GROUP_LOADE 151 if (uspi->s_ncg <= UFS_MAX_GROUP_LOADED) { 153 if (sbi->s_cgno[cgno] != UFS_C 152 if (sbi->s_cgno[cgno] != UFS_CGNO_EMPTY) { 154 if (sbi->s_cgno[cgno] 153 if (sbi->s_cgno[cgno] 
!= cgno) { 155 ufs_panic (sb, 154 ufs_panic (sb, "ufs_load_cylinder", "internal error, wrong number of cg in cache"); 156 UFSD("EXIT (FA 155 UFSD("EXIT (FAILED)\n"); 157 return NULL; 156 return NULL; 158 } 157 } 159 else { 158 else { 160 UFSD("EXIT\n") 159 UFSD("EXIT\n"); 161 return sbi->s_ 160 return sbi->s_ucpi[cgno]; 162 } 161 } 163 } else { 162 } else { 164 ufs_read_cylinder (sb, 163 ufs_read_cylinder (sb, cgno, cgno); 165 UFSD("EXIT\n"); 164 UFSD("EXIT\n"); 166 return sbi->s_ucpi[cgn 165 return sbi->s_ucpi[cgno]; 167 } 166 } 168 } 167 } 169 /* 168 /* 170 * Cylinder group number cg is in cach 169 * Cylinder group number cg is in cache but it was not last used, 171 * we will move to the first position 170 * we will move to the first position 172 */ 171 */ 173 for (i = 0; i < sbi->s_cg_loaded && sb 172 for (i = 0; i < sbi->s_cg_loaded && sbi->s_cgno[i] != cgno; i++); 174 if (i < sbi->s_cg_loaded && sbi->s_cgn 173 if (i < sbi->s_cg_loaded && sbi->s_cgno[i] == cgno) { 175 cg = sbi->s_cgno[i]; 174 cg = sbi->s_cgno[i]; 176 ucpi = sbi->s_ucpi[i]; 175 ucpi = sbi->s_ucpi[i]; 177 for (j = i; j > 0; j--) { 176 for (j = i; j > 0; j--) { 178 sbi->s_cgno[j] = sbi-> 177 sbi->s_cgno[j] = sbi->s_cgno[j-1]; 179 sbi->s_ucpi[j] = sbi-> 178 sbi->s_ucpi[j] = sbi->s_ucpi[j-1]; 180 } 179 } 181 sbi->s_cgno[0] = cg; 180 sbi->s_cgno[0] = cg; 182 sbi->s_ucpi[0] = ucpi; 181 sbi->s_ucpi[0] = ucpi; 183 /* 182 /* 184 * Cylinder group number cg is not in 183 * Cylinder group number cg is not in cache, we will read it from disk 185 * and put it to the first position 184 * and put it to the first position 186 */ 185 */ 187 } else { 186 } else { 188 if (sbi->s_cg_loaded < UFS_MAX 187 if (sbi->s_cg_loaded < UFS_MAX_GROUP_LOADED) 189 sbi->s_cg_loaded++; 188 sbi->s_cg_loaded++; 190 else 189 else 191 ufs_put_cylinder (sb, 190 ufs_put_cylinder (sb, UFS_MAX_GROUP_LOADED-1); 192 ucpi = sbi->s_ucpi[sbi->s_cg_l 191 ucpi = sbi->s_ucpi[sbi->s_cg_loaded - 1]; 193 for (j = sbi->s_cg_loaded - 1; 192 for (j = 
sbi->s_cg_loaded - 1; j > 0; j--) { 194 sbi->s_cgno[j] = sbi-> 193 sbi->s_cgno[j] = sbi->s_cgno[j-1]; 195 sbi->s_ucpi[j] = sbi-> 194 sbi->s_ucpi[j] = sbi->s_ucpi[j-1]; 196 } 195 } 197 sbi->s_ucpi[0] = ucpi; 196 sbi->s_ucpi[0] = ucpi; 198 ufs_read_cylinder (sb, cgno, 0 197 ufs_read_cylinder (sb, cgno, 0); 199 } 198 } 200 UFSD("EXIT\n"); 199 UFSD("EXIT\n"); 201 return sbi->s_ucpi[0]; 200 return sbi->s_ucpi[0]; 202 } 201 } 203 202
Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.