/* SPDX-License-Identifier: GPL-2.0 */
	/* We need to carefully read the error status, ACK the errors,
	 * prevent recursive traps, and pass the information on to C
	 * code for logging.
	 *
	 * We pass the AFAR in as-is, and we encode the status
	 * information as described in asm-sparc64/sfafsr.h
	 */
	.type		__spitfire_access_error,#function
__spitfire_access_error:
	/* Disable ESTATE error reporting so that we do not take
	 * recursive traps and RED state the processor.
	 */
	stxa		%g0, [%g0] ASI_ESTATE_ERROR_EN
	membar		#Sync

	mov		UDBE_UE, %g1
	ldxa		[%g0] ASI_AFSR, %g4

	/* __spitfire_cee_trap branches here with AFSR in %g4 and
	 * UDBE_CE in %g1.  It only clears ESTATE_ERR_CE in the ESTATE
	 * Error Enable register.
	 */
__spitfire_cee_trap_continue:
	ldxa		[%g0] ASI_AFAR, %g5

	rdpr		%tt, %g3
	and		%g3, 0x1ff, %g3		! Paranoia
	sllx		%g3, SFSTAT_TRAP_TYPE_SHIFT, %g3
	or		%g4, %g3, %g4
	rdpr		%tl, %g3
	cmp		%g3, 1
	mov		1, %g3
	bleu		%xcc, 1f
	 sllx		%g3, SFSTAT_TL_GT_ONE_SHIFT, %g3

	or		%g4, %g3, %g4

	/* Read in the UDB error register state, clearing the sticky
	 * error bits as-needed.  We only clear them if the UE bit is
	 * set.  Likewise, __spitfire_cee_trap below will only do so
	 * if the CE bit is set.
	 *
	 * NOTE: UltraSparc-I/II have high and low UDB error
	 *       registers, corresponding to the two UDB units
	 *       present on those chips.  UltraSparc-IIi only
	 *       has a single UDB, called "SDB" in the manual.
	 *       For IIi the upper UDB register always reads
	 *       as zero so for our purposes things will just
	 *       work with the checks below.
	 */
1:	ldxa		[%g0] ASI_UDBH_ERROR_R, %g3
	and		%g3, 0x3ff, %g7		! Paranoia
	sllx		%g7, SFSTAT_UDBH_SHIFT, %g7
	or		%g4, %g7, %g4
	andcc		%g3, %g1, %g3		! UDBE_UE or UDBE_CE
	be,pn		%xcc, 1f
	 nop
	stxa		%g3, [%g0] ASI_UDB_ERROR_W
	membar		#Sync

1:	mov		0x18, %g3
	ldxa		[%g3] ASI_UDBL_ERROR_R, %g3
	and		%g3, 0x3ff, %g7		! Paranoia
	sllx		%g7, SFSTAT_UDBL_SHIFT, %g7
	or		%g4, %g7, %g4
	andcc		%g3, %g1, %g3		! UDBE_UE or UDBE_CE
	be,pn		%xcc, 1f
	 nop
	mov		0x18, %g7
	stxa		%g3, [%g7] ASI_UDB_ERROR_W
	membar		#Sync

1:	/* Ok, now that we've latched the error state, clear the
	 * sticky bits in the AFSR.
	 */
	stxa		%g4, [%g0] ASI_AFSR
	membar		#Sync

	rdpr		%tl, %g2
	cmp		%g2, 1
	rdpr		%pil, %g2
	bleu,pt		%xcc, 1f
	 wrpr		%g0, PIL_NORMAL_MAX, %pil

	ba,pt		%xcc, etraptl1
	 rd		%pc, %g7

	ba,a,pt		%xcc, 2f
	 nop

1:	ba,pt		%xcc, etrap_irq
	 rd		%pc, %g7

2:
#ifdef CONFIG_TRACE_IRQFLAGS
	call		trace_hardirqs_off
	 nop
#endif
	mov		%l4, %o1
	mov		%l5, %o2
	call		spitfire_access_error
	 add		%sp, PTREGS_OFF, %o0
	ba,a,pt		%xcc, rtrap
	.size		__spitfire_access_error,.-__spitfire_access_error

	/* This is the trap handler entry point for ECC correctable
	 * errors.  They are corrected, but we listen for the trap so
	 * that the event can be logged.
	 *
	 * Disrupting errors are either:
	 * 1) single-bit ECC errors during UDB reads to system
	 *    memory
	 * 2) data parity errors during write-back events
	 *
	 * As far as I can make out from the manual, the CEE trap is
	 * only for correctable errors during memory read accesses by
	 * the front-end of the processor.
	 *
	 * The code below is only for trap level 1 CEE events, as it
	 * is the only situation where we can safely record and log.
	 * For trap level >1 we just clear the CE bit in the AFSR and
	 * return.
	 *
	 * This is just like __spitfire_access_error above, but it
	 * specifically handles correctable errors.  If an
	 * uncorrectable error is indicated in the AFSR we will branch
	 * directly above to __spitfire_access_error to handle it
	 * instead.  Uncorrectable therefore takes priority over
	 * correctable, and the error logging C code will notice this
	 * case by inspecting the trap type.
	 */
	.type		__spitfire_cee_trap,#function
__spitfire_cee_trap:
	ldxa		[%g0] ASI_AFSR, %g4	! Get AFSR
	mov		1, %g3
	sllx		%g3, SFAFSR_UE_SHIFT, %g3
	andcc		%g4, %g3, %g0		! Check for UE
	bne,pn		%xcc, __spitfire_access_error
	 nop

	/* Ok, in this case we only have a correctable error.
	 * Indicate we only wish to capture that state in register
	 * %g1, and we only disable CE error reporting unlike UE
	 * handling which disables all errors.
	 */
	ldxa		[%g0] ASI_ESTATE_ERROR_EN, %g3
	andn		%g3, ESTATE_ERR_CE, %g3
	stxa		%g3, [%g0] ASI_ESTATE_ERROR_EN
	membar		#Sync

	/* Preserve AFSR in %g4, indicate UDB state to capture in %g1 */
	ba,pt		%xcc, __spitfire_cee_trap_continue
	 mov		UDBE_CE, %g1
	.size		__spitfire_cee_trap,.-__spitfire_cee_trap

	.type		__spitfire_data_access_exception_tl1,#function
__spitfire_data_access_exception_tl1:
	rdpr		%pstate, %g4
	wrpr		%g4, PSTATE_MG|PSTATE_AG, %pstate
	mov		TLB_SFSR, %g3
	mov		DMMU_SFAR, %g5
	ldxa		[%g3] ASI_DMMU, %g4	! Get SFSR
	ldxa		[%g5] ASI_DMMU, %g5	! Get SFAR
	stxa		%g0, [%g3] ASI_DMMU	! Clear SFSR.FaultValid bit
	membar		#Sync
	rdpr		%tt, %g3
	cmp		%g3, 0x80		! first win spill/fill trap
	blu,pn		%xcc, 1f
	 cmp		%g3, 0xff		! last win spill/fill trap
	bgu,pn		%xcc, 1f
	 nop
	ba,pt		%xcc, winfix_dax
	 rdpr		%tpc, %g3
1:	sethi		%hi(109f), %g7
	ba,pt		%xcc, etraptl1
109:	 or		%g7, %lo(109b), %g7
	mov		%l4, %o1
	mov		%l5, %o2
	call		spitfire_data_access_exception_tl1
	 add		%sp, PTREGS_OFF, %o0
	ba,a,pt		%xcc, rtrap
	.size		__spitfire_data_access_exception_tl1,.-__spitfire_data_access_exception_tl1

	.type		__spitfire_data_access_exception,#function
__spitfire_data_access_exception:
	rdpr		%pstate, %g4
	wrpr		%g4, PSTATE_MG|PSTATE_AG, %pstate
	mov		TLB_SFSR, %g3
	mov		DMMU_SFAR, %g5
	ldxa		[%g3] ASI_DMMU, %g4	! Get SFSR
	ldxa		[%g5] ASI_DMMU, %g5	! Get SFAR
	stxa		%g0, [%g3] ASI_DMMU	! Clear SFSR.FaultValid bit
	membar		#Sync
	sethi		%hi(109f), %g7
	ba,pt		%xcc, etrap
109:	 or		%g7, %lo(109b), %g7
	mov		%l4, %o1
	mov		%l5, %o2
	call		spitfire_data_access_exception
	 add		%sp, PTREGS_OFF, %o0
	ba,a,pt		%xcc, rtrap
	.size		__spitfire_data_access_exception,.-__spitfire_data_access_exception

	.type		__spitfire_insn_access_exception_tl1,#function
__spitfire_insn_access_exception_tl1:
	rdpr		%pstate, %g4
	wrpr		%g4, PSTATE_MG|PSTATE_AG, %pstate
	mov		TLB_SFSR, %g3
	ldxa		[%g3] ASI_IMMU, %g4	! Get SFSR
	rdpr		%tpc, %g5		! IMMU has no SFAR, use TPC
	stxa		%g0, [%g3] ASI_IMMU	! Clear FaultValid bit
	membar		#Sync
	sethi		%hi(109f), %g7
	ba,pt		%xcc, etraptl1
109:	 or		%g7, %lo(109b), %g7
	mov		%l4, %o1
	mov		%l5, %o2
	call		spitfire_insn_access_exception_tl1
	 add		%sp, PTREGS_OFF, %o0
	ba,a,pt		%xcc, rtrap
	.size		__spitfire_insn_access_exception_tl1,.-__spitfire_insn_access_exception_tl1

	.type		__spitfire_insn_access_exception,#function
__spitfire_insn_access_exception:
	rdpr		%pstate, %g4
	wrpr		%g4, PSTATE_MG|PSTATE_AG, %pstate
	mov		TLB_SFSR, %g3
	ldxa		[%g3] ASI_IMMU, %g4	! Get SFSR
	rdpr		%tpc, %g5		! IMMU has no SFAR, use TPC
	stxa		%g0, [%g3] ASI_IMMU	! Clear FaultValid bit
	membar		#Sync
	sethi		%hi(109f), %g7
	ba,pt		%xcc, etrap
109:	 or		%g7, %lo(109b), %g7
	mov		%l4, %o1
	mov		%l5, %o2
	call		spitfire_insn_access_exception
	 add		%sp, PTREGS_OFF, %o0
	ba,a,pt		%xcc, rtrap
	.size		__spitfire_insn_access_exception,.-__spitfire_insn_access_exception
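
The access-error and CEE handlers above pack the AFSR, the trap type, a TL>1 flag, and both UDB error register values into the single status word passed to the C logging routines, with the AFAR passed through unchanged. The sketch below illustrates how such a packed word can be unpacked on the C side. It is a minimal, self-contained example, not the kernel's implementation: the field positions and the print_status()/main() helpers are assumptions inferred from the shift-and-mask sequence in the assembly (9-bit trap type, 10-bit UDB fields); the authoritative layout is the SFSTAT_* definitions in the sfafsr.h header referenced in the comment at the top of this file.

/* Hedged sketch: decode a status word shaped like the one built by
 * __spitfire_access_error / __spitfire_cee_trap.  All shift values
 * below are illustrative assumptions; see sfafsr.h for the real ones.
 */
#include <stdint.h>
#include <stdio.h>

#define SFSTAT_AFSR_MASK	0x1ffffffffULL	/* AFSR bits (assumed width) */
#define SFSTAT_TRAP_TYPE_SHIFT	33		/* 9-bit trap type (assumed) */
#define SFSTAT_TL_GT_ONE_SHIFT	42		/* TL > 1 flag (assumed) */
#define SFSTAT_UDBL_SHIFT	44		/* 10-bit UDB-low status (assumed) */
#define SFSTAT_UDBH_SHIFT	54		/* 10-bit UDB-high status (assumed) */

static void print_status(uint64_t status, uint64_t afar)
{
	uint64_t afsr  = status & SFSTAT_AFSR_MASK;
	unsigned tt    = (status >> SFSTAT_TRAP_TYPE_SHIFT) & 0x1ff;
	unsigned tlgt1 = (status >> SFSTAT_TL_GT_ONE_SHIFT) & 0x1;
	unsigned udbl  = (status >> SFSTAT_UDBL_SHIFT) & 0x3ff;
	unsigned udbh  = (status >> SFSTAT_UDBH_SHIFT) & 0x3ff;

	printf("AFSR[%016llx] AFAR[%016llx] TT[%03x] TL>1[%u] UDB-H[%03x] UDB-L[%03x]\n",
	       (unsigned long long)afsr, (unsigned long long)afar,
	       tt, tlgt1, udbh, udbl);
}

int main(void)
{
	/* Hypothetical example word: an arbitrary trap type and one
	 * bit set in the low UDB status field, purely for illustration.
	 */
	uint64_t status = (0x32ULL << SFSTAT_TRAP_TYPE_SHIFT) |
			  (0x100ULL << SFSTAT_UDBL_SHIFT) |
			  0x00200000ULL;
	print_status(status, 0x40000000ULL);
	return 0;
}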