/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright 2015, Cyril Bur, IBM Corp.
 */

#include "basic_asm.h"
#include "vmx_asm.h"

# Should be safe from C, only touches r4, r5 and v0,v1,v2
FUNC_START(check_vmx)
	PUSH_BASIC_STACK(32)
	mr r4,r3
	li r3,1 # assume a bad result
	li r5,0
	lvx v0,r5,r4
	vcmpequd. v1,v0,v20
	vmr v2,v1

	addi r5,r5,16
	lvx v0,r5,r4
	vcmpequd. v1,v0,v21
	vand v2,v2,v1

	addi r5,r5,16
	lvx v0,r5,r4
	vcmpequd. v1,v0,v22
	vand v2,v2,v1

	addi r5,r5,16
	lvx v0,r5,r4
	vcmpequd. v1,v0,v23
	vand v2,v2,v1

	addi r5,r5,16
	lvx v0,r5,r4
	vcmpequd. v1,v0,v24
	vand v2,v2,v1

	addi r5,r5,16
	lvx v0,r5,r4
	vcmpequd. v1,v0,v25
	vand v2,v2,v1

	addi r5,r5,16
	lvx v0,r5,r4
	vcmpequd. v1,v0,v26
	vand v2,v2,v1

	addi r5,r5,16
	lvx v0,r5,r4
	vcmpequd. v1,v0,v27
	vand v2,v2,v1

	addi r5,r5,16
	lvx v0,r5,r4
	vcmpequd. v1,v0,v28
	vand v2,v2,v1

	addi r5,r5,16
	lvx v0,r5,r4
	vcmpequd. v1,v0,v29
	vand v2,v2,v1

	addi r5,r5,16
	lvx v0,r5,r4
	vcmpequd. v1,v0,v30
	vand v2,v2,v1

	addi r5,r5,16
	lvx v0,r5,r4
	vcmpequd. v1,v0,v31
	vand v2,v2,v1

	li r5,STACK_FRAME_LOCAL(0,0)
	stvx v2,r5,sp
	ldx r0,r5,sp
	cmpdi r0,0xffffffffffffffff
	bne 1f
	li r3,0
1:	POP_BASIC_STACK(32)
	blr
FUNC_END(check_vmx)

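# The routine above is only reached from the functions below; its contract can
# be summarised with this hedged C-level sketch (the prototype and the varray
# name are assumptions inferred from the register usage, not part of the harness):
#
#	int check_vmx(vector int *varray);
#
# It loads varray[0..11] (12 x 16 bytes) and compares each quadword against the
# corresponding non-volatile register v20-v31, returning 0 in r3 when everything
# matches and 1 otherwise.
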
# Safe from C
FUNC_START(test_vmx)
	# r3 holds pointer to varray
	# r4 holds pointer to the pid, which receives the result of fork
	# v20-v31 are non-volatile
	PUSH_BASIC_STACK(512)
	std r3,STACK_FRAME_PARAM(0)(sp) # address of varray
	std r4,STACK_FRAME_PARAM(1)(sp) # address of pid
	PUSH_VMX(STACK_FRAME_LOCAL(2,0),r4)

	bl load_vmx
	nop

	li r0,__NR_fork
	sc
	# Pass the result of fork back to the caller
	ld r9,STACK_FRAME_PARAM(1)(sp)
	std r3,0(r9)

	ld r3,STACK_FRAME_PARAM(0)(sp)
	bl check_vmx
	nop

	POP_VMX(STACK_FRAME_LOCAL(2,0),r4)
	POP_BASIC_STACK(512)
	blr
FUNC_END(test_vmx)

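# Hedged sketch of how a C harness might drive test_vmx (illustrative only;
# the prototype is inferred from the parameter comments above, and the names
# varray, fork_pid and child_ret are hypothetical):
#
#	extern int test_vmx(vector int *varray, pid_t *pid);
#
#	pid_t fork_pid;
#	int ret, child_ret;
#
#	ret = test_vmx(varray, &fork_pid);	/* forks with v20-v31 loaded */
#	if (fork_pid == 0)
#		exit(ret);			/* child reports its own check */
#	waitpid(fork_pid, &child_ret, 0);
#	return ret || child_ret;		/* fail if either side saw corruption */
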
# int preempt_vmx(vector int *varray, int *threads_starting, int *running)
# On starting will (atomically) decrement threads_starting as a signal that
# the VMX registers have been loaded with varray. Will proceed to check the
# validity of the VMX registers while *running is not zero.
FUNC_START(preempt_vmx)
	PUSH_BASIC_STACK(512)
	std r3,STACK_FRAME_PARAM(0)(sp) # vector int *varray
	std r4,STACK_FRAME_PARAM(1)(sp) # int *threads_starting
	std r5,STACK_FRAME_PARAM(2)(sp) # int *running
	# VMX needs to write to 16-byte-aligned addresses, skip STACK_FRAME_LOCAL(3,0)
	PUSH_VMX(STACK_FRAME_LOCAL(4,0),r4)

	bl load_vmx
	nop

	sync
	# Atomically decrement *threads_starting
	ld r3,STACK_FRAME_PARAM(1)(sp)
1:	lwarx r4,0,r3
	addi r4,r4,-1
	stwcx. r4,0,r3
	bne- 1b

	# Re-check the VMX registers until *running drops to zero or a mismatch is seen
2:	ld r3,STACK_FRAME_PARAM(0)(sp)
	bl check_vmx
	nop
	cmpdi r3,0
	bne 3f
	ld r4,STACK_FRAME_PARAM(2)(sp)
	ld r5,0(r4)
	cmpwi r5,0
	bne 2b

3:	POP_VMX(STACK_FRAME_LOCAL(4,0),r4)
	POP_BASIC_STACK(512)
	blr
FUNC_END(preempt_vmx)
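
# Hedged sketch of a possible threaded caller for preempt_vmx (illustrative
# only; the prototype comes from the comment above, while nthreads, seconds
# and the worker wiring are assumptions):
#
#	extern int preempt_vmx(vector int *varray, int *threads_starting, int *running);
#
#	int threads_starting = nthreads, running = 1;
#	/* each worker thread calls: rc = preempt_vmx(varray, &threads_starting, &running); */
#	while (threads_starting)		/* wait for every worker to load v20-v31 */
#		asm volatile("" ::: "memory");	/* compiler barrier while spinning */
#	sleep(seconds);				/* let the scheduler preempt the workers */
#	running = 0;				/* workers return; rc != 0 means corruption */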