/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
/*
 * i386 specific definitions for NOLIBC
 * Copyright (C) 2017-2022 Willy Tarreau <w@1wt.eu>
 */

#ifndef _NOLIBC_ARCH_I386_H
#define _NOLIBC_ARCH_I386_H

#include "compiler.h"
#include "crt.h"

/* Syscalls for i386:
 *   - mostly similar to x86_64
 *   - registers are 32-bit
 *   - the syscall number is passed in eax
 *   - arguments are in ebx, ecx, edx, esi, edi, ebp respectively
 *   - all registers are preserved (except eax of course)
 *   - the system call is performed by calling int $0x80
 *   - the syscall return value comes back in eax
 *   - the arguments are cast to long and assigned into the target registers,
 *     which are then simply passed as registers to the asm code, so that we
 *     don't run into issues with register constraints.
 *   - the syscall number is always specified last in order to allow forcing
 *     some registers before it (gcc refuses a %-register in the last position).
 *
 * Also, i386 supports the old_select syscall if newselect is not available.
 */
#define __ARCH_WANT_SYS_OLD_SELECT

#define my_syscall0(num) \
({ \
	long _ret; \
	register long _num __asm__ ("eax") = (num); \
	\
	__asm__ volatile ( \
		"int $0x80\n" \
		: "=a" (_ret) \
		: "0"(_num) \
		: "memory", "cc" \
	); \
	_ret; \
})

#define my_syscall1(num, arg1) \
({ \
	long _ret; \
	register long _num __asm__ ("eax") = (num); \
	register long _arg1 __asm__ ("ebx") = (long)(arg1); \
	\
	__asm__ volatile ( \
		"int $0x80\n" \
		: "=a" (_ret) \
		: "r"(_arg1), \
		  "0"(_num) \
		: "memory", "cc" \
	); \
	_ret; \
})

#define my_syscall2(num, arg1, arg2) \
({ \
	long _ret; \
	register long _num __asm__ ("eax") = (num); \
	register long _arg1 __asm__ ("ebx") = (long)(arg1); \
	register long _arg2 __asm__ ("ecx") = (long)(arg2); \
	\
	__asm__ volatile ( \
		"int $0x80\n" \
		: "=a" (_ret) \
		: "r"(_arg1), "r"(_arg2), \
		  "0"(_num) \
		: "memory", "cc" \
	); \
	_ret; \
})

#define my_syscall3(num, arg1, arg2, arg3) \
({ \
	long _ret; \
	register long _num __asm__ ("eax") = (num); \
	register long _arg1 __asm__ ("ebx") = (long)(arg1); \
	register long _arg2 __asm__ ("ecx") = (long)(arg2); \
	register long _arg3 __asm__ ("edx") = (long)(arg3); \
	\
	__asm__ volatile ( \
		"int $0x80\n" \
		: "=a" (_ret) \
		: "r"(_arg1), "r"(_arg2), "r"(_arg3), \
		  "0"(_num) \
		: "memory", "cc" \
	); \
	_ret; \
})

#define my_syscall4(num, arg1, arg2, arg3, arg4) \
({ \
	long _ret; \
	register long _num __asm__ ("eax") = (num); \
	register long _arg1 __asm__ ("ebx") = (long)(arg1); \
	register long _arg2 __asm__ ("ecx") = (long)(arg2); \
	register long _arg3 __asm__ ("edx") = (long)(arg3); \
	register long _arg4 __asm__ ("esi") = (long)(arg4); \
	\
	__asm__ volatile ( \
		"int $0x80\n" \
		: "=a" (_ret) \
		: "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), \
		  "0"(_num) \
		: "memory", "cc" \
	); \
	_ret; \
})

#define my_syscall5(num, arg1, arg2, arg3, arg4, arg5) \
({ \
	long _ret; \
	register long _num __asm__ ("eax") = (num); \
	register long _arg1 __asm__ ("ebx") = (long)(arg1); \
	register long _arg2 __asm__ ("ecx") = (long)(arg2); \
	register long _arg3 __asm__ ("edx") = (long)(arg3); \
	register long _arg4 __asm__ ("esi") = (long)(arg4); \
	register long _arg5 __asm__ ("edi") = (long)(arg5); \
	\
	__asm__ volatile ( \
		"int $0x80\n" \
		: "=a" (_ret) \
		: "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \
		  "0"(_num) \
		: "memory", "cc" \
	); \
	_ret; \
})

#define my_syscall6(num, arg1, arg2, arg3, arg4, arg5, arg6) \
({ \
	long _eax = (long)(num); \
	long _arg6 = (long)(arg6); /* Always in memory */ \
	__asm__ volatile ( \
		"pushl %[_arg6]\n\t" \
		"pushl %%ebp\n\t" \
		"movl 4(%%esp),%%ebp\n\t" \
		"int $0x80\n\t" \
		"popl %%ebp\n\t" \
		"addl $4,%%esp\n\t" \
		: "+a"(_eax)          /* %eax   */ \
		: "b"(arg1),          /* %ebx   */ \
		  "c"(arg2),          /* %ecx   */ \
		  "d"(arg3),          /* %edx   */ \
		  "S"(arg4),          /* %esi   */ \
		  "D"(arg5),          /* %edi   */ \
		  [_arg6]"m"(_arg6)   /* memory */ \
		: "memory", "cc" \
	); \
	_eax; \
})

/* startup code */
/*
 * The i386 System V ABI mandates:
 * 1) the last pushed argument must be 16-byte aligned.
 * 2) the deepest stack frame should be set to zero.
 */
void __attribute__((weak, noreturn, optimize("Os", "omit-frame-pointer"))) __no_stack_protector _start(void)
{
	__asm__ volatile (
		"xor %ebp, %ebp\n"   /* zero the stack frame */
		"mov %esp, %eax\n"   /* save stack pointer to %eax, as arg1 of _start_c */
		"add $12, %esp\n"    /* avoid over-estimating after the 'and' & 'sub' below */
		"and $-16, %esp\n"   /* the %esp must be 16-byte aligned on 'call' */
		"sub $12, %esp\n"    /* sub 12 to keep it aligned after the push %eax */
		"push %eax\n"        /* push arg1 on stack to support plain stack modes too */
		"call _start_c\n"    /* transfer to the C runtime */
		"hlt\n"              /* ensure it does not return */
	);
	__builtin_unreachable();
}

#endif /* _NOLIBC_ARCH_I386_H */
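
/*
 * Illustrative note on the _start alignment arithmetic above, traced with an
 * arbitrary (hypothetical) entry value of %esp = 0xbfffefc8, where argc
 * lives at 0(%esp):
 *
 *   mov %esp, %eax   ->  %eax = 0xbfffefc8 (saved as the argument of _start_c)
 *   add $12, %esp    ->  %esp = 0xbfffefd4
 *   and $-16, %esp   ->  %esp = 0xbfffefd0 (rounded down to a 16-byte boundary)
 *   sub $12, %esp    ->  %esp = 0xbfffefc4
 *   push %eax        ->  %esp = 0xbfffefc0, 16-byte aligned at the 'call'
 *
 * The +12/-12 pair keeps the adjusted %esp at or below its entry value, so
 * the pushed argument never overwrites the argc/argv/envp area left by the
 * kernel.
 */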
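
/*
 * Usage sketch for the my_syscallN macros above, kept under a guard that
 * nolibc never defines so it is never compiled. The function name, the guard
 * name and the assumption that __NR_write is visible (it comes from the
 * kernel's unistd headers) are illustrative only; the real syscall wrappers
 * live in sys.h.
 */
#ifdef NOLIBC_I386_USAGE_EXAMPLE
static inline long example_sys_write(int fd, const void *buf, unsigned long count)
{
	/* int $0x80 returns the byte count or a negative errno in %eax */
	return my_syscall3(__NR_write, fd, buf, count);
}
#endif /* NOLIBC_I386_USAGE_EXAMPLE */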