freebsd.c (12246B)
// Code specific to x86 hosts running FreeBSD.

#include <stdio.h>
#include <string.h>
#include <signal.h>
#include <unistd.h>
#include <stdlib.h>
#include <assert.h>
#include <ucontext.h>
#include <machine/ucontext.h>

#include <machine/segments.h>
#include <machine/sysarch.h>

#include "vx32.h"
#include "vx32impl.h"
#include "os.h"

#if __FreeBSD__ < 7
#warning "libvx32 and FreeBSD 5 and 6's libpthread are not compatible."
#endif

#ifdef __i386__
static void setbase(struct segment_descriptor *desc, unsigned long base)
#elif defined __amd64__
static void setbase(struct user_segment_descriptor *desc, unsigned long base)
#endif
{
	desc->sd_lobase = base & 0xffffff;
	desc->sd_hibase = base >> 24;
}

#ifdef __i386__
static void setlimit(struct segment_descriptor *desc, unsigned long limit)
#elif defined __amd64__
static void setlimit(struct user_segment_descriptor *desc, unsigned long limit)
#endif
{
	desc->sd_lolimit = limit & 0xffff;
	desc->sd_hilimit = limit >> 16;
}
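
/*
 * Editor's note: a worked example of the field split above, assuming
 * the standard x86 descriptor layout (24+8-bit base, 16+4-bit limit)
 * and 4K pages (VXPAGESHIFT == 12).  setbase(d, 0x12345678) stores
 * sd_lobase = 0x345678 and sd_hibase = 0x12.  For a 64MB mapping,
 * setlimit is called with (0x4000000 - 1) >> 12 = 0x3fff, storing
 * sd_lolimit = 0x3fff and sd_hilimit = 0; since sd_gran = 1 is set
 * below, the CPU interprets that limit in 4K units.
 */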

/*
#ifdef __amd64__
union descriptor {
	struct user_segment_descriptor sd;
	struct gate_descriptor gd;
};
#endif
*/

int vxemu_map(vxemu *emu, vxmmap *mm)
{
	int s, sel;
	struct vxproc *vxp;
	union descriptor desc;

	vxp = emu->proc;

	if (emu->ldt_base != (uintptr_t)mm->base || emu->ldt_size != mm->size) {
		// Set up the process's data segment selector (for DS,ES,SS).
		memset(&desc, 0, sizeof(desc));
		setbase(&desc.sd, (unsigned long)mm->base);
		setlimit(&desc.sd, (mm->size - 1) >> VXPAGESHIFT);
		desc.sd.sd_type = SDT_MEMRWA;
		desc.sd.sd_dpl = 3;
		desc.sd.sd_p = 1;
		desc.sd.sd_def32 = 1;
		desc.sd.sd_gran = 1;
		if(emu->datasel == 0){
#ifdef __i386__
			if ((s = i386_set_ldt(LDT_AUTO_ALLOC, &desc, 1)) < 0)
#elif defined __amd64__
			if ((s = sysarch(I386_SET_GSBASE, &desc)) < 0)
#endif
				return -1;
			emu->datasel = (s<<3) + 4 + 3;	// 4=LDT, 3=RPL
#ifdef __i386__
		}else if (i386_set_ldt(emu->datasel >> 3, &desc, 1) < 0)
#elif defined __amd64__
		}else if (sysarch(I386_SET_GSBASE, &desc) < 0)
#endif
			return -1;

		// Set up the process's vxemu segment selector (for FS).
		setbase(&desc.sd, (unsigned long)emu);
		setlimit(&desc.sd, (VXCODEBUFSIZE - 1) >> VXPAGESHIFT);
		if(emu->emusel == 0){
#ifdef __i386__
			if ((s = i386_set_ldt(LDT_AUTO_ALLOC, &desc, 1)) < 0)
#elif defined __amd64__
			if ((s = sysarch(I386_SET_GSBASE, &desc)) < 0)
#endif
				return -1;
			emu->emusel = (s<<3) + 4 + 3;	// 4=LDT, 3=RPL
#ifdef __i386__
		}else if (i386_set_ldt(emu->emusel >> 3, &desc, 1) < 0)
#elif defined __amd64__
		}else if (sysarch(I386_SET_GSBASE, &desc) < 0)
#endif
			return -1;

		emu->ldt_base = (uintptr_t)mm->base;
		emu->ldt_size = mm->size;
	}

#ifdef __amd64__
	/*
	// Set up 32-bit mode code and data segments (not vxproc-specific),
	// giving access to the full low 32 bits of linear address space.
	// The code segment is necessary to get into 32-bit compatibility mode;
	// the data segment is needed because Linux for x86-64
	// doesn't give 64-bit processes a "real" data segment by default
	// but instead just loads zero into the data segment selectors!
	emu->runptr.sel = FLATCODE;
	setbase(&desc.sd, 0);
	setlimit(&desc.sd, 0xfffff);
	if ((s = sysarch(I386_SET_GSBASE, &desc)) < 0)
		return -1;

	desc.entry_number = FLATDATA / 8;
	desc.contents = MODIFY_LDT_CONTENTS_DATA;
	if (modify_ldt(1, &desc, sizeof(desc)) < 0)
		return -1;

	// Set up a far return vector in emu->retptr
	// for getting back into 64-bit long mode.
	extern void vxrun_return();
	asm volatile("movw %%cs,%0" : "=r" (emu->retptr.sel));
	emu->retptr.ofs = (uint32_t)(intptr_t)vxrun_return;
	*/
#endif

	return 0;
}

static void dumpmcontext(mcontext_t *ctx, uint32_t cr2)
{
#ifdef __i386__
	vxprint(
		"eax %08x ebx %08x ecx %08x edx %08x\n"
		"esi %08x edi %08x ebp %08x esp %08x\n"
		"eip %08x efl %08x cs %04x\n"
		"err %08x trapno %08x cr2 %08x\n",
		ctx->mc_eax, ctx->mc_ebx, ctx->mc_ecx, ctx->mc_edx,
		ctx->mc_esi, ctx->mc_edi, ctx->mc_ebp, ctx->mc_esp,
		ctx->mc_eip, ctx->mc_eflags, ctx->mc_cs,
		ctx->mc_err, ctx->mc_trapno, cr2);
#else
	vxprint(
		"rax %016lx rbx %016lx\nrcx %016lx rdx %016lx\n"
		"rsi %016lx rdi %016lx\nrbp %016lx rsp %016lx\n"
		"r8 %016lx r9 %016lx\nr10 %016lx r11 %016lx\n"
		"r12 %016lx r13 %016lx\nr14 %016lx r15 %016lx\n"
		"rip %016lx efl %016lx cs %04x ss %04x\n"
		"err %016lx trapno %016lx cr2 %08x\n",
		ctx->mc_rax, ctx->mc_rbx, ctx->mc_rcx, ctx->mc_rdx,
		ctx->mc_rsi, ctx->mc_rdi, ctx->mc_rbp, ctx->mc_rsp,
		ctx->mc_r8, ctx->mc_r9, ctx->mc_r10, ctx->mc_r11,
		ctx->mc_r12, ctx->mc_r13, ctx->mc_r14, ctx->mc_r15,
		ctx->mc_rip, ctx->mc_rflags, ctx->mc_cs, ctx->mc_ss,
		ctx->mc_err, ctx->mc_trapno, cr2);
#endif
}

static void
fprestore(int *state, int fmt)
{
#ifdef __i386__
	if(fmt == _MC_FPFMT_387)
		asm volatile("frstor 0(%%eax); fwait\n" : : "a" (state) : "memory");
	else
#endif
	if(fmt == _MC_FPFMT_XMM){
		/* Have to 16-align the 512-byte state for fxrstor */
		char buf[512+16], *p;
		p = buf;
		if((long)p & 15)
			p += 16 - ((long)p & 15);	// parenthesized: & binds looser than -
		memmove(p, state, 512);
#ifdef __i386__
		asm volatile("fxrstor 0(%%eax); fwait\n" : : "a" (p) : "memory");
#elif defined(__amd64__)
		asm volatile("fxrstor 0(%%rax); fwait\n" : : "a" (p) : "memory");
#endif
	}else
		abort();
}
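
/*
 * Editor's note: the contract below, as used elsewhere in vx32, is
 * that the host's signal handler calls vx32_sighandler first.  A
 * return value of 1 means the signal belonged to vx32 translated code
 * and the (possibly modified) machine context should simply be
 * resumed; a return value of 0 means the signal was not ours and the
 * host must deal with it itself.
 */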

int vx32_sighandler(int signo, siginfo_t *si, void *v)
{
	int r;
	uint32_t magic;
	uint16_t vs, oldvs;
	vxproc *vxp;
	vxemu *emu;
	ucontext_t *uc;
	mcontext_t *mc;

	uc = v;
	mc = &uc->uc_mcontext;

	// We can't be sure that vxemu is running,
	// and thus that %VSEG is actually mapped to a
	// valid vxemu.  The only way to tell is to look at %VSEG.

	// First, sanity-check the vxproc segment number.
	// FreeBSD resets the register before entering the handler!
#ifdef __i386__
	asm("movw %"VSEGSTR",%0"
		: "=r" (oldvs));
	vs = mc->mc_vs & 0xFFFF;	/* mc_vs is #defined in os.h */
#elif defined(__amd64__)
	if (sysarch(I386_GET_GSBASE, &vs) < 0)
		return 0;
#endif

#ifdef __i386__
	if(0) vxprint("vx32_sighandler signo=%d eip=%#x esp=%#x vs=%#x currentvs=%#x\n",
		signo, mc->mc_eip, mc->mc_esp, vs, oldvs);
#elif defined(__amd64__)
	if(0) vxprint("vx32_sighandler signo=%d rip=%#lx rsp=%#lx vs=%#x currentvs=%#x\n",
		signo, mc->mc_rip, mc->mc_rsp, vs, oldvs);
#endif

	if ((vs & 7) != 7)	// LDT, RPL=3
		return 0;

	// Okay, assume mapped; check for vxemu by reading the
	// first word from vs.  Have to put vs into the segment
	// register and then take it back out.
	asm("movw %"VSEGSTR",%1\n"
	    "movw %2,%"VSEGSTR"\n"
	    "movl %"VSEGSTR":%3,%0\n"
	    "movw %1,%"VSEGSTR"\n"
	    : "=r" (magic), "=r" (oldvs)
	    : "r" (vs), "m" (((vxemu*)0)->magic));
	if (magic != VXEMU_MAGIC)
		return 0;

	// Okay, we're convinced.

	// Find the current vxproc and vxemu.
#ifdef __i386__
	asm("movw %"VSEGSTR",%1\n"
	    "movw %2,%"VSEGSTR"\n"
	    "movl %"VSEGSTR":%3,%0\n"
	    "movw %1,%"VSEGSTR"\n"
	    : "=r" (vxp), "=r" (oldvs)
	    : "r" (vs), "m" (((vxemu*)0)->proc));
#elif defined(__amd64__)
	asm("movw %"VSEGSTR",%1\n"
	    "movw %2,%"VSEGSTR"\n"
	    "movq %"VSEGSTR":%3,%0\n"	// movq: vxp is a 64-bit pointer here
	    "movw %1,%"VSEGSTR"\n"
	    : "=r" (vxp), "=r" (oldvs)
	    : "r" (vs), "m" (((vxemu*)0)->proc));
#endif
	emu = vxp->emu;

	// Get back our regular host segment register state,
	// so that thread-local storage and such works.
	vxrun_cleanup(emu);

	// dumpmcontext(mc, (uint32_t)si->si_addr);

	uint32_t addr;
	int newtrap;
	addr = 0;
	switch(signo){
	case SIGSEGV:
		newtrap = VXTRAP_PAGEFAULT;
#ifdef __i386__
		addr = (uint32_t)si->si_addr;
#elif defined(__amd64__)
		addr = (uint64_t)si->si_addr;
#endif
		break;

	case SIGBUS:
		/*
		 * On FreeBSD, SIGBUS means segmentation limit fault.
		 * The supplied address is bogus.
		 */
		newtrap = VXTRAP_PAGEFAULT;
		addr = 0;
		break;

	case SIGFPE:
		// vxprint("fpe %d\n", si->si_code);
		newtrap = VXTRAP_FLOAT;
		addr = 0;
		break;

	case SIGVTALRM:
		newtrap = VXTRAP_IRQ + VXIRQ_TIMER;
		addr = 0;
		break;

	case SIGTRAP:
		// FreeBSD sends SIGTRAP when it gets a processor
		// debug exception, which is caused by single-stepping
		// with the TF bit, among other things.
		// It appears that FreeBSD does not turn the flag back on
		// before entering the signal handler.
		addr = 0;
		newtrap = VXTRAP_SINGLESTEP;
#ifdef __i386__
		mc->mc_eflags &= ~EFLAGS_TF;	// Just in case.
#elif defined(__amd64__)
		mc->mc_rflags &= ~EFLAGS_TF;	// Just in case.
#endif
		break;

	default:
		newtrap = VXTRAP_SIGNAL + signo;
		break;
	}

	int replaced_trap = 0;
	if (emu->cpu_trap) {
		// There's already a pending trap!
		// Handle the new trap, and assume that when it
		// finishes, restarting the code at cpu.eip will trigger
		// the old trap again.
		// Have to fix up eip for int 0x30 and syscall instructions.
		if (emu->cpu_trap == VXTRAP_SYSCALL ||
				(emu->cpu_trap&VXTRAP_CATEGORY) == VXTRAP_SOFT)
			emu->cpu.eip -= 2;
		replaced_trap = emu->cpu_trap;
	}
	emu->cpu_trap = newtrap;

#ifdef __i386__
	r = vxemu_sighandler(emu, mc->mc_eip);
#elif defined(__amd64__)
	r = vxemu_sighandler(emu, mc->mc_rip);
#endif

	if (r == VXSIG_SINGLESTEP){
		// Vxemu_sighandler wants us to single-step.
		// Execution state is in an intermediate state; don't touch.
#ifdef __i386__
		mc->mc_eflags |= EFLAGS_TF;	// x86 TF (single-step) bit
#elif defined(__amd64__)
		mc->mc_rflags |= EFLAGS_TF;
#endif
		vxrun_setup(emu);
		return 1;
	}
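
	/*
	 * Editor's note: from here on, r is treated as a bit mask of
	 * VXSIG_* flags (my reading of the code below, not an exhaustive
	 * spec): the VXSIG_SAVE_* bits select which guest registers to
	 * copy out of the interrupted context, VXSIG_SAVE_EBX_AS_EIP
	 * recovers the guest eip that the translated code kept in ebx,
	 * and the 16-bit field above VXSIG_COUNT_SHIFT is a byte count
	 * to add back to the guest esp.  Each bit is cleared once it is
	 * handled, so the final comparison against VXSIG_TRAP is exact.
	 */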

	// Copy execution state into emu.
	if ((r & VXSIG_SAVE_ALL) == VXSIG_SAVE_ALL) {
#ifdef __i386__
		emu->cpu.reg[EAX] = mc->mc_eax;
		emu->cpu.reg[EBX] = mc->mc_ebx;
		emu->cpu.reg[ECX] = mc->mc_ecx;
		emu->cpu.reg[EDX] = mc->mc_edx;
		emu->cpu.reg[ESI] = mc->mc_esi;
		emu->cpu.reg[EDI] = mc->mc_edi;
		emu->cpu.reg[ESP] = mc->mc_esp;	// or esp_at_signal ???
		emu->cpu.reg[EBP] = mc->mc_ebp;
		emu->cpu.eflags = mc->mc_eflags;
#elif defined(__amd64__)
		emu->cpu.reg[EAX] = mc->mc_rax;
		emu->cpu.reg[EBX] = mc->mc_rbx;
		emu->cpu.reg[ECX] = mc->mc_rcx;
		emu->cpu.reg[EDX] = mc->mc_rdx;
		emu->cpu.reg[ESI] = mc->mc_rsi;
		emu->cpu.reg[EDI] = mc->mc_rdi;
		emu->cpu.reg[ESP] = mc->mc_rsp;	// or esp_at_signal ???
		emu->cpu.reg[EBP] = mc->mc_rbp;
		emu->cpu.eflags = mc->mc_rflags;
#endif
	} else if (r & VXSIG_SAVE_ALL) {
		if (r & VXSIG_SAVE_EAX)
#ifdef __i386__
			emu->cpu.reg[EAX] = mc->mc_eax;
#elif defined(__amd64__)
			emu->cpu.reg[EAX] = mc->mc_rax;
#endif
		if (r & VXSIG_SAVE_EBX)
#ifdef __i386__
			emu->cpu.reg[EBX] = mc->mc_ebx;
#elif defined(__amd64__)
			emu->cpu.reg[EBX] = mc->mc_rbx;
#endif
		if (r & VXSIG_SAVE_ECX)
#ifdef __i386__
			emu->cpu.reg[ECX] = mc->mc_ecx;
#elif defined(__amd64__)
			emu->cpu.reg[ECX] = mc->mc_rcx;
#endif
		if (r & VXSIG_SAVE_EDX)
#ifdef __i386__
			emu->cpu.reg[EDX] = mc->mc_edx;
#elif defined(__amd64__)
			emu->cpu.reg[EDX] = mc->mc_rdx;
#endif
		if (r & VXSIG_SAVE_ESI)
#ifdef __i386__
			emu->cpu.reg[ESI] = mc->mc_esi;
#elif defined(__amd64__)
			emu->cpu.reg[ESI] = mc->mc_rsi;
#endif
		if (r & VXSIG_SAVE_EDI)
#ifdef __i386__
			emu->cpu.reg[EDI] = mc->mc_edi;
#elif defined(__amd64__)
			emu->cpu.reg[EDI] = mc->mc_rdi;
#endif
		if (r & VXSIG_SAVE_ESP)
#ifdef __i386__
			emu->cpu.reg[ESP] = mc->mc_esp;	// or esp_at_signal ???
#elif defined(__amd64__)
			emu->cpu.reg[ESP] = mc->mc_rsp;	// or esp_at_signal ???
#endif
		if (r & VXSIG_SAVE_EBP)
#ifdef __i386__
			emu->cpu.reg[EBP] = mc->mc_ebp;
#elif defined(__amd64__)
			emu->cpu.reg[EBP] = mc->mc_rbp;
#endif
		if (r & VXSIG_SAVE_EFLAGS)
#ifdef __i386__
			emu->cpu.eflags = mc->mc_eflags;
#elif defined(__amd64__)
			emu->cpu.eflags = mc->mc_rflags;
#endif
	}
	r &= ~VXSIG_SAVE_ALL;

	if (r & VXSIG_SAVE_EBX_AS_EIP)
#ifdef __i386__
		emu->cpu.eip = mc->mc_ebx;
#elif defined(__amd64__)
		emu->cpu.eip = mc->mc_rbx;
#endif
	r &= ~VXSIG_SAVE_EBX_AS_EIP;

	if (r & VXSIG_ADD_COUNT_TO_ESP) {
		emu->cpu.reg[ESP] += (uint16_t)(r >> VXSIG_COUNT_SHIFT);
		r &= ~VXSIG_ADD_COUNT_TO_ESP;
		r &= ~(0xFFFF << VXSIG_COUNT_SHIFT);
	}

	if (r & VXSIG_INC_ECX) {
		emu->cpu.reg[ECX]++;
		r &= ~VXSIG_INC_ECX;
	}

	if (r == VXSIG_TRAP) {
		if (emu->trapenv == NULL)
			return 0;
		emu->cpu.traperr = mc->mc_err;
		emu->cpu.trapva = addr;
#ifdef __i386__
		memmove(&mc->mc_gs, &emu->trapenv->mc_gs, 19*4);
#elif defined(__amd64__)
		memmove(&mc->mc_onstack, &emu->trapenv->mc_onstack, sizeof(mcontext_t));
#endif
		return 1;
	}

	// The signal handler is confused; so are we.
	return 0;
}
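
/*
 * Editor's note: a minimal sketch of how a host program might wire
 * this handler up, assuming a SA_SIGINFO-style sigaction (the handler
 * name here is illustrative, not part of libvx32):
 *
 *	static void host_handler(int signo, siginfo_t *si, void *ctx)
 *	{
 *		if (vx32_sighandler(signo, si, ctx))
 *			return;		// vx32 handled it; just resume
 *		// ... host's own handling of the signal ...
 *	}
 *
 *	struct sigaction sa;
 *	memset(&sa, 0, sizeof sa);
 *	sa.sa_sigaction = host_handler;
 *	sa.sa_flags = SA_SIGINFO;
 *	sigaction(SIGSEGV, &sa, 0);	// likewise SIGBUS, SIGFPE,
 *					// SIGVTALRM, SIGTRAP above
 */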