Changeset 189293 in webkit

- Timestamp: Sep 3, 2015, 3:16:23 PM (10 years ago)
- Location: trunk/Source/JavaScriptCore
- Files: 21 edited
trunk/Source/JavaScriptCore/ChangeLog
r189288 r189293

2015-09-03  Basile Clement  <basile_clement@apple.com> and Michael Saboff  <msaboff@apple.com>

        Clean up register naming
        https://bugs.webkit.org/show_bug.cgi?id=148658

        Reviewed by Geoffrey Garen.

        This changes register naming conventions in the LLInt and baseline JIT
        in order to use as few (native) callee-save registers as possible on
        64-bit platforms. It also introduces significant changes in the way
        register names are defined in the LLInt and baseline JIT in order to
        enable a simpler convention about which registers can be aliased. That
        convention is valid across all architectures and is described in
        llint/LowLevelInterpreter.asm.

        Callee-save registers are now named regCS<n> (in the JIT) or
        csr<n> (in the LLInt), with a common numbering across all tiers. Some
        registers are unused in some tiers.

        As part of this change, rdi was removed from the list of temporary
        registers for X86-64 Windows, as it is a callee-save register. This
        reduces the number of temporary registers for X86-64 Windows.

        This is in preparation for properly handling callee-save register
        preservation and restoration.

        * dfg/DFGJITCompiler.cpp:
        (JSC::DFG::JITCompiler::compileFunction):
        * ftl/FTLLink.cpp:
        (JSC::FTL::link):
        * jit/FPRInfo.h:
        (JSC::FPRInfo::toRegister):
        (JSC::FPRInfo::toIndex):
        * jit/GPRInfo.h:
        (JSC::GPRInfo::toIndex):
        (JSC::GPRInfo::toRegister):
        (JSC::GPRInfo::debugName): Deleted.
        * jit/JIT.cpp:
        (JSC::JIT::privateCompile):
        * jit/JITArithmetic.cpp:
        (JSC::JIT::emit_op_mod):
        * jit/JITOpcodes.cpp:
        (JSC::JIT::emitSlow_op_loop_hint):
        * jit/JITOpcodes32_64.cpp:
        (JSC::JIT::emit_op_end):
        (JSC::JIT::emit_op_new_object):
        * jit/RegisterPreservationWrapperGenerator.cpp:
        (JSC::generateRegisterPreservationWrapper):
        (JSC::generateRegisterRestoration):
        * jit/ThunkGenerators.cpp:
        (JSC::arityFixupGenerator):
        (JSC::nativeForGenerator): Deleted.
        * llint/LowLevelInterpreter.asm:
        * llint/LowLevelInterpreter32_64.asm:
        * llint/LowLevelInterpreter64.asm:
        * offlineasm/arm.rb:
        * offlineasm/arm64.rb:
        * offlineasm/cloop.rb:
        * offlineasm/mips.rb:
        * offlineasm/registers.rb:
        * offlineasm/sh4.rb:
        * offlineasm/x86.rb:

2015-09-03  Filip Pizlo  <fpizlo@apple.com>
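The "common numbering across all tiers" point is the heart of the ChangeLog. The following sketch is illustrative only and is not part of the changeset; it uses the X86-64 (non-Windows) callee-save set that this patch defines in GPRInfo.h (regCS0..regCS4 = ebx, r12, r13, r14, r15) to show that the JIT's regCS<n> names and the LLInt's csr<n> names are meant to denote the same machine registers.

    // Illustrative sketch only -- not WebKit code.
    #include <cstdio>

    enum class X86_64Reg { ebx, r12, r13, r14, r15 };

    // One shared table: the index is the "callee-save register number" used by every tier.
    constexpr X86_64Reg calleeSaves[] = { X86_64Reg::ebx, X86_64Reg::r12, X86_64Reg::r13, X86_64Reg::r14, X86_64Reg::r15 };

    constexpr X86_64Reg regCS(unsigned n) { return calleeSaves[n]; } // baseline JIT / DFG name
    constexpr X86_64Reg csr(unsigned n) { return calleeSaves[n]; }   // LLInt (offlineasm) name

    int main()
    {
        // The same index refers to the same machine register in every tier.
        static_assert(regCS(1) == csr(1), "numbering is shared across tiers");
        std::printf("regCS1 and csr1 agree: %d\n", regCS(1) == csr(1));
    }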
trunk/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp
r187505 r189293

         if (maxFrameExtentForSlowPathCall)
             addPtr(TrustedImm32(maxFrameExtentForSlowPathCall), stackPointerRegister);
-        branchTest32(Zero, GPRInfo::regT0).linkTo(fromArityCheck, this);
+        branchTest32(Zero, GPRInfo::returnValueGPR).linkTo(fromArityCheck, this);
         emitStoreCodeOrigin(CodeOrigin(0));
-        GPRReg thunkReg;
-#if USE(JSVALUE64)
-        thunkReg = GPRInfo::regT7;
-#else
-        thunkReg = GPRInfo::regT5;
-#endif
+        GPRReg thunkReg = GPRInfo::argumentGPR1;
         CodeLocationLabel* arityThunkLabels =
             m_vm->arityCheckFailReturnThunks->returnPCsFor(*m_vm, m_codeBlock->numParameters());
         move(TrustedImmPtr(arityThunkLabels), thunkReg);
-        loadPtr(BaseIndex(thunkReg, GPRInfo::regT0, timesPtr()), thunkReg);
+        loadPtr(BaseIndex(thunkReg, GPRInfo::returnValueGPR, timesPtr()), thunkReg);
+        move(GPRInfo::returnValueGPR, GPRInfo::argumentGPR0);
         m_callArityFixup = call();
         jump(fromArityCheck);
trunk/Source/JavaScriptCore/ftl/FTLLink.cpp
r188932 r189293

         jit.jitAssertIsNull(GPRInfo::regT1);
 #endif
-        jit.move(GPRInfo::returnValueGPR, GPRInfo::regT0);
+        jit.move(GPRInfo::returnValueGPR, GPRInfo::argumentGPR0);
         jit.emitFunctionEpilogue();
-        mainPathJumps.append(jit.branchTest32(CCallHelpers::Zero, GPRInfo::regT0));
+        mainPathJumps.append(jit.branchTest32(CCallHelpers::Zero, GPRInfo::argumentGPR0));
         jit.emitFunctionPrologue();
         CodeLocationLabel* arityThunkLabels =
             vm.arityCheckFailReturnThunks->returnPCsFor(vm, codeBlock->numParameters());
-        jit.move(CCallHelpers::TrustedImmPtr(arityThunkLabels), GPRInfo::regT7);
-        jit.loadPtr(CCallHelpers::BaseIndex(GPRInfo::regT7, GPRInfo::regT0, CCallHelpers::timesPtr()), GPRInfo::regT7);
+        jit.move(CCallHelpers::TrustedImmPtr(arityThunkLabels), GPRInfo::argumentGPR1);
+        jit.loadPtr(CCallHelpers::BaseIndex(GPRInfo::argumentGPR1, GPRInfo::argumentGPR0, CCallHelpers::timesPtr()), GPRInfo::argumentGPR1);
         CCallHelpers::Call callArityFixup = jit.call();
         jit.emitFunctionEpilogue();
trunk/Source/JavaScriptCore/jit/FPRInfo.h
r165431 r189293

 public:
     typedef FPRReg RegisterType;
-    static const unsigned numberOfRegisters = 6;
+    static const unsigned numberOfRegisters = 7;
 
     // Temporary registers.
     static const FPRReg fpRegT0 = MIPSRegisters::f0;
-    static const FPRReg fpRegT1 = MIPSRegisters::f4;
-    static const FPRReg fpRegT2 = MIPSRegisters::f6;
-    static const FPRReg fpRegT3 = MIPSRegisters::f8;
-    static const FPRReg fpRegT4 = MIPSRegisters::f10;
-    static const FPRReg fpRegT5 = MIPSRegisters::f18;
+    static const FPRReg fpRegT1 = MIPSRegisters::f2;
+    static const FPRReg fpRegT2 = MIPSRegisters::f4;
+    static const FPRReg fpRegT3 = MIPSRegisters::f6;
+    static const FPRReg fpRegT4 = MIPSRegisters::f8;
+    static const FPRReg fpRegT5 = MIPSRegisters::f10;
+    static const FPRReg fpRegT6 = MIPSRegisters::f18;
 
     static const FPRReg returnValueFPR = MIPSRegisters::f0;
…
     {
         static const FPRReg registerForIndex[numberOfRegisters] = {
-            fpRegT0, fpRegT1, fpRegT2, fpRegT3, fpRegT4, fpRegT5 };
+            fpRegT0, fpRegT1, fpRegT2, fpRegT3, fpRegT4, fpRegT5, fpRegT6 };
 
         ASSERT(index < numberOfRegisters);
…
         ASSERT(reg < 20);
         static const unsigned indexForRegister[20] = {
-            0, InvalidIndex, InvalidIndex, InvalidIndex,
-            1, InvalidIndex, 2, InvalidIndex,
-            3, InvalidIndex, 4, InvalidIndex,
+            0, InvalidIndex, 1, InvalidIndex,
+            2, InvalidIndex, 3, InvalidIndex,
+            4, InvalidIndex, 5, InvalidIndex,
             InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex,
-            InvalidIndex, InvalidIndex, 5, InvalidIndex,
+            InvalidIndex, InvalidIndex, 6, InvalidIndex,
         };
         unsigned result = indexForRegister[reg];
trunk/Source/JavaScriptCore/jit/GPRInfo.h
r181993 r189293

 namespace JSC {
 
+// We use the same conventions in the basline JIT as in the LLint. If you
+// change mappings in the GPRInfo, you should change them in the offlineasm
+// compiler adequately. The register naming conventions are described at the
+// top of the LowLevelInterpreter.asm file.
+
 typedef MacroAssembler::RegisterID GPRReg;
 #define InvalidGPRReg ((::JSC::GPRReg)-1)
…
 #endif // USE(JSVALUE32_64)
 
-// The baseline JIT requires that regT3 be callee-preserved.
-
 #if CPU(X86)
 #define NUMBER_OF_ARGUMENT_REGISTERS 0u
…
     static const unsigned numberOfRegisters = 6;
     static const unsigned numberOfArgumentRegisters = NUMBER_OF_ARGUMENT_REGISTERS;
-
-    // Note: regT3 is required to be callee-preserved.
 
     // Temporary registers.
…
     static const GPRReg regT1 = X86Registers::edx;
     static const GPRReg regT2 = X86Registers::ecx;
-    static const GPRReg regT3 = X86Registers::ebx;
-    static const GPRReg regT4 = X86Registers::edi;
-    static const GPRReg regT5 = X86Registers::esi;
-    // These registers match the baseline JIT.
-    static const GPRReg cachedResultRegister = regT0;
-    static const GPRReg cachedResultRegister2 = regT1;
+    static const GPRReg regT3 = X86Registers::ebx; // Callee-save
+    static const GPRReg regT4 = X86Registers::esi; // Callee-save
+    static const GPRReg regT5 = X86Registers::edi; // Callee-save
     static const GPRReg callFrameRegister = X86Registers::ebp;
     // These constants provide the names for the general purpose argument & return value registers.
     static const GPRReg argumentGPR0 = X86Registers::ecx; // regT2
     static const GPRReg argumentGPR1 = X86Registers::edx; // regT1
+    static const GPRReg argumentGPR2 = X86Registers::eax; // regT0
+    static const GPRReg argumentGPR3 = X86Registers::ebx; // regT3
     static const GPRReg nonArgGPR0 = X86Registers::esi; // regT4
-    static const GPRReg nonArgGPR1 = X86Registers::eax; // regT0
-    static const GPRReg nonArgGPR2 = X86Registers::ebx; // regT3
+    static const GPRReg nonArgGPR1 = X86Registers::edi; // regT5
     static const GPRReg returnValueGPR = X86Registers::eax; // regT0
     static const GPRReg returnValueGPR2 = X86Registers::edx; // regT1
…
         ASSERT(reg != InvalidGPRReg);
         ASSERT(static_cast<int>(reg) < 8);
-        static const unsigned indexForRegister[8] = { 0, 2, 1, 3, InvalidIndex, InvalidIndex, 5, 4 };
+        static const unsigned indexForRegister[8] = { 0, 2, 1, 3, InvalidIndex, InvalidIndex, 4, 5 };
         unsigned result = indexForRegister[reg];
         return result;
…
     static const unsigned numberOfArgumentRegisters = NUMBER_OF_ARGUMENT_REGISTERS;
 
-    // Note: regT3 is required to be callee-preserved.
-
     // These registers match the baseline JIT.
-    static const GPRReg cachedResultRegister = X86Registers::eax;
     static const GPRReg callFrameRegister = X86Registers::ebp;
     static const GPRReg tagTypeNumberRegister = X86Registers::r14;
…
     // Temporary registers.
     static const GPRReg regT0 = X86Registers::eax;
+#if !OS(WINDOWS)
+    static const GPRReg regT1 = X86Registers::esi;
+    static const GPRReg regT2 = X86Registers::edx;
+    static const GPRReg regT3 = X86Registers::ecx;
+    static const GPRReg regT4 = X86Registers::r8;
+    static const GPRReg regT5 = X86Registers::r10;
+    static const GPRReg regT6 = X86Registers::edi;
+    static const GPRReg regT7 = X86Registers::r9;
+#else
     static const GPRReg regT1 = X86Registers::edx;
-    static const GPRReg regT2 = X86Registers::ecx;
-    static const GPRReg regT3 = X86Registers::ebx;
-    static const GPRReg regT4 = X86Registers::edi;
-    static const GPRReg regT5 = X86Registers::esi;
-    static const GPRReg regT6 = X86Registers::r8;
-    static const GPRReg regT7 = X86Registers::r9;
-    static const GPRReg regT8 = X86Registers::r10;
-    static const GPRReg regT9 = X86Registers::r12;
-    static const GPRReg regT10 = X86Registers::r13;
+    static const GPRReg regT2 = X86Registers::r8;
+    static const GPRReg regT3 = X86Registers::r9;
+    static const GPRReg regT4 = X86Registers::r10;
+    static const GPRReg regT5 = X86Registers::ecx;
+#endif
+
+    static const GPRReg regCS0 = X86Registers::ebx;
+
+#if !OS(WINDOWS)
+    static const GPRReg regCS1 = X86Registers::r12;
+    static const GPRReg regCS2 = X86Registers::r13;
+    static const GPRReg regCS3 = X86Registers::r14;
+    static const GPRReg regCS4 = X86Registers::r15;
+#else
+    static const GPRReg regCS1 = X86Registers::esi;
+    static const GPRReg regCS2 = X86Registers::edi;
+    static const GPRReg regCS3 = X86Registers::r12;
+    static const GPRReg regCS4 = X86Registers::r13;
+    static const GPRReg regCS5 = X86Registers::r14;
+    static const GPRReg regCS6 = X86Registers::r15;
+#endif
+
     // These constants provide the names for the general purpose argument & return value registers.
 #if !OS(WINDOWS)
-    static const GPRReg argumentGPR0 = X86Registers::edi; // regT4
-    static const GPRReg argumentGPR1 = X86Registers::esi; // regT5
-    static const GPRReg argumentGPR2 = X86Registers::edx; // regT1
-    static const GPRReg argumentGPR3 = X86Registers::ecx; // regT2
-    static const GPRReg argumentGPR4 = X86Registers::r8; // regT6
-    static const GPRReg argumentGPR5 = X86Registers::r9;
+    static const GPRReg argumentGPR0 = X86Registers::edi; // regT6
+    static const GPRReg argumentGPR1 = X86Registers::esi; // regT1
+    static const GPRReg argumentGPR2 = X86Registers::edx; // regT2
+    static const GPRReg argumentGPR3 = X86Registers::ecx; // regT3
+    static const GPRReg argumentGPR4 = X86Registers::r8; // regT4
+    static const GPRReg argumentGPR5 = X86Registers::r9; // regT7
 #else
-    static const GPRReg argumentGPR0 = X86Registers::ecx;
-    static const GPRReg argumentGPR1 = X86Registers::edx;
-    static const GPRReg argumentGPR2 = X86Registers::r8; // regT6
-    static const GPRReg argumentGPR3 = X86Registers::r9; // regT7
-#endif
-    static const GPRReg nonArgGPR0 = X86Registers::r10; // regT8
-    static const GPRReg nonArgGPR1 = X86Registers::ebx; // regT3
-    static const GPRReg nonArgGPR2 = X86Registers::r12; // regT9
+    static const GPRReg argumentGPR0 = X86Registers::ecx; // regT5
+    static const GPRReg argumentGPR1 = X86Registers::edx; // regT1
+    static const GPRReg argumentGPR2 = X86Registers::r8; // regT2
+    static const GPRReg argumentGPR3 = X86Registers::r9; // regT3
+#endif
+    static const GPRReg nonArgGPR0 = X86Registers::r10; // regT5 (regT4 on Windows)
+    static const GPRReg nonArgGPR1 = X86Registers::ebx; // Callee save
     static const GPRReg returnValueGPR = X86Registers::eax; // regT0
-    static const GPRReg returnValueGPR2 = X86Registers::edx; // regT1
-    static const GPRReg nonPreservedNonReturnGPR = X86Registers::esi;
-    static const GPRReg nonPreservedNonArgumentGPR = X86Registers::r10;
+    static const GPRReg returnValueGPR2 = X86Registers::edx; // regT1 or regT2
+    static const GPRReg nonPreservedNonReturnGPR = X86Registers::r10; // regT5 (regT4 on Windows)
+    static const GPRReg nonPreservedNonArgumentGPR = X86Registers::r10; // regT5 (regT4 on Windows)
     static const GPRReg patchpointScratchRegister = MacroAssembler::scratchRegister;
 
…
     {
         ASSERT(index < numberOfRegisters);
-        static const GPRReg registerForIndex[numberOfRegisters] = { regT0, regT1, regT2, regT3, regT4, regT5, regT6, regT7, regT8, regT9, regT10 };
+#if !OS(WINDOWS)
+        static const GPRReg registerForIndex[numberOfRegisters] = { regT0, regT1, regT2, regT3, regT4, regT5, regT6, regT7, regCS0, regCS1, regCS2 };
+#else
+        static const GPRReg registerForIndex[numberOfRegisters] = { regT0, regT1, regT2, regT3, regT4, regT5, regCS0, regCS1, regCS2, regCS3, regCS4 };
+#endif
         return registerForIndex[index];
     }
…
         ASSERT(reg != InvalidGPRReg);
         ASSERT(static_cast<int>(reg) < 16);
-        static const unsigned indexForRegister[16] = { 0, 2, 1, 3, InvalidIndex, InvalidIndex, 5, 4, 6, 7, 8, InvalidIndex, 9, 10, InvalidIndex, InvalidIndex };
+#if !OS(WINDOWS)
+        static const unsigned indexForRegister[16] = { 0, 3, 2, 8, InvalidIndex, InvalidIndex, 1, 6, 4, 7, 5, InvalidIndex, 9, 10, InvalidIndex, InvalidIndex };
+#else
+        static const unsigned indexForRegister[16] = { 0, 5, 1, 6, InvalidIndex, InvalidIndex, 7, 8, 2, 3, 4, InvalidIndex, 9, 10, InvalidIndex, InvalidIndex };
+#endif
         return indexForRegister[reg];
     }
…
     static const unsigned numberOfArgumentRegisters = NUMBER_OF_ARGUMENT_REGISTERS;
 
-    // Note: regT3 is required to be callee-preserved.
-
     // Temporary registers.
     static const GPRReg regT0 = ARMRegisters::r0;
     static const GPRReg regT1 = ARMRegisters::r1;
     static const GPRReg regT2 = ARMRegisters::r2;
-    static const GPRReg regT3 = ARMRegisters::r4;
+    static const GPRReg regT3 = ARMRegisters::r3;
     static const GPRReg regT4 = ARMRegisters::r8;
     static const GPRReg regT5 = ARMRegisters::r9;
…
     static const GPRReg regT7 = ARMRegisters::r7;
 #endif
-    static const GPRReg regT8 = ARMRegisters::r3;
+    static const GPRReg regT8 = ARMRegisters::r4;
     // These registers match the baseline JIT.
-    static const GPRReg cachedResultRegister = regT0;
-    static const GPRReg cachedResultRegister2 = regT1;
     static const GPRReg callFrameRegister = ARMRegisters::fp;
     // These constants provide the names for the general purpose argument & return value registers.
…
     static const GPRReg argumentGPR1 = ARMRegisters::r1; // regT1
     static const GPRReg argumentGPR2 = ARMRegisters::r2; // regT2
-    static const GPRReg argumentGPR3 = ARMRegisters::r3; // regT8
-    static const GPRReg nonArgGPR0 = ARMRegisters::r4; // regT3
+    static const GPRReg argumentGPR3 = ARMRegisters::r3; // regT3
+    static const GPRReg nonArgGPR0 = ARMRegisters::r4; // regT8
     static const GPRReg nonArgGPR1 = ARMRegisters::r8; // regT4
     static const GPRReg nonArgGPR2 = ARMRegisters::r9; // regT5
     static const GPRReg returnValueGPR = ARMRegisters::r0; // regT0
     static const GPRReg returnValueGPR2 = ARMRegisters::r1; // regT1
-    static const GPRReg nonPreservedNonReturnGPR = ARMRegisters::r5; // regT7
+    static const GPRReg nonPreservedNonReturnGPR = ARMRegisters::r5;
 
     static GPRReg toRegister(unsigned index)
…
         static const unsigned indexForRegister[16] =
 #if CPU(ARM_THUMB2)
-            { 0, 1, 2, 8, 3, 9, InvalidIndex, InvalidIndex, 4, 5, 6, 7, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex };
+            { 0, 1, 2, 3, 8, InvalidIndex, InvalidIndex, InvalidIndex, 4, 5, 6, 7, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex };
 #else
-            { 0, 1, 2, 8, 3, 9, InvalidIndex, 7, 4, 5, 6, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex };
+            { 0, 1, 2, 3, 8, InvalidIndex, InvalidIndex, 7, 4, 5, 6, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex };
 #endif
         unsigned result = indexForRegister[reg];
…
     static const unsigned numberOfArgumentRegisters = 8;
 
-    // Note: regT3 is required to be callee-preserved.
-
     // These registers match the baseline JIT.
-    static const GPRReg cachedResultRegister = ARM64Registers::x0;
-    static const GPRReg timeoutCheckRegister = ARM64Registers::x26;
     static const GPRReg callFrameRegister = ARM64Registers::fp;
     static const GPRReg tagTypeNumberRegister = ARM64Registers::x27;
…
     static const GPRReg regT1 = ARM64Registers::x1;
     static const GPRReg regT2 = ARM64Registers::x2;
-    static const GPRReg regT3 = ARM64Registers::x23;
-    static const GPRReg regT4 = ARM64Registers::x5;
-    static const GPRReg regT5 = ARM64Registers::x24;
+    static const GPRReg regT3 = ARM64Registers::x3;
+    static const GPRReg regT4 = ARM64Registers::x4;
+    static const GPRReg regT5 = ARM64Registers::x5;
     static const GPRReg regT6 = ARM64Registers::x6;
     static const GPRReg regT7 = ARM64Registers::x7;
…
     static const GPRReg regT14 = ARM64Registers::x14;
     static const GPRReg regT15 = ARM64Registers::x15;
+    static const GPRReg regCS0 = ARM64Registers::x26; // Used by LLInt only
+    static const GPRReg regCS1 = ARM64Registers::x27; // tagTypeNumber
+    static const GPRReg regCS2 = ARM64Registers::x28; // tagMask
     // These constants provide the names for the general purpose argument & return value registers.
     static const GPRReg argumentGPR0 = ARM64Registers::x0; // regT0
     static const GPRReg argumentGPR1 = ARM64Registers::x1; // regT1
     static const GPRReg argumentGPR2 = ARM64Registers::x2; // regT2
-    static const GPRReg argumentGPR3 = ARM64Registers::x3;
-    static const GPRReg argumentGPR4 = ARM64Registers::x4;
-    static const GPRReg argumentGPR5 = ARM64Registers::x5; // regT4
+    static const GPRReg argumentGPR3 = ARM64Registers::x3; // regT3
+    static const GPRReg argumentGPR4 = ARM64Registers::x4; // regT4
+    static const GPRReg argumentGPR5 = ARM64Registers::x5; // regT5
     static const GPRReg argumentGPR6 = ARM64Registers::x6; // regT6
     static const GPRReg argumentGPR7 = ARM64Registers::x7; // regT7
     static const GPRReg nonArgGPR0 = ARM64Registers::x8; // regT8
     static const GPRReg nonArgGPR1 = ARM64Registers::x9; // regT9
-    static const GPRReg nonArgGPR2 = ARM64Registers::x10; // regT10
     static const GPRReg returnValueGPR = ARM64Registers::x0; // regT0
     static const GPRReg returnValueGPR2 = ARM64Registers::x1; // regT1
…
 public:
     typedef GPRReg RegisterType;
-    static const unsigned numberOfRegisters = 7;
+    static const unsigned numberOfRegisters = 8;
     static const unsigned numberOfArgumentRegisters = NUMBER_OF_ARGUMENT_REGISTERS;
 
     // regT0 must be v0 for returning a 32-bit value.
     // regT1 must be v1 for returning a pair of 32-bit value.
-    // regT3 must be saved in the callee, so use an S register.
 
     // Temporary registers.
     static const GPRReg regT0 = MIPSRegisters::v0;
     static const GPRReg regT1 = MIPSRegisters::v1;
-    static const GPRReg regT2 = MIPSRegisters::t4;
-    static const GPRReg regT3 = MIPSRegisters::s2;
-    static const GPRReg regT4 = MIPSRegisters::t5;
-    static const GPRReg regT5 = MIPSRegisters::t6;
-    static const GPRReg regT6 = MIPSRegisters::s0;
+    static const GPRReg regT2 = MIPSRegisters::t2;
+    static const GPRReg regT3 = MIPSRegisters::t3;
+    static const GPRReg regT4 = MIPSRegisters::t4;
+    static const GPRReg regT5 = MIPSRegisters::t5;
+    static const GPRReg regT6 = MIPSRegisters::t0;
+    static const GPRReg regT7 = MIPSRegisters::t1;
     // These registers match the baseline JIT.
-    static const GPRReg cachedResultRegister = regT0;
-    static const GPRReg cachedResultRegister2 = regT1;
     static const GPRReg callFrameRegister = MIPSRegisters::fp;
     // These constants provide the names for the general purpose argument & return value registers.
…
     static const GPRReg argumentGPR2 = MIPSRegisters::a2;
     static const GPRReg argumentGPR3 = MIPSRegisters::a3;
-    static const GPRReg nonArgGPR0 = regT2;
-    static const GPRReg nonArgGPR1 = regT3;
-    static const GPRReg nonArgGPR2 = regT4;
+    static const GPRReg nonArgGPR0 = regT0;
+    static const GPRReg nonArgGPR1 = regT1;
     static const GPRReg returnValueGPR = regT0;
     static const GPRReg returnValueGPR2 = regT1;
-    static const GPRReg nonPreservedNonReturnGPR = regT5;
+    static const GPRReg nonPreservedNonReturnGPR = regT2;
 
     static GPRReg toRegister(unsigned index)
     {
         ASSERT(index < numberOfRegisters);
-        static const GPRReg registerForIndex[numberOfRegisters] = { regT0, regT1, regT2, regT3, regT4, regT5, regT6 };
+        static const GPRReg registerForIndex[numberOfRegisters] = { regT0, regT1, regT2, regT3, regT4, regT5, regT6, regT7 };
         return registerForIndex[index];
     }
…
         static const unsigned indexForRegister[24] = {
             InvalidIndex, InvalidIndex, 0, 1, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex,
-            InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, 2, 4, 5, InvalidIndex,
-            6, InvalidIndex, 3, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex
+            6, 7, 2, 3, 4, 5, InvalidIndex, InvalidIndex,
+            InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex
         };
         unsigned result = indexForRegister[reg];
…
     static const GPRReg regT0 = SH4Registers::r0;
     static const GPRReg regT1 = SH4Registers::r1;
-    static const GPRReg regT2 = SH4Registers::r2;
-    static const GPRReg regT3 = SH4Registers::r10;
-    static const GPRReg regT4 = SH4Registers::r4;
-    static const GPRReg regT5 = SH4Registers::r5;
-    static const GPRReg regT6 = SH4Registers::r6;
-    static const GPRReg regT7 = SH4Registers::r7;
+    static const GPRReg regT2 = SH4Registers::r6;
+    static const GPRReg regT3 = SH4Registers::r7;
+    static const GPRReg regT4 = SH4Registers::r2;
+    static const GPRReg regT5 = SH4Registers::r3;
+    static const GPRReg regT6 = SH4Registers::r4;
+    static const GPRReg regT7 = SH4Registers::r5;
     static const GPRReg regT8 = SH4Registers::r8;
     static const GPRReg regT9 = SH4Registers::r9;
…
     static const GPRReg callFrameRegister = SH4Registers::fp;
     // These constants provide the names for the general purpose argument & return value registers.
-    static const GPRReg argumentGPR0 = regT4;
-    static const GPRReg argumentGPR1 = regT5;
-    static const GPRReg argumentGPR2 = regT6;
-    static const GPRReg argumentGPR3 = regT7;
-    static const GPRReg nonArgGPR0 = regT3;
-    static const GPRReg nonArgGPR1 = regT8;
-    static const GPRReg nonArgGPR2 = regT9;
+    static const GPRReg argumentGPR0 = SH4Registers::r4; // regT6
+    static const GPRReg argumentGPR1 = SH4Registers::r5; // regT7
+    static const GPRReg argumentGPR2 = SH4Registers::r6; // regT2
+    static const GPRReg argumentGPR3 = SH4Registers::r7; // regT3
+    static const GPRReg nonArgGPR0 = regT4;
+    static const GPRReg nonArgGPR1 = regT5;
     static const GPRReg returnValueGPR = regT0;
     static const GPRReg returnValueGPR2 = regT1;
…
         ASSERT(reg != InvalidGPRReg);
         ASSERT(reg < 14);
-        static const unsigned indexForRegister[14] = { 0, 1, 2, InvalidIndex, 4, 5, 6, 7, 8, 9, 3, InvalidIndex, InvalidIndex, InvalidIndex };
+        static const unsigned indexForRegister[14] = { 0, 1, 4, 5, 6, 7, 2, 3, 8, 9, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex };
         unsigned result = indexForRegister[reg];
         return result;
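Every one of the per-architecture changes above has to keep toRegister() and toIndex() consistent with each other. The following standalone sketch is illustrative only (the register ids and tables are hypothetical, not WebKit's); it states the invariant those updated tables must maintain: the two tables are inverse mappings over the allocatable registers, and unallocatable registers map to InvalidIndex.

    // Illustrative sketch only -- not WebKit code.
    #include <cassert>

    namespace sketch {

    constexpr unsigned InvalidIndex = 0xffffffff;
    constexpr unsigned numberOfRegisters = 6;

    // Hypothetical machine-register ids 0..7.
    constexpr unsigned registerForIndex[numberOfRegisters] = { 0, 2, 1, 3, 6, 7 };
    constexpr unsigned indexForRegister[8] = { 0, 2, 1, 3, InvalidIndex, InvalidIndex, 4, 5 };

    unsigned toRegister(unsigned index) { assert(index < numberOfRegisters); return registerForIndex[index]; }
    unsigned toIndex(unsigned reg) { assert(reg < 8); return indexForRegister[reg]; }

    } // namespace sketch

    int main()
    {
        // Round trip: every allocatable index maps to a register and back to itself.
        for (unsigned i = 0; i < sketch::numberOfRegisters; ++i)
            assert(sketch::toIndex(sketch::toRegister(i)) == i);
        return 0;
    }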
trunk/Source/JavaScriptCore/jit/JIT.cpp
r189288 r189293

         if (maxFrameExtentForSlowPathCall)
             addPtr(TrustedImm32(maxFrameExtentForSlowPathCall), stackPointerRegister);
-        if (returnValueGPR != regT0)
-            move(returnValueGPR, regT0);
-        branchTest32(Zero, regT0).linkTo(beginLabel, this);
-        GPRReg thunkReg;
-#if USE(JSVALUE64)
-        thunkReg = GPRInfo::regT7;
-#else
-        thunkReg = GPRInfo::regT5;
-#endif
+        branchTest32(Zero, returnValueGPR).linkTo(beginLabel, this);
+        GPRReg thunkReg = GPRInfo::argumentGPR1;
         CodeLocationLabel* failThunkLabels =
             m_vm->arityCheckFailReturnThunks->returnPCsFor(*m_vm, m_codeBlock->numParameters());
         move(TrustedImmPtr(failThunkLabels), thunkReg);
-        loadPtr(BaseIndex(thunkReg, regT0, timesPtr()), thunkReg);
+        loadPtr(BaseIndex(thunkReg, returnValueGPR, timesPtr()), thunkReg);
+        move(returnValueGPR, GPRInfo::argumentGPR0);
         emitNakedCall(m_vm->getCTIStub(arityFixupGenerator).code());
trunk/Source/JavaScriptCore/jit/JITArithmetic.cpp
r168776 r189293

 
     // Make sure registers are correct for x86 IDIV instructions.
+#if CPU(X86)
+    auto edx = regT1;
+    auto ecx = regT2;
+#elif OS(WINDOWS)
+    auto edx = regT1;
+    auto ecx = regT5;
+#else
+    auto edx = regT2;
+    auto ecx = regT3;
+#endif
     ASSERT(regT0 == X86Registers::eax);
-    ASSERT(regT1 == X86Registers::edx);
-    ASSERT(regT2 == X86Registers::ecx);
-
-    emitGetVirtualRegisters(op1, regT3, op2, regT2);
-    emitJumpSlowCaseIfNotImmediateInteger(regT3);
-    emitJumpSlowCaseIfNotImmediateInteger(regT2);
-
-    move(regT3, regT0);
-    addSlowCase(branchTest32(Zero, regT2));
-    Jump denominatorNotNeg1 = branch32(NotEqual, regT2, TrustedImm32(-1));
+    ASSERT(edx == X86Registers::edx);
+    ASSERT(ecx == X86Registers::ecx);
+
+    emitGetVirtualRegisters(op1, regT4, op2, ecx);
+    emitJumpSlowCaseIfNotImmediateInteger(regT4);
+    emitJumpSlowCaseIfNotImmediateInteger(ecx);
+
+    move(regT4, regT0);
+    addSlowCase(branchTest32(Zero, ecx));
+    Jump denominatorNotNeg1 = branch32(NotEqual, ecx, TrustedImm32(-1));
     addSlowCase(branch32(Equal, regT0, TrustedImm32(-2147483647-1)));
     denominatorNotNeg1.link(this);
     m_assembler.cdq();
-    m_assembler.idivl_r(regT2);
-    Jump numeratorPositive = branch32(GreaterThanOrEqual, regT3, TrustedImm32(0));
-    addSlowCase(branchTest32(Zero, regT1));
+    m_assembler.idivl_r(ecx);
+    Jump numeratorPositive = branch32(GreaterThanOrEqual, regT4, TrustedImm32(0));
+    addSlowCase(branchTest32(Zero, edx));
     numeratorPositive.link(this);
-    emitFastArithReTagImmediate(regT1, regT0);
+    emitFastArithReTagImmediate(edx, regT0);
     emitPutVirtualRegister(result);
 }
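The reason this code cares so much about which registers alias eax, edx, and ecx is that x86 idiv has fixed operands (dividend in edx:eax, remainder in edx). For readers who want the semantics rather than the register choreography, here is a plain C++ sketch (illustrative only, not from the patch) of the cases the fast path above punts to the slow path:

    // Illustrative sketch only -- not WebKit code.
    #include <cstdint>
    #include <optional>

    std::optional<int32_t> fastIntMod(int32_t numerator, int32_t denominator)
    {
        if (!denominator)
            return std::nullopt;                  // slow path: JS result is NaN
        if (numerator == INT32_MIN && denominator == -1)
            return std::nullopt;                  // slow path: idiv would fault on this case
        int32_t remainder = numerator % denominator;
        if (!remainder && numerator < 0)
            return std::nullopt;                  // slow path: JS result is -0, not an int32
        return remainder;                         // fast path: re-tag and return
    }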
trunk/Source/JavaScriptCore/jit/JITOpcodes.cpp
r189288 r189293

         Jump noOptimizedEntry = branchTestPtr(Zero, returnValueGPR);
         if (!ASSERT_DISABLED) {
-            Jump ok = branchPtr(MacroAssembler::Above, regT0, TrustedImmPtr(bitwise_cast<void*>(static_cast<intptr_t>(1000))));
+            Jump ok = branchPtr(MacroAssembler::Above, returnValueGPR, TrustedImmPtr(bitwise_cast<void*>(static_cast<intptr_t>(1000))));
             abortWithReason(JITUnreasonableLoopHintJumpTarget);
             ok.link(this);
trunk/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
r189288 r189293

 {
     ASSERT(returnValueGPR != callFrameRegister);
-    emitLoad(currentInstruction[1].u.operand, regT1, regT0);
+    emitLoad(currentInstruction[1].u.operand, regT1, returnValueGPR);
     emitFunctionEpilogue();
     ret();
…
     MarkedAllocator* allocator = &m_vm->heap.allocatorForObjectWithoutDestructor(allocationSize);
 
-    RegisterID resultReg = regT0;
+    RegisterID resultReg = returnValueGPR;
     RegisterID allocatorReg = regT1;
-    RegisterID scratchReg = regT2;
+    RegisterID scratchReg = regT3;
 
     move(TrustedImmPtr(allocator), allocatorReg);
trunk/Source/JavaScriptCore/jit/RegisterPreservationWrapperGenerator.cpp
r170876 r189293

     RegisterSet toSave = registersToPreserve();
     ptrdiff_t offset = registerPreservationOffset();
+
+    ASSERT(!toSave.get(GPRInfo::regT1));
+    ASSERT(!toSave.get(GPRInfo::regT2));
+    ASSERT(!toSave.get(GPRInfo::regT3));
 
     AssemblyHelpers jit(&vm, 0);
…
         GPRInfo::regT2);
 
-    ASSERT(!toSave.get(GPRInfo::regT4));
-    jit.move(AssemblyHelpers::stackPointerRegister, GPRInfo::regT4);
+    jit.move(AssemblyHelpers::stackPointerRegister, GPRInfo::regT3);
 
     AssemblyHelpers::Label loop = jit.label();
     jit.sub32(AssemblyHelpers::TrustedImm32(1), GPRInfo::regT2);
-    jit.load64(AssemblyHelpers::Address(GPRInfo::regT4, offset), GPRInfo::regT0);
-    jit.store64(GPRInfo::regT0, GPRInfo::regT4);
-    jit.addPtr(AssemblyHelpers::TrustedImm32(sizeof(Register)), GPRInfo::regT4);
+    jit.load64(AssemblyHelpers::Address(GPRInfo::regT3, offset), GPRInfo::regT0);
+    jit.store64(GPRInfo::regT0, GPRInfo::regT3);
+    jit.addPtr(AssemblyHelpers::TrustedImm32(sizeof(Register)), GPRInfo::regT3);
     jit.branchTest32(AssemblyHelpers::NonZero, GPRInfo::regT2).linkTo(loop, &jit);
 
-    // At this point regT4 + offset points to where we save things.
+    // At this point regT3 + offset points to where we save things.
     ptrdiff_t currentOffset = 0;
-    jit.storePtr(GPRInfo::regT1, AssemblyHelpers::Address(GPRInfo::regT4, currentOffset));
+    jit.storePtr(GPRInfo::regT1, AssemblyHelpers::Address(GPRInfo::regT3, currentOffset));
 
     for (GPRReg gpr = AssemblyHelpers::firstRegister(); gpr <= AssemblyHelpers::lastRegister(); gpr = static_cast<GPRReg>(gpr + 1)) {
…
             continue;
         currentOffset += sizeof(Register);
-        jit.store64(gpr, AssemblyHelpers::Address(GPRInfo::regT4, currentOffset));
+        jit.store64(gpr, AssemblyHelpers::Address(GPRInfo::regT3, currentOffset));
     }
     for (FPRReg fpr = AssemblyHelpers::firstFPRegister(); fpr <= AssemblyHelpers::lastFPRegister(); fpr = static_cast<FPRReg>(fpr + 1)) {
…
             continue;
         currentOffset += sizeof(Register);
-        jit.storeDouble(fpr, AssemblyHelpers::Address(GPRInfo::regT4, currentOffset));
+        jit.storeDouble(fpr, AssemblyHelpers::Address(GPRInfo::regT3, currentOffset));
     }
 
…
     ptrdiff_t offset = registerPreservationOffset();
 
-    ASSERT(!toSave.get(GPRInfo::regT4));
+    ASSERT(!toSave.get(GPRInfo::regT1));
+    ASSERT(!toSave.get(GPRInfo::regT2));
+    ASSERT(!toSave.get(GPRInfo::regT3));
 
     // We need to place the stack pointer back to where the caller thought they left it.
…
             AssemblyHelpers::stackPointerRegister,
             (JSStack::ArgumentCount - JSStack::CallerFrameAndPCSize) * sizeof(Register) + PayloadOffset),
-        GPRInfo::regT4);
-
-    jit.move(GPRInfo::regT4, GPRInfo::regT2);
+        GPRInfo::regT3);
+
+    jit.move(GPRInfo::regT3, GPRInfo::regT2);
     jit.lshift32(AssemblyHelpers::TrustedImm32(3), GPRInfo::regT2);
 
…
     // Thunks like this rely on the ArgumentCount being intact. Pay it forward.
     jit.store32(
-        GPRInfo::regT4,
+        GPRInfo::regT3,
         AssemblyHelpers::Address(
             AssemblyHelpers::stackPointerRegister,
trunk/Source/JavaScriptCore/jit/ThunkGenerators.cpp
r187819 r189293

 
 #elif CPU(ARM64)
-    COMPILE_ASSERT(ARM64Registers::x3 != JSInterfaceJIT::regT1, prev_callframe_not_trampled_by_T1);
-    COMPILE_ASSERT(ARM64Registers::x3 != JSInterfaceJIT::regT3, prev_callframe_not_trampled_by_T3);
     COMPILE_ASSERT(ARM64Registers::x0 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_0);
     COMPILE_ASSERT(ARM64Registers::x1 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_1);
…
     JSInterfaceJIT jit(vm);
 
-    // We enter with fixup count, in aligned stack units, in regT0 and the return thunk in
-    // regT5 on 32-bit and regT7 on 64-bit.
+    // We enter with fixup count, in aligned stack units, in argumentGPR0 and the return thunk in argumentGPR1
+    // We have the guarantee that a0, a1, a2, t3, t4 and t5 (or t0 for Windows) are all distinct :-)
 #if USE(JSVALUE64)
+#if OS(WINDOWS)
+    const GPRReg extraTemp = JSInterfaceJIT::regT0;
+#else
+    const GPRReg extraTemp = JSInterfaceJIT::regT5;
+#endif
 #  if CPU(X86_64)
     jit.pop(JSInterfaceJIT::regT4);
 #  endif
-    jit.lshift32(JSInterfaceJIT::TrustedImm32(logStackAlignmentRegisters()), JSInterfaceJIT::regT0);
-    jit.neg64(JSInterfaceJIT::regT0);
-    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT6);
-    jit.load32(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, JSStack::ArgumentCount * sizeof(Register)), JSInterfaceJIT::regT2);
-    jit.add32(JSInterfaceJIT::TrustedImm32(JSStack::CallFrameHeaderSize), JSInterfaceJIT::regT2);
-
-    // Move current frame down regT0 number of slots
+    jit.lshift32(JSInterfaceJIT::TrustedImm32(logStackAlignmentRegisters()), JSInterfaceJIT::argumentGPR0);
+    jit.neg64(JSInterfaceJIT::argumentGPR0);
+    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT3);
+    jit.load32(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, JSStack::ArgumentCount * sizeof(Register)), JSInterfaceJIT::argumentGPR2);
+    jit.add32(JSInterfaceJIT::TrustedImm32(JSStack::CallFrameHeaderSize), JSInterfaceJIT::argumentGPR2);
+
+    // Move current frame down argumentGPR0 number of slots
     JSInterfaceJIT::Label copyLoop(jit.label());
-    jit.load64(JSInterfaceJIT::regT6, JSInterfaceJIT::regT1);
-    jit.store64(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT6, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
-    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT6);
-    jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(copyLoop, &jit);
-
-    // Fill in regT0 - 1 missing arg slots with undefined
-    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::regT2);
-    jit.move(JSInterfaceJIT::TrustedImm64(ValueUndefined), JSInterfaceJIT::regT1);
-    jit.add32(JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2);
+    jit.load64(JSInterfaceJIT::regT3, extraTemp);
+    jit.store64(extraTemp, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight));
+    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
+    jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2).linkTo(copyLoop, &jit);
+
+    // Fill in argumentGPR0 - 1 missing arg slots with undefined
+    jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::argumentGPR2);
+    jit.move(JSInterfaceJIT::TrustedImm64(ValueUndefined), extraTemp);
+    jit.add32(JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2);
     JSInterfaceJIT::Label fillUndefinedLoop(jit.label());
-    jit.store64(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT6, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
-    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT6);
-    jit.branchAdd32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(fillUndefinedLoop, &jit);
+    jit.store64(extraTemp, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight));
+    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
+    jit.branchAdd32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2).linkTo(fillUndefinedLoop, &jit);
 
     // Adjust call frame register and stack pointer to account for missing args
-    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::regT1);
-    jit.lshift64(JSInterfaceJIT::TrustedImm32(3), JSInterfaceJIT::regT1);
-    jit.addPtr(JSInterfaceJIT::regT1, JSInterfaceJIT::callFrameRegister);
-    jit.addPtr(JSInterfaceJIT::regT1, JSInterfaceJIT::stackPointerRegister);
+    jit.move(JSInterfaceJIT::argumentGPR0, extraTemp);
+    jit.lshift64(JSInterfaceJIT::TrustedImm32(3), extraTemp);
+    jit.addPtr(extraTemp, JSInterfaceJIT::callFrameRegister);
+    jit.addPtr(extraTemp, JSInterfaceJIT::stackPointerRegister);
 
     // Save the original return PC.
-    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrame::returnPCOffset()), GPRInfo::regT1);
-    jit.storePtr(GPRInfo::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT6, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
+    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrame::returnPCOffset()), extraTemp);
+    jit.storePtr(extraTemp, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight));
 
     // Install the new return PC.
-    jit.storePtr(GPRInfo::regT7, JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrame::returnPCOffset()));
+    jit.storePtr(GPRInfo::argumentGPR1, JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrame::returnPCOffset()));
 
 #  if CPU(X86_64)
…
     jit.pop(JSInterfaceJIT::regT4);
 #  endif
-    jit.lshift32(JSInterfaceJIT::TrustedImm32(logStackAlignmentRegisters()), JSInterfaceJIT::regT0);
-    jit.neg32(JSInterfaceJIT::regT0);
+    jit.lshift32(JSInterfaceJIT::TrustedImm32(logStackAlignmentRegisters()), JSInterfaceJIT::argumentGPR0);
+    jit.neg32(JSInterfaceJIT::argumentGPR0);
     jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT3);
-    jit.load32(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, JSStack::ArgumentCount * sizeof(Register)), JSInterfaceJIT::regT2);
-    jit.add32(JSInterfaceJIT::TrustedImm32(JSStack::CallFrameHeaderSize), JSInterfaceJIT::regT2);
-
-    // Move current frame down regT0 number of slots
+    jit.load32(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, JSStack::ArgumentCount * sizeof(Register)), JSInterfaceJIT::argumentGPR2);
+    jit.add32(JSInterfaceJIT::TrustedImm32(JSStack::CallFrameHeaderSize), JSInterfaceJIT::argumentGPR2);
+
+    // Move current frame down argumentGPR0 number of slots
     JSInterfaceJIT::Label copyLoop(jit.label());
-    jit.load32(JSInterfaceJIT::regT3, JSInterfaceJIT::regT1);
-    jit.store32(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
-    jit.load32(MacroAssembler::Address(JSInterfaceJIT::regT3, 4), JSInterfaceJIT::regT1);
-    jit.store32(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight, 4));
+    jit.load32(JSInterfaceJIT::regT3, JSInterfaceJIT::regT5);
+    jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight));
+    jit.load32(MacroAssembler::Address(JSInterfaceJIT::regT3, 4), JSInterfaceJIT::regT5);
+    jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight, 4));
     jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
-    jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(copyLoop, &jit);
-
-    // Fill in regT0 - 1 missing arg slots with undefined
-    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::regT2);
-    jit.add32(JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2);
+    jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2).linkTo(copyLoop, &jit);
+
+    // Fill in argumentGPR0 - 1 missing arg slots with undefined
+    jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::argumentGPR2);
+    jit.add32(JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2);
     JSInterfaceJIT::Label fillUndefinedLoop(jit.label());
-    jit.move(JSInterfaceJIT::TrustedImm32(0), JSInterfaceJIT::regT1);
-    jit.store32(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
-    jit.move(JSInterfaceJIT::TrustedImm32(JSValue::UndefinedTag), JSInterfaceJIT::regT1);
-    jit.store32(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight, 4));
+    jit.move(JSInterfaceJIT::TrustedImm32(0), JSInterfaceJIT::regT5);
+    jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight));
+    jit.move(JSInterfaceJIT::TrustedImm32(JSValue::UndefinedTag), JSInterfaceJIT::regT5);
+    jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight, 4));
 
     jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
-    jit.branchAdd32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(fillUndefinedLoop, &jit);
+    jit.branchAdd32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2).linkTo(fillUndefinedLoop, &jit);
 
     // Adjust call frame register and stack pointer to account for missing args
-    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::regT1);
-    jit.lshift32(JSInterfaceJIT::TrustedImm32(3), JSInterfaceJIT::regT1);
-    jit.addPtr(JSInterfaceJIT::regT1, JSInterfaceJIT::callFrameRegister);
-    jit.addPtr(JSInterfaceJIT::regT1, JSInterfaceJIT::stackPointerRegister);
+    jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::regT5);
+    jit.lshift32(JSInterfaceJIT::TrustedImm32(3), JSInterfaceJIT::regT5);
+    jit.addPtr(JSInterfaceJIT::regT5, JSInterfaceJIT::callFrameRegister);
+    jit.addPtr(JSInterfaceJIT::regT5, JSInterfaceJIT::stackPointerRegister);
 
     // Save the original return PC.
-    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrame::returnPCOffset()), GPRInfo::regT1);
-    jit.storePtr(GPRInfo::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
+    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrame::returnPCOffset()), GPRInfo::regT5);
+    jit.storePtr(GPRInfo::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight));
 
     // Install the new return PC.
-    jit.storePtr(GPRInfo::regT5, JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrame::returnPCOffset()));
+    jit.storePtr(GPRInfo::argumentGPR1, JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrame::returnPCOffset()));
 
 #  if CPU(X86)
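The arity fixup thunk above is easier to follow once its net effect is stated without register names. The following is an illustrative, high-level C++ model only (not WebKit code, and the undefined encoding shown is a stand-in): when a function is called with fewer arguments than it declares, the frame is relocated and the missing trailing argument slots are filled with undefined so the callee sees a full-size frame.

    // Illustrative sketch only -- not WebKit code.
    #include <cstdint>
    #include <vector>

    constexpr uint64_t UndefinedSketch = 0xa; // stand-in value; JSC's real ValueUndefined encoding may differ

    std::vector<uint64_t> withArityFixup(std::vector<uint64_t> args, size_t expectedArgumentCount)
    {
        // Equivalent of the "fill in missing arg slots with undefined" loop in the thunk.
        while (args.size() < expectedArgumentCount)
            args.push_back(UndefinedSketch);
        return args;
    }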
trunk/Source/JavaScriptCore/llint/LowLevelInterpreter.asm
r189279 r189293 21 21 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 22 22 # THE POSSIBILITY OF SUCH DAMAGE. 23 24 # Crash course on the language that this is written in (which I just call 25 # "assembly" even though it's more than that): 26 # 27 # - Mostly gas-style operand ordering. The last operand tends to be the 28 # destination. So "a := b" is written as "mov b, a". But unlike gas, 29 # comparisons are in-order, so "if (a < b)" is written as 30 # "bilt a, b, ...". 31 # 32 # - "b" = byte, "h" = 16-bit word, "i" = 32-bit word, "p" = pointer. 33 # For 32-bit, "i" and "p" are interchangeable except when an op supports one 34 # but not the other. 35 # 36 # - In general, valid operands for macro invocations and instructions are 37 # registers (eg "t0"), addresses (eg "4[t0]"), base-index addresses 38 # (eg "7[t0, t1, 2]"), absolute addresses (eg "0xa0000000[]"), or labels 39 # (eg "_foo" or ".foo"). Macro invocations can also take anonymous 40 # macros as operands. Instructions cannot take anonymous macros. 41 # 42 # - Labels must have names that begin with either "_" or ".". A "." label 43 # is local and gets renamed before code gen to minimize namespace 44 # pollution. A "_" label is an extern symbol (i.e. ".globl"). The "_" 45 # may or may not be removed during code gen depending on whether the asm 46 # conventions for C name mangling on the target platform mandate a "_" 47 # prefix. 48 # 49 # - A "macro" is a lambda expression, which may be either anonymous or 50 # named. But this has caveats. "macro" can take zero or more arguments, 51 # which may be macros or any valid operands, but it can only return 52 # code. But you can do Turing-complete things via continuation passing 53 # style: "macro foo (a, b) b(a, a) end foo(foo, foo)". Actually, don't do 54 # that, since you'll just crash the assembler. 55 # 56 # - An "if" is a conditional on settings. Any identifier supplied in the 57 # predicate of an "if" is assumed to be a #define that is available 58 # during code gen. So you can't use "if" for computation in a macro, but 59 # you can use it to select different pieces of code for different 60 # platforms. 61 # 62 # - Arguments to macros follow lexical scoping rather than dynamic scoping. 63 # Const's also follow lexical scoping and may override (hide) arguments 64 # or other consts. All variables (arguments and constants) can be bound 65 # to operands. Additionally, arguments (but not constants) can be bound 66 # to macros. 67 68 # The following general-purpose registers are available: 69 # 70 # - cfr and sp hold the call frame and (native) stack pointer respectively. 71 # They are callee-save registers, and guaranteed to be distinct from all other 72 # registers on all architectures. 73 # 74 # - lr is defined on non-X86 architectures (ARM64, ARMv7, ARM, 75 # ARMv7_TRADITIONAL, MIPS, SH4 and CLOOP) and holds the return PC 76 # 77 # - pc holds the (native) program counter on 32-bits ARM architectures (ARM, 78 # ARMv7, ARMv7_TRADITIONAL) 79 # 80 # - t0, t1, t2, t3, t4 and optionally t5 are temporary registers that can get trashed on 81 # calls, and are pairwise distinct registers. t4 holds the JS program counter, so use 82 # with caution in opcodes (actually, don't use it in opcodes at all, except as PC). 83 # 84 # - r0 and r1 are the platform's customary return registers, and thus are 85 # two distinct registers 86 # 87 # - a0, a1, a2 and a3 are the platform's customary argument registers, and 88 # thus are pairwise distinct registers. 
Be mindful that: 89 # + On X86, there are no argument registers. a0 and a1 are edx and 90 # ecx following the fastcall convention, but you should still use the stack 91 # to pass your arguments. The cCall2 and cCall4 macros do this for you. 92 # + On X86_64_WIN, you should allocate space on the stack for the arguments, 93 # and the return convention is weird for > 8 bytes types. The only place we 94 # use > 8 bytes return values is on a cCall, and cCall2 and cCall4 handle 95 # this for you. 96 # 97 # - The only registers guaranteed to be caller-saved are r0, r1, a0, a1 and a2, and 98 # you should be mindful of that in functions that are called directly from C. 99 # If you need more registers, you should push and pop them like a good 100 # assembly citizen, because any other register will be callee-saved on X86. 101 # 102 # You can additionally assume: 103 # 104 # - a3, t2, t3, t4 and t5 are never return registers; t0, t1, a0, a1 and a2 105 # can be return registers. 106 # 107 # - t4 and t5 are never argument registers, t3 can only be a3, t1 can only be 108 # a1; but t0 and t2 can be either a0 or a2. 109 # 110 # - On 64 bits, csr0, csr1, csr2 and optionally csr3, csr4, csr5 and csr6 111 # are available as callee-save registers. 112 # csr0 is used to store the PC base, while the last two csr registers are used 113 # to store special tag values. Don't use them for anything else. 114 # 115 # Additional platform-specific details (you shouldn't rely on this remaining 116 # true): 117 # 118 # - For consistency with the baseline JIT, t0 is always r0 (and t1 is always 119 # r1 on 32 bits platforms). You should use the r version when you need return 120 # registers, and the t version otherwise: code using t0 (or t1) should still 121 # work if swapped with e.g. t3, while code using r0 (or r1) should not. There 122 # *may* be legacy code relying on this. 123 # 124 # - On all platforms other than X86, t0 can only be a0 and t2 can only be a2. 125 # 126 # - On all platforms other than X86 and X86_64, a2 is not a return register. 127 # a2 is r0 on X86 (because we have so few registers) and r1 on X86_64 (because 128 # the ABI enforces it). 129 # 130 # The following floating-point registers are available: 131 # 132 # - ft0-ft5 are temporary floating-point registers that get trashed on calls, 133 # and are pairwise distinct. 134 # 135 # - fa0 and fa1 are the platform's customary floating-point argument 136 # registers, and are both distinct. On 64-bits platforms, fa2 and fa3 are 137 # additional floating-point argument registers. 138 # 139 # - fr is the platform's customary floating-point return register 140 # 141 # You can assume that ft1-ft5 or fa1-fa3 are never fr, and that ftX is never 142 # faY if X != Y. 23 143 24 144 # First come the common protocols that both interpreters use. Note that each … … 108 228 # - Use a pair of registers to represent the PC: one register for the 109 229 # base of the bytecodes, and one register for the index. 110 # - The PC base (or PB for short) should be stored in the csr. It will 111 # get clobbered on calls to other JS code, but will get saved on calls 112 # to C functions. 230 # - The PC base (or PB for short) must be stored in a callee-save register. 113 231 # - C calls are still given the Instruction* rather than the PC index. 114 232 # This requires an add before the call, and a sub after. 
115 const PC = t5 116 const PB = t6 117 const tagTypeNumber = csr1 118 const tagMask = csr2 119 233 const PC = t4 234 const PB = csr0 235 if ARM64 236 const tagTypeNumber = csr1 237 const tagMask = csr2 238 elsif X86_64 239 const tagTypeNumber = csr3 240 const tagMask = csr4 241 elsif X86_64_WIN 242 const tagTypeNumber = csr5 243 const tagMask = csr6 244 elsif C_LOOP 245 const tagTypeNumber = csr1 246 const tagMask = csr2 247 end 248 120 249 macro loadisFromInstruction(offset, dest) 121 250 loadis offset * 8[PB, PC, 8], dest … … 131 260 132 261 else 133 const PC = t 5262 const PC = t4 134 263 macro loadisFromInstruction(offset, dest) 135 264 loadis offset * 4[PC], dest … … 139 268 loadp offset * 4[PC], dest 140 269 end 270 end 271 272 if X86_64_WIN 273 const extraTempReg = t0 274 else 275 const extraTempReg = t5 141 276 end 142 277 … … 466 601 macro restoreStackPointerAfterCall() 467 602 loadp CodeBlock[cfr], t2 468 getFrameRegisterSizeForCodeBlock(t2, t 4)603 getFrameRegisterSizeForCodeBlock(t2, t2) 469 604 if ARMv7 470 subp cfr, t 4, t4471 move t 4, sp605 subp cfr, t2, t2 606 move t2, sp 472 607 else 473 subp cfr, t 4, sp608 subp cfr, t2, sp 474 609 end 475 610 end … … 495 630 callCallSlowPath( 496 631 slowPath, 497 macro (callee )498 btpz t1, .dontUpdateSP632 macro (callee, calleeFrame) 633 btpz calleeFrame, .dontUpdateSP 499 634 if ARMv7 500 addp CallerFrameAndPCSize, t1, t1501 move t1, sp635 addp CallerFrameAndPCSize, calleeFrame, calleeFrame 636 move calleeFrame, sp 502 637 else 503 addp CallerFrameAndPCSize, t1, sp638 addp CallerFrameAndPCSize, calleeFrame, sp 504 639 end 505 640 .dontUpdateSP: … … 597 732 baddis 5, CodeBlock::m_llintExecuteCounter + BaselineExecutionCounter::m_counter[t1], .continue 598 733 if JSVALUE64 599 cCall2(osrSlowPath, cfr, PC) 734 move cfr, a0 735 move PC, a1 736 cCall2(osrSlowPath) 600 737 else 601 738 # We are after the function prologue, but before we have set up sp from the CodeBlock. 602 739 # Temporarily align stack pointer for this call. 603 740 subp 8, sp 604 cCall2(osrSlowPath, cfr, PC) 741 move cfr, a0 742 move PC, a1 743 cCall2(osrSlowPath) 605 744 addp 8, sp 606 745 end 607 btpz t0, .recover746 btpz r0, .recover 608 747 move cfr, sp # restore the previous sp 609 748 # pop the callerFrame since we will jump to a function that wants to save it … … 616 755 pop cfr 617 756 end 618 jmp t0757 jmp r0 619 758 .recover: 620 759 codeBlockGetter(t1) … … 641 780 subp maxFrameExtentForSlowPathCall, sp # Set up temporary stack pointer for call 642 781 callSlowPath(_llint_stack_check) 643 bpeq t1, 0, .stackHeightOKGetCodeBlock644 move t1, cfr782 bpeq r1, 0, .stackHeightOKGetCodeBlock 783 move r1, cfr 645 784 dispatch(0) # Go to exception handler in PC 646 785 … … 739 878 global _sanitizeStackForVMImpl 740 879 _sanitizeStackForVMImpl: 741 if X86_64 742 const vm = t4 743 const address = t1 744 const zeroValue = t0 745 elsif X86_64_WIN 746 const vm = t2 747 const address = t1 748 const zeroValue = t0 749 elsif X86 or X86_WIN 750 const vm = t2 751 const address = t1 752 const zeroValue = t0 753 else 754 const vm = a0 755 const address = t1 756 const zeroValue = t2 880 # We need three non-aliased caller-save registers. We are guaranteed 881 # this for a0, a1 and a2 on all architectures. 
882 if X86 or X86_WIN 883 loadp 4[sp], a0 757 884 end 758 759 if X86 or X86_WIN 760 loadp 4[sp], vm 761 end 885 const vm = a0 886 const address = a1 887 const zeroValue = a2 762 888 763 889 loadp VM::m_lastStackTop[vm], address … … 778 904 global _vmEntryRecord 779 905 _vmEntryRecord: 780 if X86_64 781 const entryFrame = t4 782 const result = t0 783 elsif X86 or X86_WIN or X86_64_WIN 784 const entryFrame = t2 785 const result = t0 786 else 787 const entryFrame = a0 788 const result = t0 906 if X86 or X86_WIN 907 loadp 4[sp], a0 789 908 end 790 791 if X86 or X86_WIN 792 loadp 4[sp], entryFrame 793 end 794 795 vmEntryRecord(entryFrame, result) 909 910 vmEntryRecord(a0, r0) 796 911 ret 797 912 end … … 801 916 _llint_entry: 802 917 crash() 803 918 else 804 919 macro initPCRelative(pcBase) 805 if X86_64 or X86_64_WIN 920 if X86_64 or X86_64_WIN or X86 or X86_WIN 806 921 call _relativePCBase 807 922 _relativePCBase: 808 923 pop pcBase 809 elsif X86 or X86_WIN810 call _relativePCBase811 _relativePCBase:812 pop pcBase813 loadp 20[sp], t4814 924 elsif ARM64 815 925 elsif ARMv7 … … 832 942 end 833 943 944 # The PC base is in t1, as this is what _llint_entry leaves behind through 945 # initPCRelative(t1) 834 946 macro setEntryAddress(index, label) 835 if X86_64 836 leap (label - _relativePCBase)[t1], t0 837 move index, t2 838 storep t0, [t4, t2, 8] 839 elsif X86_64_WIN 840 leap (label - _relativePCBase)[t1], t0 947 if X86_64 or X86_64_WIN 948 leap (label - _relativePCBase)[t1], t3 841 949 move index, t4 842 storep t 0, [t2, t4, 8]950 storep t3, [a0, t4, 8] 843 951 elsif X86 or X86_WIN 844 leap (label - _relativePCBase)[t1], t 0845 move index, t 2846 storep t 0, [t4, t2, 4]952 leap (label - _relativePCBase)[t1], t3 953 move index, t4 954 storep t3, [a0, t4, 4] 847 955 elsif ARM64 848 956 pcrtoaddr label, t1 849 move index, t 2850 storep t1, [a0, t 2, 8]957 move index, t4 958 storep t1, [a0, t4, 8] 851 959 elsif ARM or ARMv7 or ARMv7_TRADITIONAL 852 mvlbl (label - _relativePCBase), t 2853 addp t 2, t1, t2960 mvlbl (label - _relativePCBase), t4 961 addp t4, t1, t4 854 962 move index, t3 855 storep t 2, [a0, t3, 4]963 storep t4, [a0, t3, 4] 856 964 elsif SH4 857 move (label - _relativePCBase), t 2858 addp t 2, t1, t2965 move (label - _relativePCBase), t4 966 addp t4, t1, t4 859 967 move index, t3 860 storep t 2, [a0, t3, 4]968 storep t4, [a0, t3, 4] 861 969 flushcp # Force constant pool flush to avoid "pcrel too far" link error. 
862 970 elsif MIPS 863 la label, t 2971 la label, t4 864 972 la _relativePCBase, t3 865 subp t3, t 2866 addp t 2, t1, t2973 subp t3, t4 974 addp t4, t1, t4 867 975 move index, t3 868 storep t 2, [a0, t3, 4]976 storep t4, [a0, t3, 4] 869 977 end 870 978 end … … 875 983 functionPrologue() 876 984 pushCalleeSaves() 985 if X86 or X86_WIN 986 loadp 20[sp], a0 987 end 877 988 initPCRelative(t1) 878 989 … … 1214 1325 callSlowPath(_llint_slow_path_size_frame_for_varargs) 1215 1326 branchIfException(_llint_throw_from_slow_path_trampoline) 1216 # calleeFrame in t11327 # calleeFrame in r1 1217 1328 if JSVALUE64 1218 move t1, sp1329 move r1, sp 1219 1330 else 1220 1331 # The calleeFrame is not stack aligned, move down by CallerFrameAndPCSize to align 1221 1332 if ARMv7 1222 subp t1, CallerFrameAndPCSize, t21333 subp r1, CallerFrameAndPCSize, t2 1223 1334 move t2, sp 1224 1335 else 1225 subp t1, CallerFrameAndPCSize, sp1336 subp r1, CallerFrameAndPCSize, sp 1226 1337 end 1227 1338 end … … 1232 1343 callSlowPath(_llint_slow_path_size_frame_for_varargs) 1233 1344 branchIfException(_llint_throw_from_slow_path_trampoline) 1234 # calleeFrame in t11345 # calleeFrame in r1 1235 1346 if JSVALUE64 1236 move t1, sp1347 move r1, sp 1237 1348 else 1238 1349 # The calleeFrame is not stack aligned, move down by CallerFrameAndPCSize to align 1239 1350 if ARMv7 1240 subp t1, CallerFrameAndPCSize, t21351 subp r1, CallerFrameAndPCSize, t2 1241 1352 move t2, sp 1242 1353 else 1243 subp t1, CallerFrameAndPCSize, sp1354 subp r1, CallerFrameAndPCSize, sp 1244 1355 end 1245 1356 end -
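The rewritten _sanitizeStackForVMImpl and _vmEntryRecord in the hunks above lean on the convention that a0, a1 and a2 name three distinct caller-save machine registers on every backend. A minimal standalone Ruby sketch of that invariant follows; the per-backend assignments are hand-copied from the convention comments added to the offlineasm backends further down in this changeset, and the check itself is purely illustrative, not part of offlineasm.

    # Illustrative only: verify the "a0, a1 and a2 are three non-aliased
    # caller-save registers" guarantee that the rewritten stack-sanitizing
    # and vmEntryRecord code relies on. The mappings are a hand-copied
    # subset of the convention comments in offlineasm/*.rb.
    MAPPINGS = {
      "X86"        => { "a0" => "ecx", "a1" => "edx", "a2" => "eax" },
      "X86_64"     => { "a0" => "rdi", "a1" => "rsi", "a2" => "rdx" },
      "X86_64_WIN" => { "a0" => "rcx", "a1" => "rdx", "a2" => "r8"  },
      "ARM64"      => { "a0" => "x0",  "a1" => "x1",  "a2" => "x2"  },
      "MIPS"       => { "a0" => "$a0", "a1" => "$a1", "a2" => "$a2" },
    }

    MAPPINGS.each do |backend, regs|
      machine = regs.values_at("a0", "a1", "a2")
      raise "#{backend}: a0/a1/a2 alias each other" unless machine.uniq.size == 3
      puts "#{backend}: a0/a1/a2 -> #{machine.join(', ')} (non-aliased)"
    end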
trunk/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm
r189279 r189293 23 23 24 24 25 # Crash course on the language that this is written in (which I just call26 # "assembly" even though it's more than that):27 #28 # - Mostly gas-style operand ordering. The last operand tends to be the29 # destination. So "a := b" is written as "mov b, a". But unlike gas,30 # comparisons are in-order, so "if (a < b)" is written as31 # "bilt a, b, ...".32 #33 # - "b" = byte, "h" = 16-bit word, "i" = 32-bit word, "p" = pointer.34 # Currently this is just 32-bit so "i" and "p" are interchangeable35 # except when an op supports one but not the other.36 #37 # - In general, valid operands for macro invocations and instructions are38 # registers (eg "t0"), addresses (eg "4[t0]"), base-index addresses39 # (eg "7[t0, t1, 2]"), absolute addresses (eg "0xa0000000[]"), or labels40 # (eg "_foo" or ".foo"). Macro invocations can also take anonymous41 # macros as operands. Instructions cannot take anonymous macros.42 #43 # - Labels must have names that begin with either "_" or ".". A "." label44 # is local and gets renamed before code gen to minimize namespace45 # pollution. A "_" label is an extern symbol (i.e. ".globl"). The "_"46 # may or may not be removed during code gen depending on whether the asm47 # conventions for C name mangling on the target platform mandate a "_"48 # prefix.49 #50 # - A "macro" is a lambda expression, which may be either anonymous or51 # named. But this has caveats. "macro" can take zero or more arguments,52 # which may be macros or any valid operands, but it can only return53 # code. But you can do Turing-complete things via continuation passing54 # style: "macro foo (a, b) b(a) end foo(foo, foo)". Actually, don't do55 # that, since you'll just crash the assembler.56 #57 # - An "if" is a conditional on settings. Any identifier supplied in the58 # predicate of an "if" is assumed to be a #define that is available59 # during code gen. So you can't use "if" for computation in a macro, but60 # you can use it to select different pieces of code for different61 # platforms.62 #63 # - Arguments to macros follow lexical scoping rather than dynamic scoping.64 # Const's also follow lexical scoping and may override (hide) arguments65 # or other consts. All variables (arguments and constants) can be bound66 # to operands. Additionally, arguments (but not constants) can be bound67 # to macros.68 69 70 # Below we have a bunch of constant declarations. 
Each constant must have71 # a corresponding ASSERT() in LLIntData.cpp.72 73 25 # Utilities 74 26 macro dispatch(advance) … … 90 42 macro dispatchAfterCall() 91 43 loadi ArgumentCount + TagOffset[cfr], PC 92 loadi 4[PC], t 293 storei t1, TagOffset[cfr, t2, 8]94 storei t0, PayloadOffset[cfr, t2, 8]95 valueProfile( t1, t0, 4 * (CallOpCodeSize - 1), t3)44 loadi 4[PC], t3 45 storei r1, TagOffset[cfr, t3, 8] 46 storei r0, PayloadOffset[cfr, t3, 8] 47 valueProfile(r1, r0, 4 * (CallOpCodeSize - 1), t3) 96 48 dispatch(CallOpCodeSize) 97 49 end 98 50 99 macro cCall2(function, arg1, arg2) 100 if ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS 101 move arg1, a0 102 move arg2, a1 51 macro cCall2(function) 52 if ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4 103 53 call function 104 54 elsif X86 or X86_WIN 105 55 subp 8, sp 106 push a rg2107 push a rg156 push a1 57 push a0 108 58 call function 109 59 addp 16, sp 110 elsif SH4111 setargs arg1, arg2112 call function113 60 elsif C_LOOP 114 cloopCallSlowPath function, a rg1, arg261 cloopCallSlowPath function, a0, a1 115 62 else 116 63 error … … 118 65 end 119 66 120 macro cCall2Void(function , arg1, arg2)67 macro cCall2Void(function) 121 68 if C_LOOP 122 cloopCallSlowPathVoid function, a rg1, arg269 cloopCallSlowPathVoid function, a0, a1 123 70 else 124 cCall2(function , arg1, arg2)71 cCall2(function) 125 72 end 126 73 end 127 74 128 # This barely works. arg3 and arg4 should probably be immediates. 129 macro cCall4(function, arg1, arg2, arg3, arg4) 130 if ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS 131 move arg1, a0 132 move arg2, a1 133 move arg3, a2 134 move arg4, a3 75 macro cCall4(function) 76 if ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4 135 77 call function 136 78 elsif X86 or X86_WIN 137 push a rg4138 push a rg3139 push a rg2140 push a rg179 push a3 80 push a2 81 push a1 82 push a0 141 83 call function 142 84 addp 16, sp 143 elsif SH4144 setargs arg1, arg2, arg3, arg4145 call function146 85 elsif C_LOOP 147 86 error … … 152 91 153 92 macro callSlowPath(slowPath) 154 cCall2(slowPath, cfr, PC) 155 move t0, PC 93 move cfr, a0 94 move PC, a1 95 cCall2(slowPath) 96 move r0, PC 156 97 end 157 98 158 99 macro doVMEntry(makeCall) 159 if X86 or X86_WIN160 const entry = t4161 const vm = t3162 const protoCallFrame = t5163 164 const temp1 = t0165 const temp2 = t1166 const temp3 = t2167 const temp4 = t3 # same as vm168 elsif ARM or ARMv7 or ARMv7_TRADITIONAL or C_LOOP169 const entry = a0170 const vm = a1171 const protoCallFrame = a2172 173 const temp1 = t3174 const temp2 = t4175 const temp3 = t5176 const temp4 = t4 # Same as temp2177 elsif MIPS178 const entry = a0179 const vm = a1180 const protoCallFrame = a2181 182 const temp1 = t3183 const temp2 = t5184 const temp3 = t4185 const temp4 = t6186 elsif SH4187 const entry = a0188 const vm = a1189 const protoCallFrame = a2190 191 const temp1 = t3192 const temp2 = a3193 const temp3 = t8194 const temp4 = t9195 end196 197 100 functionPrologue() 198 101 pushCalleeSaves() 199 102 103 # x86 needs to load arguments from the stack 200 104 if X86 or X86_WIN 201 loadp 12[cfr], vm 202 loadp 8[cfr], entry 105 loadp 16[cfr], a2 106 loadp 12[cfr], a1 107 loadp 8[cfr], a0 203 108 end 204 109 110 const entry = a0 111 const vm = a1 112 const protoCallFrame = a2 113 114 # We are using t3, t4 and t5 as temporaries through the function. 115 # Since we have the guarantee that tX != aY when X != Y, we are safe from 116 # aliasing problems with our arguments. 
117 205 118 if ARMv7 206 vmEntryRecord(cfr, t emp1)207 move t emp1, sp119 vmEntryRecord(cfr, t3) 120 move t3, sp 208 121 else 209 122 vmEntryRecord(cfr, sp) … … 211 124 212 125 storep vm, VMEntryRecord::m_vm[sp] 213 loadp VM::topCallFrame[vm], t emp2214 storep t emp2, VMEntryRecord::m_prevTopCallFrame[sp]215 loadp VM::topVMEntryFrame[vm], t emp2216 storep t emp2, VMEntryRecord::m_prevTopVMEntryFrame[sp]126 loadp VM::topCallFrame[vm], t4 127 storep t4, VMEntryRecord::m_prevTopCallFrame[sp] 128 loadp VM::topVMEntryFrame[vm], t4 129 storep t4, VMEntryRecord::m_prevTopVMEntryFrame[sp] 217 130 218 131 # Align stack pointer 219 132 if X86_WIN 220 addp CallFrameAlignSlots * SlotSize, sp, t emp1221 andp ~StackAlignmentMask, t emp1222 subp t emp1, CallFrameAlignSlots * SlotSize, sp133 addp CallFrameAlignSlots * SlotSize, sp, t3 134 andp ~StackAlignmentMask, t3 135 subp t3, CallFrameAlignSlots * SlotSize, sp 223 136 elsif ARM or ARMv7 or ARMv7_TRADITIONAL 224 addp CallFrameAlignSlots * SlotSize, sp, t emp1225 clrbp t emp1, StackAlignmentMask, temp1137 addp CallFrameAlignSlots * SlotSize, sp, t3 138 clrbp t3, StackAlignmentMask, t3 226 139 if ARMv7 227 subp t emp1, CallFrameAlignSlots * SlotSize, temp1228 move t emp1, sp140 subp t3, CallFrameAlignSlots * SlotSize, t3 141 move t3, sp 229 142 else 230 subp t emp1, CallFrameAlignSlots * SlotSize, sp143 subp t3, CallFrameAlignSlots * SlotSize, sp 231 144 end 232 145 end 233 146 234 if X86 or X86_WIN 235 loadp 16[cfr], protoCallFrame 236 end 237 238 loadi ProtoCallFrame::paddedArgCount[protoCallFrame], temp2 239 addp CallFrameHeaderSlots, temp2, temp2 240 lshiftp 3, temp2 241 subp sp, temp2, temp1 147 loadi ProtoCallFrame::paddedArgCount[protoCallFrame], t4 148 addp CallFrameHeaderSlots, t4, t4 149 lshiftp 3, t4 150 subp sp, t4, t3 242 151 243 152 # Ensure that we have enough additional stack capacity for the incoming args, 244 153 # and the frame for the JS code we're executing. We need to do this check 245 154 # before we start copying the args from the protoCallFrame below. 246 bpaeq t emp1, VM::m_jsStackLimit[vm], .stackHeightOK155 bpaeq t3, VM::m_jsStackLimit[vm], .stackHeightOK 247 156 248 157 if C_LOOP 249 move entry, t emp2250 move vm, t emp3251 cloopCallSlowPath _llint_stack_check_at_vm_entry, vm, t emp1158 move entry, t4 159 move vm, t5 160 cloopCallSlowPath _llint_stack_check_at_vm_entry, vm, t3 252 161 bpeq t0, 0, .stackCheckFailed 253 move t emp2, entry254 move t emp3, vm162 move t4, entry 163 move t5, vm 255 164 jmp .stackHeightOK 256 165 257 166 .stackCheckFailed: 258 move t emp2, entry259 move t emp3, vm167 move t4, entry 168 move t5, vm 260 169 end 261 170 262 171 subp 8, sp # Align stack for cCall2() to make a call. 
263 cCall2(_llint_throw_stack_overflow_error, vm, protoCallFrame) 172 move vm, a0 173 move protoCallFrame, a1 174 cCall2(_llint_throw_stack_overflow_error) 264 175 265 176 if ARMv7 266 vmEntryRecord(cfr, t emp1)267 move t emp1, sp177 vmEntryRecord(cfr, t3) 178 move t3, sp 268 179 else 269 180 vmEntryRecord(cfr, sp) 270 181 end 271 182 272 loadp VMEntryRecord::m_vm[sp], t emp3273 loadp VMEntryRecord::m_prevTopCallFrame[sp], t emp4274 storep t emp4, VM::topCallFrame[temp3]275 loadp VMEntryRecord::m_prevTopVMEntryFrame[sp], t emp4276 storep t emp4, VM::topVMEntryFrame[temp3]183 loadp VMEntryRecord::m_vm[sp], t5 184 loadp VMEntryRecord::m_prevTopCallFrame[sp], t4 185 storep t4, VM::topCallFrame[t5] 186 loadp VMEntryRecord::m_prevTopVMEntryFrame[sp], t4 187 storep t4, VM::topVMEntryFrame[t5] 277 188 278 189 if ARMv7 279 subp cfr, CalleeRegisterSaveSize, t emp3280 move t emp3, sp190 subp cfr, CalleeRegisterSaveSize, t5 191 move t5, sp 281 192 else 282 193 subp cfr, CalleeRegisterSaveSize, sp … … 288 199 289 200 .stackHeightOK: 290 move t emp1, sp291 move 4, t emp1201 move t3, sp 202 move 4, t3 292 203 293 204 .copyHeaderLoop: 294 subi 1, t emp1295 loadi TagOffset[protoCallFrame, t emp1, 8], temp3296 storei t emp3, TagOffset + CodeBlock[sp, temp1, 8]297 loadi PayloadOffset[protoCallFrame, t emp1, 8], temp3298 storei t emp3, PayloadOffset + CodeBlock[sp, temp1, 8]299 btinz t emp1, .copyHeaderLoop300 301 loadi PayloadOffset + ProtoCallFrame::argCountAndCodeOriginValue[protoCallFrame], t emp2302 subi 1, t emp2303 loadi ProtoCallFrame::paddedArgCount[protoCallFrame], t emp3304 subi 1, t emp3305 306 bieq t emp2, temp3, .copyArgs205 subi 1, t3 206 loadi TagOffset[protoCallFrame, t3, 8], t5 207 storei t5, TagOffset + CodeBlock[sp, t3, 8] 208 loadi PayloadOffset[protoCallFrame, t3, 8], t5 209 storei t5, PayloadOffset + CodeBlock[sp, t3, 8] 210 btinz t3, .copyHeaderLoop 211 212 loadi PayloadOffset + ProtoCallFrame::argCountAndCodeOriginValue[protoCallFrame], t4 213 subi 1, t4 214 loadi ProtoCallFrame::paddedArgCount[protoCallFrame], t5 215 subi 1, t5 216 217 bieq t4, t5, .copyArgs 307 218 .fillExtraArgsLoop: 308 subi 1, t emp3309 storei UndefinedTag, ThisArgumentOffset + 8 + TagOffset[sp, t emp3, 8]310 storei 0, ThisArgumentOffset + 8 + PayloadOffset[sp, t emp3, 8]311 bineq t emp2, temp3, .fillExtraArgsLoop219 subi 1, t5 220 storei UndefinedTag, ThisArgumentOffset + 8 + TagOffset[sp, t5, 8] 221 storei 0, ThisArgumentOffset + 8 + PayloadOffset[sp, t5, 8] 222 bineq t4, t5, .fillExtraArgsLoop 312 223 313 224 .copyArgs: 314 loadp ProtoCallFrame::args[protoCallFrame], t emp1225 loadp ProtoCallFrame::args[protoCallFrame], t3 315 226 316 227 .copyArgsLoop: 317 btiz t emp2, .copyArgsDone318 subi 1, t emp2319 loadi TagOffset[t emp1, temp2, 8], temp3320 storei t emp3, ThisArgumentOffset + 8 + TagOffset[sp, temp2, 8]321 loadi PayloadOffset[t emp1, temp2, 8], temp3322 storei t emp3, ThisArgumentOffset + 8 + PayloadOffset[sp, temp2, 8]228 btiz t4, .copyArgsDone 229 subi 1, t4 230 loadi TagOffset[t3, t4, 8], t5 231 storei t5, ThisArgumentOffset + 8 + TagOffset[sp, t4, 8] 232 loadi PayloadOffset[t3, t4, 8], t5 233 storei t5, ThisArgumentOffset + 8 + PayloadOffset[sp, t4, 8] 323 234 jmp .copyArgsLoop 324 235 … … 327 238 storep cfr, VM::topVMEntryFrame[vm] 328 239 329 makeCall(entry, t emp1, temp2)240 makeCall(entry, t3, t4) 330 241 331 242 if ARMv7 332 vmEntryRecord(cfr, t emp1)333 move t emp1, sp243 vmEntryRecord(cfr, t3) 244 move t3, sp 334 245 else 335 246 vmEntryRecord(cfr, sp) 336 247 end 337 248 338 loadp 
VMEntryRecord::m_vm[sp], t emp3339 loadp VMEntryRecord::m_prevTopCallFrame[sp], t emp4340 storep t emp4, VM::topCallFrame[temp3]341 loadp VMEntryRecord::m_prevTopVMEntryFrame[sp], t emp4342 storep t emp4, VM::topVMEntryFrame[temp3]249 loadp VMEntryRecord::m_vm[sp], t5 250 loadp VMEntryRecord::m_prevTopCallFrame[sp], t4 251 storep t4, VM::topCallFrame[t5] 252 loadp VMEntryRecord::m_prevTopVMEntryFrame[sp], t4 253 storep t4, VM::topVMEntryFrame[t5] 343 254 344 255 if ARMv7 345 subp cfr, CalleeRegisterSaveSize, t emp3346 move t emp3, sp256 subp cfr, CalleeRegisterSaveSize, t5 257 move t5, sp 347 258 else 348 259 subp cfr, CalleeRegisterSaveSize, sp … … 356 267 macro makeJavaScriptCall(entry, temp, unused) 357 268 addp CallerFrameAndPCSize, sp 358 checkStackPointerAlignment(t 2, 0xbad0dc02)269 checkStackPointerAlignment(temp, 0xbad0dc02) 359 270 if C_LOOP 360 271 cloopCallJSFunction entry … … 362 273 call entry 363 274 end 364 checkStackPointerAlignment(t 2, 0xbad0dc03)275 checkStackPointerAlignment(temp, 0xbad0dc03) 365 276 subp CallerFrameAndPCSize, sp 366 277 end … … 377 288 move 0, temp2 378 289 move temp2, 4[sp] # put 0 in ReturnPC 379 move sp, t2 # t2is ecx290 move sp, a0 # a0 is ecx 380 291 push temp2 # Push dummy arg1 381 push t2292 push a0 382 293 call temp1 383 294 addp 8, sp … … 430 341 # in the instruction stream you'd like to print out. 431 342 macro traceOperand(fromWhere, operand) 432 cCall4(_llint_trace_operand, cfr, PC, fromWhere, operand) 433 move t0, PC 434 move t1, cfr 343 move fromWhere, a2 344 move operand, a3 345 move cfr, a0 346 move PC, a1 347 cCall4(_llint_trace_operand) 348 move r0, PC 349 move r1, cfr 435 350 end 436 351 … … 439 354 # value. 440 355 macro traceValue(fromWhere, operand) 441 cCall4(_llint_trace_value, cfr, PC, fromWhere, operand) 442 move t0, PC 443 move t1, cfr 356 move fromWhere, a2 357 move operand, a3 358 move cfr, a0 359 move PC, a1 360 cCall4(_llint_trace_value) 361 move r0, PC 362 move r1, cfr 444 363 end 445 364 … … 447 366 macro callCallSlowPath(slowPath, action) 448 367 storep PC, ArgumentCount + TagOffset[cfr] 449 cCall2(slowPath, cfr, PC) 450 action(t0) 368 move cfr, a0 369 move PC, a1 370 cCall2(slowPath) 371 action(r0, r1) 451 372 end 452 373 453 374 macro callWatchdogTimerHandler(throwHandler) 454 375 storei PC, ArgumentCount + TagOffset[cfr] 455 cCall2(_llint_slow_path_handle_watchdog_timer, cfr, PC) 456 btpnz t0, throwHandler 376 move cfr, a0 377 move PC, a1 378 cCall2(_llint_slow_path_handle_watchdog_timer) 379 btpnz r0, throwHandler 457 380 loadi ArgumentCount + TagOffset[cfr], PC 458 381 end … … 463 386 macro () 464 387 storei PC, ArgumentCount + TagOffset[cfr] 465 cCall2(_llint_loop_osr, cfr, PC) 466 btpz t0, .recover 467 move t1, sp 468 jmp t0 388 move cfr, a0 389 move PC, a1 390 cCall2(_llint_loop_osr) 391 btpz r0, .recover 392 move r1, sp 393 jmp r0 469 394 .recover: 470 395 loadi ArgumentCount + TagOffset[cfr], PC … … 577 502 # We make two extra slots because cCall2 will poke. 578 503 subp 8, sp 579 cCall2Void(_llint_write_barrier_slow, cfr, t2) 504 move t2, a1 # t2 can be a0 on x86 505 move cfr, a0 506 cCall2Void(_llint_write_barrier_slow) 580 507 addp 8, sp 581 508 pop PC, cfr … … 611 538 # We make two extra slots because cCall2 will poke. 
612 539 subp 8, sp 613 cCall2Void(_llint_write_barrier_slow, cfr, t3) 540 move cfr, a0 541 move t3, a1 542 cCall2Void(_llint_write_barrier_slow) 614 543 addp 8, sp 615 544 pop PC, cfr … … 650 579 loadi PayloadOffset + ArgumentCount[cfr], t0 651 580 biaeq t0, CodeBlock::m_numParameters[t1], doneLabel 652 cCall2(slowPath, cfr, PC) # This slowPath has a simple protocol: t0 = 0 => no error, t0 != 0 => error 653 btiz t0, .noError 654 move t1, cfr # t1 contains caller frame 581 move cfr, a0 582 move PC, a1 583 cCall2(slowPath) # This slowPath has a simple protocol: t0 = 0 => no error, t0 != 0 => error 584 btiz r0, .noError 585 move r1, cfr # r1 contains caller frame 655 586 jmp _llint_throw_from_slow_path_trampoline 656 587 657 588 .noError: 658 # t1 points to ArityCheckData.659 loadp CommonSlowPaths::ArityCheckData::thunkToCall[ t1], t2660 btpz t 2, .proceedInline589 # r1 points to ArityCheckData. 590 loadp CommonSlowPaths::ArityCheckData::thunkToCall[r1], t3 591 btpz t3, .proceedInline 661 592 662 loadp CommonSlowPaths::ArityCheckData:: returnPC[t1], t5663 loadp CommonSlowPaths::ArityCheckData:: paddedStackSpace[t1], t0664 call t 2593 loadp CommonSlowPaths::ArityCheckData::paddedStackSpace[r1], a0 594 loadp CommonSlowPaths::ArityCheckData::returnPC[r1], a1 595 call t3 665 596 if ASSERT_ENABLED 666 597 loadp ReturnPC[cfr], t0 … … 670 601 671 602 .proceedInline: 672 loadi CommonSlowPaths::ArityCheckData::paddedStackSpace[ t1], t1603 loadi CommonSlowPaths::ArityCheckData::paddedStackSpace[r1], t1 673 604 btiz t1, .continue 674 605 … … 752 683 loadi 8[PC], t0 753 684 loadp PayloadOffset[cfr, t0, 8], t0 754 loadp JSFunction::m_rareData[t0], t 4755 btpz t 4, .opCreateThisSlow756 loadp FunctionRareData::m_allocationProfile + ObjectAllocationProfile::m_allocator[t 4], t1757 loadp FunctionRareData::m_allocationProfile + ObjectAllocationProfile::m_structure[t 4], t2685 loadp JSFunction::m_rareData[t0], t5 686 btpz t5, .opCreateThisSlow 687 loadp FunctionRareData::m_allocationProfile + ObjectAllocationProfile::m_allocator[t5], t1 688 loadp FunctionRareData::m_allocationProfile + ObjectAllocationProfile::m_structure[t5], t2 758 689 btpz t1, .opCreateThisSlow 759 loadpFromInstruction(4, t 4)760 bpeq t 4, 1, .hasSeenMultipleCallee761 bpneq t 4, t0, .opCreateThisSlow690 loadpFromInstruction(4, t5) 691 bpeq t5, 1, .hasSeenMultipleCallee 692 bpneq t5, t0, .opCreateThisSlow 762 693 .hasSeenMultipleCallee: 763 694 allocateJSObject(t1, t2, t0, t3, .opCreateThisSlow) … … 2007 1938 loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t1], t3 2008 1939 storep cfr, VM::topCallFrame[t3] 2009 move cfr, t2 # t2= ecx2010 storep t2, [sp]1940 move cfr, a0 # a0 = ecx 1941 storep a0, [sp] 2011 1942 loadi Callee + PayloadOffset[cfr], t1 2012 1943 loadp JSFunction::m_executable[t1], t1 … … 2023 1954 loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t1], t1 2024 1955 storep cfr, VM::topCallFrame[t1] 2025 if MIPS or SH4 2026 move cfr, a0 2027 else 2028 move cfr, t0 2029 end 1956 move cfr, a0 2030 1957 loadi Callee + PayloadOffset[cfr], t1 2031 1958 loadp JSFunction::m_executable[t1], t1 … … 2270 2197 loadisFromInstruction(3, t1) 2271 2198 loadConstantOrVariable(t1, t2, t3) 2272 loadpFromInstruction(5, t 4)2273 btpz t 4, .noVariableWatchpointSet2274 notifyWrite(t 4, .pDynamic)2199 loadpFromInstruction(5, t5) 2200 btpz t5, .noVariableWatchpointSet 2201 notifyWrite(t5, .pDynamic) 2275 2202 .noVariableWatchpointSet: 2276 2203 loadisFromInstruction(6, t1) … … 2397 2324 loadp VM::m_typeProfilerLog[t1], t1 2398 2325 2399 # t0 is holding the 
payload, t 4is holding the tag.2326 # t0 is holding the payload, t5 is holding the tag. 2400 2327 loadisFromInstruction(1, t2) 2401 loadConstantOrVariable(t2, t 4, t0)2328 loadConstantOrVariable(t2, t5, t0) 2402 2329 2403 2330 bieq t4, EmptyValueTag, .opProfileTypeDone … … 2407 2334 2408 2335 # Store the JSValue onto the log entry. 2409 storei t 4, TypeProfilerLog::LogEntry::value + TagOffset[t2]2336 storei t5, TypeProfilerLog::LogEntry::value + TagOffset[t2] 2410 2337 storei t0, TypeProfilerLog::LogEntry::value + PayloadOffset[t2] 2411 2338 … … 2414 2341 storep t3, TypeProfilerLog::LogEntry::location[t2] 2415 2342 2416 bieq t 4, CellTag, .opProfileTypeIsCell2343 bieq t5, CellTag, .opProfileTypeIsCell 2417 2344 storei 0, TypeProfilerLog::LogEntry::structureID[t2] 2418 2345 jmp .opProfileTypeSkipIsCell -
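Because offlineasm names may alias within a backend, the order of the argument-staging moves in the hunks above matters: the write-barrier slow-path call moves t2 into a1 before loading cfr into a0 precisely because t2 and a0 share ecx on 32-bit x86. The toy Ruby simulation below illustrates the clobbering hazard; the aliases are taken from the x86 convention comment added later in this changeset, and the value names ("cell", "callframe") are made up for the example.

    # Illustrative only: why "move t2, a1" must precede "move cfr, a0" on
    # 32-bit x86, where t2 and a0 are both ecx. This is a toy model of the
    # register file, not offlineasm code.
    ALIAS = { "t2" => "ecx", "a0" => "ecx", "a1" => "edx", "cfr" => "ebp" }

    def run(moves)
      regs = { "ecx" => "cell", "ebp" => "callframe", "edx" => nil }
      moves.each { |src, dst| regs[ALIAS[dst]] = regs[ALIAS[src]] }
      regs
    end

    good = run([["t2", "a1"], ["cfr", "a0"]])
    bad  = run([["cfr", "a0"], ["t2", "a1"]])
    puts "t2 staged first:  a1 = #{good['edx'].inspect}"  # => "cell" (correct)
    puts "cfr staged first: a1 = #{bad['edx'].inspect}"   # => "callframe" (t2 clobbered)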
trunk/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm
r189279 r189293 47 47 loadp CodeBlock::m_instructions[PB], PB 48 48 loadisFromInstruction(1, t1) 49 storeq t0, [cfr, t1, 8]50 valueProfile( t0, (CallOpCodeSize - 1), t2)49 storeq r0, [cfr, t1, 8] 50 valueProfile(r0, (CallOpCodeSize - 1), t3) 51 51 dispatch(CallOpCodeSize) 52 52 end 53 53 54 macro cCall2(function , arg1, arg2)54 macro cCall2(function) 55 55 checkStackPointerAlignment(t4, 0xbad0c002) 56 if X86_64 57 move arg1, t4 58 move arg2, t5 56 if X86_64 or ARM64 59 57 call function 60 58 elsif X86_64_WIN … … 62 60 # See macro cCall2Void for an implementation when the return type <= 8 bytes. 63 61 # On Win64, when the return type is larger than 8 bytes, we need to allocate space on the stack for the return value. 64 # On entry rcx ( t2), should contain a pointer to this stack space. The other parameters are shifted to the right,65 # rdx ( t1) should contain the first argument, and r8 (t6) should contain the second argument.66 # On return, rax contains a pointer to this stack value, and we then need to copy the 16 byte return value into rax ( t0) and rdx (t1)62 # On entry rcx (a0), should contain a pointer to this stack space. The other parameters are shifted to the right, 63 # rdx (a1) should contain the first argument, and r8 (a2) should contain the second argument. 64 # On return, rax contains a pointer to this stack value, and we then need to copy the 16 byte return value into rax (r0) and rdx (r1) 67 65 # since the return value is expected to be split between the two. 68 66 # See http://msdn.microsoft.com/en-us/library/7572ztz4.aspx 69 move a rg1, t170 move a rg2, t667 move a1, a2 68 move a0, a1 71 69 subp 48, sp 72 move sp, t273 addp 32, t270 move sp, a0 71 addp 32, a0 74 72 call function 75 73 addp 48, sp 76 move 8[t0], t1 77 move [t0], t0 78 elsif ARM64 79 move arg1, t0 80 move arg2, t1 81 call function 74 move 8[r0], r1 75 move [r0], r0 82 76 elsif C_LOOP 83 cloopCallSlowPath function, a rg1, arg277 cloopCallSlowPath function, a0, a1 84 78 else 85 79 error … … 87 81 end 88 82 89 macro cCall2Void(function , arg1, arg2)83 macro cCall2Void(function) 90 84 if C_LOOP 91 cloopCallSlowPathVoid function, a rg1, arg285 cloopCallSlowPathVoid function, a0, a1 92 86 elsif X86_64_WIN 93 87 # Note: we cannot use the cCall2 macro for Win64 in this case, … … 96 90 # We also need to make room on the stack for all four parameter registers. 97 91 # See http://msdn.microsoft.com/en-us/library/ms235286.aspx 98 move arg2, t199 move arg1, t2100 92 subp 32, sp 101 93 call function 102 94 addp 32, sp 103 95 else 104 cCall2(function , arg1, arg2)96 cCall2(function) 105 97 end 106 98 end 107 99 108 100 # This barely works. arg3 and arg4 should probably be immediates. 109 macro cCall4(function , arg1, arg2, arg3, arg4)101 macro cCall4(function) 110 102 checkStackPointerAlignment(t4, 0xbad0c004) 111 if X86_64 112 move arg1, t4 113 move arg2, t5 114 move arg3, t1 115 move arg4, t2 103 if X86_64 or ARM64 116 104 call function 117 105 elsif X86_64_WIN … … 119 107 # We also need to make room on the stack for all four parameter registers. 
120 108 # See http://msdn.microsoft.com/en-us/library/ms235286.aspx 121 move arg1, t2 122 move arg2, t1 123 move arg3, t6 124 move arg4, t7 125 subp 32, sp 109 subp 64, sp 126 110 call function 127 addp 32, sp 128 elsif ARM64 129 move arg1, t0 130 move arg2, t1 131 move arg3, t2 132 move arg4, t3 133 call function 134 elsif C_LOOP 135 error 111 addp 64, sp 136 112 else 137 113 error … … 140 116 141 117 macro doVMEntry(makeCall) 142 if X86_64143 const entry = t4144 const vm = t5145 const protoCallFrame = t1146 147 const previousCFR = t0148 const previousPC = t6149 const temp1 = t0150 const temp2 = t3151 const temp3 = t6152 elsif X86_64_WIN153 const entry = t2154 const vm = t1155 const protoCallFrame = t6156 157 const previousCFR = t0158 const previousPC = t4159 const temp1 = t0160 const temp2 = t3161 const temp3 = t7162 elsif ARM64 or C_LOOP163 const entry = a0164 const vm = a1165 const protoCallFrame = a2166 167 const previousCFR = t5168 const previousPC = lr169 const temp1 = t3170 const temp2 = t4171 const temp3 = t6172 end173 174 118 functionPrologue() 175 119 pushCalleeSaves() 176 120 121 const entry = a0 122 const vm = a1 123 const protoCallFrame = a2 124 177 125 vmEntryRecord(cfr, sp) 178 126 179 checkStackPointerAlignment(t emp2, 0xbad0dc01)127 checkStackPointerAlignment(t4, 0xbad0dc01) 180 128 181 129 storep vm, VMEntryRecord::m_vm[sp] 182 loadp VM::topCallFrame[vm], t emp2183 storep t emp2, VMEntryRecord::m_prevTopCallFrame[sp]184 loadp VM::topVMEntryFrame[vm], t emp2185 storep t emp2, VMEntryRecord::m_prevTopVMEntryFrame[sp]186 187 loadi ProtoCallFrame::paddedArgCount[protoCallFrame], t emp2188 addp CallFrameHeaderSlots, t emp2, temp2189 lshiftp 3, t emp2190 subp sp, t emp2, temp1130 loadp VM::topCallFrame[vm], t4 131 storep t4, VMEntryRecord::m_prevTopCallFrame[sp] 132 loadp VM::topVMEntryFrame[vm], t4 133 storep t4, VMEntryRecord::m_prevTopVMEntryFrame[sp] 134 135 loadi ProtoCallFrame::paddedArgCount[protoCallFrame], t4 136 addp CallFrameHeaderSlots, t4, t4 137 lshiftp 3, t4 138 subp sp, t4, t3 191 139 192 140 # Ensure that we have enough additional stack capacity for the incoming args, 193 141 # and the frame for the JS code we're executing. We need to do this check 194 142 # before we start copying the args from the protoCallFrame below. 
195 bpaeq t emp1, VM::m_jsStackLimit[vm], .stackHeightOK143 bpaeq t3, VM::m_jsStackLimit[vm], .stackHeightOK 196 144 197 145 if C_LOOP 198 move entry, t emp2199 move vm, t emp3200 cloopCallSlowPath _llint_stack_check_at_vm_entry, vm, t emp1146 move entry, t4 147 move vm, t5 148 cloopCallSlowPath _llint_stack_check_at_vm_entry, vm, t3 201 149 bpeq t0, 0, .stackCheckFailed 202 move t emp2, entry203 move t emp3, vm150 move t4, entry 151 move t5, vm 204 152 jmp .stackHeightOK 205 153 206 154 .stackCheckFailed: 207 move t emp2, entry208 move t emp3, vm155 move t4, entry 156 move t5, vm 209 157 end 210 158 211 cCall2(_llint_throw_stack_overflow_error, vm, protoCallFrame) 212 213 vmEntryRecord(cfr, temp2) 214 215 loadp VMEntryRecord::m_vm[temp2], vm 216 loadp VMEntryRecord::m_prevTopCallFrame[temp2], temp3 217 storep temp3, VM::topCallFrame[vm] 218 loadp VMEntryRecord::m_prevTopVMEntryFrame[temp2], temp3 219 storep temp3, VM::topVMEntryFrame[vm] 159 move vm, a0 160 move protoCallFrame, a1 161 cCall2(_llint_throw_stack_overflow_error) 162 163 vmEntryRecord(cfr, t4) 164 165 loadp VMEntryRecord::m_vm[t4], vm 166 loadp VMEntryRecord::m_prevTopCallFrame[t4], extraTempReg 167 storep extraTempReg, VM::topCallFrame[vm] 168 loadp VMEntryRecord::m_prevTopVMEntryFrame[t4], extraTempReg 169 storep extraTempReg, VM::topVMEntryFrame[vm] 220 170 221 171 subp cfr, CalleeRegisterSaveSize, sp … … 226 176 227 177 .stackHeightOK: 228 move t emp1, sp229 move 4, t emp1178 move t3, sp 179 move 4, t3 230 180 231 181 .copyHeaderLoop: 232 subi 1, t emp1233 loadq [protoCallFrame, t emp1, 8], temp3234 storeq temp3, CodeBlock[sp, temp1, 8]235 btinz t emp1, .copyHeaderLoop236 237 loadi PayloadOffset + ProtoCallFrame::argCountAndCodeOriginValue[protoCallFrame], t emp2238 subi 1, t emp2239 loadi ProtoCallFrame::paddedArgCount[protoCallFrame], temp3240 subi 1, temp3241 242 bieq t emp2, temp3, .copyArgs243 move ValueUndefined, t emp1182 subi 1, t3 183 loadq [protoCallFrame, t3, 8], extraTempReg 184 storeq extraTempReg, CodeBlock[sp, t3, 8] 185 btinz t3, .copyHeaderLoop 186 187 loadi PayloadOffset + ProtoCallFrame::argCountAndCodeOriginValue[protoCallFrame], t4 188 subi 1, t4 189 loadi ProtoCallFrame::paddedArgCount[protoCallFrame], extraTempReg 190 subi 1, extraTempReg 191 192 bieq t4, extraTempReg, .copyArgs 193 move ValueUndefined, t3 244 194 .fillExtraArgsLoop: 245 subi 1, temp3246 storeq t emp1, ThisArgumentOffset + 8[sp, temp3, 8]247 bineq t emp2, temp3, .fillExtraArgsLoop195 subi 1, extraTempReg 196 storeq t3, ThisArgumentOffset + 8[sp, extraTempReg, 8] 197 bineq t4, extraTempReg, .fillExtraArgsLoop 248 198 249 199 .copyArgs: 250 loadp ProtoCallFrame::args[protoCallFrame], t emp1200 loadp ProtoCallFrame::args[protoCallFrame], t3 251 201 252 202 .copyArgsLoop: 253 btiz t emp2, .copyArgsDone254 subi 1, t emp2255 loadq [t emp1, temp2, 8], temp3256 storeq temp3, ThisArgumentOffset + 8[sp, temp2, 8]203 btiz t4, .copyArgsDone 204 subi 1, t4 205 loadq [t3, t4, 8], extraTempReg 206 storeq extraTempReg, ThisArgumentOffset + 8[sp, t4, 8] 257 207 jmp .copyArgsLoop 258 208 259 209 .copyArgsDone: 260 210 if ARM64 261 move sp, t emp2262 storep t emp2, VM::topCallFrame[vm]211 move sp, t4 212 storep t4, VM::topCallFrame[vm] 263 213 else 264 214 storep sp, VM::topCallFrame[vm] … … 266 216 storep cfr, VM::topVMEntryFrame[vm] 267 217 268 move 0xffff000000000000, csr1 269 addp 2, csr1, csr2 270 271 checkStackPointerAlignment(temp3, 0xbad0dc02) 272 273 makeCall(entry, temp1) 274 275 checkStackPointerAlignment(temp3, 0xbad0dc03) 276 277 
vmEntryRecord(cfr, temp2) 278 279 loadp VMEntryRecord::m_vm[temp2], vm 280 loadp VMEntryRecord::m_prevTopCallFrame[temp2], temp3 281 storep temp3, VM::topCallFrame[vm] 282 loadp VMEntryRecord::m_prevTopVMEntryFrame[temp2], temp3 283 storep temp3, VM::topVMEntryFrame[vm] 218 move TagTypeNumber, tagTypeNumber 219 addp TagBitTypeOther, tagTypeNumber, tagMask 220 221 checkStackPointerAlignment(extraTempReg, 0xbad0dc02) 222 223 makeCall(entry, t3) 224 225 # We may have just made a call into a JS function, so we can't rely on sp 226 # for anything but the fact that our own locals (ie the VMEntryRecord) are 227 # not below it. It also still has to be aligned, though. 228 checkStackPointerAlignment(t2, 0xbad0dc03) 229 230 vmEntryRecord(cfr, t4) 231 232 loadp VMEntryRecord::m_vm[t4], vm 233 loadp VMEntryRecord::m_prevTopCallFrame[t4], t2 234 storep t2, VM::topCallFrame[vm] 235 loadp VMEntryRecord::m_prevTopVMEntryFrame[t4], t2 236 storep t2, VM::topVMEntryFrame[vm] 284 237 285 238 subp cfr, CalleeRegisterSaveSize, sp … … 306 259 move entry, temp 307 260 storep cfr, [sp] 308 if X86_64 309 move sp, t4 310 elsif X86_64_WIN 311 move sp, t2 312 elsif ARM64 or C_LOOP 313 move sp, a0 314 end 261 move sp, a0 315 262 if C_LOOP 316 263 storep lr, 8[sp] … … 337 284 338 285 loadp VMEntryRecord::m_vm[t2], t3 339 loadp VMEntryRecord::m_prevTopCallFrame[t2], t5340 storep t5, VM::topCallFrame[t3]341 loadp VMEntryRecord::m_prevTopVMEntryFrame[t2], t5342 storep t5, VM::topVMEntryFrame[t3]286 loadp VMEntryRecord::m_prevTopCallFrame[t2], extraTempReg 287 storep extraTempReg, VM::topCallFrame[t3] 288 loadp VMEntryRecord::m_prevTopVMEntryFrame[t2], extraTempReg 289 storep extraTempReg, VM::topVMEntryFrame[t3] 343 290 344 291 subp cfr, CalleeRegisterSaveSize, sp … … 351 298 macro prepareStateForCCall() 352 299 leap [PB, PC, 8], PC 353 move PB, t3354 300 end 355 301 356 302 macro restoreStateAfterCCall() 357 move t0, PC 358 move t3, PB 303 move r0, PC 359 304 subp PB, PC 360 305 rshiftp 3, PC … … 363 308 macro callSlowPath(slowPath) 364 309 prepareStateForCCall() 365 cCall2(slowPath, cfr, PC) 310 move cfr, a0 311 move PC, a1 312 cCall2(slowPath) 366 313 restoreStateAfterCCall() 367 314 end … … 369 316 macro traceOperand(fromWhere, operand) 370 317 prepareStateForCCall() 371 cCall4(_llint_trace_operand, cfr, PC, fromWhere, operand) 318 move fromWhere, a2 319 move operand, a3 320 move cfr, a0 321 move PC, a1 322 cCall4(_llint_trace_operand) 372 323 restoreStateAfterCCall() 373 324 end … … 375 326 macro traceValue(fromWhere, operand) 376 327 prepareStateForCCall() 377 cCall4(_llint_trace_value, cfr, PC, fromWhere, operand) 328 move fromWhere, a2 329 move operand, a3 330 move cfr, a0 331 move PC, a1 332 cCall4(_llint_trace_value) 378 333 restoreStateAfterCCall() 379 334 end … … 383 338 storei PC, ArgumentCount + TagOffset[cfr] 384 339 prepareStateForCCall() 385 cCall2(slowPath, cfr, PC) 386 action(t0) 340 move cfr, a0 341 move PC, a1 342 cCall2(slowPath) 343 action(r0, r1) 387 344 end 388 345 … … 390 347 storei PC, ArgumentCount + TagOffset[cfr] 391 348 prepareStateForCCall() 392 cCall2(_llint_slow_path_handle_watchdog_timer, cfr, PC) 393 btpnz t0, throwHandler 394 move t3, PB 349 move cfr, a0 350 move PC, a1 351 cCall2(_llint_slow_path_handle_watchdog_timer) 352 btpnz r0, throwHandler 395 353 loadi ArgumentCount + TagOffset[cfr], PC 396 354 end … … 402 360 storei PC, ArgumentCount + TagOffset[cfr] 403 361 prepareStateForCCall() 404 cCall2(_llint_loop_osr, cfr, PC) 405 btpz t0, .recover 406 move t1, sp 407 jmp t0 362 move 
cfr, a0 363 move PC, a1 364 cCall2(_llint_loop_osr) 365 btpz r0, .recover 366 move r1, sp 367 jmp r0 408 368 .recover: 409 move t3, PB410 369 loadi ArgumentCount + TagOffset[cfr], PC 411 370 end) … … 448 407 btbnz gcData, .writeBarrierDone 449 408 push PB, PC 450 cCall2Void(_llint_write_barrier_slow, cfr, t2) 409 move t2, a1 # t2 can be a0 (not on 64 bits, but better safe than sorry) 410 move cfr, a0 411 cCall2Void(_llint_write_barrier_slow) 451 412 pop PC, PB 452 413 end … … 478 439 btbnz gcData, .writeBarrierDone 479 440 push PB, PC 480 cCall2Void(_llint_write_barrier_slow, cfr, t3) 441 move cfr, a0 442 move t3, a1 443 cCall2Void(_llint_write_barrier_slow) 481 444 pop PC, PB 482 445 end … … 539 502 biaeq t0, CodeBlock::m_numParameters[t1], doneLabel 540 503 prepareStateForCCall() 541 cCall2(slowPath, cfr, PC) # This slowPath has the protocol: t0 = 0 => no error, t0 != 0 => error 542 btiz t0, .noError 543 move t1, cfr # t1 contains caller frame 504 move cfr, a0 505 move PC, a1 506 cCall2(slowPath) # This slowPath has the protocol: r0 = 0 => no error, r0 != 0 => error 507 btiz r0, .noError 508 move r1, cfr # r1 contains caller frame 544 509 jmp _llint_throw_from_slow_path_trampoline 545 510 546 511 .noError: 547 # t1 points to ArityCheckData.548 loadp CommonSlowPaths::ArityCheckData::thunkToCall[ t1], t2549 btpz t 2, .proceedInline512 # r1 points to ArityCheckData. 513 loadp CommonSlowPaths::ArityCheckData::thunkToCall[r1], t3 514 btpz t3, .proceedInline 550 515 551 loadp CommonSlowPaths::ArityCheckData:: returnPC[t1], t7552 loadp CommonSlowPaths::ArityCheckData:: paddedStackSpace[t1], t0553 call t 2516 loadp CommonSlowPaths::ArityCheckData::paddedStackSpace[r1], a0 517 loadp CommonSlowPaths::ArityCheckData::returnPC[r1], a1 518 call t3 554 519 if ASSERT_ENABLED 555 520 loadp ReturnPC[cfr], t0 … … 559 524 560 525 .proceedInline: 561 loadi CommonSlowPaths::ArityCheckData::paddedStackSpace[ t1], t1526 loadi CommonSlowPaths::ArityCheckData::paddedStackSpace[r1], t1 562 527 btiz t1, .continue 563 528 … … 605 570 606 571 # Instruction implementations 607 608 572 _llint_op_enter: 609 573 traceExecution() … … 637 601 loadisFromInstruction(2, t0) 638 602 loadp [cfr, t0, 8], t0 639 loadp JSFunction::m_rareData[t0], t 4640 btpz t 4, .opCreateThisSlow641 loadp FunctionRareData::m_allocationProfile + ObjectAllocationProfile::m_allocator[t 4], t1642 loadp FunctionRareData::m_allocationProfile + ObjectAllocationProfile::m_structure[t 4], t2603 loadp JSFunction::m_rareData[t0], t3 604 btpz t3, .opCreateThisSlow 605 loadp FunctionRareData::m_allocationProfile + ObjectAllocationProfile::m_allocator[t3], t1 606 loadp FunctionRareData::m_allocationProfile + ObjectAllocationProfile::m_structure[t3], t2 643 607 btpz t1, .opCreateThisSlow 644 loadpFromInstruction(4, t 4)645 bpeq t 4, 1, .hasSeenMultipleCallee646 bpneq t 4, t0, .opCreateThisSlow608 loadpFromInstruction(4, t3) 609 bpeq t3, 1, .hasSeenMultipleCallee 610 bpneq t3, t0, .opCreateThisSlow 647 611 .hasSeenMultipleCallee: 648 612 allocateJSObject(t1, t2, t0, t3, .opCreateThisSlow) … … 1777 1741 checkSwitchToJITForEpilogue() 1778 1742 loadisFromInstruction(1, t2) 1779 loadConstantOrVariable(t2, t0)1743 loadConstantOrVariable(t2, r0) 1780 1744 doReturn() 1781 1745 … … 1840 1804 loadisFromInstruction(1, t0) 1841 1805 assertNotConstant(t0) 1842 loadq [cfr, t0, 8], t01806 loadq [cfr, t0, 8], r0 1843 1807 doReturn() 1844 1808 … … 1865 1829 functionPrologue() 1866 1830 storep 0, CodeBlock[cfr] 1867 if X86_64 or X86_64_WIN 1868 if X86_64 1869 const arg1 = t4 # 
t4 = rdi 1870 const arg2 = t5 # t5 = rsi 1871 const temp = t1 1872 elsif X86_64_WIN 1873 const arg1 = t2 # t2 = rcx 1874 const arg2 = t1 # t1 = rdx 1875 const temp = t0 1876 end 1877 loadp Callee[cfr], t0 1878 andp MarkedBlockMask, t0, t1 1879 loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t1], t1 1880 storep cfr, VM::topCallFrame[t1] 1881 move cfr, arg1 1882 loadp Callee[cfr], arg2 1883 loadp JSFunction::m_executable[arg2], temp 1884 checkStackPointerAlignment(t3, 0xdead0001) 1831 loadp Callee[cfr], t0 1832 andp MarkedBlockMask, t0, t1 1833 loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t1], t1 1834 storep cfr, VM::topCallFrame[t1] 1835 if ARM64 or C_LOOP 1836 storep lr, ReturnPC[cfr] 1837 end 1838 move cfr, a0 1839 loadp Callee[cfr], t1 1840 loadp JSFunction::m_executable[t1], t1 1841 checkStackPointerAlignment(t3, 0xdead0001) 1842 if C_LOOP 1843 cloopCallNative executableOffsetToFunction[t1] 1844 else 1885 1845 if X86_64_WIN 1886 1846 subp 32, sp 1887 1847 end 1888 call executableOffsetToFunction[t emp]1848 call executableOffsetToFunction[t1] 1889 1849 if X86_64_WIN 1890 1850 addp 32, sp 1891 1851 end 1892 loadp Callee[cfr], t31893 andp MarkedBlockMask, t31894 loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t31895 elsif ARM64 or C_LOOP1896 loadp Callee[cfr], t01897 andp MarkedBlockMask, t0, t11898 loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t1], t11899 storep cfr, VM::topCallFrame[t1]1900 preserveReturnAddressAfterCall(t3)1901 storep t3, ReturnPC[cfr]1902 move cfr, t01903 loadp Callee[cfr], t11904 loadp JSFunction::m_executable[t1], t11905 if C_LOOP1906 cloopCallNative executableOffsetToFunction[t1]1907 else1908 call executableOffsetToFunction[t1]1909 end1910 restoreReturnAddressBeforeReturn(t3)1911 loadp Callee[cfr], t31912 andp MarkedBlockMask, t31913 loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t31914 else1915 error1916 1852 end 1853 loadp Callee[cfr], t3 1854 andp MarkedBlockMask, t3 1855 loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3 1917 1856 1918 1857 functionEpilogue() -
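The doVMEntry hunk above replaces the literal "move 0xffff000000000000, csr1 / addp 2, csr1, csr2" sequence with the named constants TagTypeNumber and TagBitTypeOther when reloading the tag registers. A small Ruby sketch of the same arithmetic, using the literal values from the removed lines:

    # Illustrative only: the values behind the tagTypeNumber and tagMask
    # registers that doVMEntry reloads. The literals come from the removed
    # lines of this hunk; the named constants now used spell out the same
    # arithmetic.
    tag_type_number    = 0xffff_0000_0000_0000   # TagTypeNumber
    tag_bit_type_other = 0x2                     # TagBitTypeOther
    tag_mask           = tag_type_number + tag_bit_type_other

    printf("tagTypeNumber = 0x%016x\n", tag_type_number)
    printf("tagMask       = 0x%016x\n", tag_mask)
    # In the JSVALUE64 encoding (see JSCJSValue.h), a value with any
    # TagTypeNumber bit set is a number, and a value with none of the
    # tagMask bits set is a cell pointer.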
trunk/Source/JavaScriptCore/offlineasm/arm.rb
r172429 r189293 27 27 require "opt" 28 28 require "risc" 29 30 # GPR conventions, to match the baseline JIT 31 # 32 # x0 => t0, a0, r0 33 # x1 => t1, a1, r1 34 # x2 => t2, a2, r2 35 # x3 => t3, a3, r3 36 # x6 => (callee-save scratch) 37 # x7 => cfr (ARMv7 only) 38 # x8 => t4 (callee-save) 39 # x9 => t5 (callee-save) 40 # x10 => (callee-save scratch) 41 # x11 => cfr (ARM and ARMv7 traditional) 42 # x12 => (callee-save scratch) 43 # lr => lr 44 # sp => sp 45 # pc => pc 46 # 47 # FPR conventions, to match the baseline JIT 48 # 49 # d0 => ft0, fa0, fr 50 # d1 => ft1, fa1 51 # d2 => ft2 52 # d3 => ft3 53 # d4 => ft4 54 # d5 => ft5 55 # d6 => (scratch) 56 # d7 => (scratch) 29 57 30 58 def isARMv7 … … 120 148 def armOperand 121 149 case name 122 when "ft0", "fr" 150 when "ft0", "fr", "fa0" 123 151 "d0" 124 when "ft1" 152 when "ft1", "fa1" 125 153 "d1" 126 154 when "ft2" -
trunk/Source/JavaScriptCore/offlineasm/arm64.rb
r175514 r189293 38 38 # GPR conventions, to match the baseline JIT: 39 39 # 40 # x0 => return value, cached result, first argument,t0, a0, r040 # x0 => t0, a0, r0 41 41 # x1 => t1, a1, r1 42 42 # x2 => t2, a2 43 # x3 => a3 44 # x5 => t4 45 # x6 => t6 46 # x9 => (nonArgGPR1 in baseline) 47 # x13 => scratch (unused in baseline) 48 # x16 => scratch 49 # x17 => scratch 50 # x23 => t3 51 # x24 => t5 52 # x27 => csr1 (tagTypeNumber) 53 # x28 => csr2 (tagMask) 43 # x3 => t3, a3 44 # x4 => t4 45 # x5 => t5 46 # x13 => (scratch) 47 # x16 => (scratch) 48 # x17 => (scratch) 49 # x26 => csr0 (PB) 50 # x27 => csr1 (tagTypeNumber) 51 # x28 => csr2 (tagMask) 54 52 # x29 => cfr 55 53 # sp => sp 56 54 # lr => lr 57 55 # 58 # FPR con entions, to match the baseline JIT:56 # FPR conventions, to match the baseline JIT: 59 57 # 60 # q0 => ft0 61 # q1 => ft1 62 # q2 => ft2 63 # q3 => ft3 64 # q4 => ft4 (unused in baseline)65 # q5 => ft5 (unused in baseline)58 # q0 => ft0, fa0, fr 59 # q1 => ft1, fa1 60 # q2 => ft2, fa2 61 # q3 => ft3, fa3 62 # q4 => ft4 (unused in baseline) 63 # q5 => ft5 (unused in baseline) 66 64 # q31 => scratch 67 65 … … 110 108 when 't2', 'a2' 111 109 arm64GPRName('x2', kind) 112 when ' a3'110 when 't3', 'a3' 113 111 arm64GPRName('x3', kind) 114 when 't3'115 arm64GPRName('x23', kind)116 112 when 't4' 113 arm64GPRName('x4', kind) 114 when 't5' 117 115 arm64GPRName('x5', kind) 118 when 't5'119 arm64GPRName('x24', kind)120 when 't6'121 arm64GPRName('x6', kind)122 when 't7'123 arm64GPRName('x7', kind)124 116 when 'cfr' 125 117 arm64GPRName('x29', kind) 118 when 'csr0' 119 arm64GPRName('x26', kind) 126 120 when 'csr1' 127 121 arm64GPRName('x27', kind) … … 141 135 def arm64Operand(kind) 142 136 case @name 143 when 'ft0' 137 when 'ft0', 'fr', 'fa0' 144 138 arm64FPRName('q0', kind) 145 when 'ft1' 139 when 'ft1', 'fa1' 146 140 arm64FPRName('q1', kind) 147 when 'ft2' 141 when 'ft2', 'fa2' 148 142 arm64FPRName('q2', kind) 149 when 'ft3' 143 when 'ft3', 'fa3' 150 144 arm64FPRName('q3', kind) 151 145 when 'ft4' -
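For reference while reading the llint changes, the new ARM64 naming can be restated as a plain Ruby table; this is an illustration of the convention comment above, not the actual lookup code in offlineasm/arm64.rb.

    # Illustrative only: flattened ARM64 name map per the convention comment.
    # t/a/r names of the same index alias the same low GPR; csr0-csr2 name
    # the callee-saved PB, tagTypeNumber and tagMask registers.
    ARM64_GPRS = {
      "t0" => "x0", "a0" => "x0", "r0" => "x0",
      "t1" => "x1", "a1" => "x1", "r1" => "x1",
      "t2" => "x2", "a2" => "x2",
      "t3" => "x3", "a3" => "x3",
      "t4" => "x4", "t5" => "x5",
      "csr0" => "x26",   # PB
      "csr1" => "x27",   # tagTypeNumber
      "csr2" => "x28",   # tagMask
      "cfr" => "x29", "sp" => "sp", "lr" => "lr",
    }

    %w[t0 a0 r0 csr0 csr1 csr2].each { |name| puts "#{name} -> #{ARM64_GPRS[name]}" }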
trunk/Source/JavaScriptCore/offlineasm/cloop.rb
r172429 r189293 71 71 # The cloop is modelled on the ARM implementation. Hence, the a0-a3 72 72 # registers are aliases for r0-r3 i.e. t0-t3 in our case. 73 when "t0", "a0" 73 when "t0", "a0", "r0" 74 74 "t0" 75 when "t1", "a1" 75 when "t1", "a1", "r1" 76 76 "t1" 77 77 when "t2", "a2" … … 83 83 when "t5" 84 84 "t5" 85 when " t6"85 when "csr0" 86 86 "pcBase" 87 when "t7"88 "t7"89 87 when "csr1" 90 88 "tagTypeNumber" -
trunk/Source/JavaScriptCore/offlineasm/mips.rb
r173232 r189293 25 25 require 'risc' 26 26 27 # GPR conventions, to match the baseline JIT 28 # 29 # $a0 => a0 30 # $a1 => a1 31 # $a2 => a2 32 # $a3 => a3 33 # $v0 => t0, r0 34 # $v1 => t1, r1 35 # $t2 => t2 36 # $t3 => t3 37 # $t4 => t4 38 # $t5 => t5 39 # $t6 => (scratch) 40 # $t7 => (scratch) 41 # $t8 => (scratch) 42 # $t9 => (stores the callee of a call opcode) 43 # $gp => (globals) 44 # $s4 => (callee-save used to preserve $gp across calls) 45 # $ra => lr 46 # $sp => sp 47 # $fp => cfr 48 # 49 # FPR conventions, to match the baseline JIT 50 # We don't have fa2 or fa3! 51 # $f0 => ft0, fr 52 # $f2 => ft1 53 # $f4 => ft2 54 # $f6 => ft3 55 # $f8 => ft4 56 # $f10 => ft5 57 # $f12 => fa0 58 # $f14 => fa1 59 # $f16 => (scratch) 60 # $f18 => (scratch) 61 27 62 class Assembler 28 63 def putStr(str) … … 58 93 end 59 94 60 MIPS_TEMP_GPRS = [SpecialRegister.new("$t5"), SpecialRegister.new("$t6"), SpecialRegister.new("$t7"), 61 SpecialRegister.new("$t8")] 95 MIPS_TEMP_GPRS = [SpecialRegister.new("$t6"), SpecialRegister.new("$t7"), SpecialRegister.new("$t8")] 62 96 MIPS_ZERO_REG = SpecialRegister.new("$zero") 63 97 MIPS_GP_REG = SpecialRegister.new("$gp") … … 86 120 when "a3" 87 121 "$a3" 88 when " r0", "t0"122 when "t0", "r0" 89 123 "$v0" 90 when " r1", "t1"124 when "t1", "r1" 91 125 "$v1" 92 126 when "t2" 93 127 "$t2" 94 128 when "t3" 95 "$ s3"96 when "t4" # PC reg in llint97 "$ s2"129 "$t3" 130 when "t4" 131 "$t4" 98 132 when "t5" 99 133 "$t5" 100 when "t6"101 "$t6"102 when "t7"103 "$t7"104 when "t8"105 "$t8"106 134 when "cfr" 107 135 "$fp" -
trunk/Source/JavaScriptCore/offlineasm/registers.rb
r167094 r189293 32 32 "t4", 33 33 "t5", 34 "t6",35 "t7",36 "t8",37 "t9",38 34 "cfr", 39 35 "a0", … … 47 43 "pc", 48 44 # 64-bit only registers: 49 "csr1", # tag type number register 50 "csr2" # tag mask register 45 "csr0", 46 "csr1", 47 "csr2", 48 "csr3", 49 "csr4", 50 "csr5", 51 "csr6" 51 52 ] 52 53 -
trunk/Source/JavaScriptCore/offlineasm/sh4.rb
r172429 r189293 25 25 require 'risc' 26 26 27 # GPR conventions, to match the baseline JIT 28 # 29 # r0 => t0, r0 30 # r1 => t1, r1 31 # r2 => t4 32 # r3 => t5 33 # r4 => a0 34 # r5 => a1 35 # r6 => t2, a2 36 # r7 => t3, a3 37 # r10 => (scratch) 38 # r11 => (scratch) 39 # r13 => (scratch) 40 # r14 => cfr 41 # r15 => sp 42 # pr => lr 43 44 # FPR conventions, to match the baseline JIT 45 # We don't have fa2 or fa3! 46 # dr0 => ft0, fr 47 # dr2 => ft1 48 # dr4 => ft2, fa0 49 # dr6 => ft3, fa1 50 # dr8 => ft4 51 # dr10 => ft5 52 # dr12 => (scratch) 53 27 54 class Node 28 55 def sh4SingleHi … … 52 79 end 53 80 54 SH4_TMP_GPRS = [ SpecialRegister.new("r 3"), SpecialRegister.new("r11"), SpecialRegister.new("r13") ]55 SH4_TMP_FPRS = [ SpecialRegister.new("dr1 0") ]81 SH4_TMP_GPRS = [ SpecialRegister.new("r10"), SpecialRegister.new("r11"), SpecialRegister.new("r13") ] 82 SH4_TMP_FPRS = [ SpecialRegister.new("dr12") ] 56 83 57 84 class RegisterID 58 85 def sh4Operand 59 86 case name 60 when "t0" 87 when "a0" 88 "r4" 89 when "a1" 90 "r5" 91 when "r0", "t0" 61 92 "r0" 62 when " t1"93 when "r1", "t1" 63 94 "r1" 64 when "t2" 95 when "a2", "t2" 96 "r6" 97 when "a3", "t3" 98 "r7" 99 when "t4" 65 100 "r2" 66 when "t3" 67 "r10" 68 when "t4", "a0" 69 "r4" 70 when "t5", "a1" 71 "r5" 72 when "t6", "a2" 73 "r6" 74 when "t7", "a3" 75 "r7" 76 when "t8" 77 "r8" 78 when "t9" 79 "r9" 101 when "t5" 102 "r3" 80 103 when "cfr" 81 104 "r14" … … 97 120 when "ft1" 98 121 "dr2" 99 when "ft2" 122 when "ft2", "fa0" 100 123 "dr4" 101 when "ft3" 124 when "ft3", "fa1" 102 125 "dr6" 103 126 when "ft4" 104 127 "dr8" 105 when "f a0"106 "dr1 2"128 when "ft5" 129 "dr10" 107 130 else 108 131 raise "Bad register #{name} for SH4 at #{codeOriginString}" -
trunk/Source/JavaScriptCore/offlineasm/x86.rb
r172754 r189293 25 25 require "config" 26 26 27 # GPR conventions, to match the baseline JIT: 28 # 29 # 30 # On x86-32 bits (windows and non-windows) 31 # a0, a1, a2, a3 are only there for ease-of-use of offlineasm; they are not 32 # actually considered as such by the ABI and we need to push/pop our arguments 33 # on the stack. a0 and a1 are ecx and edx to follow fastcall. 34 # 35 # eax => t0, a2, r0 36 # edx => t1, a1, r1 37 # ecx => t2, a0 38 # ebx => t3, a3 (callee-save) 39 # esi => t4 (callee-save) 40 # edi => t5 (callee-save) 41 # ebp => cfr 42 # esp => sp 43 # 44 # On x86-64 non-windows 45 # 46 # rax => t0, r0 47 # rdi => a0 48 # rsi => t1, a1 49 # rdx => t2, a2, r1 50 # rcx => t3, a3 51 # r8 => t4 52 # r10 => t5 53 # rbx => csr0 (callee-save, PB, unused in baseline) 54 # r12 => csr1 (callee-save) 55 # r13 => csr2 (callee-save) 56 # r14 => csr3 (callee-save, tagTypeNumber) 57 # r15 => csr4 (callee-save, tagMask) 58 # rsp => sp 59 # rbp => cfr 60 # r11 => (scratch) 61 # 62 # On x86-64 windows 63 # Arguments need to be push/pop'd on the stack in addition to being stored in 64 # the registers. Also, >8 return types are returned in a weird way. 65 # 66 # rax => t0, r0 67 # rcx => a0 68 # rdx => t1, a1, r1 69 # r8 => t2, a2 70 # r9 => t3, a3 71 # r10 => t4 72 # rbx => csr0 (callee-save, PB, unused in baseline) 73 # rsi => csr1 (callee-save) 74 # rdi => csr2 (callee-save) 75 # r12 => csr3 (callee-save) 76 # r13 => csr4 (callee-save) 77 # r14 => csr5 (callee-save, tagTypeNumber) 78 # r15 => csr6 (callee-save, tagMask) 79 # rsp => sp 80 # rbp => cfr 81 # r11 => (scratch) 82 27 83 def isX64 28 84 case $activeBackend … … 40 96 end 41 97 98 def isWin 99 case $activeBackend 100 when "X86" 101 false 102 when "X86_WIN" 103 true 104 when "X86_64" 105 false 106 when "X86_64_WIN" 107 true 108 else 109 raise "bad value for $activeBackend: #{$activeBackend}" 110 end 111 end 112 42 113 def useX87 43 114 case $activeBackend … … 55 126 end 56 127 57 def is Windows128 def isCompilingOnWindows 58 129 ENV['OS'] == 'Windows_NT' 59 130 end 60 131 61 132 def isGCC 62 !is Windows133 !isCompilingOnWindows 63 134 end 64 135 65 136 def isMSVC 66 is Windows137 isCompilingOnWindows 67 138 end 68 139 69 140 def isIntelSyntax 70 is Windows141 isCompilingOnWindows 71 142 end 72 143 … … 142 213 X64_SCRATCH_REGISTER = SpecialRegister.new("r11") 143 214 215 def x86GPRName(name, kind) 216 case name 217 when "eax", "ebx", "ecx", "edx" 218 name8 = name[1] + 'l' 219 name16 = name[1..2] 220 when "esi", "edi", "ebp", "esp" 221 name16 = name[1..2] 222 name8 = name16 + 'l' 223 when "rax", "rbx", "rcx", "rdx" 224 raise "bad GPR name #{name} in 32-bit X86" unless isX64 225 name8 = name[1] + 'l' 226 name16 = name[1..2] 227 when "r8", "r9", "r10", "r12", "r13", "r14", "r15" 228 raise "bad GPR name #{name} in 32-bit X86" unless isX64 229 case kind 230 when :half 231 return register(name + "w") 232 when :int 233 return register(name + "d") 234 when :ptr 235 return register(name) 236 when :quad 237 return register(name) 238 end 239 else 240 raise "bad GPR name #{name}" 241 end 242 case kind 243 when :byte 244 register(name8) 245 when :half 246 register(name16) 247 when :int 248 register("e" + name16) 249 when :ptr 250 register((isX64 ? "r" : "e") + name16) 251 when :quad 252 isX64 ? 
register("r" + name16) : raise 253 else 254 raise "invalid kind #{kind} for GPR #{name} in X86" 255 end 256 end 257 144 258 class RegisterID 145 259 def supports8BitOnX86 146 case name147 when " t0", "a0", "r0", "t1", "a1", "r1", "t2", "t3", "t4", "t5"260 case x86GPR 261 when "eax", "ebx", "ecx", "edx", "edi", "esi", "ebp", "esp" 148 262 true 149 when " cfr", "ttnr", "tmr"263 when "r8", "r9", "r10", "r12", "r13", "r14", "r15" 150 264 false 151 when "t6"152 isX64153 265 else 154 266 raise 155 267 end 156 268 end 157 269 270 def x86GPR 271 if isX64 272 case name 273 when "t0", "r0" 274 "eax" 275 when "r1" 276 "edx" # t1 = a1 when isWin, t2 = a2 otherwise 277 when "a0" 278 isWin ? "ecx" : "edi" 279 when "t1", "a1" 280 isWin ? "edx" : "esi" 281 when "t2", "a2" 282 isWin ? "r8" : "edx" 283 when "t3", "a3" 284 isWin ? "r9" : "ecx" 285 when "t4" 286 isWin ? "r10" : "r8" 287 when "t5" 288 raise "cannot use register #{name} on X86-64 Windows" unless not isWin 289 "r10" 290 when "csr0" 291 "ebx" 292 when "csr1" 293 "r12" 294 when "csr2" 295 "r13" 296 when "csr3" 297 isWin ? "esi" : "r14" 298 when "csr4" 299 isWin ? "edi" : "r15" 300 "r15" 301 when "csr5" 302 raise "cannot use register #{name} on X86-64" unless isWin 303 "r14" 304 when "csr6" 305 raise "cannot use register #{name} on X86-64" unless isWin 306 "r15" 307 when "cfr" 308 "ebp" 309 when "sp" 310 "esp" 311 else 312 raise "cannot use register #{name} on X86" 313 end 314 else 315 case name 316 when "t0", "r0", "a2" 317 "eax" 318 when "t1", "r1", "a1" 319 "edx" 320 when "t2", "a0" 321 "ecx" 322 when "t3", "a3" 323 "ebx" 324 when "t4" 325 "esi" 326 when "t5" 327 "edi" 328 when "cfr" 329 "ebp" 330 when "sp" 331 "esp" 332 end 333 end 334 end 335 158 336 def x86Operand(kind) 159 case name 160 when "t0", "a0", "r0" 161 case kind 162 when :byte 163 register("al") 164 when :half 165 register("ax") 166 when :int 167 register("eax") 168 when :ptr 169 isX64 ? register("rax") : register("eax") 170 when :quad 171 isX64 ? register("rax") : raise 172 else 173 raise "Invalid kind #{kind} for name #{name}" 174 end 175 when "t1", "a1", "r1" 176 case kind 177 when :byte 178 register("dl") 179 when :half 180 register("dx") 181 when :int 182 register("edx") 183 when :ptr 184 isX64 ? register("rdx") : register("edx") 185 when :quad 186 isX64 ? register("rdx") : raise 187 else 188 raise 189 end 190 when "t2" 191 case kind 192 when :byte 193 register("cl") 194 when :half 195 register("cx") 196 when :int 197 register("ecx") 198 when :ptr 199 isX64 ? register("rcx") : register("ecx") 200 when :quad 201 isX64 ? register("rcx") : raise 202 else 203 raise 204 end 205 when "t3" 206 case kind 207 when :byte 208 register("bl") 209 when :half 210 register("bx") 211 when :int 212 register("ebx") 213 when :ptr 214 isX64 ? register("rbx") : register("ebx") 215 when :quad 216 isX64 ? register("rbx") : raise 217 else 218 raise 219 end 220 when "t4" 221 case kind 222 when :byte 223 register("dil") 224 when :half 225 register("di") 226 when :int 227 register("edi") 228 when :ptr 229 isX64 ? register("rdi") : register("edi") 230 when :quad 231 isX64 ? 
register("rdi") : raise 232 else 233 raise 234 end 235 when "cfr" 236 if isX64 237 case kind 238 when :half 239 register("bp") 240 when :int 241 register("ebp") 242 when :ptr 243 register("rbp") 244 when :quad 245 register("rbp") 246 else 247 raise 248 end 249 else 250 case kind 251 when :half 252 register("bp") 253 when :int 254 register("ebp") 255 when :ptr 256 register("ebp") 257 else 258 raise 259 end 260 end 261 when "sp" 262 case kind 263 when :byte 264 register("spl") 265 when :half 266 register("sp") 267 when :int 268 register("esp") 269 when :ptr 270 isX64 ? register("rsp") : register("esp") 271 when :quad 272 isX64 ? register("rsp") : raise 273 else 274 raise 275 end 276 when "t5" 277 case kind 278 when :byte 279 register("sil") 280 when :half 281 register("si") 282 when :int 283 register("esi") 284 when :ptr 285 isX64 ? register("rsi") : register("esi") 286 when :quad 287 isX64 ? register("rsi") : raise 288 end 289 when "t6" 290 raise "Cannot use #{name} in 32-bit X86 at #{codeOriginString}" unless isX64 291 case kind 292 when :half 293 register("r8w") 294 when :int 295 register("r8d") 296 when :ptr 297 register("r8") 298 when :quad 299 register("r8") 300 end 301 when "t7" 302 raise "Cannot use #{name} in 32-bit X86 at #{codeOriginString}" unless isX64 303 case kind 304 when :half 305 register("r9w") 306 when :int 307 register("r9d") 308 when :ptr 309 register("r9") 310 when :quad 311 register("r9") 312 end 313 when "csr1" 314 raise "Cannot use #{name} in 32-bit X86 at #{codeOriginString}" unless isX64 315 case kind 316 when :half 317 register("r14w") 318 when :int 319 register("r14d") 320 when :ptr 321 register("r14") 322 when :quad 323 register("r14") 324 end 325 when "csr2" 326 raise "Cannot use #{name} in 32-bit X86 at #{codeOriginString}" unless isX64 327 case kind 328 when :half 329 register("r15w") 330 when :int 331 register("r15d") 332 when :ptr 333 register("r15") 334 when :quad 335 register("r15") 336 end 337 else 338 raise "Bad register #{name} for X86 at #{codeOriginString}" 339 end 340 end 337 x86GPRName(x86GPR, kind) 338 end 339 341 340 def x86CallOperand(kind) 342 isX64 ? "#{callPrefix}#{x86Operand(:quad)}" :"#{callPrefix}#{x86Operand(:ptr)}"341 "#{callPrefix}#{x86Operand(:ptr)}" 343 342 end 344 343 end … … 598 597 599 598 def handleX86Shift(opcode, kind) 600 if operands[0].is_a? Immediate or operands[0] == RegisterID.forName(nil, "t2")599 if operands[0].is_a? 
Immediate or operands[0].x86GPR == "ecx" 601 600 $asm.puts "#{opcode} #{orderOperands(operands[0].x86Operand(:byte), operands[1].x86Operand(kind))}" 602 601 else 603 cx = RegisterID.forName(nil, "t2") 604 $asm.puts "xchg#{x86Suffix(:ptr)} #{operands[0].x86Operand(:ptr)}, #{cx.x86Operand(:ptr)}" 602 $asm.puts "xchg#{x86Suffix(:ptr)} #{operands[0].x86Operand(:ptr)}, #{x86GPRName("ecx", :ptr)}" 605 603 $asm.puts "#{opcode} #{orderOperands(register("cl"), operands[1].x86Operand(kind))}" 606 $asm.puts "xchg#{x86Suffix(:ptr)} #{operands[0].x86Operand(:ptr)}, #{ cx.x86Operand(:ptr)}"604 $asm.puts "xchg#{x86Suffix(:ptr)} #{operands[0].x86Operand(:ptr)}, #{x86GPRName("ecx", :ptr)}" 607 605 end 608 606 end … … 648 646 end 649 647 else 650 ax = RegisterID.new(nil, " t0")648 ax = RegisterID.new(nil, "r0") 651 649 $asm.puts "xchg#{x86Suffix(:ptr)} #{operand.x86Operand(:ptr)}, #{ax.x86Operand(:ptr)}" 652 $asm.puts "#{setOpcode} %al" 653 $asm.puts "movzbl %al, %eax" 650 $asm.puts "#{setOpcode} #{ax.x86Operand(:byte)}" 651 if !isIntelSyntax 652 $asm.puts "movzbl #{ax.x86Operand(:byte)}, #{ax.x86Operand(:int)}" 653 else 654 $asm.puts "movzx #{ax.x86Operand(:int)}, #{ax.x86Operand(:byte)}" 655 end 654 656 $asm.puts "xchg#{x86Suffix(:ptr)} #{operand.x86Operand(:ptr)}, #{ax.x86Operand(:ptr)}" 655 657 end
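The new x86GPRName helper above derives every register width from a single base name instead of spelling each width out per RegisterID. Below is a standalone Ruby sketch of the same suffixing scheme, assuming a 64-bit target; the numbered registers r8-r15, which take w/d suffixes in the real helper, are left out.

    # Illustrative only: kind -> register-width naming for the legacy-named
    # GPRs, mirroring the new x86GPRName helper (64-bit target assumed).
    def width_name(base, kind)
      stem = base[1..2]                              # "ax" from "eax", "si" from "esi"
      byte = %w[ax bx cx dx].include?(stem) ? stem[0] + "l" : stem + "l"
      case kind
      when :byte       then byte
      when :half       then stem
      when :int        then "e" + stem
      when :ptr, :quad then "r" + stem               # would be "e" + stem on 32-bit
      else raise "unknown kind #{kind}"
      end
    end

    [[:byte, "al"], [:half, "ax"], [:int, "eax"], [:quad, "rax"]].each do |kind, expect|
      got = width_name("eax", kind)
      puts "eax as #{kind}: #{got}#{' (!)' unless got == expect}"
    end
    puts "esi as :byte: #{width_name('esi', :byte)}"   # => "sil"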