Changeset 109834 in webkit
- Timestamp:
- Mar 5, 2012 6:40:05 PM (12 years ago)
- Location:
- trunk/Source/JavaScriptCore
- Files:
-
- 22 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/Source/JavaScriptCore/ChangeLog
r109824 r109834 1 2012-03-05 Oliver Hunt <oliver@apple.com> 2 3 Add basic support for constant blinding to the JIT 4 https://bugs.webkit.org/show_bug.cgi?id=80354 5 6 Reviewed by Filip Pizlo. 7 8 This patch adds basic constant blinding support to the JIT, at the 9 MacroAssembler level. This means all JITs in JSC (Yarr, baseline, and DFG) 10 get constant blinding. Woo! 11 12 This patch only introduces blinding for Imm32, a later patch will do similar 13 for ImmPtr. In order to make misuse of Imm32 as a trusted type essentially 14 impossible, we make TrustedImm32 a private parent of Imm32 and add an explicit 15 accessor that's needed to access the actual value. This also means you cannot 16 accidentally pass an untrusted value to a function that does not perform 17 blinding. 18 19 To make everything work sensibly, this patch also corrects some code that was using 20 Imm32 when TrustedImm32 could be used, and refactors a few callers that use 21 untrusted immediates, so that they call slightly different variants of the functions 22 that they used previously. This is largely necessary to deal with x86-32 not having 23 sufficient registers to handle the additional work required when we choose to blind 24 a constant. 
25 26 * assembler/AbstractMacroAssembler.h: 27 (JSC::AbstractMacroAssembler::Imm32::asTrustedImm32): 28 (Imm32): 29 (JSC::AbstractMacroAssembler::beginUninterruptedSequence): 30 (JSC::AbstractMacroAssembler::endUninterruptedSequence): 31 (JSC::AbstractMacroAssembler::AbstractMacroAssembler): 32 (AbstractMacroAssembler): 33 (JSC::AbstractMacroAssembler::inUninterruptedSequence): 34 (JSC::AbstractMacroAssembler::random): 35 (JSC::AbstractMacroAssembler::scratchRegisterForBlinding): 36 (JSC::AbstractMacroAssembler::shouldBlindForSpecificArch): 37 * assembler/MacroAssembler.h: 38 (JSC::MacroAssembler::addressForPoke): 39 (MacroAssembler): 40 (JSC::MacroAssembler::poke): 41 (JSC::MacroAssembler::branchPtr): 42 (JSC::MacroAssembler::branch32): 43 (JSC::MacroAssembler::convertInt32ToDouble): 44 (JSC::MacroAssembler::shouldBlind): 45 (JSC::MacroAssembler::BlindedImm32::BlindedImm32): 46 (BlindedImm32): 47 (JSC::MacroAssembler::keyForConstant): 48 (JSC::MacroAssembler::xorBlindConstant): 49 (JSC::MacroAssembler::additionBlindedConstant): 50 (JSC::MacroAssembler::andBlindedConstant): 51 (JSC::MacroAssembler::orBlindedConstant): 52 (JSC::MacroAssembler::loadXorBlindedConstant): 53 (JSC::MacroAssembler::add32): 54 (JSC::MacroAssembler::addPtr): 55 (JSC::MacroAssembler::and32): 56 (JSC::MacroAssembler::andPtr): 57 (JSC::MacroAssembler::move): 58 (JSC::MacroAssembler::or32): 59 (JSC::MacroAssembler::store32): 60 (JSC::MacroAssembler::sub32): 61 (JSC::MacroAssembler::subPtr): 62 (JSC::MacroAssembler::xor32): 63 (JSC::MacroAssembler::branchAdd32): 64 (JSC::MacroAssembler::branchMul32): 65 (JSC::MacroAssembler::branchSub32): 66 (JSC::MacroAssembler::trustedImm32ForShift): 67 (JSC::MacroAssembler::lshift32): 68 (JSC::MacroAssembler::rshift32): 69 (JSC::MacroAssembler::urshift32): 70 * assembler/MacroAssemblerARMv7.h: 71 (MacroAssemblerARMv7): 72 (JSC::MacroAssemblerARMv7::scratchRegisterForBlinding): 73 (JSC::MacroAssemblerARMv7::shouldBlindForSpecificArch): 74 * 
assembler/MacroAssemblerX86_64.h: 75 (JSC::MacroAssemblerX86_64::branchSubPtr): 76 (MacroAssemblerX86_64): 77 (JSC::MacroAssemblerX86_64::scratchRegisterForBlinding): 78 * dfg/DFGJITCompiler.cpp: 79 (JSC::DFG::JITCompiler::linkOSRExits): 80 (JSC::DFG::JITCompiler::compileBody): 81 (JSC::DFG::JITCompiler::compileFunction): 82 * dfg/DFGOSRExitCompiler32_64.cpp: 83 (JSC::DFG::OSRExitCompiler::compileExit): 84 * dfg/DFGOSRExitCompiler64.cpp: 85 (JSC::DFG::OSRExitCompiler::compileExit): 86 * dfg/DFGSpeculativeJIT.cpp: 87 (JSC::DFG::SpeculativeJIT::compile): 88 (JSC::DFG::SpeculativeJIT::compileArithSub): 89 (JSC::DFG::SpeculativeJIT::compileStrictEqForConstant): 90 * dfg/DFGSpeculativeJIT.h: 91 (JSC::DFG::SpeculativeJIT::callOperation): 92 * dfg/DFGSpeculativeJIT32_64.cpp: 93 (JSC::DFG::SpeculativeJIT::emitCall): 94 (JSC::DFG::SpeculativeJIT::compileObjectEquality): 95 (JSC::DFG::SpeculativeJIT::compileDoubleCompare): 96 (JSC::DFG::SpeculativeJIT::compile): 97 * dfg/DFGSpeculativeJIT64.cpp: 98 (JSC::DFG::SpeculativeJIT::emitCall): 99 (JSC::DFG::SpeculativeJIT::compileDoubleCompare): 100 (JSC::DFG::SpeculativeJIT::compile): 101 * jit/JIT.cpp: 102 (JSC::JIT::privateCompileSlowCases): 103 (JSC::JIT::privateCompile): 104 * jit/JITArithmetic.cpp: 105 (JSC::JIT::compileBinaryArithOp): 106 (JSC::JIT::emit_op_add): 107 (JSC::JIT::emit_op_mul): 108 (JSC::JIT::emit_op_div): 109 * jit/JITArithmetic32_64.cpp: 110 (JSC::JIT::emitAdd32Constant): 111 (JSC::JIT::emitSub32Constant): 112 (JSC::JIT::emitBinaryDoubleOp): 113 (JSC::JIT::emitSlow_op_mul): 114 (JSC::JIT::emit_op_div): 115 * jit/JITCall.cpp: 116 (JSC::JIT::compileLoadVarargs): 117 * jit/JITCall32_64.cpp: 118 (JSC::JIT::compileLoadVarargs): 119 * jit/JITInlineMethods.h: 120 (JSC::JIT::updateTopCallFrame): 121 (JSC::JIT::emitValueProfilingSite): 122 * jit/JITOpcodes32_64.cpp: 123 (JSC::JIT::emitSlow_op_jfalse): 124 (JSC::JIT::emitSlow_op_jtrue): 125 * jit/JITStubCall.h: 126 (JITStubCall): 127 (JSC::JITStubCall::addArgument): 128 
* yarr/YarrJIT.cpp: 129 (JSC::Yarr::YarrGenerator::backtrack): 130 1 131 2012-03-05 Gavin Barraclough <barraclough@apple.com> 2 132 -
trunk/Source/JavaScriptCore/assembler/AbstractMacroAssembler.h
r106590 r109834 29 29 #include "CodeLocation.h" 30 30 #include "MacroAssemblerCodeRef.h" 31 #include <wtf/CryptographicallyRandomNumber.h> 31 32 #include <wtf/Noncopyable.h> 32 33 #include <wtf/UnusedParam.h> … … 233 234 234 235 235 struct Imm32 : p ublicTrustedImm32 {236 struct Imm32 : private TrustedImm32 { 236 237 explicit Imm32(int32_t value) 237 238 : TrustedImm32(value) … … 244 245 } 245 246 #endif 247 const TrustedImm32& asTrustedImm32() const { return *this; } 248 246 249 }; 247 250 … … 536 539 } 537 540 538 void beginUninterruptedSequence() { }539 void endUninterruptedSequence() { }541 void beginUninterruptedSequence() { m_inUninterruptedSequence = true; } 542 void endUninterruptedSequence() { m_inUninterruptedSequence = false; } 540 543 541 544 unsigned debugOffset() { return m_assembler.debugOffset(); } 542 545 543 546 protected: 547 AbstractMacroAssembler() 548 : m_inUninterruptedSequence(false) 549 , m_randomSource(cryptographicallyRandomNumber()) 550 { 551 } 552 544 553 AssemblerType m_assembler; 554 555 bool inUninterruptedSequence() 556 { 557 return m_inUninterruptedSequence; 558 } 559 560 bool m_inUninterruptedSequence; 561 562 563 uint32_t random() 564 { 565 return m_randomSource.getUint32(); 566 } 567 568 WeakRandom m_randomSource; 569 570 static bool scratchRegisterForBlinding() { return false; } 571 static bool shouldBlindForSpecificArch(uint32_t) { return true; } 545 572 546 573 friend class LinkBuffer; -
trunk/Source/JavaScriptCore/assembler/MacroAssembler.h
r107744 r109834 67 67 public: 68 68 69 using MacroAssemblerBase::add32; 70 using MacroAssemblerBase::and32; 69 71 using MacroAssemblerBase::pop; 70 72 using MacroAssemblerBase::jump; … … 74 76 using MacroAssemblerBase::branchTestPtr; 75 77 #endif 78 using MacroAssemblerBase::branchAdd32; 79 using MacroAssemblerBase::branchMul32; 80 using MacroAssemblerBase::branchSub32; 81 using MacroAssemblerBase::lshift32; 82 using MacroAssemblerBase::move; 83 using MacroAssemblerBase::or32; 84 using MacroAssemblerBase::rshift32; 85 using MacroAssemblerBase::store32; 86 using MacroAssemblerBase::sub32; 87 using MacroAssemblerBase::urshift32; 88 using MacroAssemblerBase::xor32; 76 89 77 90 // Utilities used by the DFG JIT. … … 149 162 } 150 163 164 Address addressForPoke(int index) 165 { 166 return Address(stackPointerRegister, (index * sizeof(void*))); 167 } 168 151 169 void poke(RegisterID src, int index = 0) 152 170 { 153 storePtr(src, Address(stackPointerRegister, (index * sizeof(void*))));171 storePtr(src, addressForPoke(index)); 154 172 } 155 173 156 174 void poke(TrustedImm32 value, int index = 0) 157 175 { 158 store32(value, Address(stackPointerRegister, (index * sizeof(void*))));176 store32(value, addressForPoke(index)); 159 177 } 160 178 161 179 void poke(TrustedImmPtr imm, int index = 0) 162 180 { 163 storePtr(imm, Address(stackPointerRegister, (index * sizeof(void*))));181 storePtr(imm, addressForPoke(index)); 164 182 } 165 183 … … 170 188 branchPtr(cond, op1, imm).linkTo(target, this); 171 189 } 190 void branchPtr(RelationalCondition cond, RegisterID op1, ImmPtr imm, Label target) 191 { 192 branchPtr(cond, op1, imm).linkTo(target, this); 193 } 172 194 173 195 void branch32(RelationalCondition cond, RegisterID op1, RegisterID op2, Label target) … … 180 202 branch32(cond, op1, imm).linkTo(target, this); 181 203 } 204 205 void branch32(RelationalCondition cond, RegisterID op1, Imm32 imm, Label target) 206 { 207 branch32(cond, op1, imm).linkTo(target, this); 208 } 182 209 
183 210 void branch32(RelationalCondition cond, RegisterID left, Address right, Label target) … … 187 214 188 215 Jump branch32(RelationalCondition cond, TrustedImm32 left, RegisterID right) 216 { 217 return branch32(commute(cond), right, left); 218 } 219 220 Jump branch32(RelationalCondition cond, Imm32 left, RegisterID right) 189 221 { 190 222 return branch32(commute(cond), right, left); … … 447 479 return MacroAssemblerBase::branchTest8(cond, Address(address.base, address.offset), mask); 448 480 } 481 #else 482 483 using MacroAssemblerBase::addPtr; 484 using MacroAssemblerBase::andPtr; 485 using MacroAssemblerBase::branchSubPtr; 486 using MacroAssemblerBase::convertInt32ToDouble; 487 using MacroAssemblerBase::subPtr; 488 489 void convertInt32ToDouble(Imm32 imm, FPRegisterID dest) 490 { 491 if (shouldBlind(imm)) { 492 RegisterID scratchRegister = (RegisterID)scratchRegisterForBlinding(); 493 ASSERT(scratchRegister); 494 loadXorBlindedConstant(xorBlindConstant(imm), scratchRegister); 495 convertInt32ToDouble(scratchRegister, dest); 496 } else 497 convertInt32ToDouble(imm.asTrustedImm32(), dest); 498 } 499 449 500 #endif // !CPU(X86_64) 450 501 502 bool shouldBlind(Imm32 imm) 503 { 504 ASSERT(!inUninterruptedSequence()); 505 #if !defined(NDEBUG) 506 UNUSED_PARAM(imm); 507 // Debug always blind all constants, if only so we know 508 // if we've broken blinding during patch development. 
509 return true; 510 #else 511 512 // First off we'll special case common, "safe" values to avoid hurting 513 // performance too much 514 uint32_t value = imm.asTrustedImm32().m_value; 515 switch (value) { 516 case 0xffff: 517 case 0xffffff: 518 case 0xffffffff: 519 return false; 520 default: 521 if (value <= 0xff) 522 return false; 523 } 524 return shouldBlindForSpecificArch(value); 525 #endif 526 } 527 528 struct BlindedImm32 { 529 BlindedImm32(int32_t v1, int32_t v2) 530 : value1(v1) 531 , value2(v2) 532 { 533 } 534 TrustedImm32 value1; 535 TrustedImm32 value2; 536 }; 537 538 uint32_t keyForConstant(uint32_t value, uint32_t& mask) 539 { 540 uint32_t key = random(); 541 if (value <= 0xff) 542 mask = 0xff; 543 else if (value <= 0xffff) 544 mask = 0xffff; 545 else if (value <= 0xffffff) 546 mask = 0xffffff; 547 else 548 mask = 0xffffffff; 549 return key & mask; 550 } 551 552 uint32_t keyForConstant(uint32_t value) 553 { 554 uint32_t mask = 0; 555 return keyForConstant(value, mask); 556 } 557 558 BlindedImm32 xorBlindConstant(Imm32 imm) 559 { 560 uint32_t baseValue = imm.asTrustedImm32().m_value; 561 uint32_t key = keyForConstant(baseValue); 562 return BlindedImm32(baseValue ^ key, key); 563 } 564 565 BlindedImm32 additionBlindedConstant(Imm32 imm) 566 { 567 uint32_t baseValue = imm.asTrustedImm32().m_value; 568 uint32_t key = keyForConstant(baseValue); 569 if (key > baseValue) 570 key = key - baseValue; 571 return BlindedImm32(baseValue - key, key); 572 } 573 574 BlindedImm32 andBlindedConstant(Imm32 imm) 575 { 576 uint32_t baseValue = imm.asTrustedImm32().m_value; 577 uint32_t mask = 0; 578 uint32_t key = keyForConstant(baseValue, mask); 579 ASSERT((baseValue & mask) == baseValue); 580 return BlindedImm32(((baseValue & key) | ~key) & mask, ((baseValue & ~key) | key) & mask); 581 } 582 583 BlindedImm32 orBlindedConstant(Imm32 imm) 584 { 585 uint32_t baseValue = imm.asTrustedImm32().m_value; 586 uint32_t mask = 0; 587 uint32_t key = keyForConstant(baseValue, mask); 
588 ASSERT((baseValue & mask) == baseValue); 589 return BlindedImm32((baseValue & key) & mask, (baseValue & ~key) & mask); 590 } 591 592 void loadXorBlindedConstant(BlindedImm32 constant, RegisterID dest) 593 { 594 move(constant.value1, dest); 595 xor32(constant.value2, dest); 596 } 597 598 void add32(Imm32 imm, RegisterID dest) 599 { 600 if (shouldBlind(imm)) { 601 BlindedImm32 key = additionBlindedConstant(imm); 602 add32(key.value1, dest); 603 add32(key.value2, dest); 604 } else 605 add32(imm.asTrustedImm32(), dest); 606 } 607 608 void addPtr(Imm32 imm, RegisterID dest) 609 { 610 if (shouldBlind(imm)) { 611 BlindedImm32 key = additionBlindedConstant(imm); 612 addPtr(key.value1, dest); 613 addPtr(key.value2, dest); 614 } else 615 addPtr(imm.asTrustedImm32(), dest); 616 } 617 618 void and32(Imm32 imm, RegisterID dest) 619 { 620 if (shouldBlind(imm)) { 621 BlindedImm32 key = andBlindedConstant(imm); 622 and32(key.value1, dest); 623 and32(key.value2, dest); 624 } else 625 and32(imm.asTrustedImm32(), dest); 626 } 627 628 void andPtr(Imm32 imm, RegisterID dest) 629 { 630 if (shouldBlind(imm)) { 631 BlindedImm32 key = andBlindedConstant(imm); 632 andPtr(key.value1, dest); 633 andPtr(key.value2, dest); 634 } else 635 andPtr(imm.asTrustedImm32(), dest); 636 } 637 638 void and32(Imm32 imm, RegisterID src, RegisterID dest) 639 { 640 if (shouldBlind(imm)) { 641 if (src == dest) 642 return and32(imm.asTrustedImm32(), dest); 643 loadXorBlindedConstant(xorBlindConstant(imm), dest); 644 and32(src, dest); 645 } else 646 and32(imm.asTrustedImm32(), src, dest); 647 } 648 649 void move(Imm32 imm, RegisterID dest) 650 { 651 if (shouldBlind(imm)) 652 loadXorBlindedConstant(xorBlindConstant(imm), dest); 653 else 654 move(imm.asTrustedImm32(), dest); 655 } 656 657 void or32(Imm32 imm, RegisterID src, RegisterID dest) 658 { 659 if (shouldBlind(imm)) { 660 if (src == dest) 661 return or32(imm, dest); 662 loadXorBlindedConstant(xorBlindConstant(imm), dest); 663 or32(src, dest); 664 } else 
665 or32(imm.asTrustedImm32(), src, dest); 666 } 667 668 void or32(Imm32 imm, RegisterID dest) 669 { 670 if (shouldBlind(imm)) { 671 BlindedImm32 key = orBlindedConstant(imm); 672 or32(key.value1, dest); 673 or32(key.value2, dest); 674 } else 675 or32(imm.asTrustedImm32(), dest); 676 } 677 678 void poke(Imm32 value, int index = 0) 679 { 680 store32(value, addressForPoke(index)); 681 } 682 683 void store32(Imm32 imm, Address dest) 684 { 685 if (shouldBlind(imm)) { 686 #if CPU(X86) || CPU(X86_64) 687 BlindedImm32 blind = xorBlindConstant(imm); 688 store32(blind.value1, dest); 689 xor32(blind.value2, dest); 690 #else 691 RegisterID scratchRegister = (RegisterID)scratchRegisterForBlinding(); 692 loadXorBlindedConstant(xorBlindConstant(imm), scratchRegister); 693 store32(scratchRegister, dest); 694 #endif 695 } else 696 store32(imm.asTrustedImm32(), dest); 697 } 698 699 void sub32(Imm32 imm, RegisterID dest) 700 { 701 if (shouldBlind(imm)) { 702 BlindedImm32 key = additionBlindedConstant(imm); 703 sub32(key.value1, dest); 704 sub32(key.value2, dest); 705 } else 706 sub32(imm.asTrustedImm32(), dest); 707 } 708 709 void subPtr(Imm32 imm, RegisterID dest) 710 { 711 if (shouldBlind(imm)) { 712 BlindedImm32 key = additionBlindedConstant(imm); 713 subPtr(key.value1, dest); 714 subPtr(key.value2, dest); 715 } else 716 subPtr(imm.asTrustedImm32(), dest); 717 } 718 719 void xor32(Imm32 imm, RegisterID src, RegisterID dest) 720 { 721 if (shouldBlind(imm)) { 722 BlindedImm32 blind = xorBlindConstant(imm); 723 xor32(blind.value1, src, dest); 724 xor32(blind.value2, dest); 725 } else 726 xor32(imm.asTrustedImm32(), src, dest); 727 } 728 729 void xor32(Imm32 imm, RegisterID dest) 730 { 731 if (shouldBlind(imm)) { 732 BlindedImm32 blind = xorBlindConstant(imm); 733 xor32(blind.value1, dest); 734 xor32(blind.value2, dest); 735 } else 736 xor32(imm.asTrustedImm32(), dest); 737 } 738 739 Jump branch32(RelationalCondition cond, RegisterID left, Imm32 right) 740 { 741 if 
(shouldBlind(right)) { 742 if (RegisterID scratchRegister = (RegisterID)scratchRegisterForBlinding()) { 743 loadXorBlindedConstant(xorBlindConstant(right), scratchRegister); 744 return branch32(cond, left, scratchRegister); 745 } 746 // If we don't have a scratch register available for use, we'll just 747 // place a random number of nops. 748 uint32_t nopCount = random() & 3; 749 while (nopCount--) 750 nop(); 751 return branch32(cond, left, right.asTrustedImm32()); 752 } 753 754 return branch32(cond, left, right.asTrustedImm32()); 755 } 756 757 Jump branchAdd32(ResultCondition cond, RegisterID src, Imm32 imm, RegisterID dest) 758 { 759 if (src == dest) { 760 if (!scratchRegisterForBlinding()) { 761 // Release mode ASSERT, if this fails we will perform incorrect codegen. 762 CRASH(); 763 } 764 } 765 if (shouldBlind(imm)) { 766 if (src == dest) { 767 if (RegisterID scratchRegister = (RegisterID)scratchRegisterForBlinding()) { 768 move(src, scratchRegister); 769 src = scratchRegister; 770 } 771 } 772 loadXorBlindedConstant(xorBlindConstant(imm), dest); 773 return branchAdd32(cond, src, dest); 774 } 775 return branchAdd32(cond, src, imm.asTrustedImm32(), dest); 776 } 777 778 Jump branchMul32(ResultCondition cond, Imm32 imm, RegisterID src, RegisterID dest) 779 { 780 if (src == dest) { 781 if (!scratchRegisterForBlinding()) { 782 // Release mode ASSERT, if this fails we will perform incorrect codegen. 783 CRASH(); 784 } 785 } 786 if (shouldBlind(imm)) { 787 if (src == dest) { 788 if (RegisterID scratchRegister = (RegisterID)scratchRegisterForBlinding()) { 789 move(src, scratchRegister); 790 src = scratchRegister; 791 } 792 } 793 loadXorBlindedConstant(xorBlindConstant(imm), dest); 794 return branchMul32(cond, src, dest); 795 } 796 return branchMul32(cond, imm.asTrustedImm32(), src, dest); 797 } 798 799 // branchSub32 takes a scratch register as 32 bit platforms make use of this, 800 // with src == dst, and on x86-32 we don't have a platform scratch register. 
801 Jump branchSub32(ResultCondition cond, RegisterID src, Imm32 imm, RegisterID dest, RegisterID scratch) 802 { 803 if (shouldBlind(imm)) { 804 ASSERT(scratch != dest); 805 ASSERT(scratch != src); 806 loadXorBlindedConstant(xorBlindConstant(imm), scratch); 807 return branchSub32(cond, src, scratch, dest); 808 } 809 return branchSub32(cond, src, imm.asTrustedImm32(), dest); 810 } 811 812 // Immediate shifts only have 5 controllable bits 813 // so we'll consider them safe for now. 814 TrustedImm32 trustedImm32ForShift(Imm32 imm) 815 { 816 return TrustedImm32(imm.asTrustedImm32().m_value & 31); 817 } 818 819 void lshift32(Imm32 imm, RegisterID dest) 820 { 821 lshift32(trustedImm32ForShift(imm), dest); 822 } 823 824 void lshift32(RegisterID src, Imm32 amount, RegisterID dest) 825 { 826 lshift32(src, trustedImm32ForShift(amount), dest); 827 } 828 829 void rshift32(Imm32 imm, RegisterID dest) 830 { 831 rshift32(trustedImm32ForShift(imm), dest); 832 } 833 834 void rshift32(RegisterID src, Imm32 amount, RegisterID dest) 835 { 836 rshift32(src, trustedImm32ForShift(amount), dest); 837 } 838 839 void urshift32(Imm32 imm, RegisterID dest) 840 { 841 urshift32(trustedImm32ForShift(imm), dest); 842 } 843 844 void urshift32(RegisterID src, Imm32 amount, RegisterID dest) 845 { 846 urshift32(src, trustedImm32ForShift(amount), dest); 847 } 451 848 }; 452 849 -
trunk/Source/JavaScriptCore/assembler/MacroAssemblerARMv7.h
r109038 r109834 52 52 // a LDR_imm_T2 encoding 53 53 static const int MaximumCompactPtrAlignedAddressOffset = 124; 54 55 MacroAssemblerARMv7() 56 : m_inUninterruptedSequence(false) 57 { 58 } 59 60 void beginUninterruptedSequence() { m_inUninterruptedSequence = true; } 61 void endUninterruptedSequence() { m_inUninterruptedSequence = false; } 54 62 55 Vector<LinkRecord>& jumpsToLink() { return m_assembler.jumpsToLink(); } 63 56 void* unlinkedCode() { return m_assembler.unlinkedCode(); } … … 531 524 } 532 525 526 protected: 533 527 void store32(RegisterID src, ArmAddress address) 534 528 { … … 545 539 } 546 540 541 private: 547 542 void store8(RegisterID src, ArmAddress address) 548 543 { … … 727 722 } 728 723 724 static RegisterID scratchRegisterForBlinding() { return dataTempRegister; } 725 static bool shouldBlindForSpecificArch(uint32_t value) 726 { 727 ARMThumbImmediate immediate = ARMThumbImmediate::makeEncodedImm(value); 728 729 // Couldn't be encoded as an immediate, so assume it's untrusted. 730 if (!immediate.isValid()) 731 return true; 732 733 // If we can encode the immediate, we have less than 16 attacker 734 // controlled bits. 735 if (immediate.isEncodedImm()) 736 return false; 737 738 // Don't let any more than 12 bits of an instruction word 739 // be controlled by an attacker. 740 return !immediate.isUInt12(); 741 } 729 742 730 743 // Floating-point operations: … … 963 976 // overflow the result will be equal to -2. 964 977 Jump underflow = branchAdd32(Zero, dest, dest, dataTempRegister); 965 Jump noOverflow = branch32(NotEqual, dataTempRegister, Imm32(-2));978 Jump noOverflow = branch32(NotEqual, dataTempRegister, TrustedImm32(-2)); 966 979 967 980 // For BranchIfTruncateSuccessful, we branch if 'noOverflow' jumps. 
… … 1602 1615 1603 1616 protected: 1604 bool inUninterruptedSequence()1605 {1606 return m_inUninterruptedSequence;1607 }1608 1617 1609 1618 ALWAYS_INLINE Jump jump() … … 1713 1722 ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress()); 1714 1723 } 1715 1716 bool m_inUninterruptedSequence; 1724 1717 1725 }; 1718 1726 -
trunk/Source/JavaScriptCore/assembler/MacroAssemblerX86Common.h
r109038 r109834 91 91 92 92 static const RegisterID stackPointerRegister = X86Registers::esp; 93 94 static bool shouldBlindForSpecificArch(uint32_t value) { return value >= 0x00ffffff; } 93 95 94 96 // Integer arithmetic operations: -
trunk/Source/JavaScriptCore/assembler/MacroAssemblerX86_64.h
r109040 r109834 450 450 } 451 451 452 Jump branchSubPtr(ResultCondition cond, RegisterID src1, TrustedImm32 src2, RegisterID dest) 453 { 454 move(src1, dest); 455 return branchSubPtr(cond, src2, dest); 456 } 457 452 458 DataLabelPtr moveWithPatch(TrustedImmPtr initialValue, RegisterID dest) 453 459 { … … 494 500 } 495 501 502 static RegisterID scratchRegisterForBlinding() { return scratchRegister; } 503 496 504 private: 497 505 friend class LinkBuffer; -
trunk/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp
r109307 r109834 45 45 OSRExit& exit = codeBlock()->osrExit(i); 46 46 exit.m_check.initialJump().link(this); 47 store32( Imm32(i), &globalData()->osrExitIndex);47 store32(TrustedImm32(i), &globalData()->osrExitIndex); 48 48 beginUninterruptedSequence(); 49 49 exit.m_check.switchToLateJump(jump()); … … 76 76 #endif 77 77 78 addPtr( Imm32(1), AbsoluteAddress(codeBlock()->addressOfSpeculativeSuccessCounter()));78 addPtr(TrustedImm32(1), AbsoluteAddress(codeBlock()->addressOfSpeculativeSuccessCounter())); 79 79 80 80 bool compiledSpeculative = speculative.compile(); … … 226 226 // Plant a check that sufficient space is available in the RegisterFile. 227 227 // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56291 228 addPtr( Imm32(m_codeBlock->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);228 addPtr(TrustedImm32(m_codeBlock->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1); 229 229 Jump registerFileCheck = branchPtr(Below, AbsoluteAddress(m_globalData->interpreter->registerFile().addressOfEnd()), GPRInfo::regT1); 230 230 // Return here after register file check. … … 262 262 263 263 load32(AssemblyHelpers::payloadFor((VirtualRegister)RegisterFile::ArgumentCount), GPRInfo::regT1); 264 branch32(AboveOrEqual, GPRInfo::regT1, Imm32(m_codeBlock->numParameters())).linkTo(fromArityCheck, this);264 branch32(AboveOrEqual, GPRInfo::regT1, TrustedImm32(m_codeBlock->numParameters())).linkTo(fromArityCheck, this); 265 265 move(stackPointerRegister, GPRInfo::argumentGPR0); 266 266 poke(GPRInfo::callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*)); -
trunk/Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp
r109705 r109834 99 99 m_jit.load32(scratchBuffer, scratch); 100 100 } else if (exit.m_jsValueSource.hasKnownTag()) { 101 m_jit.store32(AssemblyHelpers:: Imm32(exit.m_jsValueSource.tag()), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);101 m_jit.store32(AssemblyHelpers::TrustedImm32(exit.m_jsValueSource.tag()), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag); 102 102 m_jit.store32(exit.m_jsValueSource.payloadGPR(), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload); 103 103 } else { … … 563 563 // counterValueForOptimizeAfterWarmUp(). 564 564 565 m_jit.add32(AssemblyHelpers:: Imm32(1), AssemblyHelpers::AbsoluteAddress(&exit.m_count));565 m_jit.add32(AssemblyHelpers::TrustedImm32(1), AssemblyHelpers::AbsoluteAddress(&exit.m_count)); 566 566 567 567 m_jit.move(AssemblyHelpers::TrustedImmPtr(m_jit.codeBlock()), GPRInfo::regT0); … … 569 569 m_jit.load32(AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeFailCounter()), GPRInfo::regT2); 570 570 m_jit.load32(AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeSuccessCounter()), GPRInfo::regT1); 571 m_jit.add32(AssemblyHelpers:: Imm32(1), GPRInfo::regT2);572 m_jit.add32(AssemblyHelpers:: Imm32(-1), GPRInfo::regT1);571 m_jit.add32(AssemblyHelpers::TrustedImm32(1), GPRInfo::regT2); 572 m_jit.add32(AssemblyHelpers::TrustedImm32(-1), GPRInfo::regT1); 573 573 m_jit.store32(GPRInfo::regT2, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeFailCounter())); 574 574 m_jit.store32(GPRInfo::regT1, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeSuccessCounter())); … … 577 577 578 578 AssemblyHelpers::Jump fewFails = m_jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, AssemblyHelpers::Imm32(m_jit.codeBlock()->largeFailCountThreshold())); 579 m_jit.mul32(AssemblyHelpers:: Imm32(Options::desiredSpeculativeSuccessFailRatio), GPRInfo::regT2, GPRInfo::regT2);579 
m_jit.mul32(AssemblyHelpers::TrustedImm32(Options::desiredSpeculativeSuccessFailRatio), GPRInfo::regT2, GPRInfo::regT2); 580 580 581 581 AssemblyHelpers::Jump lowFailRate = m_jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, GPRInfo::regT1); 582 582 583 583 // Reoptimize as soon as possible. 584 m_jit.store32(AssemblyHelpers:: Imm32(0), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()));585 m_jit.store32(AssemblyHelpers:: Imm32(0), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionActiveThreshold()));584 m_jit.store32(AssemblyHelpers::TrustedImm32(0), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter())); 585 m_jit.store32(AssemblyHelpers::TrustedImm32(0), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionActiveThreshold())); 586 586 AssemblyHelpers::Jump doneAdjusting = m_jit.jump(); 587 587 … … 594 594 m_jit.baselineCodeBlock()->counterValueForOptimizeAfterLongWarmUp(), 595 595 m_jit.baselineCodeBlock()); 596 m_jit.store32(AssemblyHelpers:: Imm32(-targetValue), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()));597 m_jit.store32(AssemblyHelpers:: Imm32(targetValue), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionActiveThreshold()));598 m_jit.store32(AssemblyHelpers:: Imm32(ExecutionCounter::formattedTotalCount(targetValue)), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionTotalCount()));596 m_jit.store32(AssemblyHelpers::TrustedImm32(-targetValue), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter())); 597 m_jit.store32(AssemblyHelpers::TrustedImm32(targetValue), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionActiveThreshold())); 598 m_jit.store32(AssemblyHelpers::TrustedImm32(ExecutionCounter::formattedTotalCount(targetValue)), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionTotalCount())); 599 
599 600 600 doneAdjusting.link(&m_jit); … … 627 627 GPRReg callerFrameGPR; 628 628 if (inlineCallFrame->caller.inlineCallFrame) { 629 m_jit.add32(AssemblyHelpers:: Imm32(inlineCallFrame->caller.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister, GPRInfo::regT3);629 m_jit.add32(AssemblyHelpers::TrustedImm32(inlineCallFrame->caller.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister, GPRInfo::regT3); 630 630 callerFrameGPR = GPRInfo::regT3; 631 631 } else … … 644 644 645 645 if (exit.m_codeOrigin.inlineCallFrame) 646 m_jit.addPtr(AssemblyHelpers:: Imm32(exit.m_codeOrigin.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister);646 m_jit.addPtr(AssemblyHelpers::TrustedImm32(exit.m_codeOrigin.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister); 647 647 648 648 // 14) Jump into the corresponding baseline JIT code. -
trunk/Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp
r109705 r109834 542 542 // counterValueForOptimizeAfterWarmUp(). 543 543 544 m_jit.add32(AssemblyHelpers:: Imm32(1), AssemblyHelpers::AbsoluteAddress(&exit.m_count));544 m_jit.add32(AssemblyHelpers::TrustedImm32(1), AssemblyHelpers::AbsoluteAddress(&exit.m_count)); 545 545 546 546 m_jit.move(AssemblyHelpers::TrustedImmPtr(m_jit.codeBlock()), GPRInfo::regT0); … … 548 548 m_jit.load32(AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeFailCounter()), GPRInfo::regT2); 549 549 m_jit.load32(AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeSuccessCounter()), GPRInfo::regT1); 550 m_jit.add32(AssemblyHelpers:: Imm32(1), GPRInfo::regT2);551 m_jit.add32(AssemblyHelpers:: Imm32(-1), GPRInfo::regT1);550 m_jit.add32(AssemblyHelpers::TrustedImm32(1), GPRInfo::regT2); 551 m_jit.add32(AssemblyHelpers::TrustedImm32(-1), GPRInfo::regT1); 552 552 m_jit.store32(GPRInfo::regT2, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeFailCounter())); 553 553 m_jit.store32(GPRInfo::regT1, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeSuccessCounter())); … … 555 555 m_jit.move(AssemblyHelpers::TrustedImmPtr(m_jit.baselineCodeBlock()), GPRInfo::regT0); 556 556 557 AssemblyHelpers::Jump fewFails = m_jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, AssemblyHelpers:: Imm32(m_jit.codeBlock()->largeFailCountThreshold()));558 m_jit.mul32(AssemblyHelpers:: Imm32(Options::desiredSpeculativeSuccessFailRatio), GPRInfo::regT2, GPRInfo::regT2);557 AssemblyHelpers::Jump fewFails = m_jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, AssemblyHelpers::TrustedImm32(m_jit.codeBlock()->largeFailCountThreshold())); 558 m_jit.mul32(AssemblyHelpers::TrustedImm32(Options::desiredSpeculativeSuccessFailRatio), GPRInfo::regT2, GPRInfo::regT2); 559 559 560 560 AssemblyHelpers::Jump lowFailRate = m_jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, GPRInfo::regT1); 561 561 562 562 // Reoptimize as soon as 
possible. 563 m_jit.store32(AssemblyHelpers:: Imm32(0), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()));564 m_jit.store32(AssemblyHelpers:: Imm32(0), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionActiveThreshold()));563 m_jit.store32(AssemblyHelpers::TrustedImm32(0), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter())); 564 m_jit.store32(AssemblyHelpers::TrustedImm32(0), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionActiveThreshold())); 565 565 AssemblyHelpers::Jump doneAdjusting = m_jit.jump(); 566 566 … … 573 573 m_jit.baselineCodeBlock()->counterValueForOptimizeAfterLongWarmUp(), 574 574 m_jit.baselineCodeBlock()); 575 m_jit.store32(AssemblyHelpers:: Imm32(-targetValue), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()));576 m_jit.store32(AssemblyHelpers:: Imm32(targetValue), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionActiveThreshold()));577 m_jit.store32(AssemblyHelpers:: Imm32(ExecutionCounter::formattedTotalCount(targetValue)), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionTotalCount()));575 m_jit.store32(AssemblyHelpers::TrustedImm32(-targetValue), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter())); 576 m_jit.store32(AssemblyHelpers::TrustedImm32(targetValue), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionActiveThreshold())); 577 m_jit.store32(AssemblyHelpers::TrustedImm32(ExecutionCounter::formattedTotalCount(targetValue)), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionTotalCount())); 578 578 579 579 doneAdjusting.link(&m_jit); … … 604 604 GPRReg callerFrameGPR; 605 605 if (inlineCallFrame->caller.inlineCallFrame) { 606 m_jit.addPtr(AssemblyHelpers:: Imm32(inlineCallFrame->caller.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister, 
GPRInfo::regT3);606 m_jit.addPtr(AssemblyHelpers::TrustedImm32(inlineCallFrame->caller.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister, GPRInfo::regT3); 607 607 callerFrameGPR = GPRInfo::regT3; 608 608 } else -
trunk/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
r109318 r109834 957 957 if (DFG_ENABLE_EDGE_CODE_VERIFICATION) { 958 958 JITCompiler::Jump verificationSucceeded = 959 m_jit.branch32(JITCompiler::Equal, GPRInfo::regT0, Imm32(m_block));959 m_jit.branch32(JITCompiler::Equal, GPRInfo::regT0, TrustedImm32(m_block)); 960 960 m_jit.breakpoint(); 961 961 verificationSucceeded.link(&m_jit); … … 1234 1234 1235 1235 if (DFG_ENABLE_EDGE_CODE_VERIFICATION) 1236 m_jit.move( Imm32(0), GPRInfo::regT0);1236 m_jit.move(TrustedImm32(0), GPRInfo::regT0); 1237 1237 1238 1238 ASSERT(!m_compileIndex); … … 2320 2320 m_jit.move(op1.gpr(), result.gpr()); 2321 2321 m_jit.sub32(Imm32(imm2), result.gpr()); 2322 } else 2323 speculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr())); 2322 } else { 2323 GPRTemporary scratch(this); 2324 speculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr(), scratch.gpr())); 2325 } 2324 2326 2325 2327 integerResult(result.gpr(), m_compileIndex); … … 2536 2538 m_jit.move(MacroAssembler::TrustedImmPtr(bitwise_cast<void*>(ValueFalse)), resultGPR); 2537 2539 MacroAssembler::Jump notEqual = m_jit.branchPtr(MacroAssembler::NotEqual, op1GPR, MacroAssembler::TrustedImmPtr(bitwise_cast<void*>(JSValue::encode(constant)))); 2538 m_jit.or32(MacroAssembler:: Imm32(1), resultGPR);2540 m_jit.or32(MacroAssembler::TrustedImm32(1), resultGPR); 2539 2541 notEqual.link(&m_jit); 2540 2542 jsValueResult(resultGPR, m_compileIndex, DataFormatJSBoolean); … … 2543 2545 GPRReg op1TagGPR = op1.tagGPR(); 2544 2546 GPRReg resultGPR = result.gpr(); 2545 m_jit.move( Imm32(0), resultGPR);2547 m_jit.move(TrustedImm32(0), resultGPR); 2546 2548 MacroAssembler::JumpList notEqual; 2547 2549 notEqual.append(m_jit.branch32(MacroAssembler::NotEqual, op1TagGPR, MacroAssembler::Imm32(constant.tag()))); 2548 2550 notEqual.append(m_jit.branch32(MacroAssembler::NotEqual, op1PayloadGPR, 
MacroAssembler::Imm32(constant.payload()))); 2549 m_jit.move( Imm32(1), resultGPR);2551 m_jit.move(TrustedImm32(1), resultGPR); 2550 2552 notEqual.link(&m_jit); 2551 2553 booleanResult(resultGPR, m_compileIndex); -
trunk/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h
r109318 r109834 1124 1124 JITCompiler::Call callOperation(J_DFGOperation_ESS operation, GPRReg result, int startConstant, int numConstants) 1125 1125 { 1126 m_jit.setupArgumentsWithExecState( Imm32(startConstant),Imm32(numConstants));1126 m_jit.setupArgumentsWithExecState(TrustedImm32(startConstant), TrustedImm32(numConstants)); 1127 1127 return appendCallWithExceptionCheckSetResult(operation, result); 1128 1128 } … … 1187 1187 return appendCallWithExceptionCheckSetResult(operation, result); 1188 1188 } 1189 JITCompiler::Call callOperation(J_DFGOperation_EJJ operation, GPRReg result, GPRReg arg1, MacroAssembler:: Imm32 imm)1189 JITCompiler::Call callOperation(J_DFGOperation_EJJ operation, GPRReg result, GPRReg arg1, MacroAssembler::TrustedImm32 imm) 1190 1190 { 1191 1191 m_jit.setupArgumentsWithExecState(arg1, MacroAssembler::ImmPtr(static_cast<const void*>(JSValue::encode(jsNumber(imm.m_value))))); 1192 1192 return appendCallWithExceptionCheckSetResult(operation, result); 1193 1193 } 1194 JITCompiler::Call callOperation(J_DFGOperation_EJJ operation, GPRReg result, MacroAssembler:: Imm32 imm, GPRReg arg2)1194 JITCompiler::Call callOperation(J_DFGOperation_EJJ operation, GPRReg result, MacroAssembler::TrustedImm32 imm, GPRReg arg2) 1195 1195 { 1196 1196 m_jit.setupArgumentsWithExecState(MacroAssembler::ImmPtr(static_cast<const void*>(JSValue::encode(jsNumber(imm.m_value)))), arg2); … … 1365 1365 return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag); 1366 1366 } 1367 JITCompiler::Call callOperation(J_DFGOperation_EJJ operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload, MacroAssembler:: Imm32 imm)1367 JITCompiler::Call callOperation(J_DFGOperation_EJJ operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload, MacroAssembler::TrustedImm32 imm) 1368 1368 { 1369 1369 m_jit.setupArgumentsWithExecState(arg1Payload, arg1Tag, imm, TrustedImm32(JSValue::Int32Tag)); 1370 1370 return 
appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag); 1371 1371 } 1372 JITCompiler::Call callOperation(J_DFGOperation_EJJ operation, GPRReg resultTag, GPRReg resultPayload, MacroAssembler:: Imm32 imm, GPRReg arg2Tag, GPRReg arg2Payload)1372 JITCompiler::Call callOperation(J_DFGOperation_EJJ operation, GPRReg resultTag, GPRReg resultPayload, MacroAssembler::TrustedImm32 imm, GPRReg arg2Tag, GPRReg arg2Payload) 1373 1373 { 1374 1374 m_jit.setupArgumentsWithExecState(imm, TrustedImm32(JSValue::Int32Tag), arg2Payload, arg2Tag); -
trunk/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp
r109318 r109834 951 951 m_jit.store32(MacroAssembler::TrustedImm32(JSValue::CellTag), callFrameTagSlot(RegisterFile::ScopeChain)); 952 952 953 m_jit.addPtr( Imm32(m_jit.codeBlock()->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister);953 m_jit.addPtr(TrustedImm32(m_jit.codeBlock()->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister); 954 954 955 955 CodeOrigin codeOrigin = at(m_compileIndex).codeOrigin; … … 962 962 slowPath.link(&m_jit); 963 963 964 m_jit.addPtr( Imm32(m_jit.codeBlock()->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);964 m_jit.addPtr(TrustedImm32(m_jit.codeBlock()->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); 965 965 m_jit.poke(GPRInfo::argumentGPR0); 966 966 token = m_jit.beginCall(); 967 967 JITCompiler::Call slowCall = m_jit.appendCall(slowCallFunction); 968 968 m_jit.addFastExceptionCheck(slowCall, codeOrigin, token); 969 m_jit.addPtr( Imm32(m_jit.codeBlock()->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister);969 m_jit.addPtr(TrustedImm32(m_jit.codeBlock()->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister); 970 970 token = m_jit.beginJSCall(); 971 971 JITCompiler::Call theCall = m_jit.call(GPRInfo::returnValueGPR); … … 1391 1391 1392 1392 MacroAssembler::Jump falseCase = m_jit.branchPtr(MacroAssembler::NotEqual, op1GPR, op2GPR); 1393 m_jit.move( Imm32(1), resultPayloadGPR);1393 m_jit.move(TrustedImm32(1), resultPayloadGPR); 1394 1394 MacroAssembler::Jump done = m_jit.jump(); 1395 1395 falseCase.link(&m_jit); 1396 m_jit.move( Imm32(0), resultPayloadGPR);1396 m_jit.move(TrustedImm32(0), resultPayloadGPR); 1397 1397 done.link(&m_jit); 1398 1398 … … 1418 1418 GPRTemporary resultPayload(this); 1419 1419 1420 m_jit.move( Imm32(1), resultPayload.gpr());1420 m_jit.move(TrustedImm32(1), resultPayload.gpr()); 1421 1421 MacroAssembler::Jump trueCase = m_jit.branchDouble(condition, 
op1.fpr(), op2.fpr()); 1422 m_jit.move( Imm32(0), resultPayload.gpr());1422 m_jit.move(TrustedImm32(0), resultPayload.gpr()); 1423 1423 trueCase.link(&m_jit); 1424 1424 … … 2544 2544 m_jit.store32(valuePayloadGPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload))); 2545 2545 2546 m_jit.add32( Imm32(1), storageLengthGPR);2546 m_jit.add32(TrustedImm32(1), storageLengthGPR); 2547 2547 m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_length))); 2548 m_jit.add32( Imm32(1), MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));2548 m_jit.add32(TrustedImm32(1), MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector))); 2549 2549 m_jit.move(Imm32(JSValue::Int32Tag), storageGPR); 2550 2550 … … 2584 2584 MacroAssembler::Jump emptyArrayCase = m_jit.branchTest32(MacroAssembler::Zero, storageLengthGPR); 2585 2585 2586 m_jit.sub32( Imm32(1), storageLengthGPR);2586 m_jit.sub32(TrustedImm32(1), storageLengthGPR); 2587 2587 2588 2588 MacroAssembler::Jump slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(baseGPR, JSArray::vectorLengthOffset())); … … 2593 2593 m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_length))); 2594 2594 2595 MacroAssembler::Jump holeCase = m_jit.branch32(MacroAssembler::Equal, Imm32(JSValue::EmptyValueTag), valueTagGPR);2595 MacroAssembler::Jump holeCase = m_jit.branch32(MacroAssembler::Equal, TrustedImm32(JSValue::EmptyValueTag), valueTagGPR); 2596 2596 2597 2597 m_jit.store32(TrustedImm32(JSValue::EmptyValueTag), MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag))); 2598 2598 2599 m_jit.sub32( 
MacroAssembler::Imm32(1), MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));2599 m_jit.sub32(TrustedImm32(1), MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector))); 2600 2600 2601 2601 MacroAssembler::JumpList done; -
trunk/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp
r109318 r109834 975 975 slowPath.link(&m_jit); 976 976 977 m_jit.addPtr( Imm32(m_jit.codeBlock()->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);977 m_jit.addPtr(TrustedImm32(m_jit.codeBlock()->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); 978 978 token = m_jit.beginCall(); 979 979 JITCompiler::Call slowCall = m_jit.appendCall(slowCallFunction); … … 1507 1507 m_jit.move(TrustedImm32(ValueTrue), result.gpr()); 1508 1508 MacroAssembler::Jump trueCase = m_jit.branchDouble(condition, op1.fpr(), op2.fpr()); 1509 m_jit.xorPtr( Imm32(true), result.gpr());1509 m_jit.xorPtr(TrustedImm32(true), result.gpr()); 1510 1510 trueCase.link(&m_jit); 1511 1511 … … 2586 2586 m_jit.storePtr(valueGPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]))); 2587 2587 2588 m_jit.add32( Imm32(1), storageLengthGPR);2588 m_jit.add32(TrustedImm32(1), storageLengthGPR); 2589 2589 m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_length))); 2590 m_jit.add32( Imm32(1), MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));2590 m_jit.add32(TrustedImm32(1), MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector))); 2591 2591 m_jit.orPtr(GPRInfo::tagTypeNumberRegister, storageLengthGPR); 2592 2592 … … 2624 2624 MacroAssembler::Jump emptyArrayCase = m_jit.branchTest32(MacroAssembler::Zero, storageLengthGPR); 2625 2625 2626 m_jit.sub32( Imm32(1), storageLengthGPR);2626 m_jit.sub32(TrustedImm32(1), storageLengthGPR); 2627 2627 2628 2628 MacroAssembler::Jump slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(baseGPR, JSArray::vectorLengthOffset())); … … 2635 2635 2636 2636 m_jit.storePtr(MacroAssembler::TrustedImmPtr(0), MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, 
MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]))); 2637 m_jit.sub32(MacroAssembler:: Imm32(1), MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));2637 m_jit.sub32(MacroAssembler::TrustedImm32(1), MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector))); 2638 2638 2639 2639 MacroAssembler::JumpList done; -
trunk/Source/JavaScriptCore/jit/JIT.cpp
r109705 r109834 502 502 #if ENABLE(VALUE_PROFILER) 503 503 if (m_canBeOptimized) 504 add32( Imm32(1), AbsoluteAddress(&rareCaseProfile->m_counter));504 add32(TrustedImm32(1), AbsoluteAddress(&rareCaseProfile->m_counter)); 505 505 #endif 506 506 … … 574 574 #endif 575 575 576 addPtr( Imm32(m_codeBlock->m_numCalleeRegisters * sizeof(Register)), callFrameRegister, regT1);576 addPtr(TrustedImm32(m_codeBlock->m_numCalleeRegisters * sizeof(Register)), callFrameRegister, regT1); 577 577 registerFileCheck = branchPtr(Below, AbsoluteAddress(m_globalData->interpreter->registerFile().addressOfEnd()), regT1); 578 578 } … … 582 582 #if ENABLE(VALUE_PROFILER) 583 583 if (m_canBeOptimized) 584 add32( Imm32(1), AbsoluteAddress(&m_codeBlock->m_executionEntryCount));584 add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->m_executionEntryCount)); 585 585 #endif 586 586 -
trunk/Source/JavaScriptCore/jit/JITArithmetic.cpp
r109038 r109834 834 834 JumpList done; 835 835 done.append(branchTest32(NonZero, regT2)); 836 Jump negativeZero = branch32(LessThan, regT0, Imm32(0));837 done.append(branch32(GreaterThanOrEqual, regT1, Imm32(0)));836 Jump negativeZero = branch32(LessThan, regT0, TrustedImm32(0)); 837 done.append(branch32(GreaterThanOrEqual, regT1, TrustedImm32(0))); 838 838 negativeZero.link(this); 839 839 // We only get here if we have a genuine negative zero. Record this, 840 840 // so that the speculative JIT knows that we failed speculation 841 841 // because of a negative zero. 842 add32( Imm32(1), AbsoluteAddress(&profile->m_counter));842 add32(TrustedImm32(1), AbsoluteAddress(&profile->m_counter)); 843 843 addSlowCase(jump()); 844 844 done.link(this); … … 964 964 emitGetVirtualRegister(op2, regT0); 965 965 emitJumpSlowCaseIfNotImmediateInteger(regT0); 966 addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op1)), regT0));967 emitFastArithIntToImmNoCheck(regT 0, regT0);966 addSlowCase(branchAdd32(Overflow, regT0, Imm32(getConstantOperandImmediateInt(op1)), regT1)); 967 emitFastArithIntToImmNoCheck(regT1, regT0); 968 968 } else if (isOperandConstantImmediateInt(op2)) { 969 969 emitGetVirtualRegister(op1, regT0); 970 970 emitJumpSlowCaseIfNotImmediateInteger(regT0); 971 addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op2)), regT0));972 emitFastArithIntToImmNoCheck(regT 0, regT0);971 addSlowCase(branchAdd32(Overflow, regT0, Imm32(getConstantOperandImmediateInt(op2)), regT1)); 972 emitFastArithIntToImmNoCheck(regT1, regT0); 973 973 } else 974 974 compileBinaryArithOp(op_add, result, op1, op2, types); … … 1010 1010 emitGetVirtualRegister(op2, regT0); 1011 1011 emitJumpSlowCaseIfNotImmediateInteger(regT0); 1012 addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT 0));1013 emitFastArithReTagImmediate(regT 0, regT0);1012 addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT1)); 1013 emitFastArithReTagImmediate(regT1, regT0); 1014 
1014 } else if (isOperandConstantImmediateInt(op2) && ((value = getConstantOperandImmediateInt(op2)) > 0)) { 1015 1015 #if ENABLE(VALUE_PROFILER) … … 1019 1019 emitGetVirtualRegister(op1, regT0); 1020 1020 emitJumpSlowCaseIfNotImmediateInteger(regT0); 1021 addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT 0));1022 emitFastArithReTagImmediate(regT 0, regT0);1021 addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT1)); 1022 emitFastArithReTagImmediate(regT1, regT0); 1023 1023 } else 1024 1024 compileBinaryArithOp(op_mul, result, op1, op2, types); … … 1106 1106 Jump isInteger = jump(); 1107 1107 notInteger.link(this); 1108 add32( Imm32(1), AbsoluteAddress(&m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset)->m_counter));1108 add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset)->m_counter)); 1109 1109 moveDoubleToPtr(fpRegT0, regT0); 1110 1110 subPtr(tagTypeNumberRegister, regT0); -
trunk/Source/JavaScriptCore/jit/JITArithmetic32_64.cpp
r109007 r109834 620 620 { 621 621 // Int32 case. 622 emitLoad(op, regT1, regT 0);622 emitLoad(op, regT1, regT2); 623 623 Jump notInt32 = branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)); 624 addSlowCase(branchAdd32(Overflow, Imm32(constant), regT0));624 addSlowCase(branchAdd32(Overflow, regT2, Imm32(constant), regT0)); 625 625 emitStoreInt32(dst, regT0, (op == dst)); 626 626 … … 733 733 emitLoad(op, regT1, regT0); 734 734 Jump notInt32 = branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)); 735 addSlowCase(branchSub32(Overflow, Imm32(constant), regT0));736 emitStoreInt32(dst, regT 0, (op == dst));735 addSlowCase(branchSub32(Overflow, regT0, Imm32(constant), regT2, regT3)); 736 emitStoreInt32(dst, regT2, (op == dst)); 737 737 738 738 // Double case. … … 862 862 Jump isInteger = jump(); 863 863 notInteger.link(this); 864 add32( Imm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));864 add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter)); 865 865 emitStoreDouble(dst, fpRegT1); 866 866 isInteger.link(this); … … 966 966 Jump isInteger = jump(); 967 967 notInteger.link(this); 968 add32( Imm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));968 add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter)); 969 969 emitStoreDouble(dst, fpRegT0); 970 970 isInteger.link(this); … … 1072 1072 // so that the speculative JIT knows that we failed speculation 1073 1073 // because of a negative zero. 
1074 add32( Imm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));1074 add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter)); 1075 1075 #endif 1076 1076 overflow.link(this); … … 1151 1151 end.append(jump()); 1152 1152 notInteger.link(this); 1153 add32( Imm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));1153 add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter)); 1154 1154 emitStoreDouble(dst, fpRegT0); 1155 1155 #else -
trunk/Source/JavaScriptCore/jit/JITCall.cpp
r109627 r109834 93 93 neg32(regT0); 94 94 signExtend32ToPtr(regT0, regT0); 95 end.append(branchAddPtr(Zero, Imm32(1), regT0));95 end.append(branchAddPtr(Zero, TrustedImm32(1), regT0)); 96 96 // regT0: -argumentCount 97 97 … … 99 99 loadPtr(BaseIndex(callFrameRegister, regT0, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))), regT2); 100 100 storePtr(regT2, BaseIndex(regT1, regT0, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))); 101 branchAddPtr(NonZero, Imm32(1), regT0).linkTo(copyLoop, this);101 branchAddPtr(NonZero, TrustedImm32(1), regT0).linkTo(copyLoop, this); 102 102 103 103 end.append(jump()); -
trunk/Source/JavaScriptCore/jit/JITCall32_64.cpp
r105533 r109834 172 172 // Copy arguments. 173 173 neg32(regT2); 174 end.append(branchAdd32(Zero, Imm32(1), regT2));174 end.append(branchAdd32(Zero, TrustedImm32(1), regT2)); 175 175 // regT2: -argumentCount; 176 176 … … 180 180 store32(regT0, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload) +(CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))))); 181 181 store32(regT1, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag) +(CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))))); 182 branchAdd32(NonZero, Imm32(1), regT2).linkTo(copyLoop, this);182 branchAdd32(NonZero, TrustedImm32(1), regT2).linkTo(copyLoop, this); 183 183 184 184 end.append(jump()); -
trunk/Source/JavaScriptCore/jit/JITInlineMethods.h
r108934 r109834 270 270 storePtr(TrustedImmPtr(m_codeBlock->instructions().begin() + m_bytecodeOffset + 1), intTagFor(RegisterFile::ArgumentCount)); 271 271 #else 272 store32( Imm32(m_bytecodeOffset + 1), intTagFor(RegisterFile::ArgumentCount));272 store32(TrustedImm32(m_bytecodeOffset + 1), intTagFor(RegisterFile::ArgumentCount)); 273 273 #endif 274 274 } … … 558 558 559 559 if (m_randomGenerator.getUint32() & 1) 560 add32( Imm32(1), bucketCounterRegister);560 add32(TrustedImm32(1), bucketCounterRegister); 561 561 else 562 562 add32(Imm32(3), bucketCounterRegister); -
trunk/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
r108112 r109834 818 818 if (supportsFloatingPoint()) { 819 819 // regT1 contains the tag from the hot path. 820 Jump notNumber = branch32(Above, regT1, Imm32(JSValue::LowestTag));820 Jump notNumber = branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)); 821 821 822 822 emitLoadDouble(cond, fpRegT0); … … 854 854 if (supportsFloatingPoint()) { 855 855 // regT1 contains the tag from the hot path. 856 Jump notNumber = branch32(Above, regT1, Imm32(JSValue::LowestTag));856 Jump notNumber = branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)); 857 857 858 858 emitLoadDouble(cond, fpRegT0); -
trunk/Source/JavaScriptCore/jit/JITStubCall.h
r105533 r109834 105 105 m_stackIndex += stackIndexStep; 106 106 } 107 108 void addArgument(JIT::Imm32 argument) 109 { 110 m_jit->poke(argument, m_stackIndex); 111 m_stackIndex += stackIndexStep; 112 } 107 113 108 114 void addArgument(JIT::TrustedImmPtr argument) -
trunk/Source/JavaScriptCore/yarr/YarrJIT.cpp
r108753 r109834 1749 1749 sub32(Imm32(alternative->m_minimumSize - 1), regT0); 1750 1750 else 1751 add32( Imm32(1), regT0);1751 add32(TrustedImm32(1), regT0); 1752 1752 store32(regT0, Address(output)); 1753 1753 } … … 1848 1848 // If the last alternative had the same minimum size as the disjunction, 1849 1849 // just simply increment input pos by 1, no adjustment based on minimum size. 1850 add32( Imm32(1), index);1850 add32(TrustedImm32(1), index); 1851 1851 } else { 1852 1852 // If the minimum for the last alternative was one greater than that
Note: See TracChangeset
for help on using the changeset viewer.