Changeset 192035 in WebKit

Timestamp: Nov 4, 2015, 2:15:00 PM
Location: trunk/Source/JavaScriptCore
Files: 4 edited
trunk/Source/JavaScriptCore/ChangeLog
r192034 → r192035

2015-11-03  Filip Pizlo  <fpizlo@apple.com>

        B3 should be able to compile a Check
        https://bugs.webkit.org/show_bug.cgi?id=150878

        Reviewed by Saam Barati.

        The Check opcode in B3 is going to be our main OSR exit mechanism. It is a stackmap
        value, so you can pass it any number of additional arguments, and you will get to find
        out how those arguments are represented at the point that the value lands in the machine
        code. Unlike a Patchpoint, a Check branches on a value, with the goal of supporting full
        compare/branch fusion. The stackmap's generator runs in an out-of-line path to which
        that branch is linked.

        This change fills in the glue necessary to compile a Check, and it includes a simple
        test of this functionality. That test also happens to check that such simple code will
        never use callee-saves, which I think is sensible.

        * b3/B3LowerToAir.cpp:
        (JSC::B3::Air::LowerToAir::append):
        (JSC::B3::Air::LowerToAir::ensureSpecial):
        (JSC::B3::Air::LowerToAir::fillStackmap):
        (JSC::B3::Air::LowerToAir::tryStackSlot):
        (JSC::B3::Air::LowerToAir::tryPatchpoint):
        (JSC::B3::Air::LowerToAir::tryCheck):
        (JSC::B3::Air::LowerToAir::tryUpsilon):
        * b3/B3LoweringMatcher.patterns:
        * b3/testb3.cpp:
        (JSC::B3::testSimplePatchpoint):
        (JSC::B3::testSimpleCheck):
        (JSC::B3::run):

2015-10-30  Keith Miller  <keith_miller@apple.com>
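In brief, a Check is built like any other stackmap value: append a CheckValue whose first child is the value to branch on, then attach a generator that emits the out-of-line code. A minimal sketch, mirroring the testSimpleCheck test added by this patch (the generator here unconditionally returns a constant rather than emitting a real OSR exit):

    // The Check branches on 'arg'; the generator runs only when 'arg' is non-zero.
    Procedure proc;
    BasicBlock* root = proc.addBlock();
    Value* arg = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
    CheckValue* check = root->appendNew<CheckValue>(proc, Check, Origin(), arg);
    check->setGenerator(
        [&] (CCallHelpers& jit, const StackmapGenerationParams&) {
            // Out-of-line path: in real use this is where an OSR exit would be emitted.
            jit.move(CCallHelpers::TrustedImm32(42), GPRInfo::returnValueGPR);
            jit.emitFunctionEpilogue();
            jit.ret();
        });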
trunk/Source/JavaScriptCore/b3/B3LowerToAir.cpp
r192008 → r192035

 #include "B3ArgumentRegValue.h"
 #include "B3BasicBlockInlines.h"
+#include "B3CheckSpecial.h"
 #include "B3Commutativity.h"
 #include "B3IndexMap.h"

[...]

-    template<typename T>
-    void ensureSpecial(T*& field)
-    {
-        if (!field)
-            field = static_cast<T*>(code.addSpecial(std::make_unique<T>()));
-    }
+    template<typename T, typename... Arguments>
+    T* ensureSpecial(T*& field, Arguments&&... arguments)
+    {
+        if (!field) {
+            field = static_cast<T*>(
+                code.addSpecial(std::make_unique<T>(std::forward<Arguments>(arguments)...)));
+        }
+        return field;
+    }
+
+    void fillStackmap(Inst& inst, StackmapValue* stackmap, unsigned numSkipped)
+    {
+        for (unsigned i = numSkipped; i < stackmap->numChildren(); ++i) {
+            ConstrainedValue value = stackmap->constrainedChild(i);
+
+            Arg arg;
+            switch (value.rep().kind()) {
[...]
+            inst.args.append(arg);
+        }
+    }

     IndexSet<Value> locked; // These are values that will have no Tmp in Air.
     IndexMap<Value, Tmp> valueToTmp; // These are values that must have a Tmp in Air. We say that a Value* with a non-null Tmp is "pinned".
     IndexMap<B3::BasicBlock, Air::BasicBlock*> blockToBlock;
     HashMap<StackSlotValue*, Air::StackSlot*> stackToStack;

     UseCounts useCounts;

     Vector<Vector<Inst, 4>> insts;
     Vector<Inst> prologue;

     B3::BasicBlock* currentBlock;
     unsigned currentIndex;
     Value* currentValue;
-
-    PatchpointSpecial* patchpointSpecial { 0 };

[... the AddressSelector class and the lowering-selector methods (acceptRoot through tryStackSlot) are unchanged context ...]

+    PatchpointSpecial* patchpointSpecial { nullptr };
     bool tryPatchpoint()
     {
         PatchpointValue* patchpointValue = currentValue->as<PatchpointValue>();
         ensureSpecial(patchpointSpecial);

         Inst inst(Patch, patchpointValue, Arg::special(patchpointSpecial));

         if (patchpointValue->type() != Void)
             inst.args.append(tmp(patchpointValue));

-        for (ConstrainedValue value : patchpointValue->constrainedChildren()) {
-            Arg arg;
-            switch (value.rep().kind()) {
[...]
-            inst.args.append(arg);
-        }
+        fillStackmap(inst, patchpointValue, 0);

         insts.last().append(WTF::move(inst));
         return true;
     }
+
+    CheckSpecial* checkBranchTest32Special { nullptr };
+    CheckSpecial* checkBranchTest64Special { nullptr };
+    bool tryCheck(Value* value)
+    {
+        if (!isInt(value->type())) {
+            // FIXME: Implement double branches.
+            // https://bugs.webkit.org/show_bug.cgi?id=150727
+            return false;
+        }
+
+        CheckSpecial* special;
+        switch (value->type()) {
+        case Int32:
+            special = ensureSpecial(checkBranchTest32Special, BranchTest32, 3);
+            break;
+        case Int64:
+            special = ensureSpecial(checkBranchTest64Special, BranchTest64, 3);
+            break;
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+            break;
+        }
+
+        CheckValue* checkValue = currentValue->as<CheckValue>();
+
+        Inst inst(
+            Patch, checkValue, Arg::special(special),
+            Arg::resCond(MacroAssembler::NonZero), tmp(value), Arg::imm(-1));
+
+        fillStackmap(inst, checkValue, 1);
+
+        insts.last().append(WTF::move(inst));
+        return true;
+    }
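For an integer Check, the result of this lowering is a single Patch instruction whose Special is a CheckSpecial parameterized by a branch opcode (BranchTest32 or BranchTest64) and a count of 3, which presumably is the number of instruction arguments the branch consumes: condition, value, and mask. Schematically, the Air for an Int32 check looks something like the line below (illustrative syntax, not the actual Air dump format):

    Patch &CheckSpecial(BranchTest32, 3), NonZero, %val, $-1, <stackmap args...>

At generation time this becomes a test of %val against the mask -1, branching when any bit is set, with the taken edge linked to the out-of-line path where the stackmap's generator runs. That branch-shaped encoding is what leaves room for the full compare/branch fusion the ChangeLog describes, in contrast to a Patchpoint, which generates inline.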
trunk/Source/JavaScriptCore/b3/B3LoweringMatcher.patterns
r192008 → r192035

 Patchpoint = Patchpoint()
+Check = Check(value)

 Upsilon = Upsilon(value)
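Note that the pattern gives Check exactly one matched operand, the value being tested. A CheckValue's remaining children are stackmap arguments, which is why tryCheck above calls fillStackmap with numSkipped = 1, whereas tryPatchpoint passes 0.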
trunk/Source/JavaScriptCore/b3/testb3.cpp
r192008 → r192035

     CHECK(compileAndRun<int>(proc, 1, 2) == 3);
 }
+
+void testSimpleCheck()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    CheckValue* check = root->appendNew<CheckValue>(proc, Check, Origin(), arg);
+    check->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            CHECK(params.reps.size() == 1);
+            CHECK(params.reps[0].isConstant());
+            CHECK(params.reps[0].value() == 1);
+
+            // This should always work because a function this simple should never have callee
+            // saves.
+            jit.move(CCallHelpers::TrustedImm32(42), GPRInfo::returnValueGPR);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+    root->appendNew<ControlValue>(
+        proc, Return, Origin(), root->appendNew<Const32Value>(proc, Origin(), 0));
+
+    MacroAssemblerCodeRef code = compile(proc);
+
+    CHECK(invoke<int>(code, 0) == 0);
+    CHECK(invoke<int>(code, 1) == 42);
 }

[...]

     RUN(testSimplePatchpoint());
+    RUN(testSimpleCheck());

     if (!didRun)
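The two invoke calls exercise both sides of the branch: with argument 0 the check falls through and the function returns 0 via the ordinary Return, while with argument 1 the check fires and control reaches the generator's out-of-line code, which returns 42 directly.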