@@ -1827,7 +1827,7 @@ const bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType
       }
       break;
     case Op_MaskAll:
-      if (!is_LP64 || ! VM_Version::supports_evex()) {
+      if (!VM_Version::supports_evex()) {
        return false;
      }
      if ((vlen > 16 || is_subword_type(bt)) && !VM_Version::supports_avx512bw()) {
@@ -9460,64 +9460,18 @@ instruct evcmp_masked(kReg dst, vec src1, vec src2, immI8 cond, kReg mask, rRegP
   ins_pipe( pipe_slow );
 %}
 
-#ifdef _LP64
-instruct mask_all_evexI_imm(kReg dst, immI cnt, rRegL tmp) %{
-  match(Set dst (MaskAll cnt));
-  effect(TEMP_DEF dst, TEMP tmp);
-  format %{ "mask_all_evexI $dst, $cnt \t! using $tmp as TEMP" %}
-  ins_encode %{
-    int vec_len = Matcher::vector_length(this);
-    if (VM_Version::supports_avx512bw()) {
-      __ movq($tmp$$Register, $cnt$$constant);
-      __ kmovql($dst$$KRegister, $tmp$$Register);
-      __ kshiftrql($dst$$KRegister, $dst$$KRegister, 64 - vec_len);
-    } else {
-      assert(vec_len <= 16, "");
-      __ movq($tmp$$Register, $cnt$$constant);
-      __ kmovwl($dst$$KRegister, $tmp$$Register);
-      __ kshiftrwl($dst$$KRegister, $dst$$KRegister, 16 - vec_len);
-    }
-  %}
-  ins_pipe( pipe_slow );
-%}
-
-instruct mask_all_evexI(kReg dst, rRegI src, rRegL tmp) %{
+instruct mask_all_evexI_LE32(kReg dst, rRegI src) %{
+  predicate(Matcher::vector_length(n) <= 32);
   match(Set dst (MaskAll src));
-  effect(TEMP_DEF dst, TEMP tmp);
-  format %{ "mask_all_evexI $dst, $src \t! using $tmp as TEMP" %}
+  format %{ "mask_all_evexI_LE32 $dst, $src \t" %}
   ins_encode %{
-    int vec_len = Matcher::vector_length(this);
-    if (VM_Version::supports_avx512bw()) {
-      __ movslq($tmp$$Register, $src$$Register);
-      __ kmovql($dst$$KRegister, $tmp$$Register);
-      __ kshiftrql($dst$$KRegister, $dst$$KRegister, 64 - vec_len);
-    } else {
-      assert(vec_len <= 16, "");
-      __ kmovwl($dst$$KRegister, $src$$Register);
-      __ kshiftrwl($dst$$KRegister, $dst$$KRegister, 16 - vec_len);
-    }
-  %}
-  ins_pipe( pipe_slow );
-%}
-
-instruct mask_all_evexL(kReg dst, rRegL src) %{
-  match(Set dst (MaskAll src));
-  effect(TEMP_DEF dst);
-  format %{ "mask_all_evexL $dst, $src \t! mask all operation" %}
-  ins_encode %{
-    int vec_len = Matcher::vector_length(this);
-    if (VM_Version::supports_avx512bw()) {
-      __ kmovql($dst$$KRegister, $src$$Register);
-      __ kshiftrql($dst$$KRegister, $dst$$KRegister, 64 - vec_len);
-    } else {
-      assert(vec_len <= 16, "");
-      __ kmovwl($dst$$KRegister, $src$$Register);
-      __ kshiftrwl($dst$$KRegister, $dst$$KRegister, 16 - vec_len);
-    }
+    int mask_len = Matcher::vector_length(this);
+    __ vector_maskall_operation($dst$$KRegister, $src$$Register, mask_len);
   %}
   ins_pipe( pipe_slow );
 %}
 
+#ifdef _LP64
 instruct mask_not_immLT8(kReg dst, kReg src, rRegI rtmp, kReg ktmp, immI_M1 cnt) %{
   predicate(Matcher::vector_length(n) < 8 && VM_Version::supports_avx512dq());
   match(Set dst (XorVMask src (MaskAll cnt)));
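Note: the new mask_all_evexI_LE32 rule folds the former per-instruct k-register setup into a single call to a vector_maskall_operation macro-assembler helper. As context only, below is a minimal sketch of what such a helper could look like, reconstructed from the inline ins_encode logic this hunk removes; the helper's actual class (assumed here to be C2_MacroAssembler) and exact signature are not shown in this diff and are assumptions.

// Hypothetical sketch, modeled on the removed ins_encode blocks above; not the
// actual HotSpot implementation.
void C2_MacroAssembler::vector_maskall_operation(KRegister dst, Register src, int mask_len) {
  if (VM_Version::supports_avx512bw()) {
    // 64-bit mask registers available: load src, then shift right so that only
    // the low mask_len bits of the all-ones/all-zeros pattern remain.
    kmovql(dst, src);
    kshiftrql(dst, dst, 64 - mask_len);
  } else {
    // Without AVX512BW only 16-bit mask operations are available.
    assert(mask_len <= 16, "mask length must fit in a 16-bit mask register");
    kmovwl(dst, src);
    kshiftrwl(dst, dst, 16 - mask_len);
  }
}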