@@ -1827,7 +1827,7 @@ const bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType
       }
       break;
     case Op_MaskAll:
-      if (!is_LP64 || !VM_Version::supports_evex()) {
+      if (!VM_Version::supports_evex()) {
        return false;
      }
      if ((vlen > 16 || is_subword_type(bt)) && !VM_Version::supports_avx512bw()) {
@@ -9452,64 +9452,18 @@ instruct evcmp_masked(kReg dst, vec src1, vec src2, immI8 cond, kReg mask, rRegP
   ins_pipe( pipe_slow );
 %}

-#ifdef _LP64
-instruct mask_all_evexI_imm(kReg dst, immI cnt, rRegL tmp) %{
-  match(Set dst (MaskAll cnt));
-  effect(TEMP_DEF dst, TEMP tmp);
-  format %{ "mask_all_evexI $dst, $cnt \t! using $tmp as TEMP" %}
-  ins_encode %{
-    int vec_len = Matcher::vector_length(this);
-    if (VM_Version::supports_avx512bw()) {
-      __ movq($tmp$$Register, $cnt$$constant);
-      __ kmovql($dst$$KRegister, $tmp$$Register);
-      __ kshiftrql($dst$$KRegister, $dst$$KRegister, 64 - vec_len);
-    } else {
-      assert(vec_len <= 16, "");
-      __ movq($tmp$$Register, $cnt$$constant);
-      __ kmovwl($dst$$KRegister, $tmp$$Register);
-      __ kshiftrwl($dst$$KRegister, $dst$$KRegister, 16 - vec_len);
-    }
-  %}
-  ins_pipe( pipe_slow );
-%}
-
-instruct mask_all_evexI(kReg dst, rRegI src, rRegL tmp) %{
-  match(Set dst (MaskAll src));
-  effect(TEMP_DEF dst, TEMP tmp);
-  format %{ "mask_all_evexI $dst, $src \t! using $tmp as TEMP" %}
-  ins_encode %{
-    int vec_len = Matcher::vector_length(this);
-    if (VM_Version::supports_avx512bw()) {
-      __ movslq($tmp$$Register, $src$$Register);
-      __ kmovql($dst$$KRegister, $tmp$$Register);
-      __ kshiftrql($dst$$KRegister, $dst$$KRegister, 64 - vec_len);
-    } else {
-      assert(vec_len <= 16, "");
-      __ kmovwl($dst$$KRegister, $src$$Register);
-      __ kshiftrwl($dst$$KRegister, $dst$$KRegister, 16 - vec_len);
-    }
-  %}
-  ins_pipe( pipe_slow );
-%}
-
-instruct mask_all_evexL(kReg dst, rRegL src) %{
+instruct mask_all_evexI_LE32(kReg dst, rRegI src) %{
+  predicate(Matcher::vector_length(n) <= 32);
   match(Set dst (MaskAll src));
-  effect(TEMP_DEF dst);
-  format %{ "mask_all_evexL $dst, $src \t! mask all operation" %}
+  format %{ "mask_all_evexI_LE32 $dst, $src \t" %}
   ins_encode %{
-    int vec_len = Matcher::vector_length(this);
-    if (VM_Version::supports_avx512bw()) {
-      __ kmovql($dst$$KRegister, $src$$Register);
-      __ kshiftrql($dst$$KRegister, $dst$$KRegister, 64 - vec_len);
-    } else {
-      assert(vec_len <= 16, "");
-      __ kmovwl($dst$$KRegister, $src$$Register);
-      __ kshiftrwl($dst$$KRegister, $dst$$KRegister, 16 - vec_len);
-    }
+    int mask_len = Matcher::vector_length(this);
+    __ vector_maskall_operation($dst$$KRegister, $src$$Register, mask_len);
   %}
   ins_pipe( pipe_slow );
 %}

+#ifdef _LP64
 instruct mask_not_immLT8(kReg dst, kReg src, rRegI rtmp, kReg ktmp, immI_M1 cnt) %{
   predicate(Matcher::vector_length(n) < 8 && VM_Version::supports_avx512dq());
   match(Set dst (XorVMask src (MaskAll cnt)));
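
The rewritten mask_all_evexI_LE32 pattern no longer open-codes the kmov/kshift sequence; it delegates to a single C2 macro-assembler helper, vector_maskall_operation(KRegister dst, Register src, int mask_len), whose call shape is visible in the new ins_encode block above. As a rough illustration only, here is a minimal sketch of what such a helper could look like, assuming it simply consolidates the sequences the removed mask_all_evexI/mask_all_evexL patterns inlined; the actual HotSpot implementation may choose narrower kmov/kshift variants for short masks.

// Sketch only: consolidates the kmov/kshift logic that the removed
// mask_all_evexI/mask_all_evexL instructs open-coded. Not the verbatim
// HotSpot routine; the real C2_MacroAssembler code may differ.
void C2_MacroAssembler::vector_maskall_operation(KRegister dst, Register src, int mask_len) {
  if (VM_Version::supports_avx512bw()) {
    // With AVX512BW, 64-bit opmask moves and shifts are available:
    // load the scalar (0 or -1 for MaskAll) into the mask register,
    // then shift right so only the low mask_len bits remain set.
    kmovql(dst, src);
    kshiftrql(dst, dst, 64 - mask_len);
  } else {
    // Without AVX512BW only 16-bit opmask operations exist.
    assert(mask_len <= 16, "unsupported mask length");
    kmovwl(dst, src);
    kshiftrwl(dst, dst, 16 - mask_len);
  }
}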