2021-10-02 01:38:39 +03:00
|
|
|
// This file is a fragment of the yjit.o compilation unit. See yjit.c.
|
|
|
|
//
|
|
|
|
// Note that the definitions for some of these functions don't specify
|
|
|
|
// static inline, but their declaration in yjit_asm.h do. The resulting
|
|
|
|
// linkage is the same as if they both specify. The relevant sections in
|
|
|
|
// N1256 is 6.2.2p4, 6.2.2p5, and 6.7.4p5.
|
2020-09-04 22:56:00 +03:00
|
|
|
#include <stdio.h>
|
|
|
|
#include <stdlib.h>
|
2021-04-06 18:24:58 +03:00
|
|
|
#include <string.h>
|
2020-09-04 22:56:00 +03:00
|
|
|
#include <stdarg.h>
|
2021-04-27 18:37:06 +03:00
|
|
|
#include <stdint.h>
|
2020-09-04 22:56:00 +03:00
|
|
|
#include <assert.h>
|
2021-09-14 11:31:34 +03:00
|
|
|
#include <errno.h>
|
2020-09-04 22:56:00 +03:00
|
|
|
|
2021-04-27 18:37:06 +03:00
|
|
|
// For mmap(), sysconf()
|
2020-10-05 14:41:46 +03:00
|
|
|
#ifndef _WIN32
|
2021-04-27 18:37:06 +03:00
|
|
|
#include <unistd.h>
|
2020-09-04 22:56:00 +03:00
|
|
|
#include <sys/mman.h>
|
2020-10-05 14:41:46 +03:00
|
|
|
#endif
|
2020-09-04 22:56:00 +03:00
|
|
|
|
2021-03-07 02:46:56 +03:00
|
|
|
#include "yjit_asm.h"
|
2020-09-04 22:56:00 +03:00
|
|
|
|
2020-09-09 23:45:28 +03:00
|
|
|
// Compute the number of bits needed to encode a signed value
uint32_t sig_imm_size(int64_t imm)
{
    // Find the narrowest signed width that can represent the value
    if (INT8_MIN <= imm && imm <= INT8_MAX) {
        return 8;
    }
    else if (INT16_MIN <= imm && imm <= INT16_MAX) {
        return 16;
    }
    else if (INT32_MIN <= imm && imm <= INT32_MAX) {
        return 32;
    }

    // Anything wider requires the full 64 bits
    return 64;
}
|
|
|
|
|
|
|
|
// Compute the number of bits needed to encode an unsigned value
uint32_t unsig_imm_size(uint64_t imm)
{
    // Find the narrowest unsigned width that can hold the value
    if (imm <= UINT8_MAX)
        return 8;
    if (imm <= UINT16_MAX)
        return 16;
    if (imm <= UINT32_MAX)
        return 32;

    // Anything wider requires the full 64 bits
    return 64;
}
|
|
|
|
|
2021-01-12 22:56:43 +03:00
|
|
|
// Build a memory operand of the form [base_reg + disp], sized to num_bits.
x86opnd_t mem_opnd(uint32_t num_bits, x86opnd_t base_reg, int32_t disp)
{
    // Flag RIP-relative addressing so the encoder can force a disp32
    bool is_iprel = base_reg.as.reg.reg_type == REG_IP;

    x86opnd_t opnd = {
        OPND_MEM,
        num_bits,
        // Positional init of the mem struct: base register number, then
        // no index register / zero scale / not SIB, RIP flag, displacement
        // (field order defined in yjit_asm.h)
        .as.mem = { base_reg.as.reg.reg_no, 0, 0, false, is_iprel, disp }
    };

    return opnd;
}
|
|
|
|
|
2021-01-28 00:13:27 +03:00
|
|
|
// Build a SIB (scale-index-base) memory operand of the form
// [base_reg + index_reg * scale + disp], sized to num_bits.
// scale must be one of 1, 2, 4, 8.
x86opnd_t mem_opnd_sib(uint32_t num_bits, x86opnd_t base_reg, x86opnd_t index_reg, int32_t scale, int32_t disp)
{
    // The hardware encodes the scale as a 2-bit exponent (1,2,4,8 -> 0..3)
    uint8_t scale_exp;
    switch (scale) {
      case 8:
        scale_exp = 3;
        break;
      case 4:
        scale_exp = 2;
        break;
      case 2:
        scale_exp = 1;
        break;
      case 1:
        scale_exp = 0;
        break;
      default:
        rb_bug("yjit: scale not one of 1,2,4,8");
        break;
    }

    // Flag RIP-relative addressing so the encoder can force a disp32
    bool is_iprel = base_reg.as.reg.reg_type == REG_IP;

    x86opnd_t opnd = {
        OPND_MEM,
        num_bits,
        .as.mem = {
            .base_reg_no = base_reg.as.reg.reg_no,
            .idx_reg_no = index_reg.as.reg.reg_no,
            .has_idx = 1,
            .scale_exp = scale_exp,
            .is_iprel = is_iprel,
            .disp = disp
        }
    };

    return opnd;
}
|
|
|
|
|
2021-10-02 01:38:39 +03:00
|
|
|
// Return a copy of the given operand with its size changed to num_bits.
// The size must be a whole number of bytes.
static x86opnd_t resize_opnd(x86opnd_t opnd, uint32_t num_bits)
{
    assert (num_bits % 8 == 0);

    x86opnd_t resized = opnd;
    resized.num_bits = num_bits;
    return resized;
}
|
|
|
|
|
2020-09-09 23:45:28 +03:00
|
|
|
// Build a signed immediate operand, sized to the smallest width that fits.
x86opnd_t imm_opnd(int64_t imm)
{
    x86opnd_t opnd = {
        OPND_IMM,
        sig_imm_size(imm),  // 8, 16, 32 or 64 bits
        .as.imm = imm
    };

    return opnd;
}
|
|
|
|
|
2020-10-28 01:49:17 +03:00
|
|
|
// Build an immediate operand holding a raw pointer value.
// Always encoded at the full 64-bit width.
x86opnd_t const_ptr_opnd(const void *ptr)
{
    x86opnd_t opnd = {
        OPND_IMM,
        64,
        .as.unsig_imm = (uint64_t)ptr
    };

    return opnd;
}
|
|
|
|
|
2021-04-27 18:37:06 +03:00
|
|
|
// Round a pointer up to the next multiple of the given byte count.
// Returns ptr unchanged when it is already aligned.
static uint8_t *align_ptr(uint8_t *ptr, uint32_t multiple)
{
    // Compute the remainder at full pointer width. The previous code
    // truncated the pointer to 32 bits first, which yields a wrong
    // remainder on 64-bit targets whenever multiple is not a power of two.
    uintptr_t rem = (uintptr_t)ptr % multiple;

    // If the pointer is already aligned, stop
    if (rem == 0)
        return ptr;

    // Pad the pointer by the necessary amount to align it
    uintptr_t pad = multiple - rem;

    return ptr + pad;
}
|
|
|
|
|
2020-09-28 22:50:41 +03:00
|
|
|
// Allocate a block of executable memory of mem_size bytes.
// On mmap failure the process is terminated. Returns NULL on Windows
// (not supported). The block is filled with INT3 so that jumping into
// uninitialized memory faults immediately.
static uint8_t *alloc_exec_mem(uint32_t mem_size)
{
#ifndef _WIN32
    uint8_t *mem_block;

    // On Linux
    #if defined(MAP_FIXED_NOREPLACE) && defined(_SC_PAGESIZE)
        // Align the requested address to page size
        uint32_t page_size = (uint32_t)sysconf(_SC_PAGESIZE);
        // Hint an address near this function; the INT32_MAX bound below keeps
        // the mapping within a 32-bit relative displacement of this code
        uint8_t *req_addr = align_ptr((uint8_t*)&alloc_exec_mem, page_size);

        do {
            // Try to map a chunk of memory as executable
            // MAP_FIXED_NOREPLACE fails (rather than clobbering) if the
            // requested range is already mapped
            mem_block = (uint8_t*)mmap(
                (void*)req_addr,
                mem_size,
                PROT_READ | PROT_WRITE | PROT_EXEC,
                MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE,
                -1,
                0
            );

            // If we succeeded, stop
            if (mem_block != MAP_FAILED) {
                break;
            }

            // +4MB: retry at the next candidate address
            req_addr += 4 * 1024 * 1024;
        } while (req_addr < (uint8_t*)&alloc_exec_mem + INT32_MAX);

    // On MacOS and other platforms
    #else
        // Try to map a chunk of memory as executable
        // (plain address hint; the kernel may place the mapping elsewhere)
        mem_block = (uint8_t*)mmap(
            (void*)alloc_exec_mem,
            mem_size,
            PROT_READ | PROT_WRITE | PROT_EXEC,
            MAP_PRIVATE | MAP_ANONYMOUS,
            -1,
            0
        );
    #endif

    // Fallback
    if (mem_block == MAP_FAILED) {
        // Try again without the address hint (e.g., valgrind)
        mem_block = (uint8_t*)mmap(
            NULL,
            mem_size,
            PROT_READ | PROT_WRITE | PROT_EXEC,
            MAP_PRIVATE | MAP_ANONYMOUS,
            -1,
            0
        );
    }

    // Check that the memory mapping was successful
    if (mem_block == MAP_FAILED) {
        perror("mmap call failed");
        exit(-1);
    }

    // Fill the executable memory with INT3 (0xCC) so that
    // executing uninitialized memory will fault
    memset(mem_block, 0xCC, mem_size);

    return mem_block;
#else
    // Windows not supported for now
    return NULL;
#endif
}
|
|
|
|
|
|
|
|
// Initialize a code block object backed by an existing memory buffer.
// The caller retains ownership of mem_block; write position and label
// bookkeeping start empty.
void cb_init(codeblock_t *cb, uint8_t *mem_block, uint32_t mem_size)
{
    assert (mem_block);
    cb->mem_block = mem_block;
    cb->mem_size = mem_size;
    cb->write_pos = 0;
    cb->num_labels = 0;
    cb->num_refs = 0;
}
|
|
|
|
|
2020-09-20 21:23:14 +03:00
|
|
|
// Align the current write position to a multiple of bytes
|
2021-09-29 21:58:01 +03:00
|
|
|
void cb_align_pos(codeblock_t *cb, uint32_t multiple)
|
2020-09-20 21:23:14 +03:00
|
|
|
{
|
|
|
|
// Compute the pointer modulo the given alignment boundary
|
2021-09-29 21:58:01 +03:00
|
|
|
uint8_t *ptr = &cb->mem_block[cb->write_pos];
|
|
|
|
uint8_t *aligned_ptr = align_ptr(ptr, multiple);
|
2020-09-20 21:23:14 +03:00
|
|
|
|
|
|
|
// Pad the pointer by the necessary amount to align it
|
2021-04-27 18:37:06 +03:00
|
|
|
ptrdiff_t pad = aligned_ptr - ptr;
|
|
|
|
cb->write_pos += (int32_t)pad;
|
2020-09-20 21:23:14 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// Set the current write position (an offset into the memory block).
void cb_set_pos(codeblock_t *cb, uint32_t pos)
{
    // The position itself must lie inside the block; per-write bounds
    // are checked again by cb_write_byte()
    assert (pos < cb->mem_size);
    cb->write_pos = pos;
}
|
|
|
|
|
2021-09-21 17:59:55 +03:00
|
|
|
// Set the current write position from a pointer
|
2021-09-29 21:58:01 +03:00
|
|
|
void cb_set_write_ptr(codeblock_t *cb, uint8_t *code_ptr)
|
2021-09-21 17:59:55 +03:00
|
|
|
{
|
|
|
|
intptr_t pos = code_ptr - cb->mem_block;
|
|
|
|
assert (pos < cb->mem_size);
|
|
|
|
cb->write_pos = (uint32_t)pos;
|
|
|
|
}
|
|
|
|
|
2020-09-04 22:56:00 +03:00
|
|
|
// Get a direct pointer into the executable memory block
|
2021-09-29 21:58:01 +03:00
|
|
|
uint8_t *cb_get_ptr(codeblock_t *cb, uint32_t index)
|
2020-09-04 22:56:00 +03:00
|
|
|
{
|
|
|
|
assert (index < cb->mem_size);
|
|
|
|
return &cb->mem_block[index];
|
|
|
|
}
|
|
|
|
|
2021-09-21 17:59:55 +03:00
|
|
|
// Get a direct pointer to the current write position.
uint8_t *cb_get_write_ptr(codeblock_t *cb)
{
    return cb_get_ptr(cb, cb->write_pos);
}
|
|
|
|
|
2020-09-04 22:56:00 +03:00
|
|
|
// Write a byte at the current position
|
2021-09-29 21:58:01 +03:00
|
|
|
void cb_write_byte(codeblock_t *cb, uint8_t byte)
|
2020-09-04 22:56:00 +03:00
|
|
|
{
|
|
|
|
assert (cb->mem_block);
|
|
|
|
assert (cb->write_pos + 1 <= cb->mem_size);
|
|
|
|
cb->mem_block[cb->write_pos++] = byte;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Write multiple bytes starting from the current position
|
2021-09-29 21:58:01 +03:00
|
|
|
void cb_write_bytes(codeblock_t *cb, uint32_t num_bytes, ...)
|
2020-09-04 22:56:00 +03:00
|
|
|
{
|
|
|
|
va_list va;
|
|
|
|
va_start(va, num_bytes);
|
|
|
|
|
2021-01-12 22:56:43 +03:00
|
|
|
for (uint32_t i = 0; i < num_bytes; ++i)
|
2020-09-04 22:56:00 +03:00
|
|
|
{
|
|
|
|
uint8_t byte = va_arg(va, int);
|
|
|
|
cb_write_byte(cb, byte);
|
|
|
|
}
|
|
|
|
|
|
|
|
va_end(va);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Write a signed integer over a given number of bits at the current position
|
2021-09-29 21:58:01 +03:00
|
|
|
void cb_write_int(codeblock_t *cb, uint64_t val, uint32_t num_bits)
|
2020-09-04 22:56:00 +03:00
|
|
|
{
|
|
|
|
assert (num_bits > 0);
|
|
|
|
assert (num_bits % 8 == 0);
|
|
|
|
|
|
|
|
// Switch on the number of bits
|
2021-09-29 22:38:57 +03:00
|
|
|
switch (num_bits) {
|
|
|
|
case 8:
|
2020-09-04 22:56:00 +03:00
|
|
|
cb_write_byte(cb, (uint8_t)val);
|
|
|
|
break;
|
|
|
|
|
2021-09-29 22:38:57 +03:00
|
|
|
case 16:
|
2020-09-04 22:56:00 +03:00
|
|
|
cb_write_bytes(
|
|
|
|
cb,
|
|
|
|
2,
|
|
|
|
(uint8_t)((val >> 0) & 0xFF),
|
|
|
|
(uint8_t)((val >> 8) & 0xFF)
|
|
|
|
);
|
|
|
|
break;
|
|
|
|
|
2021-09-29 22:38:57 +03:00
|
|
|
case 32:
|
2020-09-04 22:56:00 +03:00
|
|
|
cb_write_bytes(
|
|
|
|
cb,
|
|
|
|
4,
|
|
|
|
(uint8_t)((val >> 0) & 0xFF),
|
|
|
|
(uint8_t)((val >> 8) & 0xFF),
|
|
|
|
(uint8_t)((val >> 16) & 0xFF),
|
|
|
|
(uint8_t)((val >> 24) & 0xFF)
|
|
|
|
);
|
|
|
|
break;
|
|
|
|
|
2021-09-29 22:38:57 +03:00
|
|
|
default:
|
2020-09-04 22:56:00 +03:00
|
|
|
{
|
|
|
|
// Compute the size in bytes
|
2021-01-12 22:56:43 +03:00
|
|
|
uint32_t num_bytes = num_bits / 8;
|
2020-09-04 22:56:00 +03:00
|
|
|
|
|
|
|
// Write out the bytes
|
2021-01-12 22:56:43 +03:00
|
|
|
for (uint32_t i = 0; i < num_bytes; ++i)
|
2020-09-04 22:56:00 +03:00
|
|
|
{
|
|
|
|
uint8_t byte_val = (uint8_t)(val & 0xFF);
|
|
|
|
cb_write_byte(cb, byte_val);
|
|
|
|
val >>= 8;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-09-11 22:10:16 +03:00
|
|
|
// Allocate a new label with a given name
|
2021-09-29 21:58:01 +03:00
|
|
|
uint32_t cb_new_label(codeblock_t *cb, const char *name)
|
2020-09-11 22:10:16 +03:00
|
|
|
{
|
|
|
|
//if (hasASM)
|
|
|
|
// writeString(to!string(label) ~ ":");
|
|
|
|
|
|
|
|
assert (cb->num_labels < MAX_LABELS);
|
|
|
|
|
|
|
|
// Allocate the new label
|
2021-01-12 22:56:43 +03:00
|
|
|
uint32_t label_idx = cb->num_labels++;
|
2020-09-11 22:10:16 +03:00
|
|
|
|
|
|
|
// This label doesn't have an address yet
|
|
|
|
cb->label_addrs[label_idx] = 0;
|
|
|
|
cb->label_names[label_idx] = name;
|
|
|
|
|
|
|
|
return label_idx;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Bind the given label to the current write position.
void cb_write_label(codeblock_t *cb, uint32_t label_idx)
{
    assert (label_idx < MAX_LABELS);
    cb->label_addrs[label_idx] = cb->write_pos;
}
|
|
|
|
|
|
|
|
// Add a label reference at the current write position
|
2021-09-29 21:58:01 +03:00
|
|
|
void cb_label_ref(codeblock_t *cb, uint32_t label_idx)
|
2020-09-11 22:10:16 +03:00
|
|
|
{
|
|
|
|
assert (label_idx < MAX_LABELS);
|
|
|
|
assert (cb->num_refs < MAX_LABEL_REFS);
|
|
|
|
|
|
|
|
// Keep track of the reference
|
|
|
|
cb->label_refs[cb->num_refs] = (labelref_t){ cb->write_pos, label_idx };
|
|
|
|
cb->num_refs++;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Link internal label references: patch each recorded rel32 placeholder
// with the offset from the reference site to its label, then reset all
// label bookkeeping. The write position is preserved across the patching.
void cb_link_labels(codeblock_t *cb)
{
    // Patching moves the write cursor; remember where we were
    uint32_t orig_pos = cb->write_pos;

    // For each label reference
    for (uint32_t i = 0; i < cb->num_refs; ++i)
    {
        uint32_t ref_pos = cb->label_refs[i].pos;
        uint32_t label_idx = cb->label_refs[i].label_idx;
        assert (ref_pos < cb->mem_size);
        assert (label_idx < MAX_LABELS);

        uint32_t label_addr = cb->label_addrs[label_idx];
        assert (label_addr < cb->mem_size);

        // Compute the offset from the reference's end to the label
        // (the placeholder is 4 bytes wide, hence ref_pos + 4)
        int64_t offset = (int64_t)label_addr - (int64_t)(ref_pos + 4);

        // Overwrite the placeholder with the real offset
        cb_set_pos(cb, ref_pos);
        cb_write_int(cb, offset, 32);
    }

    // Restore the original write position
    cb->write_pos = orig_pos;

    // Clear the label positions and references
    cb->num_labels = 0;
    cb->num_refs = 0;
}
|
|
|
|
|
2020-09-09 23:45:28 +03:00
|
|
|
// Check if an operand needs a REX byte to be encoded
|
2021-10-02 01:38:39 +03:00
|
|
|
static bool rex_needed(x86opnd_t opnd)
|
2020-09-08 23:45:35 +03:00
|
|
|
{
|
2020-09-09 23:45:28 +03:00
|
|
|
if (opnd.type == OPND_NONE || opnd.type == OPND_IMM)
|
|
|
|
{
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2020-09-08 23:45:35 +03:00
|
|
|
if (opnd.type == OPND_REG)
|
|
|
|
{
|
|
|
|
return (
|
2020-10-06 00:11:50 +03:00
|
|
|
opnd.as.reg.reg_no > 7 ||
|
|
|
|
(opnd.num_bits == 8 && opnd.as.reg.reg_no >= 4 && opnd.as.reg.reg_no <= 7)
|
2020-09-08 23:45:35 +03:00
|
|
|
);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (opnd.type == OPND_MEM)
|
|
|
|
{
|
2020-10-06 00:11:50 +03:00
|
|
|
return (opnd.as.mem.base_reg_no > 7) || (opnd.as.mem.has_idx && opnd.as.mem.idx_reg_no > 7);
|
2020-09-08 23:45:35 +03:00
|
|
|
}
|
|
|
|
|
2021-10-19 23:43:20 +03:00
|
|
|
rb_bug("unreachable");
|
2020-09-08 23:45:35 +03:00
|
|
|
}
|
|
|
|
|
2020-09-09 23:45:28 +03:00
|
|
|
// Check if an SIB byte is needed to encode this operand
|
2021-10-02 01:38:39 +03:00
|
|
|
static bool sib_needed(x86opnd_t opnd)
|
2020-09-09 23:45:28 +03:00
|
|
|
{
|
|
|
|
if (opnd.type != OPND_MEM)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
return (
|
2020-10-06 00:11:50 +03:00
|
|
|
opnd.as.mem.has_idx ||
|
|
|
|
opnd.as.mem.base_reg_no == RSP.as.reg.reg_no ||
|
|
|
|
opnd.as.mem.base_reg_no == R12.as.reg.reg_no
|
2020-09-09 23:45:28 +03:00
|
|
|
);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Compute the size of the displacement field needed for a memory operand
|
2021-10-02 01:38:39 +03:00
|
|
|
static uint32_t disp_size(x86opnd_t opnd)
|
2020-09-09 23:45:28 +03:00
|
|
|
{
|
|
|
|
assert (opnd.type == OPND_MEM);
|
|
|
|
|
|
|
|
// If using RIP as the base, use disp32
|
2020-10-06 00:11:50 +03:00
|
|
|
if (opnd.as.mem.is_iprel)
|
2020-09-09 23:45:28 +03:00
|
|
|
{
|
|
|
|
return 32;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Compute the required displacement size
|
2020-10-06 00:11:50 +03:00
|
|
|
if (opnd.as.mem.disp != 0)
|
2020-09-09 23:45:28 +03:00
|
|
|
{
|
2021-01-12 22:56:43 +03:00
|
|
|
uint32_t num_bits = sig_imm_size(opnd.as.mem.disp);
|
2020-09-09 23:45:28 +03:00
|
|
|
assert (num_bits <= 32 && "displacement does not fit in 32 bits");
|
|
|
|
|
|
|
|
// x86 can only encode 8-bit and 32-bit displacements
|
|
|
|
if (num_bits == 16)
|
|
|
|
num_bits = 32;;
|
|
|
|
|
|
|
|
return num_bits;
|
|
|
|
}
|
|
|
|
|
|
|
|
// If EBP or RBP or R13 is used as the base, displacement must be encoded
|
2020-10-06 00:11:50 +03:00
|
|
|
if (opnd.as.mem.base_reg_no == RBP.as.reg.reg_no ||
|
|
|
|
opnd.as.mem.base_reg_no == R13.as.reg.reg_no)
|
2020-09-09 23:45:28 +03:00
|
|
|
{
|
|
|
|
return 8;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-09-05 00:10:11 +03:00
|
|
|
// Write the REX byte
|
2020-09-08 23:45:35 +03:00
|
|
|
static void cb_write_rex(
|
2021-09-29 21:58:01 +03:00
|
|
|
codeblock_t *cb,
|
2020-09-05 00:10:11 +03:00
|
|
|
bool w_flag,
|
|
|
|
uint8_t reg_no,
|
|
|
|
uint8_t idx_reg_no,
|
|
|
|
uint8_t rm_reg_no
|
|
|
|
)
|
|
|
|
{
|
|
|
|
// 0 1 0 0 w r x b
|
|
|
|
// w - 64-bit operand size flag
|
|
|
|
// r - MODRM.reg extension
|
|
|
|
// x - SIB.index extension
|
|
|
|
// b - MODRM.rm or SIB.base extension
|
|
|
|
uint8_t w = w_flag? 1:0;
|
|
|
|
uint8_t r = (reg_no & 8)? 1:0;
|
|
|
|
uint8_t x = (idx_reg_no & 8)? 1:0;
|
|
|
|
uint8_t b = (rm_reg_no & 8)? 1:0;
|
|
|
|
|
|
|
|
// Encode and write the REX byte
|
|
|
|
uint8_t rexByte = 0x40 + (w << 3) + (r << 2) + (x << 1) + (b);
|
|
|
|
cb_write_byte(cb, rexByte);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Write an opcode byte with an embedded register operand
|
2021-09-29 21:58:01 +03:00
|
|
|
static void cb_write_opcode(codeblock_t *cb, uint8_t opcode, x86opnd_t reg)
|
2020-09-05 00:10:11 +03:00
|
|
|
{
|
|
|
|
// Write the reg field into the opcode byte
|
2020-10-06 00:11:50 +03:00
|
|
|
uint8_t op_byte = opcode | (reg.as.reg.reg_no & 7);
|
2020-09-05 00:10:11 +03:00
|
|
|
cb_write_byte(cb, op_byte);
|
|
|
|
}
|
|
|
|
|
2020-09-09 23:45:28 +03:00
|
|
|
// Encode an RM-form instruction: optional 0x66 prefix, optional REX,
// the opcode bytes (passed variadically), the ModR/M byte, an optional
// SIB byte, and an optional displacement.
static void cb_write_rm(
    codeblock_t *cb,
    bool szPref,        // emit the 0x66 operand-size prefix
    bool rexW,          // set REX.W (64-bit operand size)
    x86opnd_t r_opnd,   // register operand for MODRM.reg, or NO_OPND
    x86opnd_t rm_opnd,  // register or memory operand for MODRM.rm
    uint8_t opExt,      // opcode extension for MODRM.reg; 0xFF if none
    uint32_t op_len,    // number of opcode bytes (1-3)
    ...)                // the opcode bytes themselves
{
    assert (op_len > 0 && op_len <= 3);
    assert (r_opnd.type == OPND_REG || r_opnd.type == OPND_NONE);

    // Flag to indicate the REX prefix is needed
    bool need_rex = rexW || rex_needed(r_opnd) || rex_needed(rm_opnd);

    // Flag to indicate SIB byte is needed
    bool need_sib = sib_needed(r_opnd) || sib_needed(rm_opnd);

    // Add the operand-size prefix, if needed
    if (szPref == true)
        cb_write_byte(cb, 0x66);

    // Add the REX prefix, if needed
    if (need_rex)
    {
        // 0 1 0 0 w r x b
        // w - 64-bit operand size flag
        // r - MODRM.reg extension
        // x - SIB.index extension
        // b - MODRM.rm or SIB.base extension

        uint8_t w = rexW? 1:0;

        uint8_t r;
        if (r_opnd.type != OPND_NONE)
            r = (r_opnd.as.reg.reg_no & 8)? 1:0;
        else
            r = 0;

        uint8_t x;
        if (need_sib && rm_opnd.as.mem.has_idx)
            x = (rm_opnd.as.mem.idx_reg_no & 8)? 1:0;
        else
            x = 0;

        uint8_t b;
        if (rm_opnd.type == OPND_REG)
            b = (rm_opnd.as.reg.reg_no & 8)? 1:0;
        else if (rm_opnd.type == OPND_MEM)
            b = (rm_opnd.as.mem.base_reg_no & 8)? 1:0;
        else
            b = 0;

        // Encode and write the REX byte
        uint8_t rex_byte = 0x40 + (w << 3) + (r << 2) + (x << 1) + (b);
        cb_write_byte(cb, rex_byte);
    }

    // Write the opcode bytes to the code block
    va_list va;
    va_start(va, op_len);
    for (uint32_t i = 0; i < op_len; ++i)
    {
        // Variadic byte arguments are promoted to int
        uint8_t byte = va_arg(va, int);
        cb_write_byte(cb, byte);
    }
    va_end(va);

    // MODRM.mod (2 bits)
    // MODRM.reg (3 bits)
    // MODRM.rm  (3 bits)

    // An opcode extension occupies MODRM.reg, so it is mutually
    // exclusive with a register operand
    assert (
        !(opExt != 0xFF && r_opnd.type != OPND_NONE) &&
        "opcode extension and register operand present"
    );

    // Encode the mod field
    uint8_t mod;
    if (rm_opnd.type == OPND_REG)
    {
        // Register-direct addressing
        mod = 3;
    }
    else
    {
        // mod selects the displacement width; RIP-relative uses mod=00
        // with its mandatory disp32 written below
        uint32_t dsize = disp_size(rm_opnd);
        if (dsize == 0 || rm_opnd.as.mem.is_iprel)
            mod = 0;
        else if (dsize == 8)
            mod = 1;
        else if (dsize == 32)
            mod = 2;
        else
            rb_bug("unreachable");
    }

    // Encode the reg field
    uint8_t reg;
    if (opExt != 0xFF)
        reg = opExt;
    else if (r_opnd.type == OPND_REG)
        reg = r_opnd.as.reg.reg_no & 7;
    else
        reg = 0;

    // Encode the rm field
    uint8_t rm;
    if (rm_opnd.type == OPND_REG)
    {
        rm = rm_opnd.as.reg.reg_no & 7;
    }
    else
    {
        // rm=100 signals that a SIB byte follows
        if (need_sib)
            rm = 4;
        else
            rm = rm_opnd.as.mem.base_reg_no & 7;
    }

    // Encode and write the ModR/M byte
    uint8_t rm_byte = (mod << 6) + (reg << 3) + (rm);
    cb_write_byte(cb, rm_byte);

    // Add the SIB byte, if needed
    if (need_sib)
    {
        // SIB.scale (2 bits)
        // SIB.index (3 bits)
        // SIB.base  (3 bits)

        assert (rm_opnd.type == OPND_MEM);

        // Encode the scale value
        uint8_t scale = rm_opnd.as.mem.scale_exp;

        // Encode the index value (index=100 means no index register)
        uint8_t index;
        if (!rm_opnd.as.mem.has_idx)
            index = 4;
        else
            index = rm_opnd.as.mem.idx_reg_no & 7;

        // Encode the base register
        uint8_t base = rm_opnd.as.mem.base_reg_no & 7;

        // Encode and write the SIB byte
        uint8_t sib_byte = (scale << 6) + (index << 3) + (base);
        cb_write_byte(cb, sib_byte);
    }

    // Add the displacement
    if (rm_opnd.type == OPND_MEM)
    {
        uint32_t dsize = disp_size(rm_opnd);
        if (dsize > 0)
            cb_write_int(cb, rm_opnd.as.mem.disp, dsize);
    }
}
|
|
|
|
|
2020-09-11 23:42:51 +03:00
|
|
|
// Encode a mul-like single-operand RM instruction.
static void write_rm_unary(
    codeblock_t *cb,
    const char *mnem,       // mnemonic; currently unused (kept for disassembly support)
    uint8_t opMemReg8,      // opcode for the 8-bit operand form
    uint8_t opMemRegPref,   // opcode for the 16/32/64-bit operand form
    uint8_t opExt,          // opcode extension encoded in MODRM.reg
    x86opnd_t opnd)
{
    // Write a disassembly string
    //cb.writeASM(mnem, opnd);

    // Check the size of opnd0
    uint32_t opndSize;
    if (opnd.type == OPND_REG || opnd.type == OPND_MEM)
        opndSize = opnd.num_bits;
    else
        rb_bug("yjit: invalid operand");

    assert (opndSize == 8 || opndSize == 16 || opndSize == 32 || opndSize == 64);
    bool szPref = opndSize == 16;   // 16-bit ops need the 0x66 prefix
    bool rexW = opndSize == 64;     // 64-bit ops need REX.W

    // 8-bit operations use a distinct opcode
    if (opndSize == 8)
        cb_write_rm(cb, false, false, NO_OPND, opnd, opExt, 1, opMemReg8);
    else
        cb_write_rm(cb, szPref, rexW, NO_OPND, opnd, opExt, 1, opMemRegPref);
}
|
|
|
|
|
2020-09-09 23:45:28 +03:00
|
|
|
// Encode an add-like RM instruction with multiple possible encodings,
// selected by the operand combination (r/m+reg, reg+r/m, r/m+imm).
static void cb_write_rm_multi(
    codeblock_t *cb,
    const char *mnem,       // mnemonic; currently unused
    uint8_t opMemReg8,      // opcode: 8-bit r/m, reg
    uint8_t opMemRegPref,   // opcode: 16/32/64-bit r/m, reg
    uint8_t opRegMem8,      // opcode: 8-bit reg, r/m
    uint8_t opRegMemPref,   // opcode: 16/32/64-bit reg, r/m
    uint8_t opMemImm8,      // opcode: 8-bit r/m, imm
    uint8_t opMemImmSml,    // opcode: wider r/m, immediate fitting in 8 bits
    uint8_t opMemImmLrg,    // opcode: wider r/m, immediate up to 32 bits
    uint8_t opExtImm,       // opcode extension for the immediate forms
    x86opnd_t opnd0,
    x86opnd_t opnd1)
{
    assert (opnd0.type == OPND_REG || opnd0.type == OPND_MEM);

    /*
    // Write disassembly string
    if (!opnd1.isNone)
        cb.writeASM(mnem, opnd0, opnd1);
    else
        cb.writeASM(mnem, opnd0);
    */

    // Check the size of opnd0
    uint32_t opndSize = opnd0.num_bits;

    // Check the size of opnd1
    if (opnd1.type == OPND_REG || opnd1.type == OPND_MEM)
    {
        assert (opnd1.num_bits == opndSize && "operand size mismatch");
    }
    else if (opnd1.type == OPND_IMM)
    {
        assert (opnd1.num_bits <= opndSize);
    }

    assert (opndSize == 8 || opndSize == 16 || opndSize == 32 || opndSize == 64);
    bool szPref = opndSize == 16;
    bool rexW = opndSize == 64;

    // R/M + Reg
    if ((opnd0.type == OPND_MEM && opnd1.type == OPND_REG) ||
        (opnd0.type == OPND_REG && opnd1.type == OPND_REG))
    {
        // R/M is opnd0
        if (opndSize == 8)
            cb_write_rm(cb, false, false, opnd1, opnd0, 0xFF, 1, opMemReg8);
        else
            cb_write_rm(cb, szPref, rexW, opnd1, opnd0, 0xFF, 1, opMemRegPref);
    }

    // Reg + R/M
    else if (opnd0.type == OPND_REG && opnd1.type == OPND_MEM)
    {
        // R/M is opnd1
        if (opndSize == 8)
            cb_write_rm(cb, false, false, opnd0, opnd1, 0xFF, 1, opRegMem8);
        else
            cb_write_rm(cb, szPref, rexW, opnd0, opnd1, 0xFF, 1, opRegMemPref);
    }

    // R/M + Imm
    else if (opnd1.type == OPND_IMM)
    {
        // 8-bit immediate
        if (opnd1.num_bits <= 8)
        {
            if (opndSize == 8)
                cb_write_rm(cb, false, false, NO_OPND, opnd0, opExtImm, 1, opMemImm8);
            else
                cb_write_rm(cb, szPref, rexW, NO_OPND, opnd0, opExtImm, 1, opMemImmSml);

            cb_write_int(cb, opnd1.as.imm, 8);
        }

        // 32-bit immediate
        else if (opnd1.num_bits <= 32)
        {
            assert (opnd1.num_bits <= opndSize && "immediate too large for dst");
            cb_write_rm(cb, szPref, rexW, NO_OPND, opnd0, opExtImm, 1, opMemImmLrg);
            // 64-bit operations still encode at most a 32-bit immediate here
            cb_write_int(cb, opnd1.as.imm, (opndSize > 32)? 32:opndSize);
        }

        // Immediate too large
        else
        {
            assert (false && "immediate value too large");
        }
    }

    // Invalid operands
    else
    {
        assert (false && "invalid operand combination");
    }
}
|
|
|
|
|
2020-09-10 21:31:45 +03:00
|
|
|
// Encode a single-operand shift instruction (shift amount in opnd1).
static void cb_write_shift(
    codeblock_t *cb,
    const char *mnem,           // mnemonic; currently unused
    uint8_t opMemOnePref,       // opcode: shift by one
    uint8_t opMemClPref,        // opcode: shift by CL (currently unused, see below)
    uint8_t opMemImmPref,       // opcode: shift by imm8
    uint8_t opExt,              // opcode extension for MODRM.reg
    x86opnd_t opnd0,
    x86opnd_t opnd1)
{
    // Write a disassembly string
    //cb.writeASM(mnem, opnd0, opnd1);

    // Check the size of opnd0
    uint32_t opndSize;
    if (opnd0.type == OPND_REG || opnd0.type == OPND_MEM)
        opndSize = opnd0.num_bits;
    else
        rb_bug("yjit: shift: invalid first operand");

    // NOTE: 8-bit shifts are not handled by this helper
    assert (opndSize == 16 || opndSize == 32 || opndSize == 64);
    bool szPref = opndSize == 16;
    bool rexW = opndSize == 64;

    if (opnd1.type == OPND_IMM)
    {
        // Shift by 1 has a dedicated, shorter encoding
        if (opnd1.as.imm == 1)
        {
            cb_write_rm(cb, szPref, rexW, NO_OPND, opnd0, opExt, 1, opMemOnePref);
        }
        else
        {
            assert (opnd1.num_bits <= 8);
            cb_write_rm(cb, szPref, rexW, NO_OPND, opnd0, opExt, 1, opMemImmPref);
            cb_write_byte(cb, (uint8_t)opnd1.as.imm);
        }
    }
    /*
    else if (opnd1.isReg && opnd1.reg == CL)
    {
        cb.writeRMInstr!('l', opExt, opMemClPref)(szPref, rexW, opnd0, X86Opnd.NONE);
    }
    */
    else
    {
        // Shift-by-CL form not ported yet
        assert (false);
    }
}
|
|
|
|
|
2020-09-11 23:42:51 +03:00
|
|
|
// Encode a relative jump to a label (direct or conditional)
|
|
|
|
// Note: this always encodes a 32-bit offset
|
2021-10-02 01:38:39 +03:00
|
|
|
static void cb_write_jcc(codeblock_t *cb, const char *mnem, uint8_t op0, uint8_t op1, uint32_t label_idx)
|
2020-09-11 23:42:51 +03:00
|
|
|
{
|
|
|
|
//cb.writeASM(mnem, label);
|
|
|
|
|
|
|
|
// Write the opcode
|
2021-02-26 01:01:52 +03:00
|
|
|
if (op0 != 0xFF)
|
|
|
|
cb_write_byte(cb, op0);
|
2020-09-11 23:42:51 +03:00
|
|
|
cb_write_byte(cb, op1);
|
|
|
|
|
|
|
|
// Add a reference to the label
|
|
|
|
cb_label_ref(cb, label_idx);
|
|
|
|
|
|
|
|
// Relative 32-bit offset to be patched
|
|
|
|
cb_write_int(cb, 0, 32);
|
|
|
|
}
|
|
|
|
|
2020-09-28 22:50:41 +03:00
|
|
|
// Encode a relative jump to a pointer at a 32-bit offset (direct or conditional)
|
2021-10-02 01:38:39 +03:00
|
|
|
static void cb_write_jcc_ptr(codeblock_t *cb, const char *mnem, uint8_t op0, uint8_t op1, uint8_t *dst_ptr)
|
2020-09-28 22:50:41 +03:00
|
|
|
{
|
|
|
|
//cb.writeASM(mnem, label);
|
|
|
|
|
|
|
|
// Write the opcode
|
2020-09-30 20:32:15 +03:00
|
|
|
if (op0 != 0xFF)
|
|
|
|
cb_write_byte(cb, op0);
|
2020-09-28 22:50:41 +03:00
|
|
|
cb_write_byte(cb, op1);
|
|
|
|
|
2020-10-23 17:59:22 +03:00
|
|
|
// Pointer to the end of this jump instruction
|
2021-09-29 21:58:01 +03:00
|
|
|
uint8_t *end_ptr = &cb->mem_block[cb->write_pos] + 4;
|
2020-09-28 22:50:41 +03:00
|
|
|
|
|
|
|
// Compute the jump offset
|
|
|
|
int64_t rel64 = (int64_t)(dst_ptr - end_ptr);
|
2021-04-27 18:37:06 +03:00
|
|
|
assert (rel64 >= INT32_MIN && rel64 <= INT32_MAX);
|
2020-09-28 22:50:41 +03:00
|
|
|
|
|
|
|
// Write the relative 32-bit jump offset
|
|
|
|
cb_write_int(cb, (int32_t)rel64, 32);
|
|
|
|
}
|
|
|
|
|
2020-09-14 23:59:39 +03:00
|
|
|
// Encode a conditional move instruction
|
2021-10-02 01:38:39 +03:00
|
|
|
static void cb_write_cmov(codeblock_t *cb, const char *mnem, uint8_t opcode1, x86opnd_t dst, x86opnd_t src)
|
2020-09-14 23:59:39 +03:00
|
|
|
{
|
|
|
|
//cb.writeASM(mnem, dst, src);
|
|
|
|
|
2020-09-15 17:44:46 +03:00
|
|
|
assert (dst.type == OPND_REG);
|
|
|
|
assert (src.type == OPND_REG || src.type == OPND_MEM);
|
|
|
|
assert (dst.num_bits >= 16 && "invalid dst reg size in cmov");
|
2020-09-14 23:59:39 +03:00
|
|
|
|
2020-09-15 17:44:46 +03:00
|
|
|
bool szPref = dst.num_bits == 16;
|
|
|
|
bool rexW = dst.num_bits == 64;
|
2020-09-14 23:59:39 +03:00
|
|
|
|
2020-09-15 17:44:46 +03:00
|
|
|
cb_write_rm(cb, szPref, rexW, dst, src, 0xFF, 2, 0x0F, opcode1);
|
2020-09-14 23:59:39 +03:00
|
|
|
}
|
|
|
|
|
2020-09-09 23:45:28 +03:00
|
|
|
// add - Integer addition
|
2021-09-29 21:58:01 +03:00
|
|
|
void add(codeblock_t *cb, x86opnd_t opnd0, x86opnd_t opnd1)
|
2020-09-09 23:45:28 +03:00
|
|
|
{
|
|
|
|
cb_write_rm_multi(
|
|
|
|
cb,
|
|
|
|
"add",
|
|
|
|
0x00, // opMemReg8
|
|
|
|
0x01, // opMemRegPref
|
|
|
|
0x02, // opRegMem8
|
|
|
|
0x03, // opRegMemPref
|
|
|
|
0x80, // opMemImm8
|
|
|
|
0x83, // opMemImmSml
|
|
|
|
0x81, // opMemImmLrg
|
|
|
|
0x00, // opExtImm
|
|
|
|
opnd0,
|
|
|
|
opnd1
|
|
|
|
);
|
|
|
|
}
|
|
|
|
|
2020-09-14 23:59:39 +03:00
|
|
|
/// and - Bitwise AND
|
2021-09-29 21:58:01 +03:00
|
|
|
void and(codeblock_t *cb, x86opnd_t opnd0, x86opnd_t opnd1)
|
2020-09-14 23:59:39 +03:00
|
|
|
{
|
|
|
|
cb_write_rm_multi(
|
|
|
|
cb,
|
|
|
|
"and",
|
|
|
|
0x20, // opMemReg8
|
|
|
|
0x21, // opMemRegPref
|
|
|
|
0x22, // opRegMem8
|
|
|
|
0x23, // opRegMemPref
|
|
|
|
0x80, // opMemImm8
|
|
|
|
0x83, // opMemImmSml
|
|
|
|
0x81, // opMemImmLrg
|
|
|
|
0x04, // opExtImm
|
|
|
|
opnd0,
|
|
|
|
opnd1
|
|
|
|
);
|
|
|
|
}
|
|
|
|
|
2020-10-23 17:59:22 +03:00
|
|
|
// call - Call to a pointer with a 32-bit displacement offset
|
2021-10-02 01:38:39 +03:00
|
|
|
static void call_rel32(codeblock_t *cb, int32_t rel32)
|
2020-10-23 17:59:22 +03:00
|
|
|
{
|
|
|
|
//cb.writeASM("call", rel32);
|
|
|
|
|
|
|
|
// Write the opcode
|
|
|
|
cb_write_byte(cb, 0xE8);
|
|
|
|
|
|
|
|
// Write the relative 32-bit jump offset
|
|
|
|
cb_write_int(cb, (int32_t)rel32, 32);
|
|
|
|
}
|
|
|
|
|
|
|
|
// call - Call a pointer, encode with a 32-bit offset if possible
|
2021-09-29 21:58:01 +03:00
|
|
|
void call_ptr(codeblock_t *cb, x86opnd_t scratch_reg, uint8_t *dst_ptr)
|
2020-10-23 17:59:22 +03:00
|
|
|
{
|
|
|
|
assert (scratch_reg.type == OPND_REG);
|
|
|
|
|
|
|
|
// Pointer to the end of this call instruction
|
2021-09-29 21:58:01 +03:00
|
|
|
uint8_t *end_ptr = &cb->mem_block[cb->write_pos] + 5;
|
2020-10-23 17:59:22 +03:00
|
|
|
|
|
|
|
// Compute the jump offset
|
|
|
|
int64_t rel64 = (int64_t)(dst_ptr - end_ptr);
|
|
|
|
|
|
|
|
// If the offset fits in 32-bit
|
2021-04-27 18:37:06 +03:00
|
|
|
if (rel64 >= INT32_MIN && rel64 <= INT32_MAX)
|
2020-10-23 17:59:22 +03:00
|
|
|
{
|
2021-02-12 22:52:08 +03:00
|
|
|
call_rel32(cb, (int32_t)rel64);
|
2021-02-12 22:49:17 +03:00
|
|
|
return;
|
2020-10-23 17:59:22 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// Move the pointer into the scratch register and call
|
|
|
|
mov(cb, scratch_reg, const_ptr_opnd(dst_ptr));
|
|
|
|
call(cb, scratch_reg);
|
|
|
|
}
|
|
|
|
|
2020-09-09 23:45:28 +03:00
|
|
|
/// call - Call to label with 32-bit offset
|
2021-09-29 21:58:01 +03:00
|
|
|
void call_label(codeblock_t *cb, uint32_t label_idx)
|
2020-09-09 23:45:28 +03:00
|
|
|
{
|
2020-09-11 22:36:40 +03:00
|
|
|
//cb.writeASM("call", label);
|
2020-09-09 23:45:28 +03:00
|
|
|
|
|
|
|
// Write the opcode
|
2020-09-11 22:36:40 +03:00
|
|
|
cb_write_byte(cb, 0xE8);
|
2020-09-09 23:45:28 +03:00
|
|
|
|
|
|
|
// Add a reference to the label
|
2020-09-11 22:36:40 +03:00
|
|
|
cb_label_ref(cb, label_idx);
|
2020-09-09 23:45:28 +03:00
|
|
|
|
|
|
|
// Relative 32-bit offset to be patched
|
2020-09-11 22:36:40 +03:00
|
|
|
cb_write_int(cb, 0, 32);
|
2020-09-09 23:45:28 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/// call - Indirect call with an R/M operand
|
2021-09-29 21:58:01 +03:00
|
|
|
void call(codeblock_t *cb, x86opnd_t opnd)
|
2020-09-09 23:45:28 +03:00
|
|
|
{
|
|
|
|
//cb.writeASM("call", opnd);
|
|
|
|
cb_write_rm(cb, false, false, NO_OPND, opnd, 2, 1, 0xFF);
|
|
|
|
}
|
|
|
|
|
2020-09-14 23:59:39 +03:00
|
|
|
/// cmovcc - Conditional move
/// Each wrapper forwards to cb_write_cmov with the second opcode byte
/// (0x0F 0x4x) selecting the condition. Aliases share opcodes
/// (e.g. cmovb/cmovc/cmovnae are all 0x42).
void cmova(codeblock_t *cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmova", 0x47, dst, src); }
void cmovae(codeblock_t *cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovae", 0x43, dst, src); }
void cmovb(codeblock_t *cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovb", 0x42, dst, src); }
void cmovbe(codeblock_t *cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovbe", 0x46, dst, src); }
void cmovc(codeblock_t *cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovc", 0x42, dst, src); }
void cmove(codeblock_t *cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmove", 0x44, dst, src); }
void cmovg(codeblock_t *cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovg", 0x4F, dst, src); }
void cmovge(codeblock_t *cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovge", 0x4D, dst, src); }
void cmovl(codeblock_t *cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovl", 0x4C, dst, src); }
void cmovle(codeblock_t *cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovle", 0x4E, dst, src); }
void cmovna(codeblock_t *cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovna", 0x46, dst, src); }
void cmovnae(codeblock_t *cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovnae", 0x42, dst, src); }
void cmovnb(codeblock_t *cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovnb", 0x43, dst, src); }
void cmovnbe(codeblock_t *cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovnbe", 0x47, dst, src); }
void cmovnc(codeblock_t *cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovnc", 0x43, dst, src); }
void cmovne(codeblock_t *cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovne", 0x45, dst, src); }
void cmovng(codeblock_t *cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovng", 0x4E, dst, src); }
void cmovnge(codeblock_t *cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovnge", 0x4C, dst, src); }
void cmovnl(codeblock_t *cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovnl" , 0x4D, dst, src); }
void cmovnle(codeblock_t *cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovnle", 0x4F, dst, src); }
void cmovno(codeblock_t *cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovno", 0x41, dst, src); }
void cmovnp(codeblock_t *cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovnp", 0x4B, dst, src); }
void cmovns(codeblock_t *cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovns", 0x49, dst, src); }
void cmovnz(codeblock_t *cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovnz", 0x45, dst, src); }
void cmovo(codeblock_t *cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovo", 0x40, dst, src); }
void cmovp(codeblock_t *cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovp", 0x4A, dst, src); }
void cmovpe(codeblock_t *cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovpe", 0x4A, dst, src); }
void cmovpo(codeblock_t *cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovpo", 0x4B, dst, src); }
void cmovs(codeblock_t *cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovs", 0x48, dst, src); }
void cmovz(codeblock_t *cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovz", 0x44, dst, src); }
|
2020-09-14 23:59:39 +03:00
|
|
|
|
2020-09-14 18:54:25 +03:00
|
|
|
/// cmp - Compare and set flags
|
2021-09-29 21:58:01 +03:00
|
|
|
void cmp(codeblock_t *cb, x86opnd_t opnd0, x86opnd_t opnd1)
|
2020-09-14 18:54:25 +03:00
|
|
|
{
|
|
|
|
cb_write_rm_multi(
|
|
|
|
cb,
|
|
|
|
"cmp",
|
|
|
|
0x38, // opMemReg8
|
|
|
|
0x39, // opMemRegPref
|
|
|
|
0x3A, // opRegMem8
|
|
|
|
0x3B, // opRegMemPref
|
|
|
|
0x80, // opMemImm8
|
|
|
|
0x83, // opMemImmSml
|
|
|
|
0x81, // opMemImmLrg
|
|
|
|
0x07, // opExtImm
|
|
|
|
opnd0,
|
|
|
|
opnd1
|
|
|
|
);
|
|
|
|
}
|
|
|
|
|
|
|
|
/// cdq - Convert doubleword to quadword
|
2021-09-29 21:58:01 +03:00
|
|
|
void cdq(codeblock_t *cb)
|
2020-09-14 18:54:25 +03:00
|
|
|
{
|
|
|
|
//cb.writeASM("cdq");
|
|
|
|
cb_write_byte(cb, 0x99);
|
|
|
|
}
|
|
|
|
|
|
|
|
/// cqo - Convert quadword to octaword
|
2021-09-29 21:58:01 +03:00
|
|
|
void cqo(codeblock_t *cb)
|
2020-09-14 18:54:25 +03:00
|
|
|
{
|
|
|
|
//cb.writeASM("cqo");
|
|
|
|
cb_write_bytes(cb, 2, 0x48, 0x99);
|
|
|
|
}
|
|
|
|
|
2020-10-14 20:48:26 +03:00
|
|
|
/// Interrupt 3 - trap to debugger
|
2021-09-29 21:58:01 +03:00
|
|
|
void int3(codeblock_t *cb)
|
2020-10-14 20:48:26 +03:00
|
|
|
{
|
|
|
|
//cb.writeASM("INT 3");
|
|
|
|
cb_write_byte(cb, 0xCC);
|
|
|
|
}
|
|
|
|
|
2020-09-11 23:42:51 +03:00
|
|
|
/*
|
|
|
|
// div - Unsigned integer division
|
|
|
|
alias div = writeRMUnary!(
|
|
|
|
"div",
|
|
|
|
0xF6, // opMemReg8
|
|
|
|
0xF7, // opMemRegPref
|
|
|
|
0x06 // opExt
|
|
|
|
);
|
|
|
|
*/
|
2020-09-10 00:16:21 +03:00
|
|
|
|
2020-09-11 23:42:51 +03:00
|
|
|
/*
|
|
|
|
/// divsd - Divide scalar double
|
|
|
|
alias divsd = writeXMM64!(
|
|
|
|
"divsd",
|
|
|
|
0xF2, // prefix
|
|
|
|
0x0F, // opRegMem0
|
|
|
|
0x5E // opRegMem1
|
|
|
|
);
|
|
|
|
*/
|
2020-09-10 00:16:21 +03:00
|
|
|
|
2020-09-11 23:42:51 +03:00
|
|
|
/*
|
|
|
|
// idiv - Signed integer division
|
|
|
|
alias idiv = writeRMUnary!(
|
|
|
|
"idiv",
|
|
|
|
0xF6, // opMemReg8
|
|
|
|
0xF7, // opMemRegPref
|
|
|
|
0x07 // opExt
|
|
|
|
);
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
/// imul - Signed integer multiplication with two operands
|
|
|
|
void imul(CodeBlock cb, X86Opnd opnd0, X86Opnd opnd1)
|
|
|
|
{
|
|
|
|
cb.writeASM("imul", opnd0, opnd1);
|
|
|
|
|
|
|
|
assert (opnd0.isReg, "invalid first operand");
|
|
|
|
auto opndSize = opnd0.reg.size;
|
|
|
|
|
|
|
|
// Check the size of opnd1
|
|
|
|
if (opnd1.isReg)
|
|
|
|
assert (opnd1.reg.size is opndSize, "operand size mismatch");
|
|
|
|
else if (opnd1.isMem)
|
|
|
|
assert (opnd1.mem.size is opndSize, "operand size mismatch");
|
|
|
|
|
|
|
|
assert (opndSize is 16 || opndSize is 32 || opndSize is 64);
|
|
|
|
auto szPref = opndSize is 16;
|
|
|
|
auto rexW = opndSize is 64;
|
|
|
|
|
|
|
|
cb.writeRMInstr!('r', 0xFF, 0x0F, 0xAF)(szPref, rexW, opnd0, opnd1);
|
|
|
|
}
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
/// imul - Signed integer multiplication with three operands (one immediate)
|
|
|
|
void imul(CodeBlock cb, X86Opnd opnd0, X86Opnd opnd1, X86Opnd opnd2)
|
|
|
|
{
|
|
|
|
cb.writeASM("imul", opnd0, opnd1, opnd2);
|
|
|
|
|
|
|
|
assert (opnd0.isReg, "invalid first operand");
|
|
|
|
auto opndSize = opnd0.reg.size;
|
|
|
|
|
|
|
|
// Check the size of opnd1
|
|
|
|
if (opnd1.isReg)
|
|
|
|
assert (opnd1.reg.size is opndSize, "operand size mismatch");
|
|
|
|
else if (opnd1.isMem)
|
|
|
|
assert (opnd1.mem.size is opndSize, "operand size mismatch");
|
|
|
|
|
|
|
|
assert (opndSize is 16 || opndSize is 32 || opndSize is 64);
|
|
|
|
auto szPref = opndSize is 16;
|
|
|
|
auto rexW = opndSize is 64;
|
|
|
|
|
|
|
|
assert (opnd2.isImm, "invalid third operand");
|
|
|
|
auto imm = opnd2.imm;
|
|
|
|
|
|
|
|
// 8-bit immediate
|
|
|
|
if (imm.immSize <= 8)
|
|
|
|
{
|
|
|
|
cb.writeRMInstr!('r', 0xFF, 0x6B)(szPref, rexW, opnd0, opnd1);
|
|
|
|
cb.writeInt(imm.imm, 8);
|
|
|
|
}
|
|
|
|
|
|
|
|
// 32-bit immediate
|
|
|
|
else if (imm.immSize <= 32)
|
|
|
|
{
|
|
|
|
assert (imm.immSize <= opndSize, "immediate too large for dst");
|
|
|
|
cb.writeRMInstr!('r', 0xFF, 0x69)(szPref, rexW, opnd0, opnd1);
|
|
|
|
cb.writeInt(imm.imm, min(opndSize, 32));
|
|
|
|
}
|
|
|
|
|
|
|
|
// Immediate too large
|
|
|
|
else
|
|
|
|
{
|
|
|
|
assert (false, "immediate value too large");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
*/
|
|
|
|
|
2020-09-30 20:32:15 +03:00
|
|
|
/// jcc - relative jumps to a label
/// Each wrapper forwards to cb_write_jcc; conditional jumps use the
/// two-byte 0x0F 0x8x form, while jmp_label passes 0xFF as op0 (no first
/// opcode byte) and 0xE9 (unconditional jmp rel32). Aliases share opcodes
/// (e.g. jb/jc/jnae are all 0x82).
void ja_label  (codeblock_t *cb, uint32_t label_idx) { cb_write_jcc(cb, "ja"  , 0x0F, 0x87, label_idx); }
void jae_label (codeblock_t *cb, uint32_t label_idx) { cb_write_jcc(cb, "jae" , 0x0F, 0x83, label_idx); }
void jb_label  (codeblock_t *cb, uint32_t label_idx) { cb_write_jcc(cb, "jb"  , 0x0F, 0x82, label_idx); }
void jbe_label (codeblock_t *cb, uint32_t label_idx) { cb_write_jcc(cb, "jbe" , 0x0F, 0x86, label_idx); }
void jc_label  (codeblock_t *cb, uint32_t label_idx) { cb_write_jcc(cb, "jc"  , 0x0F, 0x82, label_idx); }
void je_label  (codeblock_t *cb, uint32_t label_idx) { cb_write_jcc(cb, "je"  , 0x0F, 0x84, label_idx); }
void jg_label  (codeblock_t *cb, uint32_t label_idx) { cb_write_jcc(cb, "jg"  , 0x0F, 0x8F, label_idx); }
void jge_label (codeblock_t *cb, uint32_t label_idx) { cb_write_jcc(cb, "jge" , 0x0F, 0x8D, label_idx); }
void jl_label  (codeblock_t *cb, uint32_t label_idx) { cb_write_jcc(cb, "jl"  , 0x0F, 0x8C, label_idx); }
void jle_label (codeblock_t *cb, uint32_t label_idx) { cb_write_jcc(cb, "jle" , 0x0F, 0x8E, label_idx); }
void jna_label (codeblock_t *cb, uint32_t label_idx) { cb_write_jcc(cb, "jna" , 0x0F, 0x86, label_idx); }
void jnae_label(codeblock_t *cb, uint32_t label_idx) { cb_write_jcc(cb, "jnae", 0x0F, 0x82, label_idx); }
void jnb_label (codeblock_t *cb, uint32_t label_idx) { cb_write_jcc(cb, "jnb" , 0x0F, 0x83, label_idx); }
void jnbe_label(codeblock_t *cb, uint32_t label_idx) { cb_write_jcc(cb, "jnbe", 0x0F, 0x87, label_idx); }
void jnc_label (codeblock_t *cb, uint32_t label_idx) { cb_write_jcc(cb, "jnc" , 0x0F, 0x83, label_idx); }
void jne_label (codeblock_t *cb, uint32_t label_idx) { cb_write_jcc(cb, "jne" , 0x0F, 0x85, label_idx); }
void jng_label (codeblock_t *cb, uint32_t label_idx) { cb_write_jcc(cb, "jng" , 0x0F, 0x8E, label_idx); }
void jnge_label(codeblock_t *cb, uint32_t label_idx) { cb_write_jcc(cb, "jnge", 0x0F, 0x8C, label_idx); }
void jnl_label (codeblock_t *cb, uint32_t label_idx) { cb_write_jcc(cb, "jnl" , 0x0F, 0x8D, label_idx); }
void jnle_label(codeblock_t *cb, uint32_t label_idx) { cb_write_jcc(cb, "jnle", 0x0F, 0x8F, label_idx); }
void jno_label (codeblock_t *cb, uint32_t label_idx) { cb_write_jcc(cb, "jno" , 0x0F, 0x81, label_idx); }
void jnp_label (codeblock_t *cb, uint32_t label_idx) { cb_write_jcc(cb, "jnp" , 0x0F, 0x8b, label_idx); }
void jns_label (codeblock_t *cb, uint32_t label_idx) { cb_write_jcc(cb, "jns" , 0x0F, 0x89, label_idx); }
void jnz_label (codeblock_t *cb, uint32_t label_idx) { cb_write_jcc(cb, "jnz" , 0x0F, 0x85, label_idx); }
void jo_label  (codeblock_t *cb, uint32_t label_idx) { cb_write_jcc(cb, "jo"  , 0x0F, 0x80, label_idx); }
void jp_label  (codeblock_t *cb, uint32_t label_idx) { cb_write_jcc(cb, "jp"  , 0x0F, 0x8A, label_idx); }
void jpe_label (codeblock_t *cb, uint32_t label_idx) { cb_write_jcc(cb, "jpe" , 0x0F, 0x8A, label_idx); }
void jpo_label (codeblock_t *cb, uint32_t label_idx) { cb_write_jcc(cb, "jpo" , 0x0F, 0x8B, label_idx); }
void js_label  (codeblock_t *cb, uint32_t label_idx) { cb_write_jcc(cb, "js"  , 0x0F, 0x88, label_idx); }
void jz_label  (codeblock_t *cb, uint32_t label_idx) { cb_write_jcc(cb, "jz"  , 0x0F, 0x84, label_idx); }
void jmp_label (codeblock_t *cb, uint32_t label_idx) { cb_write_jcc(cb, "jmp" , 0xFF, 0xE9, label_idx); }
|
2020-09-10 00:16:21 +03:00
|
|
|
|
2020-09-30 20:32:15 +03:00
|
|
|
/// jcc - relative jumps to a pointer (32-bit offset)
/// Same opcode table as the label variants above, but the target is an
/// absolute pointer resolved to a rel32 displacement at write time via
/// cb_write_jcc_ptr. jmp_ptr passes 0xFF as op0 (no first opcode byte)
/// with 0xE9 (unconditional jmp rel32).
void ja_ptr  (codeblock_t *cb, uint8_t *ptr) { cb_write_jcc_ptr(cb, "ja"  , 0x0F, 0x87, ptr); }
void jae_ptr (codeblock_t *cb, uint8_t *ptr) { cb_write_jcc_ptr(cb, "jae" , 0x0F, 0x83, ptr); }
void jb_ptr  (codeblock_t *cb, uint8_t *ptr) { cb_write_jcc_ptr(cb, "jb"  , 0x0F, 0x82, ptr); }
void jbe_ptr (codeblock_t *cb, uint8_t *ptr) { cb_write_jcc_ptr(cb, "jbe" , 0x0F, 0x86, ptr); }
void jc_ptr  (codeblock_t *cb, uint8_t *ptr) { cb_write_jcc_ptr(cb, "jc"  , 0x0F, 0x82, ptr); }
void je_ptr  (codeblock_t *cb, uint8_t *ptr) { cb_write_jcc_ptr(cb, "je"  , 0x0F, 0x84, ptr); }
void jg_ptr  (codeblock_t *cb, uint8_t *ptr) { cb_write_jcc_ptr(cb, "jg"  , 0x0F, 0x8F, ptr); }
void jge_ptr (codeblock_t *cb, uint8_t *ptr) { cb_write_jcc_ptr(cb, "jge" , 0x0F, 0x8D, ptr); }
void jl_ptr  (codeblock_t *cb, uint8_t *ptr) { cb_write_jcc_ptr(cb, "jl"  , 0x0F, 0x8C, ptr); }
void jle_ptr (codeblock_t *cb, uint8_t *ptr) { cb_write_jcc_ptr(cb, "jle" , 0x0F, 0x8E, ptr); }
void jna_ptr (codeblock_t *cb, uint8_t *ptr) { cb_write_jcc_ptr(cb, "jna" , 0x0F, 0x86, ptr); }
void jnae_ptr(codeblock_t *cb, uint8_t *ptr) { cb_write_jcc_ptr(cb, "jnae", 0x0F, 0x82, ptr); }
void jnb_ptr (codeblock_t *cb, uint8_t *ptr) { cb_write_jcc_ptr(cb, "jnb" , 0x0F, 0x83, ptr); }
void jnbe_ptr(codeblock_t *cb, uint8_t *ptr) { cb_write_jcc_ptr(cb, "jnbe", 0x0F, 0x87, ptr); }
void jnc_ptr (codeblock_t *cb, uint8_t *ptr) { cb_write_jcc_ptr(cb, "jnc" , 0x0F, 0x83, ptr); }
void jne_ptr (codeblock_t *cb, uint8_t *ptr) { cb_write_jcc_ptr(cb, "jne" , 0x0F, 0x85, ptr); }
void jng_ptr (codeblock_t *cb, uint8_t *ptr) { cb_write_jcc_ptr(cb, "jng" , 0x0F, 0x8E, ptr); }
void jnge_ptr(codeblock_t *cb, uint8_t *ptr) { cb_write_jcc_ptr(cb, "jnge", 0x0F, 0x8C, ptr); }
void jnl_ptr (codeblock_t *cb, uint8_t *ptr) { cb_write_jcc_ptr(cb, "jnl" , 0x0F, 0x8D, ptr); }
void jnle_ptr(codeblock_t *cb, uint8_t *ptr) { cb_write_jcc_ptr(cb, "jnle", 0x0F, 0x8F, ptr); }
void jno_ptr (codeblock_t *cb, uint8_t *ptr) { cb_write_jcc_ptr(cb, "jno" , 0x0F, 0x81, ptr); }
void jnp_ptr (codeblock_t *cb, uint8_t *ptr) { cb_write_jcc_ptr(cb, "jnp" , 0x0F, 0x8b, ptr); }
void jns_ptr (codeblock_t *cb, uint8_t *ptr) { cb_write_jcc_ptr(cb, "jns" , 0x0F, 0x89, ptr); }
void jnz_ptr (codeblock_t *cb, uint8_t *ptr) { cb_write_jcc_ptr(cb, "jnz" , 0x0F, 0x85, ptr); }
void jo_ptr  (codeblock_t *cb, uint8_t *ptr) { cb_write_jcc_ptr(cb, "jo"  , 0x0F, 0x80, ptr); }
void jp_ptr  (codeblock_t *cb, uint8_t *ptr) { cb_write_jcc_ptr(cb, "jp"  , 0x0F, 0x8A, ptr); }
void jpe_ptr (codeblock_t *cb, uint8_t *ptr) { cb_write_jcc_ptr(cb, "jpe" , 0x0F, 0x8A, ptr); }
void jpo_ptr (codeblock_t *cb, uint8_t *ptr) { cb_write_jcc_ptr(cb, "jpo" , 0x0F, 0x8B, ptr); }
void js_ptr  (codeblock_t *cb, uint8_t *ptr) { cb_write_jcc_ptr(cb, "js"  , 0x0F, 0x88, ptr); }
void jz_ptr  (codeblock_t *cb, uint8_t *ptr) { cb_write_jcc_ptr(cb, "jz"  , 0x0F, 0x84, ptr); }
void jmp_ptr (codeblock_t *cb, uint8_t *ptr) { cb_write_jcc_ptr(cb, "jmp" , 0xFF, 0xE9, ptr); }
|
2020-09-10 00:16:21 +03:00
|
|
|
|
|
|
|
/// jmp - Indirect jump near to an R/M operand
|
2021-09-29 21:58:01 +03:00
|
|
|
void jmp_rm(codeblock_t *cb, x86opnd_t opnd)
|
2020-09-10 00:16:21 +03:00
|
|
|
{
|
|
|
|
//cb.writeASM("jmp", opnd);
|
|
|
|
cb_write_rm(cb, false, false, NO_OPND, opnd, 4, 1, 0xFF);
|
|
|
|
}
|
|
|
|
|
2020-09-18 00:09:42 +03:00
|
|
|
// jmp - Jump with relative 32-bit offset
|
2021-09-29 21:58:01 +03:00
|
|
|
void jmp32(codeblock_t *cb, int32_t offset)
|
2020-09-10 00:16:21 +03:00
|
|
|
{
|
2020-09-18 00:09:42 +03:00
|
|
|
//cb.writeASM("jmp", ((offset > 0)? "+":"-") ~ to!string(offset));
|
|
|
|
cb_write_byte(cb, 0xE9);
|
|
|
|
cb_write_int(cb, offset, 32);
|
2020-09-10 00:16:21 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/// lea - Load Effective Address
|
2021-09-29 21:58:01 +03:00
|
|
|
void lea(codeblock_t *cb, x86opnd_t dst, x86opnd_t src)
|
2020-09-10 00:16:21 +03:00
|
|
|
{
|
|
|
|
//cb.writeASM("lea", dst, src);
|
|
|
|
assert (dst.num_bits == 64);
|
|
|
|
cb_write_rm(cb, false, true, dst, src, 0xFF, 1, 0x8D);
|
|
|
|
}
|
|
|
|
|
YJIT: use shorter encoding for mov(r64,imm) when unambiguous (#5081)
* YJIT: use shorter encoding for mov(r64,imm) when unambiguous
Previously, for small constants such as `mov(RAX, imm_opnd(Qundef))`,
we emit an instruction with an 8-byte immediate. This form commonly
gets the `movabs` mnemonic.
In 64-bit mode, 32-bit operands get zero extended to 64-bit to fill the
register, so when the immediate is small enough, we can save 4 bytes by
using the `mov` variant that takes a 32-bit immediate and does a zero
extension.
Not implement with this change, there is an imm32 variant of `mov` that
does sign extension we could use. When the constant is negative, we
fallback to the `movabs` form.
In railsbench, this change yields roughly a 12% code size reduction for
the outlined block.
Co-authored-by: Jemma Issroff <jemmaissroff@gmail.com>
* [ci skip] comment edit. Please squash.
Co-authored-by: Jemma Issroff <jemmaissroff@gmail.com>
2021-11-05 22:44:29 +03:00
|
|
|
// Does this number fit in 32 bits and stay the same if you zero extend it to
// 64 bit? If the sign bit is clear, sign extension and zero extension yield
// the same result.
static bool
zero_extendable_32bit(uint64_t number)
{
    // Fits in 32 bits with bit 31 clear <=> value is at most INT32_MAX
    return number <= (uint64_t)INT32_MAX;
}
|
|
|
|
|
2020-09-09 23:45:28 +03:00
|
|
|
/// mov - Data move operation
/// Handles three shapes: register + immediate, memory + immediate, and the
/// general R/M forms (reg/reg, reg/mem, mem/reg) via cb_write_rm_multi.
void mov(codeblock_t *cb, x86opnd_t dst, x86opnd_t src)
{
    // R/M + Imm
    if (src.type == OPND_IMM)
    {
        //cb.writeASM("mov", dst, src);

        // R + Imm
        if (dst.type == OPND_REG)
        {
            // The immediate must fit in the destination register, either by
            // its signed size or by its unsigned size
            assert (
                src.num_bits <= dst.num_bits ||
                unsig_imm_size(src.as.imm) <= dst.num_bits
            );

            // In case the source immediate could be zero extended to be 64
            // bit, we can use the 32-bit operands version of the instruction.
            // For example, we can turn mov(rax, 0x34) into the equivalent
            // mov(eax, 0x34).
            if (dst.num_bits == 64 && zero_extendable_32bit(src.as.unsig_imm)) {
                // REX prefix without the W bit; only needed for extended registers
                if (rex_needed(dst))
                    cb_write_rex(cb, false, 0, 0, dst.as.reg.reg_no);
                // B8+rd with a 32-bit immediate (zero extended by the CPU)
                cb_write_opcode(cb, 0xB8, dst);
                cb_write_int(cb, src.as.imm, 32);
            }
            else {
                // 16-bit operand size prefix
                if (dst.num_bits == 16)
                    cb_write_byte(cb, 0x66);

                // REX prefix, with the W bit set for 64-bit operands
                if (rex_needed(dst) || dst.num_bits == 64)
                    cb_write_rex(cb, dst.num_bits == 64, 0, 0, dst.as.reg.reg_no);

                // B0+rb for 8-bit, B8+rd otherwise
                cb_write_opcode(cb, (dst.num_bits == 8)? 0xB0:0xB8, dst);

                // Immediate sized to match the destination register
                cb_write_int(cb, src.as.imm, dst.num_bits);
            }
        }

        // M + Imm
        else if (dst.type == OPND_MEM)
        {
            assert (src.num_bits <= dst.num_bits);

            // C6 /0 for 8-bit stores, C7 /0 otherwise
            if (dst.num_bits == 8)
                cb_write_rm(cb, false, false, NO_OPND, dst, 0xFF, 1, 0xC6);
            else
                cb_write_rm(cb, dst.num_bits == 16, dst.num_bits == 64, NO_OPND, dst, 0, 1, 0xC7);

            // The immediate is capped at 32 bits even for 64-bit stores
            // (the CPU sign extends it)
            cb_write_int(cb, src.as.imm, (dst.num_bits > 32)? 32:dst.num_bits);
        }

        // Immediate source with a non-reg, non-mem destination is invalid
        else
        {
            assert (false);
        }
    }
    else
    {
        // General reg/mem forms; immediate opcodes are marked unavailable
        // (0xFF) since the immediate cases are handled above
        cb_write_rm_multi(
            cb,
            "mov",
            0x88, // opMemReg8
            0x89, // opMemRegPref
            0x8A, // opRegMem8
            0x8B, // opRegMemPref
            0xC6, // opMemImm8
            0xFF, // opMemImmSml (not available)
            0xFF, // opMemImmLrg
            0xFF, // opExtImm
            dst,
            src
        );
    }
}
|
|
|
|
|
2020-09-18 19:20:43 +03:00
|
|
|
/// movsx - Move with sign extension (signed integers)
|
2021-09-29 21:58:01 +03:00
|
|
|
void movsx(codeblock_t *cb, x86opnd_t dst, x86opnd_t src)
|
2020-09-18 19:20:43 +03:00
|
|
|
{
|
|
|
|
assert (dst.type == OPND_REG);
|
|
|
|
assert (src.type == OPND_REG || src.type == OPND_MEM);
|
|
|
|
assert (src.num_bits < dst.num_bits);
|
|
|
|
|
|
|
|
//cb.writeASM("movsx", dst, src);
|
|
|
|
|
|
|
|
if (src.num_bits == 8)
|
|
|
|
{
|
|
|
|
cb_write_rm(cb, dst.num_bits == 16, dst.num_bits == 64, dst, src, 0xFF, 2, 0x0F, 0xBE);
|
|
|
|
}
|
|
|
|
else if (src.num_bits == 16)
|
|
|
|
{
|
|
|
|
cb_write_rm(cb, dst.num_bits == 16, dst.num_bits == 64, dst, src, 0xFF, 2, 0x0F, 0xBF);
|
|
|
|
}
|
|
|
|
else if (src.num_bits == 32)
|
|
|
|
{
|
|
|
|
cb_write_rm(cb, false, true, dst, src, 0xFF, 1, 0x63);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
assert (false);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
/// movzx - Move with zero extension (unsigned values)
|
2021-09-29 21:58:01 +03:00
|
|
|
void movzx(codeblock_t *cb, x86opnd_t dst, x86opnd_t src)
|
2020-09-18 19:20:43 +03:00
|
|
|
{
|
|
|
|
cb.writeASM("movzx", dst, src);
|
|
|
|
|
2021-01-12 22:56:43 +03:00
|
|
|
uint32_t dstSize;
|
2020-09-18 19:20:43 +03:00
|
|
|
if (dst.isReg)
|
|
|
|
dstSize = dst.reg.size;
|
|
|
|
else
|
|
|
|
assert (false, "movzx dst must be a register");
|
|
|
|
|
2021-01-12 22:56:43 +03:00
|
|
|
uint32_t srcSize;
|
2020-09-18 19:20:43 +03:00
|
|
|
if (src.isReg)
|
|
|
|
srcSize = src.reg.size;
|
|
|
|
else if (src.isMem)
|
|
|
|
srcSize = src.mem.size;
|
|
|
|
else
|
|
|
|
assert (false);
|
|
|
|
|
|
|
|
assert (
|
|
|
|
srcSize < dstSize,
|
|
|
|
"movzx: srcSize >= dstSize"
|
|
|
|
);
|
|
|
|
|
|
|
|
if (srcSize is 8)
|
|
|
|
{
|
|
|
|
cb.writeRMInstr!('r', 0xFF, 0x0F, 0xB6)(dstSize is 16, dstSize is 64, dst, src);
|
|
|
|
}
|
|
|
|
else if (srcSize is 16)
|
|
|
|
{
|
|
|
|
cb.writeRMInstr!('r', 0xFF, 0x0F, 0xB7)(dstSize is 16, dstSize is 64, dst, src);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
assert (false, "invalid src operand size for movxz");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
*/
|
|
|
|
|
2020-09-14 23:59:39 +03:00
|
|
|
// neg - Integer negation (multiplication by -1)
|
2021-09-29 21:58:01 +03:00
|
|
|
void neg(codeblock_t *cb, x86opnd_t opnd)
|
2020-09-14 23:59:39 +03:00
|
|
|
{
|
|
|
|
write_rm_unary(
|
|
|
|
cb,
|
|
|
|
"neg",
|
|
|
|
0xF6, // opMemReg8
|
|
|
|
0xF7, // opMemRegPref
|
|
|
|
0x03, // opExt
|
|
|
|
opnd
|
|
|
|
);
|
|
|
|
}
|
|
|
|
|
2020-09-04 22:56:00 +03:00
|
|
|
// nop - Noop, one or multiple bytes long
// Emits exactly `length` bytes of no-op padding. Lengths 1-9 each use a
// single multi-byte NOP encoding; longer runs are emitted as repeated
// 9-byte NOPs plus one final NOP for the remainder.
void nop(codeblock_t *cb, uint32_t length)
{
    switch (length) {
      case 0:
        // Nothing to emit
        break;

      case 1:
        //cb.writeASM("nop1");
        cb_write_byte(cb, 0x90);
        break;

      case 2:
        //cb.writeASM("nop2");
        cb_write_bytes(cb, 2, 0x66,0x90);
        break;

      case 3:
        //cb.writeASM("nop3");
        cb_write_bytes(cb, 3, 0x0F,0x1F,0x00);
        break;

      case 4:
        //cb.writeASM("nop4");
        cb_write_bytes(cb, 4, 0x0F,0x1F,0x40,0x00);
        break;

      case 5:
        //cb.writeASM("nop5");
        cb_write_bytes(cb, 5, 0x0F,0x1F,0x44,0x00,0x00);
        break;

      case 6:
        //cb.writeASM("nop6");
        cb_write_bytes(cb, 6, 0x66,0x0F,0x1F,0x44,0x00,0x00);
        break;

      case 7:
        //cb.writeASM("nop7");
        cb_write_bytes(cb, 7, 0x0F,0x1F,0x80,0x00,0x00,0x00,0x00);
        break;

      case 8:
        //cb.writeASM("nop8");
        cb_write_bytes(cb, 8, 0x0F,0x1F,0x84,0x00,0x00,0x00,0x00,0x00);
        break;

      case 9:
        //cb.writeASM("nop9");
        cb_write_bytes(cb, 9, 0x66,0x0F,0x1F,0x84,0x00,0x00,0x00,0x00,0x00);
        break;

      default:
        {
            // Emit as many 9-byte NOPs as fit, then one NOP for the rest
            uint32_t written = 0;
            while (written + 9 <= length)
            {
                nop(cb, 9);
                written += 9;
            }
            nop(cb, length - written);
        }
        break;
    }
}
|
2020-09-05 00:10:11 +03:00
|
|
|
|
2020-09-14 18:54:25 +03:00
|
|
|
// not - Bitwise NOT
|
2021-09-29 21:58:01 +03:00
|
|
|
void not(codeblock_t *cb, x86opnd_t opnd)
|
2020-09-14 18:54:25 +03:00
|
|
|
{
|
|
|
|
write_rm_unary(
|
|
|
|
cb,
|
|
|
|
"not",
|
|
|
|
0xF6, // opMemReg8
|
|
|
|
0xF7, // opMemRegPref
|
|
|
|
0x02, // opExt
|
|
|
|
opnd
|
|
|
|
);
|
|
|
|
}
|
|
|
|
|
|
|
|
/// or - Bitwise OR
|
2021-09-29 21:58:01 +03:00
|
|
|
void or(codeblock_t *cb, x86opnd_t opnd0, x86opnd_t opnd1)
|
2020-09-14 23:59:39 +03:00
|
|
|
{
|
|
|
|
cb_write_rm_multi(
|
|
|
|
cb,
|
|
|
|
"or",
|
|
|
|
0x08, // opMemReg8
|
|
|
|
0x09, // opMemRegPref
|
|
|
|
0x0A, // opRegMem8
|
|
|
|
0x0B, // opRegMemPref
|
|
|
|
0x80, // opMemImm8
|
|
|
|
0x83, // opMemImmSml
|
|
|
|
0x81, // opMemImmLrg
|
|
|
|
0x01, // opExtImm
|
|
|
|
opnd0,
|
|
|
|
opnd1
|
|
|
|
);
|
|
|
|
}
|
2020-09-14 18:54:25 +03:00
|
|
|
|
2020-09-14 23:59:39 +03:00
|
|
|
/// pop - Pop a register off the stack
|
2021-09-29 21:58:01 +03:00
|
|
|
void pop(codeblock_t *cb, x86opnd_t opnd)
|
2020-09-05 00:10:11 +03:00
|
|
|
{
|
2021-02-27 06:11:27 +03:00
|
|
|
assert (opnd.num_bits == 64);
|
2020-09-05 00:10:11 +03:00
|
|
|
|
2021-02-27 06:11:27 +03:00
|
|
|
//cb.writeASM("pop", opnd);
|
2020-09-08 23:45:35 +03:00
|
|
|
|
2021-02-27 06:11:27 +03:00
|
|
|
if (opnd.type == OPND_REG) {
|
|
|
|
if (rex_needed(opnd))
|
|
|
|
cb_write_rex(cb, false, 0, 0, opnd.as.reg.reg_no);
|
|
|
|
cb_write_opcode(cb, 0x58, opnd);
|
2021-09-29 23:11:50 +03:00
|
|
|
}
|
|
|
|
else if (opnd.type == OPND_MEM) {
|
2021-02-27 06:11:27 +03:00
|
|
|
cb_write_rm(cb, false, false, NO_OPND, opnd, 0, 1, 0x8F);
|
2021-09-29 23:11:50 +03:00
|
|
|
}
|
|
|
|
else {
|
2021-02-27 06:11:27 +03:00
|
|
|
assert(false && "unexpected operand type");
|
|
|
|
}
|
2020-09-05 00:10:11 +03:00
|
|
|
}
|
|
|
|
|
2020-09-15 22:12:31 +03:00
|
|
|
/// popfq - Pop the flags register (64-bit)
void popfq(codeblock_t *cb)
{
    // REX.W + 0x9D
    cb_write_bytes(cb, 2, 0x48, 0x9D);
}
|
|
|
|
|
2021-02-26 10:22:01 +03:00
|
|
|
/// push - Push an operand on the stack
|
2021-09-29 21:58:01 +03:00
|
|
|
void push(codeblock_t *cb, x86opnd_t opnd)
|
2020-09-05 00:10:11 +03:00
|
|
|
{
|
2021-02-26 10:22:01 +03:00
|
|
|
assert (opnd.num_bits == 64);
|
|
|
|
|
|
|
|
//cb.writeASM("push", opnd);
|
|
|
|
|
|
|
|
if (opnd.type == OPND_REG) {
|
|
|
|
if (rex_needed(opnd))
|
|
|
|
cb_write_rex(cb, false, 0, 0, opnd.as.reg.reg_no);
|
|
|
|
cb_write_opcode(cb, 0x50, opnd);
|
2021-09-29 23:11:50 +03:00
|
|
|
}
|
|
|
|
else if (opnd.type == OPND_MEM) {
|
2021-02-26 10:22:01 +03:00
|
|
|
cb_write_rm(cb, false, false, NO_OPND, opnd, 6, 1, 0xFF);
|
2021-09-29 23:11:50 +03:00
|
|
|
}
|
|
|
|
else {
|
2021-02-26 10:22:01 +03:00
|
|
|
assert(false && "unexpected operand type");
|
|
|
|
}
|
2020-09-08 23:45:35 +03:00
|
|
|
}
|
|
|
|
|
2020-09-15 22:12:31 +03:00
|
|
|
/// pushfq - Push the flags register (64-bit)
void pushfq(codeblock_t *cb)
{
    // Encoding: 9C (no REX.W needed, unlike popfq above)
    cb_write_byte(cb, 0x9C);
}
|
|
|
|
|
2020-09-08 23:45:35 +03:00
|
|
|
/// ret - Return from call, popping only the return address
void ret(codeblock_t *cb)
{
    // Encoding: C3 (near return)
    cb_write_byte(cb, 0xC3);
}
|
2020-09-10 17:57:29 +03:00
|
|
|
|
2020-09-10 21:31:45 +03:00
|
|
|
// sal - Shift arithmetic left
|
2021-09-29 21:58:01 +03:00
|
|
|
void sal(codeblock_t *cb, x86opnd_t opnd0, x86opnd_t opnd1)
|
2020-09-10 21:31:45 +03:00
|
|
|
{
|
|
|
|
cb_write_shift(
|
|
|
|
cb,
|
|
|
|
"sal",
|
|
|
|
0xD1, // opMemOnePref,
|
|
|
|
0xD3, // opMemClPref,
|
|
|
|
0xC1, // opMemImmPref,
|
|
|
|
0x04,
|
|
|
|
opnd0,
|
|
|
|
opnd1
|
|
|
|
);
|
|
|
|
}
|
|
|
|
|
|
|
|
/// sar - Shift arithmetic right (signed)
|
2021-09-29 21:58:01 +03:00
|
|
|
void sar(codeblock_t *cb, x86opnd_t opnd0, x86opnd_t opnd1)
|
2020-09-10 21:31:45 +03:00
|
|
|
{
|
|
|
|
cb_write_shift(
|
|
|
|
cb,
|
|
|
|
"sar",
|
|
|
|
0xD1, // opMemOnePref,
|
|
|
|
0xD3, // opMemClPref,
|
|
|
|
0xC1, // opMemImmPref,
|
|
|
|
0x07,
|
|
|
|
opnd0,
|
|
|
|
opnd1
|
|
|
|
);
|
|
|
|
}
|
|
|
|
// shl - Shift logical left
|
2021-09-29 21:58:01 +03:00
|
|
|
void shl(codeblock_t *cb, x86opnd_t opnd0, x86opnd_t opnd1)
|
2020-09-10 21:31:45 +03:00
|
|
|
{
|
|
|
|
cb_write_shift(
|
|
|
|
cb,
|
|
|
|
"shl",
|
|
|
|
0xD1, // opMemOnePref,
|
|
|
|
0xD3, // opMemClPref,
|
|
|
|
0xC1, // opMemImmPref,
|
|
|
|
0x04,
|
|
|
|
opnd0,
|
|
|
|
opnd1
|
|
|
|
);
|
|
|
|
}
|
|
|
|
|
|
|
|
/// shr - Shift logical right (unsigned)
|
2021-09-29 21:58:01 +03:00
|
|
|
void shr(codeblock_t *cb, x86opnd_t opnd0, x86opnd_t opnd1)
|
2020-09-10 21:31:45 +03:00
|
|
|
{
|
|
|
|
cb_write_shift(
|
|
|
|
cb,
|
|
|
|
"shr",
|
|
|
|
0xD1, // opMemOnePref,
|
|
|
|
0xD3, // opMemClPref,
|
|
|
|
0xC1, // opMemImmPref,
|
|
|
|
0x05,
|
|
|
|
opnd0,
|
|
|
|
opnd1
|
|
|
|
);
|
|
|
|
}
|
|
|
|
|
2020-09-10 17:57:29 +03:00
|
|
|
/// sub - Integer subtraction
|
2021-09-29 21:58:01 +03:00
|
|
|
void sub(codeblock_t *cb, x86opnd_t opnd0, x86opnd_t opnd1)
|
2020-09-10 17:57:29 +03:00
|
|
|
{
|
|
|
|
cb_write_rm_multi(
|
|
|
|
cb,
|
|
|
|
"sub",
|
|
|
|
0x28, // opMemReg8
|
|
|
|
0x29, // opMemRegPref
|
|
|
|
0x2A, // opRegMem8
|
|
|
|
0x2B, // opRegMemPref
|
|
|
|
0x80, // opMemImm8
|
|
|
|
0x83, // opMemImmSml
|
|
|
|
0x81, // opMemImmLrg
|
2020-09-10 21:31:45 +03:00
|
|
|
0x05, // opExtImm
|
2020-09-10 17:57:29 +03:00
|
|
|
opnd0,
|
|
|
|
opnd1
|
|
|
|
);
|
|
|
|
}
|
2020-09-14 23:59:39 +03:00
|
|
|
|
2020-09-28 22:50:41 +03:00
|
|
|
/// test - Logical Compare
///
/// Emits TEST rm_opnd, test_opnd. rm_opnd must be a register or memory
/// operand; test_opnd must be a register or an immediate. The encoding
/// is chosen from the immediate's sign and magnitude (see branches).
void test(codeblock_t *cb, x86opnd_t rm_opnd, x86opnd_t test_opnd)
{
    assert (rm_opnd.type == OPND_REG || rm_opnd.type == OPND_MEM);
    assert (test_opnd.type == OPND_REG || test_opnd.type == OPND_IMM);

    // If the second operand is an immediate
    if (test_opnd.type == OPND_IMM)
    {
        x86opnd_t imm_opnd = test_opnd;

        if (imm_opnd.as.imm >= 0)
        {
            // Non-negative immediates must be encodable in at most 32
            // bits and must fit in the R/M operand
            assert (unsig_imm_size(imm_opnd.as.unsig_imm) <= 32);
            assert (unsig_imm_size(imm_opnd.as.unsig_imm) <= rm_opnd.num_bits);

            // Use the smallest operand size possible
            rm_opnd = resize_opnd(rm_opnd, unsig_imm_size(imm_opnd.as.unsig_imm));

            if (rm_opnd.num_bits == 8)
            {
                // F6 /0 ib
                cb_write_rm(cb, false, false, NO_OPND, rm_opnd, 0x00, 1, 0xF6);
                cb_write_int(cb, imm_opnd.as.imm, rm_opnd.num_bits);
            }
            else
            {
                // F7 /0 with an immediate matching the operand size
                cb_write_rm(cb, rm_opnd.num_bits == 16, false, NO_OPND, rm_opnd, 0x00, 1, 0xF7);
                cb_write_int(cb, imm_opnd.as.imm, rm_opnd.num_bits);
            }
        }
        else
        {
            // This mode only applies to 64-bit R/M operands with 32-bit signed immediates
            assert (imm_opnd.as.imm < 0);
            assert (sig_imm_size(imm_opnd.as.imm) <= 32);
            assert (rm_opnd.num_bits == 64);
            // REX.W F7 /0 id (immediate sign-extended to 64 bits)
            cb_write_rm(cb, false, true, NO_OPND, rm_opnd, 0x00, 1, 0xF7);
            cb_write_int(cb, imm_opnd.as.imm, 32);
        }
    }
    else
    {
        // Register second operand: sizes must match exactly
        assert (test_opnd.num_bits == rm_opnd.num_bits);

        if (rm_opnd.num_bits == 8)
        {
            // 84 /r
            cb_write_rm(cb, false, false, test_opnd, rm_opnd, 0xFF, 1, 0x84);
        }
        else
        {
            // 85 /r, with operand-size prefix or REX.W as needed
            cb_write_rm(cb, rm_opnd.num_bits == 16, rm_opnd.num_bits == 64, test_opnd, rm_opnd, 0xFF, 1, 0x85);
        }
    }
}
|
|
|
|
|
2020-09-20 21:23:14 +03:00
|
|
|
/// Undefined opcode
void ud2(codeblock_t *cb)
{
    // Encoding: 0F 0B - architecturally guaranteed to raise an
    // invalid-opcode exception; used to mark unreachable code paths
    cb_write_bytes(cb, 2, 0x0F, 0x0B);
}
|
|
|
|
|
2021-05-12 17:18:46 +03:00
|
|
|
/// xchg - Exchange Register/Memory with Register
|
2021-09-29 21:58:01 +03:00
|
|
|
void xchg(codeblock_t *cb, x86opnd_t rm_opnd, x86opnd_t r_opnd)
|
2021-05-12 17:18:46 +03:00
|
|
|
{
|
|
|
|
assert (rm_opnd.num_bits == 64);
|
|
|
|
assert (r_opnd.num_bits == 64);
|
|
|
|
assert (rm_opnd.type == OPND_REG);
|
|
|
|
assert (r_opnd.type == OPND_REG);
|
|
|
|
|
|
|
|
// If we're exchanging with RAX
|
|
|
|
if (rm_opnd.type == OPND_REG && rm_opnd.as.reg.reg_no == RAX.as.reg.reg_no)
|
|
|
|
{
|
|
|
|
// Write the REX byte
|
|
|
|
cb_write_rex(cb, rm_opnd.num_bits == 64, 0, 0, r_opnd.as.reg.reg_no);
|
|
|
|
|
|
|
|
// Write the opcode and register number
|
|
|
|
cb_write_byte(cb, 0x90 + (r_opnd.as.reg.reg_no & 7));
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
cb_write_rm(cb, rm_opnd.num_bits == 16, rm_opnd.num_bits == 64, r_opnd, rm_opnd, 0xFF, 1, 0x87);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-09-14 23:59:39 +03:00
|
|
|
/// xor - Exclusive bitwise OR
|
2021-09-29 21:58:01 +03:00
|
|
|
void xor(codeblock_t *cb, x86opnd_t opnd0, x86opnd_t opnd1)
|
2020-09-15 17:44:46 +03:00
|
|
|
{
|
|
|
|
cb_write_rm_multi(
|
|
|
|
cb,
|
|
|
|
"xor",
|
|
|
|
0x30, // opMemReg8
|
|
|
|
0x31, // opMemRegPref
|
|
|
|
0x32, // opRegMem8
|
|
|
|
0x33, // opRegMemPref
|
|
|
|
0x80, // opMemImm8
|
|
|
|
0x83, // opMemImmSml
|
|
|
|
0x81, // opMemImmLrg
|
|
|
|
0x06, // opExtImm
|
|
|
|
opnd0,
|
|
|
|
opnd1
|
|
|
|
);
|
|
|
|
}
|
2021-03-03 02:27:50 +03:00
|
|
|
|
|
|
|
// LOCK - lock prefix for atomic shared memory operations
void cb_write_lock_prefix(codeblock_t *cb)
{
    // Encoding: F0 prefix byte; applies to the next instruction written
    // into the code block
    cb_write_byte(cb, 0xF0);
}
|