|
49 | 49 | #define OPCODE_MOV_I8_TO_R8 (0xb0) /* +rb */ |
50 | 50 | #define OPCODE_MOV_I64_TO_R64 (0xb8) /* +rq */ |
51 | 51 | #define OPCODE_MOV_I32_TO_RM32 (0xc7) |
| 52 | +#define OPCODE_MOV_R8_TO_RM8 (0x88) /* /r */ |
52 | 53 | #define OPCODE_MOV_R64_TO_RM64 (0x89) /* /r */ |
53 | 54 | #define OPCODE_MOV_RM64_TO_R64 (0x8b) |
54 | 55 | #define OPCODE_LEA_MEM_TO_R64 (0x8d) /* /r */ |
|
85 | 86 | #define MODRM_RM_REG (0xc0) |
86 | 87 | #define MODRM_RM_R64(x) ((x) & 0x7) |
87 | 88 |
|
| 89 | +#define OP_SIZE_PREFIX (0x66) |
| 90 | + |
88 | 91 | #define REX_PREFIX (0x40) |
89 | 92 | #define REX_W (0x08) // width |
90 | 93 | #define REX_R (0x04) // register |
@@ -298,18 +301,28 @@ STATIC void asm_x64_ret(asm_x64_t *as) { |
298 | 301 | asm_x64_write_byte_1(as, OPCODE_RET); |
299 | 302 | } |
300 | 303 |
|
301 | | -void asm_x64_mov_r32_to_r32(asm_x64_t *as, int src_r32, int dest_r32) { |
302 | | - // defaults to 32 bit operation |
303 | | - assert(src_r32 < 8); |
304 | | - assert(dest_r32 < 8); |
305 | | - asm_x64_write_byte_2(as, OPCODE_MOV_R64_TO_RM64, MODRM_R64(src_r32) | MODRM_RM_REG | MODRM_RM_R64(dest_r32)); |
306 | | -} |
307 | | - |
308 | 304 | void asm_x64_mov_r64_to_r64(asm_x64_t *as, int src_r64, int dest_r64) { |
309 | 305 | // use REX prefix for 64 bit operation |
310 | 306 | asm_x64_write_byte_3(as, REX_PREFIX | REX_W | (src_r64 < 8 ? 0 : REX_R) | (dest_r64 < 8 ? 0 : REX_B), OPCODE_MOV_R64_TO_RM64, MODRM_R64(src_r64) | MODRM_RM_REG | MODRM_RM_R64(dest_r64)); |
311 | 307 | } |
312 | 308 |
|
| 309 | +void asm_x64_mov_r8_to_disp(asm_x64_t *as, int src_r64, int dest_r64, int dest_disp) { |
| 310 | + assert(dest_r64 < 8); |
| 311 | + if (src_r64 < 8) { |
| 312 | + asm_x64_write_byte_1(as, OPCODE_MOV_R8_TO_RM8); |
| 313 | + } else { |
| 314 | + asm_x64_write_byte_2(as, REX_PREFIX | REX_R, OPCODE_MOV_R8_TO_RM8); |
| 315 | + } |
| 316 | + asm_x64_write_r64_disp(as, src_r64, dest_r64, dest_disp); |
| 317 | +} |
| 318 | + |
void asm_x64_mov_r16_to_disp(asm_x64_t *as, int src_r64, int dest_r64, int dest_disp) {
    // Emit "mov word [dest_r64 + dest_disp], src_r16": the 0x66 operand-size
    // prefix converts the default 32-bit form of opcode 0x89 into a 16-bit
    // store of the low word of src_r64.
    // Both registers must be low (rax-rdi); no REX extension is emitted here.
    assert(src_r64 < 8);
    assert(dest_r64 < 8);
    asm_x64_write_byte_2(as, OP_SIZE_PREFIX, OPCODE_MOV_R64_TO_RM64);
    asm_x64_write_r64_disp(as, src_r64, dest_r64, dest_disp);
}
| 325 | + |
313 | 326 | void asm_x64_mov_r64_to_disp(asm_x64_t *as, int src_r64, int dest_r64, int dest_disp) { |
314 | 327 | // use REX prefix for 64 bit operation |
315 | 328 | assert(dest_r64 < 8); |
@@ -356,6 +369,7 @@ void asm_x64_mov_i64_to_r64(asm_x64_t *as, int64_t src_i64, int dest_r64) { |
356 | 369 | } |
357 | 370 |
|
358 | 371 | void asm_x64_mov_i64_to_r64_optimised(asm_x64_t *as, int64_t src_i64, int dest_r64) { |
| 372 | + // TODO use movzx, movsx if possible |
359 | 373 | if (UNSIGNED_FIT32(src_i64)) { |
360 | 374 | // 5 bytes |
361 | 375 | asm_x64_mov_i32_to_r64(as, src_i64 & 0xffffffff, dest_r64); |
|
0 commit comments