machine_code_func.cpp
#include "x64asm/include/x64asm.h"
#include "types.h"
/**
 * This class is to help you get started with jumping into machine code from your VM
 * interpreter. x64asm naturally supports calling functions with up to six parameters
 * (see other examples in this file). However, it does not naturally support calling
 * functions with more than six parameters. This class implements calls to functions
 * with more than six parameters, as you may need to support for MITScript (a usage
 * sketch appears at the end of this file).
 *
 * This is an illustrative example. You may be able to change this implementation to
 * achieve better performance.
 */
class MachineCodeFunction
{
public:
MachineCodeFunction(const size_t parameter_count, x64asm::Function body) :
    compiled_(false),
    parameter_count_(parameter_count),
    body_(body) {}
void compile() {
// Number of parameters passed in registers
const static size_t NUM_PARAMETER_REGS = 6;
// Parameter Registers according to SysV calling convention
const static x64asm::R64 registers[] = {x64asm::rdi, x64asm::rsi, x64asm::rdx, x64asm::rcx, x64asm::r8, x64asm::r9};
// Should be able to use static_assert instead
assert((sizeof(registers) / sizeof(x64asm::R64)) == NUM_PARAMETER_REGS);
assert(!compiled_);
// Allocate the parameter buffer
buffer_.resize(parameter_count_);
// Create an assembler and a function to compile code to.
x64asm::Assembler assm;
// Generate trampoline to handle non-static number of arguments
assm.start(trampoline_);
// At this point the stack is 8-byte aligned.
// However, we need it to be 16-byte aligned for the call instruction
// If we push an even number of arguments on the stack
// or push none at all, then we need to adjust the stack
// by 8 bytes
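// For example (hypothetical parameter counts): the caller's call instruction left
// rsp 8 bytes off 16-byte alignment. With 9 parameters, the loop below pushes
// 3 values (24 bytes), which by itself restores 16-byte alignment; with 8
// parameters it pushes only 2 values (16 bytes), so the extra 8-byte
// subtraction here is what restores it.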
bool pushed_alignment = false;
if ((parameter_count_ <= NUM_PARAMETER_REGS) || (parameter_count_ % 2 == 0)) {
assm.sub(x64asm::rsp, x64asm::Imm8{8});
pushed_alignment = true;
}
// Push arguments 7-n in reverse order
for (size_t i = NUM_PARAMETER_REGS; i < parameter_count_; ++i) {
// Load address of parameter buffer slot for this value
assm.assemble({x64asm::MOV_R64_IMM64, {
x64asm::rax,
x64asm::Imm64{
&buffer_[parameter_count_ - (i - NUM_PARAMETER_REGS) - 1]
}
}});
// Load parameter value from buffer
assm.assemble(
{x64asm::MOV_R64_M64, {x64asm::rax, x64asm::M64{x64asm::rax}}}
);
// Push on stack
assm.push(x64asm::rax);
}
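// After this loop the stack argument area matches the SysV layout: argument 7
// sits at [rsp], argument 8 at [rsp + 8], and so on, because the values were
// pushed in reverse order above.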
// Pass arguments 1 to 6
for (size_t i = 0; i < parameter_count_ && i < NUM_PARAMETER_REGS; ++i) {
// Load address of parameter buffer slot for this value
// Be careful to use a register (rax) that is not in the set of argument registers
assm.assemble({x64asm::MOV_R64_IMM64, {
x64asm::rax, x64asm::Imm64{&buffer_[i]}}});
// Load parameter value from buffer
assm.assemble({x64asm::MOV_R64_M64, {
registers[i], x64asm::M64{x64asm::rax}}});
}
// Call body of function
assm.assemble({x64asm::MOV_R64_IMM64, {x64asm::rax, x64asm::Imm64{body_}}});
assm.call(x64asm::rax);
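// The callee leaves its return value in rax (assuming tagptr_t is a
// pointer-sized integral type). The trampoline does not touch rax again
// before its ret, so that same value is what Function::call<tagptr_t>()
// hands back to the interpreter.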
// Pop arguments 7-n off stack
for (size_t i = parameter_count_; NUM_PARAMETER_REGS < i; --i) {
// Pop stack into unused register
assm.pop(x64asm::rdx);
}
if (pushed_alignment) {
assm.add(x64asm::rsp, x64asm::Imm8{8});
}
assm.ret();
assm.finish();
compiled_ = true;
}
tagptr_t call(const vector<tagptr_t*>& args) {
assert(compiled_);
assert(args.size() == parameter_count_);
// copy contents into buffer
std::copy(args.begin(), args.end(), buffer_.begin());
tagptr_t result = trampoline_.call<tagptr_t>();
return result;
}
private:
bool compiled_;
vector<tagptr_t*> buffer_;
size_t parameter_count_;
x64asm::Function trampoline_;
x64asm::Function body_;
};
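
// A minimal usage sketch, not part of the original class. The names
// `example_body` and `example_args` are hypothetical stand-ins: `example_body`
// is an x64asm::Function you have already assembled for a routine that takes
// `example_args.size()` tagptr_t* parameters, and `example_args` holds the
// argument values the interpreter wants to pass.
static tagptr_t example_call(x64asm::Function example_body,
                             const vector<tagptr_t*>& example_args) {
    // Build a trampoline sized for this call's parameter count.
    MachineCodeFunction fn(example_args.size(), example_body);
    // Generate the trampoline once; it can be reused for repeated calls
    // with the same parameter count.
    fn.compile();
    // Copy the argument pointers into the buffer and jump into machine code.
    return fn.call(example_args);
}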