#ifndef RUBY_INSNHELPER_H
#define RUBY_INSNHELPER_H
/**********************************************************************

  insnhelper.h - helper macros to implement each instruction

  $Author$
  created at: 04/01/01 15:50:34 JST

  Copyright (C) 2004-2007 Koichi Sasada

**********************************************************************/

RUBY_EXTERN VALUE ruby_vm_const_missing_count;
RUBY_EXTERN rb_serial_t ruby_vm_constant_cache_invalidations;
RUBY_EXTERN rb_serial_t ruby_vm_constant_cache_misses;
RUBY_EXTERN rb_serial_t ruby_vm_global_cvar_state;
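/* The declarations above are VM-wide counters/serials defined in the VM proper.
 * ruby_vm_global_cvar_state is the serial read and bumped via
 * GET_GLOBAL_CVAR_STATE()/INC_GLOBAL_CVAR_STATE() below; the constant-cache
 * counters track invalidations and misses (how they are reported is outside
 * this header). */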

#if USE_YJIT || USE_RJIT // We want vm_insns_count on any JIT-enabled build.
// Increment vm_insns_count for --yjit-stats. We increment this even when
// --yjit or --yjit-stats is not used because branching to skip it is slower.
// We also don't use ATOMIC_INC for performance, allowing inaccuracy on Ractors.
#define JIT_COLLECT_USAGE_INSN(insn) rb_vm_insns_count++
#else
#define JIT_COLLECT_USAGE_INSN(insn) // none
#endif

#if VM_COLLECT_USAGE_DETAILS
#define COLLECT_USAGE_INSN(insn) vm_collect_usage_insn(insn)
#define COLLECT_USAGE_OPERAND(insn, n, op) vm_collect_usage_operand((insn), (n), ((VALUE)(op)))
#define COLLECT_USAGE_REGISTER(reg, s) vm_collect_usage_register((reg), (s))
#else
#define COLLECT_USAGE_INSN(insn) JIT_COLLECT_USAGE_INSN(insn)
#define COLLECT_USAGE_OPERAND(insn, n, op) // none
#define COLLECT_USAGE_REGISTER(reg, s) // none
#endif

/**********************************************************/
/* deal with stack */
/**********************************************************/

#define PUSH(x) (SET_SV(x), INC_SP(1))
#define TOPN(n) (*(GET_SP()-(n)-1))
#define POPN(n) (DEC_SP(n))
#define POP() (DEC_SP(1))
#define STACK_ADDR_FROM_TOP(n) (GET_SP()-(n))
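/* Illustrative only: an instruction body built on these macros typically reads
 * its operands with TOPN(), drops them with POPN(), and pushes the result, e.g.
 *
 *   VALUE recv = TOPN(1);          // second-from-top
 *   VALUE obj  = TOPN(0);          // top of stack
 *   POPN(2);                       // pop both operands
 *   PUSH(result_of_the_operation); // write result and advance SP
 *
 * (a sketch of the pattern, not code copied from insns.def) */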

/**********************************************************/
/* deal with registers */
/**********************************************************/

#define VM_REG_CFP (reg_cfp)
#define VM_REG_PC (VM_REG_CFP->pc)
#define VM_REG_SP (VM_REG_CFP->sp)
#define VM_REG_EP (VM_REG_CFP->ep)

#define RESTORE_REGS() do { \
    VM_REG_CFP = ec->cfp; \
} while (0)
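/* RESTORE_REGS() re-reads the cached frame pointer from the execution context;
 * it is needed after any operation that may have pushed or popped control
 * frames (e.g. a method call), so that reg_cfp matches ec->cfp again. */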

typedef enum call_type {
    CALL_PUBLIC,
    CALL_FCALL,
    CALL_VCALL,
    CALL_PUBLIC_KW,
    CALL_FCALL_KW
} call_type;

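/* For `...` (argument-forwarding) call sites: the caller's call info is kept
 * next to the call data so the forwarded call can tell how many stack slots
 * to copy from the caller when it sets up the callee. */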
struct rb_forwarding_call_data {
    struct rb_call_data cd;
    CALL_INFO caller_ci;
};

#if VM_COLLECT_USAGE_DETAILS
enum vm_regan_regtype {
    VM_REGAN_PC = 0,
    VM_REGAN_SP = 1,
    VM_REGAN_EP = 2,
    VM_REGAN_CFP = 3,
    VM_REGAN_SELF = 4,
    VM_REGAN_ISEQ = 5
};
enum vm_regan_acttype {
    VM_REGAN_ACT_GET = 0,
    VM_REGAN_ACT_SET = 1
};

#define COLLECT_USAGE_REGISTER_HELPER(a, b, v) \
  (COLLECT_USAGE_REGISTER((VM_REGAN_##a), (VM_REGAN_ACT_##b)), (v))
#else
#define COLLECT_USAGE_REGISTER_HELPER(a, b, v) (v)
#endif
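/* With VM_COLLECT_USAGE_DETAILS, the register accessors below expand to a comma
 * expression that records the access first: for example GET_PC() becomes roughly
 * (vm_collect_usage_register(VM_REGAN_PC, VM_REGAN_ACT_GET), reg_cfp->pc).
 * Without it, the helper is a no-op and only the register expression remains. */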

/* PC */
#define GET_PC() (COLLECT_USAGE_REGISTER_HELPER(PC, GET, VM_REG_PC))
#define SET_PC(x) (VM_REG_PC = (COLLECT_USAGE_REGISTER_HELPER(PC, SET, (x))))
#define GET_CURRENT_INSN() (*GET_PC())
#define GET_OPERAND(n) (GET_PC()[(n)])
#define ADD_PC(n) (SET_PC(VM_REG_PC + (n)))
#define JUMP(dst) (SET_PC(VM_REG_PC + (dst)))

/* frame pointer, environment pointer */
#define GET_CFP() (COLLECT_USAGE_REGISTER_HELPER(CFP, GET, VM_REG_CFP))
#define GET_EP() (COLLECT_USAGE_REGISTER_HELPER(EP, GET, VM_REG_EP))
#define SET_EP(x) (VM_REG_EP = (COLLECT_USAGE_REGISTER_HELPER(EP, SET, (x))))
#define GET_LEP() (VM_EP_LEP(GET_EP()))

/* SP */
#define GET_SP() (COLLECT_USAGE_REGISTER_HELPER(SP, GET, VM_REG_SP))
#define SET_SP(x) (VM_REG_SP = (COLLECT_USAGE_REGISTER_HELPER(SP, SET, (x))))
#define INC_SP(x) (VM_REG_SP += (COLLECT_USAGE_REGISTER_HELPER(SP, SET, (x))))
#define DEC_SP(x) (VM_REG_SP -= (COLLECT_USAGE_REGISTER_HELPER(SP, SET, (x))))
#define SET_SV(x) (*GET_SP() = rb_ractor_confirm_belonging(x))
  /* set the current stack value to x */

/* instruction sequence C struct */
#define GET_ISEQ() (GET_CFP()->iseq)

/**********************************************************/
/* deal with variables */
/**********************************************************/

#define GET_PREV_EP(ep) ((VALUE *)((ep)[VM_ENV_DATA_INDEX_SPECVAL] & ~0x03))
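/* The specval slot may carry tag bits in its low bits, so they are masked off
 * (& ~0x03) to recover the raw previous-EP pointer. */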

/**********************************************************/
/* deal with values */
/**********************************************************/

#define GET_SELF() (COLLECT_USAGE_REGISTER_HELPER(SELF, GET, GET_CFP()->self))

/**********************************************************/
/* deal with control flow 2: method/iterator */
/**********************************************************/

/* Set the fastpath only when the cached method is *NOT* protected,
 * because the inline method cache does not take the receiver into account.
 */

static inline void
CC_SET_FASTPATH(const struct rb_callcache *cc, vm_call_handler func, bool enabled)
{
    if (LIKELY(enabled)) {
        vm_cc_call_set(cc, func);
    }
}
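/* Typical pattern (a sketch, not a required calling convention): once a call
 * has been resolved, the selected handler can be installed on the call cache so
 * later hits on the same cache dispatch directly, e.g.
 *
 *   CC_SET_FASTPATH(cc, resolved_handler, vm_call_cacheable(ci, cc));
 *
 * where resolved_handler stands for whichever vm_call_handler was chosen. */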

#define GET_BLOCK_HANDLER() (GET_LEP()[VM_ENV_DATA_INDEX_SPECVAL])

/**********************************************************/
/* deal with control flow 3: exception */
/**********************************************************/

/**********************************************************/
/* deal with stack canary */
/**********************************************************/

#if VM_CHECK_MODE > 0
#define SETUP_CANARY(cond) \
    VALUE *canary = 0; \
    if (cond) { \
        canary = GET_SP(); \
        SET_SV(vm_stack_canary); \
    } \
    else { \
        SET_SV(Qfalse); /* cleanup */ \
    }
#define CHECK_CANARY(cond, insn) \
    if (cond) { \
        if (*canary == vm_stack_canary) { \
            *canary = Qfalse; /* cleanup */ \
        } \
        else { \
            rb_vm_canary_is_found_dead(insn, *canary); \
        } \
    }
#else
#define SETUP_CANARY(cond) if (cond) {} else {}
#define CHECK_CANARY(cond, insn) if (cond) {(void)(insn);}
#endif
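/* Usage sketch (names are placeholders): a handler that expects a C call not
 * to touch the VM stack can plant a canary before the call and verify it after:
 *
 *   SETUP_CANARY(leaf);
 *   val = leaf_callee(...);
 *   CHECK_CANARY(leaf, insn);
 */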

/**********************************************************/
/* others */
/**********************************************************/

#define CALL_SIMPLE_METHOD() do { \
    rb_snum_t insn_width = attr_width_opt_send_without_block(0); \
    ADD_PC(-insn_width); \
    DISPATCH_ORIGINAL_INSN(opt_send_without_block); \
} while (0)
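/* CALL_SIMPLE_METHOD() rewinds the PC by the width of opt_send_without_block
 * and redispatches to that instruction, letting a specialized instruction fall
 * back to the generic send path. */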

#define GET_GLOBAL_CVAR_STATE() (ruby_vm_global_cvar_state)
#define INC_GLOBAL_CVAR_STATE() (++ruby_vm_global_cvar_state)
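/* vm_throw_data is the imemo object that carries a thrown value together with
 * its catch frame and throw state (e.g. TAG_BREAK); the accessors below wrap
 * its fields. */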

static inline struct vm_throw_data *
THROW_DATA_NEW(VALUE val, const rb_control_frame_t *cf, int st)
{
    struct vm_throw_data *obj = IMEMO_NEW(struct vm_throw_data, imemo_throw_data, 0);
    *((VALUE *)&obj->throw_obj) = val;
    *((struct rb_control_frame_struct **)&obj->catch_frame) = (struct rb_control_frame_struct *)cf;
    obj->throw_state = st;

    return obj;
}

static inline VALUE
THROW_DATA_VAL(const struct vm_throw_data *obj)
{
    VM_ASSERT(THROW_DATA_P(obj));
    return obj->throw_obj;
}

static inline const rb_control_frame_t *
THROW_DATA_CATCH_FRAME(const struct vm_throw_data *obj)
{
    VM_ASSERT(THROW_DATA_P(obj));
    return obj->catch_frame;
}

static inline int
THROW_DATA_STATE(const struct vm_throw_data *obj)
{
    VM_ASSERT(THROW_DATA_P(obj));
    return obj->throw_state;
}

static inline int
THROW_DATA_CONSUMED_P(const struct vm_throw_data *obj)
{
    VM_ASSERT(THROW_DATA_P(obj));
    return obj->flags & THROW_DATA_CONSUMED;
}

static inline void
THROW_DATA_CATCH_FRAME_SET(struct vm_throw_data *obj, const rb_control_frame_t *cfp)
{
    VM_ASSERT(THROW_DATA_P(obj));
    obj->catch_frame = cfp;
}

static inline void
THROW_DATA_STATE_SET(struct vm_throw_data *obj, int st)
{
    VM_ASSERT(THROW_DATA_P(obj));
    obj->throw_state = st;
}

static inline void
THROW_DATA_CONSUMED_SET(struct vm_throw_data *obj)
{
    if (THROW_DATA_P(obj) &&
        THROW_DATA_STATE(obj) == TAG_BREAK) {
        obj->flags |= THROW_DATA_CONSUMED;
    }
}

#define IS_ARGS_SPLAT(ci) (vm_ci_flag(ci) & VM_CALL_ARGS_SPLAT)
#define IS_ARGS_KEYWORD(ci) (vm_ci_flag(ci) & VM_CALL_KWARG)
#define IS_ARGS_KW_SPLAT(ci) (vm_ci_flag(ci) & VM_CALL_KW_SPLAT)
#define IS_ARGS_KW_OR_KW_SPLAT(ci) (vm_ci_flag(ci) & (VM_CALL_KWARG | VM_CALL_KW_SPLAT))
#define IS_ARGS_KW_SPLAT_MUT(ci) (vm_ci_flag(ci) & VM_CALL_KW_SPLAT_MUT)
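/* VM_CALL_KW_SPLAT_MUT marks a keyword-splat argument whose hash was freshly
 * allocated by the caller, so the callee may use (and mutate) it without
 * duplicating it; when it is not set, the splatted hash must be treated as
 * shared. */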

static inline bool
vm_call_cacheable(const struct rb_callinfo *ci, const struct rb_callcache *cc)
{
    return !(vm_ci_flag(ci) & VM_CALL_FORWARDING) && ((vm_ci_flag(ci) & VM_CALL_FCALL) ||
        METHOD_ENTRY_VISI(vm_cc_cme(cc)) != METHOD_VISI_PROTECTED);
}

/* If this returns true, an optimized function returned by `vm_call_iseq_setup_func`
   can be used as a fastpath. */
static inline bool
vm_call_iseq_optimizable_p(const struct rb_callinfo *ci, const struct rb_callcache *cc)
{
    return !IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) && vm_call_cacheable(ci, cc);
}

#endif /* RUBY_INSNHELPER_H */