Introduce Ractor mechanism for parallel execution

This commit introduces the Ractor mechanism to run Ruby programs in
parallel. See doc/ractor.md for more details about Ractor, and see
ticket [Feature #17100] for the implementation details and discussions.

[Feature #17100]

This commit does not complete the implementation. You may find many
bugs when using Ractor, and the specification may still change, so this
feature is experimental. You will see a warning when you create the
first Ractor with `Ractor.new`.

I hope this feature can free programmers from thread-safety issues.
Koichi Sasada 2020-03-10 02:22:11 +09:00
Parent eeb5325d3b
Commit 79df14c04b
41 changed files with 5951 additions and 783 deletions


@ -0,0 +1,516 @@
# Ractor.current returns the current Ractor
assert_equal 'Ractor', %q{
Ractor.current.class
}
# Ractor.new returns a new Ractor
assert_equal 'Ractor', %q{
Ractor.new{}.class
}
# Ractor.new must be called with a block
assert_equal "must be called with a block", %q{
begin
Ractor.new
rescue ArgumentError => e
e.message
end
}
# The return value of a Ractor's block becomes a message from the Ractor.
assert_equal 'ok', %q{
# join
r = Ractor.new do
'ok'
end
r.take
}
# Arguments passed to Ractor.new become block parameters.
# The values are passed via Ractor communication.
assert_equal 'ok', %q{
# ping-pong with arg
r = Ractor.new 'ok' do |msg|
msg
end
r.take
}
assert_equal 'ok', %q{
# ping-pong with two args
r = Ractor.new 'ping', 'pong' do |msg, msg2|
[msg, msg2]
end
'ok' if r.take == ['ping', 'pong']
}
# Ractor#send passes a copy of an object to a Ractor,
# and Ractor.recv in the Ractor block receives the passed value.
assert_equal 'ok', %q{
r = Ractor.new do
msg = Ractor.recv
end
r.send 'ok'
r.take
}
# Ractor.select(*ractors) receives a value from one of the ractors.
# It is similar to select(2) and Go's select syntax.
# The return value is [ractor, received_value].
assert_equal 'ok', %q{
# select 1
r1 = Ractor.new{'r1'}
r, obj = Ractor.select(r1)
'ok' if r == r1 and obj == 'r1'
}
assert_equal '["r1", "r2"]', %q{
# select 2
r1 = Ractor.new{'r1'}
r2 = Ractor.new{'r2'}
rs = [r1, r2]
as = []
r, obj = Ractor.select(*rs)
rs.delete(r)
as << obj
r, obj = Ractor.select(*rs)
as << obj
as.sort #=> ["r1", "r2"]
}
assert_equal 'true', %q{
def test n
rs = (1..n).map do |i|
Ractor.new(i) do |i|
"r#{i}"
end
end
as = []
all_rs = rs.dup
n.times{
r, obj = Ractor.select(*rs)
as << [r, obj]
rs.delete(r)
}
if as.map{|r, o| r.inspect}.sort == all_rs.map{|r| r.inspect}.sort &&
as.map{|r, o| o}.sort == (1..n).map{|i| "r#{i}"}.sort
'ok'
else
'ng'
end
end
30.times.map{|i|
test i
}.all?('ok')
}
# The outgoing port of a Ractor will be closed when the Ractor terminates.
assert_equal 'ok', %q{
r = Ractor.new do
'finish'
end
r.take
sleep 0.1 # wait for terminate
begin
o = r.take
rescue Ractor::ClosedError
'ok'
else
"ng: #{o}"
end
}
assert_equal 'ok', %q{
r = Ractor.new do
end
r.take # closed
sleep 0.1 # wait for terminate
begin
r.send(1)
rescue Ractor::ClosedError
'ok'
else
'ng'
end
}
# multiple Ractors can wait on (take from) one Ractor
assert_equal '[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]', %q{
pipe = Ractor.new do
loop do
Ractor.yield Ractor.recv
end
end
RN = 10
rs = RN.times.map{|i|
Ractor.new pipe, i do |pipe, i|
msg = pipe.take
msg # ping-pong
end
}
RN.times{|i|
pipe << i
}
RN.times.map{
r, n = Ractor.select(*rs)
rs.delete r
n
}.sort
}
# Ractor.select also supports multiple take, recv and yield
assert_equal '[true, true, true]', %q{
RN = 10
CR = Ractor.current
rs = (1..RN).map{
Ractor.new do
CR.send 'send' + CR.take #=> 'sendyield'
'take'
end
}
recv = []
take = []
yiel = []
until rs.empty?
r, v = Ractor.select(CR, *rs, yield_value: 'yield')
case r
when :recv
recv << v
when :yield
yiel << v
else
take << v
rs.delete r
end
end
[recv.all?('sendyield'), yiel.all?(nil), take.all?('take')]
}
# multiple Ractors can send to one Ractor
assert_equal '[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]', %q{
pipe = Ractor.new do
loop do
Ractor.yield Ractor.recv
end
end
RN = 10
RN.times.map{|i|
Ractor.new pipe, i do |pipe, i|
pipe << i
end
}
RN.times.map{
pipe.take
}.sort
}
# an exception in a Ractor will be re-raised at Ractor#take
assert_equal '[RuntimeError, "ok", true]', %q{
r = Ractor.new do
raise 'ok' # the exception will be transferred to the receiver
end
begin
r.take
rescue Ractor::RemoteError => e
[e.cause.class, #=> RuntimeError
e.cause.message, #=> 'ok'
e.ractor == r] #=> true
end
}
# unshareable objects are copied
assert_equal 'false', %q{
obj = 'str'.dup
r = Ractor.new obj do |msg|
msg.object_id
end
obj.object_id == r.take
}
# To copy the object, Marshal.dump is currently used
assert_equal 'no _dump_data is defined for class Thread', %q{
obj = Thread.new{}
begin
r = Ractor.new obj do |msg|
msg
end
rescue TypeError => e
e.message #=> no _dump_data is defined for class Thread
else
'ng'
end
}
# send shareable and unshareable objects
assert_equal "[[[1, true], [:sym, true], [:xyzzy, true], [\"frozen\", true], " \
"[(3/1), true], [(3+4i), true], [/regexp/, true], [C, true]], " \
"[[\"mutable str\", false], [[:array], false], [{:hash=>true}, false]]]", %q{
r = Ractor.new do
while v = Ractor.recv
Ractor.yield v
end
end
class C
end
sharable_objects = [1, :sym, 'xyzzy'.to_sym, 'frozen'.freeze, 1+2r, 3+4i, /regexp/, C]
sr = sharable_objects.map{|o|
r << o
o2 = r.take
[o, o.object_id == o2.object_id]
}
ur = unsharable_objects = ['mutable str'.dup, [:array], {hash: true}].map{|o|
r << o
o2 = r.take
[o, o.object_id == o2.object_id]
}
[sr, ur].inspect
}
# move example 1: String
# touching a moved object causes an error
assert_equal 'hello world', %q{
# move
r = Ractor.new do
obj = Ractor.recv
obj << ' world'
end
str = 'hello'
r.send str, move: true
modified = r.take
begin
str << ' exception' # raise Ractor::MovedError
rescue Ractor::MovedError
modified #=> 'hello world'
else
raise 'unreachable'
end
}
# move example 2: Array
assert_equal '[0, 1]', %q{
r = Ractor.new do
ary = Ractor.recv
ary << 1
end
a1 = [0]
r.send a1, move: true
a2 = r.take
begin
a1 << 2 # raise Ractor::MovedError
rescue Ractor::MovedError
a2.inspect
end
}
# move with yield
assert_equal 'hello', %q{
r = Ractor.new do
Thread.current.report_on_exception = false
obj = 'hello'
Ractor.yield obj, move: true
obj << 'world'
end
str = r.take
begin
r.take
rescue Ractor::RemoteError
str #=> "hello"
end
}
# Access to global variables is prohibited
assert_equal 'can not access global variables $gv from non-main Ractors', %q{
$gv = 1
r = Ractor.new do
$gv
end
begin
r.take
rescue Ractor::RemoteError => e
e.cause.message
end
}
# Writing to global variables is also prohibited
assert_equal 'can not access global variables $gv from non-main Ractors', %q{
r = Ractor.new do
$gv = 1
end
begin
r.take
rescue Ractor::RemoteError => e
e.cause.message
end
}
# $stdin, $stdout and $stderr are Ractor-local, but they share the underlying fds
assert_equal 'ok', %q{
r = Ractor.new do
[$stdin, $stdout, $stderr].map{|io|
[io.object_id, io.fileno]
}
end
[$stdin, $stdout, $stderr].zip(r.take){|io, (oid, fno)|
raise "should not be different object" if io.object_id == oid
raise "fd should be same" unless io.fileno == fno
}
'ok'
}
# each Ractor's self is a different object
assert_equal 'false', %q{
r = Ractor.new do
self.object_id
end
r.take == self.object_id #=> false
}
# self is a Ractor instance
assert_equal 'true', %q{
r = Ractor.new do
self.object_id
end
r.object_id == r.take #=> true
}
# the given block Proc will be isolated, so it can not access outer variables.
assert_equal 'ArgumentError', %q{
begin
a = true
r = Ractor.new do
a
end
rescue => e
e.class
end
}
# instance variables of classes/modules can not be accessed from non-main Ractors
assert_equal 'can not access instance variables of classes/modules from non-main Ractors', %q{
class C
@iv = 'str'
end
r = Ractor.new do
class C
p @iv
end
end
begin
r.take
rescue Ractor::RemoteError => e
e.cause.message
end
}
# instance variables of shareable objects can not be accessed from non-main Ractors
assert_equal 'can not access instance variables of shareable objects from non-main Ractors', %q{
shared = Ractor.new{}
shared.instance_variable_set(:@iv, 'str')
r = Ractor.new shared do |shared|
p shared.instance_variable_get(:@iv)
end
begin
r.take
rescue Ractor::RemoteError => e
e.cause.message
end
}
# class variables can not be accessed from non-main Ractors
assert_equal 'can not access class variables from non-main Ractors', %q{
class C
@@cv = 'str'
end
r = Ractor.new do
class C
p @@cv
end
end
begin
r.take
rescue Ractor::RemoteError => e
e.cause.message
end
}
# Getting non-shareable objects via constants from non-main Ractors is not allowed
assert_equal 'can not access non-sharable objects in constant C::CONST by non-main Ractor.', %q{
class C
CONST = 'str'
end
r = Ractor.new do
C::CONST
end
begin
r.take
rescue Ractor::RemoteError => e
e.cause.message
end
}
# Setting non-shareable objects into constants from non-main Ractors is not allowed
assert_equal 'can not set constants with non-shareable objects by non-main Ractors', %q{
class C
end
r = Ractor.new do
C::CONST = 'str'
end
begin
r.take
rescue Ractor::RemoteError => e
e.cause.message
end
}
# Immutable Array and Hash are shareable, so they can be shared via constants
assert_equal '[1000, 3]', %q{
A = Array.new(1000).freeze # [nil, ...]
H = {a: 1, b: 2, c: 3}.freeze
Ractor.new{ [A.size, H.size] }.take
}
# A Ractor can have a name
assert_equal 'test-name', %q{
r = Ractor.new name: 'test-name' do
end
r.name
}
# If a Ractor doesn't have a name, Ractor#name returns nil.
assert_equal 'nil', %q{
r = Ractor.new do
end
r.name.inspect
}

common.mk

@ -116,6 +116,7 @@ COMMONOBJS = array.$(OBJEXT) \
parse.$(OBJEXT) \
proc.$(OBJEXT) \
process.$(OBJEXT) \
ractor.$(OBJEXT) \
random.$(OBJEXT) \
range.$(OBJEXT) \
rational.$(OBJEXT) \
@ -144,6 +145,7 @@ COMMONOBJS = array.$(OBJEXT) \
vm.$(OBJEXT) \
vm_backtrace.$(OBJEXT) \
vm_dump.$(OBJEXT) \
vm_sync.$(OBJEXT) \
vm_trace.$(OBJEXT) \
$(COROUTINE_OBJ) \
$(DTRACE_OBJ) \
@ -759,6 +761,9 @@ no-btest-ruby: PHONY
yes-btest-ruby: prog PHONY
$(Q)$(exec) $(RUNRUBY) "$(srcdir)/bootstraptest/runner.rb" --ruby="$(PROGRAM) -I$(srcdir)/lib $(RUN_OPTS)" -q $(OPTS) $(TESTOPTS) $(BTESTS)
rtest: fake miniruby$(EXEEXT) PHONY
$(Q)$(exec) $(BOOTSTRAPRUBY) "$(srcdir)/bootstraptest/runner.rb" --ruby="$(BTESTRUBY) $(RUN_OPTS)" --sets=ractor -v
test-basic: $(TEST_RUNNABLE)-test-basic
no-test-basic: PHONY
yes-test-basic: prog PHONY
@ -1008,15 +1013,16 @@ $(srcs_vpath)mjit_compile.inc: $(tooldir)/ruby_vm/views/mjit_compile.inc.erb $(i
BUILTIN_RB_SRCS = \
$(srcdir)/ast.rb \
$(srcdir)/dir.rb \
$(srcdir)/gc.rb \
$(srcdir)/integer.rb \
$(srcdir)/io.rb \
$(srcdir)/dir.rb \
$(srcdir)/pack.rb \
$(srcdir)/trace_point.rb \
$(srcdir)/warning.rb \
$(srcdir)/array.rb \
$(srcdir)/kernel.rb \
$(srcdir)/ractor.rb \
$(srcdir)/prelude.rb \
$(srcdir)/gem_prelude.rb \
$(empty)
@ -3361,6 +3367,8 @@ cont.$(OBJEXT): {$(VPATH)}method.h
cont.$(OBJEXT): {$(VPATH)}missing.h
cont.$(OBJEXT): {$(VPATH)}mjit.h
cont.$(OBJEXT): {$(VPATH)}node.h
cont.$(OBJEXT): {$(VPATH)}ractor.h
cont.$(OBJEXT): {$(VPATH)}ractor_pub.h
cont.$(OBJEXT): {$(VPATH)}ruby_assert.h
cont.$(OBJEXT): {$(VPATH)}ruby_atomic.h
cont.$(OBJEXT): {$(VPATH)}st.h
@ -3368,6 +3376,7 @@ cont.$(OBJEXT): {$(VPATH)}subst.h
cont.$(OBJEXT): {$(VPATH)}thread_$(THREAD_MODEL).h
cont.$(OBJEXT): {$(VPATH)}thread_native.h
cont.$(OBJEXT): {$(VPATH)}vm_core.h
cont.$(OBJEXT): {$(VPATH)}vm_debug.h
cont.$(OBJEXT): {$(VPATH)}vm_opts.h
debug.$(OBJEXT): $(CCAN_DIR)/check_type/check_type.h
debug.$(OBJEXT): $(CCAN_DIR)/container_of/container_of.h
@ -3403,6 +3412,7 @@ debug.$(OBJEXT): {$(VPATH)}encoding.h
debug.$(OBJEXT): {$(VPATH)}eval_intern.h
debug.$(OBJEXT): {$(VPATH)}gc.h
debug.$(OBJEXT): {$(VPATH)}id.h
debug.$(OBJEXT): {$(VPATH)}id_table.h
debug.$(OBJEXT): {$(VPATH)}intern.h
debug.$(OBJEXT): {$(VPATH)}internal.h
debug.$(OBJEXT): {$(VPATH)}internal/anyargs.h
@ -3551,6 +3561,8 @@ debug.$(OBJEXT): {$(VPATH)}missing.h
debug.$(OBJEXT): {$(VPATH)}node.h
debug.$(OBJEXT): {$(VPATH)}onigmo.h
debug.$(OBJEXT): {$(VPATH)}oniguruma.h
debug.$(OBJEXT): {$(VPATH)}ractor.h
debug.$(OBJEXT): {$(VPATH)}ractor_pub.h
debug.$(OBJEXT): {$(VPATH)}ruby_assert.h
debug.$(OBJEXT): {$(VPATH)}ruby_atomic.h
debug.$(OBJEXT): {$(VPATH)}st.h
@ -5378,6 +5390,8 @@ eval.$(OBJEXT): {$(VPATH)}oniguruma.h
eval.$(OBJEXT): {$(VPATH)}probes.dmyh
eval.$(OBJEXT): {$(VPATH)}probes.h
eval.$(OBJEXT): {$(VPATH)}probes_helper.h
eval.$(OBJEXT): {$(VPATH)}ractor.h
eval.$(OBJEXT): {$(VPATH)}ractor_pub.h
eval.$(OBJEXT): {$(VPATH)}ruby_assert.h
eval.$(OBJEXT): {$(VPATH)}ruby_atomic.h
eval.$(OBJEXT): {$(VPATH)}st.h
@ -5386,6 +5400,7 @@ eval.$(OBJEXT): {$(VPATH)}thread_$(THREAD_MODEL).h
eval.$(OBJEXT): {$(VPATH)}thread_native.h
eval.$(OBJEXT): {$(VPATH)}vm.h
eval.$(OBJEXT): {$(VPATH)}vm_core.h
eval.$(OBJEXT): {$(VPATH)}vm_debug.h
eval.$(OBJEXT): {$(VPATH)}vm_opts.h
explicit_bzero.$(OBJEXT): {$(VPATH)}config.h
explicit_bzero.$(OBJEXT): {$(VPATH)}explicit_bzero.c
@ -5797,6 +5812,8 @@ gc.$(OBJEXT): {$(VPATH)}onigmo.h
gc.$(OBJEXT): {$(VPATH)}oniguruma.h
gc.$(OBJEXT): {$(VPATH)}probes.dmyh
gc.$(OBJEXT): {$(VPATH)}probes.h
gc.$(OBJEXT): {$(VPATH)}ractor.h
gc.$(OBJEXT): {$(VPATH)}ractor_pub.h
gc.$(OBJEXT): {$(VPATH)}re.h
gc.$(OBJEXT): {$(VPATH)}regenc.h
gc.$(OBJEXT): {$(VPATH)}regex.h
@ -5813,7 +5830,9 @@ gc.$(OBJEXT): {$(VPATH)}transient_heap.h
gc.$(OBJEXT): {$(VPATH)}util.h
gc.$(OBJEXT): {$(VPATH)}vm_callinfo.h
gc.$(OBJEXT): {$(VPATH)}vm_core.h
gc.$(OBJEXT): {$(VPATH)}vm_debug.h
gc.$(OBJEXT): {$(VPATH)}vm_opts.h
gc.$(OBJEXT): {$(VPATH)}vm_sync.h
golf_prelude.$(OBJEXT): $(CCAN_DIR)/check_type/check_type.h
golf_prelude.$(OBJEXT): $(CCAN_DIR)/container_of/container_of.h
golf_prelude.$(OBJEXT): $(CCAN_DIR)/list/list.h
@ -6713,6 +6732,7 @@ io.$(OBJEXT): {$(VPATH)}missing.h
io.$(OBJEXT): {$(VPATH)}node.h
io.$(OBJEXT): {$(VPATH)}onigmo.h
io.$(OBJEXT): {$(VPATH)}oniguruma.h
io.$(OBJEXT): {$(VPATH)}ractor_pub.h
io.$(OBJEXT): {$(VPATH)}ruby_assert.h
io.$(OBJEXT): {$(VPATH)}ruby_atomic.h
io.$(OBJEXT): {$(VPATH)}st.h
@ -8163,6 +8183,7 @@ miniinit.$(OBJEXT): {$(VPATH)}onigmo.h
miniinit.$(OBJEXT): {$(VPATH)}oniguruma.h
miniinit.$(OBJEXT): {$(VPATH)}pack.rb
miniinit.$(OBJEXT): {$(VPATH)}prelude.rb
miniinit.$(OBJEXT): {$(VPATH)}ractor.rb
miniinit.$(OBJEXT): {$(VPATH)}ruby_assert.h
miniinit.$(OBJEXT): {$(VPATH)}ruby_atomic.h
miniinit.$(OBJEXT): {$(VPATH)}st.h
@ -9557,6 +9578,7 @@ parse.$(OBJEXT): {$(VPATH)}parse.h
parse.$(OBJEXT): {$(VPATH)}parse.y
parse.$(OBJEXT): {$(VPATH)}probes.dmyh
parse.$(OBJEXT): {$(VPATH)}probes.h
parse.$(OBJEXT): {$(VPATH)}ractor_pub.h
parse.$(OBJEXT): {$(VPATH)}regenc.h
parse.$(OBJEXT): {$(VPATH)}regex.h
parse.$(OBJEXT): {$(VPATH)}ruby_assert.h
@ -9978,6 +10000,7 @@ process.$(OBJEXT): {$(VPATH)}node.h
process.$(OBJEXT): {$(VPATH)}onigmo.h
process.$(OBJEXT): {$(VPATH)}oniguruma.h
process.$(OBJEXT): {$(VPATH)}process.c
process.$(OBJEXT): {$(VPATH)}ractor_pub.h
process.$(OBJEXT): {$(VPATH)}ruby_assert.h
process.$(OBJEXT): {$(VPATH)}ruby_atomic.h
process.$(OBJEXT): {$(VPATH)}st.h
@ -9988,6 +10011,203 @@ process.$(OBJEXT): {$(VPATH)}thread_native.h
process.$(OBJEXT): {$(VPATH)}util.h
process.$(OBJEXT): {$(VPATH)}vm_core.h
process.$(OBJEXT): {$(VPATH)}vm_opts.h
ractor.$(OBJEXT): $(CCAN_DIR)/check_type/check_type.h
ractor.$(OBJEXT): $(CCAN_DIR)/container_of/container_of.h
ractor.$(OBJEXT): $(CCAN_DIR)/list/list.h
ractor.$(OBJEXT): $(CCAN_DIR)/str/str.h
ractor.$(OBJEXT): $(hdrdir)/ruby/ruby.h
ractor.$(OBJEXT): $(top_srcdir)/internal/array.h
ractor.$(OBJEXT): $(top_srcdir)/internal/compilers.h
ractor.$(OBJEXT): $(top_srcdir)/internal/error.h
ractor.$(OBJEXT): $(top_srcdir)/internal/gc.h
ractor.$(OBJEXT): $(top_srcdir)/internal/imemo.h
ractor.$(OBJEXT): $(top_srcdir)/internal/serial.h
ractor.$(OBJEXT): $(top_srcdir)/internal/static_assert.h
ractor.$(OBJEXT): $(top_srcdir)/internal/string.h
ractor.$(OBJEXT): $(top_srcdir)/internal/vm.h
ractor.$(OBJEXT): $(top_srcdir)/internal/warnings.h
ractor.$(OBJEXT): {$(VPATH)}assert.h
ractor.$(OBJEXT): {$(VPATH)}backward/2/assume.h
ractor.$(OBJEXT): {$(VPATH)}backward/2/attributes.h
ractor.$(OBJEXT): {$(VPATH)}backward/2/bool.h
ractor.$(OBJEXT): {$(VPATH)}backward/2/gcc_version_since.h
ractor.$(OBJEXT): {$(VPATH)}backward/2/inttypes.h
ractor.$(OBJEXT): {$(VPATH)}backward/2/limits.h
ractor.$(OBJEXT): {$(VPATH)}backward/2/long_long.h
ractor.$(OBJEXT): {$(VPATH)}backward/2/stdalign.h
ractor.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h
ractor.$(OBJEXT): {$(VPATH)}builtin.h
ractor.$(OBJEXT): {$(VPATH)}config.h
ractor.$(OBJEXT): {$(VPATH)}constant.h
ractor.$(OBJEXT): {$(VPATH)}debug.h
ractor.$(OBJEXT): {$(VPATH)}debug_counter.h
ractor.$(OBJEXT): {$(VPATH)}defines.h
ractor.$(OBJEXT): {$(VPATH)}encoding.h
ractor.$(OBJEXT): {$(VPATH)}id.h
ractor.$(OBJEXT): {$(VPATH)}id_table.h
ractor.$(OBJEXT): {$(VPATH)}intern.h
ractor.$(OBJEXT): {$(VPATH)}internal.h
ractor.$(OBJEXT): {$(VPATH)}internal/anyargs.h
ractor.$(OBJEXT): {$(VPATH)}internal/arithmetic.h
ractor.$(OBJEXT): {$(VPATH)}internal/arithmetic/char.h
ractor.$(OBJEXT): {$(VPATH)}internal/arithmetic/double.h
ractor.$(OBJEXT): {$(VPATH)}internal/arithmetic/fixnum.h
ractor.$(OBJEXT): {$(VPATH)}internal/arithmetic/gid_t.h
ractor.$(OBJEXT): {$(VPATH)}internal/arithmetic/int.h
ractor.$(OBJEXT): {$(VPATH)}internal/arithmetic/intptr_t.h
ractor.$(OBJEXT): {$(VPATH)}internal/arithmetic/long.h
ractor.$(OBJEXT): {$(VPATH)}internal/arithmetic/long_long.h
ractor.$(OBJEXT): {$(VPATH)}internal/arithmetic/mode_t.h
ractor.$(OBJEXT): {$(VPATH)}internal/arithmetic/off_t.h
ractor.$(OBJEXT): {$(VPATH)}internal/arithmetic/pid_t.h
ractor.$(OBJEXT): {$(VPATH)}internal/arithmetic/short.h
ractor.$(OBJEXT): {$(VPATH)}internal/arithmetic/size_t.h
ractor.$(OBJEXT): {$(VPATH)}internal/arithmetic/st_data_t.h
ractor.$(OBJEXT): {$(VPATH)}internal/arithmetic/uid_t.h
ractor.$(OBJEXT): {$(VPATH)}internal/assume.h
ractor.$(OBJEXT): {$(VPATH)}internal/attr/alloc_size.h
ractor.$(OBJEXT): {$(VPATH)}internal/attr/artificial.h
ractor.$(OBJEXT): {$(VPATH)}internal/attr/cold.h
ractor.$(OBJEXT): {$(VPATH)}internal/attr/const.h
ractor.$(OBJEXT): {$(VPATH)}internal/attr/constexpr.h
ractor.$(OBJEXT): {$(VPATH)}internal/attr/deprecated.h
ractor.$(OBJEXT): {$(VPATH)}internal/attr/diagnose_if.h
ractor.$(OBJEXT): {$(VPATH)}internal/attr/enum_extensibility.h
ractor.$(OBJEXT): {$(VPATH)}internal/attr/error.h
ractor.$(OBJEXT): {$(VPATH)}internal/attr/flag_enum.h
ractor.$(OBJEXT): {$(VPATH)}internal/attr/forceinline.h
ractor.$(OBJEXT): {$(VPATH)}internal/attr/format.h
ractor.$(OBJEXT): {$(VPATH)}internal/attr/maybe_unused.h
ractor.$(OBJEXT): {$(VPATH)}internal/attr/noalias.h
ractor.$(OBJEXT): {$(VPATH)}internal/attr/nodiscard.h
ractor.$(OBJEXT): {$(VPATH)}internal/attr/noexcept.h
ractor.$(OBJEXT): {$(VPATH)}internal/attr/noinline.h
ractor.$(OBJEXT): {$(VPATH)}internal/attr/nonnull.h
ractor.$(OBJEXT): {$(VPATH)}internal/attr/noreturn.h
ractor.$(OBJEXT): {$(VPATH)}internal/attr/pure.h
ractor.$(OBJEXT): {$(VPATH)}internal/attr/restrict.h
ractor.$(OBJEXT): {$(VPATH)}internal/attr/returns_nonnull.h
ractor.$(OBJEXT): {$(VPATH)}internal/attr/warning.h
ractor.$(OBJEXT): {$(VPATH)}internal/attr/weakref.h
ractor.$(OBJEXT): {$(VPATH)}internal/cast.h
ractor.$(OBJEXT): {$(VPATH)}internal/compiler_is.h
ractor.$(OBJEXT): {$(VPATH)}internal/compiler_is/apple.h
ractor.$(OBJEXT): {$(VPATH)}internal/compiler_is/clang.h
ractor.$(OBJEXT): {$(VPATH)}internal/compiler_is/gcc.h
ractor.$(OBJEXT): {$(VPATH)}internal/compiler_is/intel.h
ractor.$(OBJEXT): {$(VPATH)}internal/compiler_is/msvc.h
ractor.$(OBJEXT): {$(VPATH)}internal/compiler_is/sunpro.h
ractor.$(OBJEXT): {$(VPATH)}internal/compiler_since.h
ractor.$(OBJEXT): {$(VPATH)}internal/config.h
ractor.$(OBJEXT): {$(VPATH)}internal/constant_p.h
ractor.$(OBJEXT): {$(VPATH)}internal/core.h
ractor.$(OBJEXT): {$(VPATH)}internal/core/rarray.h
ractor.$(OBJEXT): {$(VPATH)}internal/core/rbasic.h
ractor.$(OBJEXT): {$(VPATH)}internal/core/rbignum.h
ractor.$(OBJEXT): {$(VPATH)}internal/core/rclass.h
ractor.$(OBJEXT): {$(VPATH)}internal/core/rdata.h
ractor.$(OBJEXT): {$(VPATH)}internal/core/rfile.h
ractor.$(OBJEXT): {$(VPATH)}internal/core/rhash.h
ractor.$(OBJEXT): {$(VPATH)}internal/core/robject.h
ractor.$(OBJEXT): {$(VPATH)}internal/core/rregexp.h
ractor.$(OBJEXT): {$(VPATH)}internal/core/rstring.h
ractor.$(OBJEXT): {$(VPATH)}internal/core/rstruct.h
ractor.$(OBJEXT): {$(VPATH)}internal/core/rtypeddata.h
ractor.$(OBJEXT): {$(VPATH)}internal/ctype.h
ractor.$(OBJEXT): {$(VPATH)}internal/dllexport.h
ractor.$(OBJEXT): {$(VPATH)}internal/dosish.h
ractor.$(OBJEXT): {$(VPATH)}internal/error.h
ractor.$(OBJEXT): {$(VPATH)}internal/eval.h
ractor.$(OBJEXT): {$(VPATH)}internal/event.h
ractor.$(OBJEXT): {$(VPATH)}internal/fl_type.h
ractor.$(OBJEXT): {$(VPATH)}internal/gc.h
ractor.$(OBJEXT): {$(VPATH)}internal/glob.h
ractor.$(OBJEXT): {$(VPATH)}internal/globals.h
ractor.$(OBJEXT): {$(VPATH)}internal/has/attribute.h
ractor.$(OBJEXT): {$(VPATH)}internal/has/builtin.h
ractor.$(OBJEXT): {$(VPATH)}internal/has/c_attribute.h
ractor.$(OBJEXT): {$(VPATH)}internal/has/cpp_attribute.h
ractor.$(OBJEXT): {$(VPATH)}internal/has/declspec_attribute.h
ractor.$(OBJEXT): {$(VPATH)}internal/has/extension.h
ractor.$(OBJEXT): {$(VPATH)}internal/has/feature.h
ractor.$(OBJEXT): {$(VPATH)}internal/has/warning.h
ractor.$(OBJEXT): {$(VPATH)}internal/intern/array.h
ractor.$(OBJEXT): {$(VPATH)}internal/intern/bignum.h
ractor.$(OBJEXT): {$(VPATH)}internal/intern/class.h
ractor.$(OBJEXT): {$(VPATH)}internal/intern/compar.h
ractor.$(OBJEXT): {$(VPATH)}internal/intern/complex.h
ractor.$(OBJEXT): {$(VPATH)}internal/intern/cont.h
ractor.$(OBJEXT): {$(VPATH)}internal/intern/dir.h
ractor.$(OBJEXT): {$(VPATH)}internal/intern/enum.h
ractor.$(OBJEXT): {$(VPATH)}internal/intern/enumerator.h
ractor.$(OBJEXT): {$(VPATH)}internal/intern/error.h
ractor.$(OBJEXT): {$(VPATH)}internal/intern/eval.h
ractor.$(OBJEXT): {$(VPATH)}internal/intern/file.h
ractor.$(OBJEXT): {$(VPATH)}internal/intern/gc.h
ractor.$(OBJEXT): {$(VPATH)}internal/intern/hash.h
ractor.$(OBJEXT): {$(VPATH)}internal/intern/io.h
ractor.$(OBJEXT): {$(VPATH)}internal/intern/load.h
ractor.$(OBJEXT): {$(VPATH)}internal/intern/marshal.h
ractor.$(OBJEXT): {$(VPATH)}internal/intern/numeric.h
ractor.$(OBJEXT): {$(VPATH)}internal/intern/object.h
ractor.$(OBJEXT): {$(VPATH)}internal/intern/parse.h
ractor.$(OBJEXT): {$(VPATH)}internal/intern/proc.h
ractor.$(OBJEXT): {$(VPATH)}internal/intern/process.h
ractor.$(OBJEXT): {$(VPATH)}internal/intern/random.h
ractor.$(OBJEXT): {$(VPATH)}internal/intern/range.h
ractor.$(OBJEXT): {$(VPATH)}internal/intern/rational.h
ractor.$(OBJEXT): {$(VPATH)}internal/intern/re.h
ractor.$(OBJEXT): {$(VPATH)}internal/intern/ruby.h
ractor.$(OBJEXT): {$(VPATH)}internal/intern/select.h
ractor.$(OBJEXT): {$(VPATH)}internal/intern/select/largesize.h
ractor.$(OBJEXT): {$(VPATH)}internal/intern/signal.h
ractor.$(OBJEXT): {$(VPATH)}internal/intern/sprintf.h
ractor.$(OBJEXT): {$(VPATH)}internal/intern/string.h
ractor.$(OBJEXT): {$(VPATH)}internal/intern/struct.h
ractor.$(OBJEXT): {$(VPATH)}internal/intern/thread.h
ractor.$(OBJEXT): {$(VPATH)}internal/intern/time.h
ractor.$(OBJEXT): {$(VPATH)}internal/intern/variable.h
ractor.$(OBJEXT): {$(VPATH)}internal/intern/vm.h
ractor.$(OBJEXT): {$(VPATH)}internal/interpreter.h
ractor.$(OBJEXT): {$(VPATH)}internal/iterator.h
ractor.$(OBJEXT): {$(VPATH)}internal/memory.h
ractor.$(OBJEXT): {$(VPATH)}internal/method.h
ractor.$(OBJEXT): {$(VPATH)}internal/module.h
ractor.$(OBJEXT): {$(VPATH)}internal/newobj.h
ractor.$(OBJEXT): {$(VPATH)}internal/rgengc.h
ractor.$(OBJEXT): {$(VPATH)}internal/scan_args.h
ractor.$(OBJEXT): {$(VPATH)}internal/special_consts.h
ractor.$(OBJEXT): {$(VPATH)}internal/static_assert.h
ractor.$(OBJEXT): {$(VPATH)}internal/stdalign.h
ractor.$(OBJEXT): {$(VPATH)}internal/stdbool.h
ractor.$(OBJEXT): {$(VPATH)}internal/symbol.h
ractor.$(OBJEXT): {$(VPATH)}internal/token_paste.h
ractor.$(OBJEXT): {$(VPATH)}internal/value.h
ractor.$(OBJEXT): {$(VPATH)}internal/value_type.h
ractor.$(OBJEXT): {$(VPATH)}internal/variable.h
ractor.$(OBJEXT): {$(VPATH)}internal/warning_push.h
ractor.$(OBJEXT): {$(VPATH)}internal/xmalloc.h
ractor.$(OBJEXT): {$(VPATH)}method.h
ractor.$(OBJEXT): {$(VPATH)}missing.h
ractor.$(OBJEXT): {$(VPATH)}node.h
ractor.$(OBJEXT): {$(VPATH)}onigmo.h
ractor.$(OBJEXT): {$(VPATH)}oniguruma.h
ractor.$(OBJEXT): {$(VPATH)}ractor.c
ractor.$(OBJEXT): {$(VPATH)}ractor.h
ractor.$(OBJEXT): {$(VPATH)}ractor.rb
ractor.$(OBJEXT): {$(VPATH)}ractor.rbinc
ractor.$(OBJEXT): {$(VPATH)}ractor_pub.h
ractor.$(OBJEXT): {$(VPATH)}ruby_assert.h
ractor.$(OBJEXT): {$(VPATH)}ruby_atomic.h
ractor.$(OBJEXT): {$(VPATH)}st.h
ractor.$(OBJEXT): {$(VPATH)}subst.h
ractor.$(OBJEXT): {$(VPATH)}thread.h
ractor.$(OBJEXT): {$(VPATH)}thread_$(THREAD_MODEL).h
ractor.$(OBJEXT): {$(VPATH)}thread_native.h
ractor.$(OBJEXT): {$(VPATH)}vm_core.h
ractor.$(OBJEXT): {$(VPATH)}vm_debug.h
ractor.$(OBJEXT): {$(VPATH)}vm_opts.h
ractor.$(OBJEXT): {$(VPATH)}vm_sync.h
random.$(OBJEXT): $(hdrdir)/ruby.h
random.$(OBJEXT): $(hdrdir)/ruby/ruby.h
random.$(OBJEXT): $(top_srcdir)/internal/array.h
@ -13569,6 +13789,8 @@ thread.$(OBJEXT): {$(VPATH)}mjit.h
thread.$(OBJEXT): {$(VPATH)}node.h
thread.$(OBJEXT): {$(VPATH)}onigmo.h
thread.$(OBJEXT): {$(VPATH)}oniguruma.h
thread.$(OBJEXT): {$(VPATH)}ractor.h
thread.$(OBJEXT): {$(VPATH)}ractor_pub.h
thread.$(OBJEXT): {$(VPATH)}ruby_assert.h
thread.$(OBJEXT): {$(VPATH)}ruby_atomic.h
thread.$(OBJEXT): {$(VPATH)}st.h
@ -13581,7 +13803,9 @@ thread.$(OBJEXT): {$(VPATH)}thread_native.h
thread.$(OBJEXT): {$(VPATH)}thread_sync.c
thread.$(OBJEXT): {$(VPATH)}timev.h
thread.$(OBJEXT): {$(VPATH)}vm_core.h
thread.$(OBJEXT): {$(VPATH)}vm_debug.h
thread.$(OBJEXT): {$(VPATH)}vm_opts.h
thread.$(OBJEXT): {$(VPATH)}vm_sync.h
time.$(OBJEXT): $(hdrdir)/ruby.h
time.$(OBJEXT): $(hdrdir)/ruby/ruby.h
time.$(OBJEXT): $(top_srcdir)/internal/array.h
@ -13942,15 +14166,23 @@ transcode.$(OBJEXT): {$(VPATH)}st.h
transcode.$(OBJEXT): {$(VPATH)}subst.h
transcode.$(OBJEXT): {$(VPATH)}transcode.c
transcode.$(OBJEXT): {$(VPATH)}transcode_data.h
transient_heap.$(OBJEXT): $(CCAN_DIR)/check_type/check_type.h
transient_heap.$(OBJEXT): $(CCAN_DIR)/container_of/container_of.h
transient_heap.$(OBJEXT): $(CCAN_DIR)/list/list.h
transient_heap.$(OBJEXT): $(CCAN_DIR)/str/str.h
transient_heap.$(OBJEXT): $(hdrdir)/ruby.h
transient_heap.$(OBJEXT): $(hdrdir)/ruby/ruby.h
transient_heap.$(OBJEXT): $(top_srcdir)/internal/array.h
transient_heap.$(OBJEXT): $(top_srcdir)/internal/compilers.h
transient_heap.$(OBJEXT): $(top_srcdir)/internal/gc.h
transient_heap.$(OBJEXT): $(top_srcdir)/internal/hash.h
transient_heap.$(OBJEXT): $(top_srcdir)/internal/imemo.h
transient_heap.$(OBJEXT): $(top_srcdir)/internal/sanitizers.h
transient_heap.$(OBJEXT): $(top_srcdir)/internal/serial.h
transient_heap.$(OBJEXT): $(top_srcdir)/internal/static_assert.h
transient_heap.$(OBJEXT): $(top_srcdir)/internal/struct.h
transient_heap.$(OBJEXT): $(top_srcdir)/internal/variable.h
transient_heap.$(OBJEXT): $(top_srcdir)/internal/vm.h
transient_heap.$(OBJEXT): $(top_srcdir)/internal/warnings.h
transient_heap.$(OBJEXT): {$(VPATH)}assert.h
transient_heap.$(OBJEXT): {$(VPATH)}backward/2/assume.h
@ -13968,6 +14200,7 @@ transient_heap.$(OBJEXT): {$(VPATH)}debug.h
transient_heap.$(OBJEXT): {$(VPATH)}debug_counter.h
transient_heap.$(OBJEXT): {$(VPATH)}defines.h
transient_heap.$(OBJEXT): {$(VPATH)}gc.h
transient_heap.$(OBJEXT): {$(VPATH)}id.h
transient_heap.$(OBJEXT): {$(VPATH)}id_table.h
transient_heap.$(OBJEXT): {$(VPATH)}intern.h
transient_heap.$(OBJEXT): {$(VPATH)}internal.h
@ -14111,14 +14344,21 @@ transient_heap.$(OBJEXT): {$(VPATH)}internal/value_type.h
transient_heap.$(OBJEXT): {$(VPATH)}internal/variable.h
transient_heap.$(OBJEXT): {$(VPATH)}internal/warning_push.h
transient_heap.$(OBJEXT): {$(VPATH)}internal/xmalloc.h
transient_heap.$(OBJEXT): {$(VPATH)}method.h
transient_heap.$(OBJEXT): {$(VPATH)}missing.h
transient_heap.$(OBJEXT): {$(VPATH)}node.h
transient_heap.$(OBJEXT): {$(VPATH)}ruby_assert.h
transient_heap.$(OBJEXT): {$(VPATH)}ruby_atomic.h
transient_heap.$(OBJEXT): {$(VPATH)}st.h
transient_heap.$(OBJEXT): {$(VPATH)}subst.h
transient_heap.$(OBJEXT): {$(VPATH)}thread_$(THREAD_MODEL).h
transient_heap.$(OBJEXT): {$(VPATH)}thread_native.h
transient_heap.$(OBJEXT): {$(VPATH)}transient_heap.c
transient_heap.$(OBJEXT): {$(VPATH)}transient_heap.h
transient_heap.$(OBJEXT): {$(VPATH)}vm_core.h
transient_heap.$(OBJEXT): {$(VPATH)}vm_debug.h
transient_heap.$(OBJEXT): {$(VPATH)}vm_opts.h
transient_heap.$(OBJEXT): {$(VPATH)}vm_sync.h
util.$(OBJEXT): $(hdrdir)/ruby.h
util.$(OBJEXT): $(hdrdir)/ruby/ruby.h
util.$(OBJEXT): $(top_srcdir)/internal/compilers.h
@ -14472,6 +14712,8 @@ variable.$(OBJEXT): {$(VPATH)}missing.h
variable.$(OBJEXT): {$(VPATH)}node.h
variable.$(OBJEXT): {$(VPATH)}onigmo.h
variable.$(OBJEXT): {$(VPATH)}oniguruma.h
variable.$(OBJEXT): {$(VPATH)}ractor.h
variable.$(OBJEXT): {$(VPATH)}ractor_pub.h
variable.$(OBJEXT): {$(VPATH)}ruby_assert.h
variable.$(OBJEXT): {$(VPATH)}ruby_atomic.h
variable.$(OBJEXT): {$(VPATH)}st.h
@ -14483,6 +14725,7 @@ variable.$(OBJEXT): {$(VPATH)}util.h
variable.$(OBJEXT): {$(VPATH)}variable.c
variable.$(OBJEXT): {$(VPATH)}variable.h
variable.$(OBJEXT): {$(VPATH)}vm_core.h
variable.$(OBJEXT): {$(VPATH)}vm_debug.h
variable.$(OBJEXT): {$(VPATH)}vm_opts.h
version.$(OBJEXT): $(CCAN_DIR)/check_type/check_type.h
version.$(OBJEXT): $(CCAN_DIR)/container_of/container_of.h
@ -14881,6 +15124,8 @@ vm.$(OBJEXT): {$(VPATH)}oniguruma.h
vm.$(OBJEXT): {$(VPATH)}probes.dmyh
vm.$(OBJEXT): {$(VPATH)}probes.h
vm.$(OBJEXT): {$(VPATH)}probes_helper.h
vm.$(OBJEXT): {$(VPATH)}ractor.h
vm.$(OBJEXT): {$(VPATH)}ractor_pub.h
vm.$(OBJEXT): {$(VPATH)}ruby_assert.h
vm.$(OBJEXT): {$(VPATH)}ruby_atomic.h
vm.$(OBJEXT): {$(VPATH)}st.h
@ -14903,6 +15148,7 @@ vm.$(OBJEXT): {$(VPATH)}vm_insnhelper.c
vm.$(OBJEXT): {$(VPATH)}vm_insnhelper.h
vm.$(OBJEXT): {$(VPATH)}vm_method.c
vm.$(OBJEXT): {$(VPATH)}vm_opts.h
vm.$(OBJEXT): {$(VPATH)}vm_sync.h
vm.$(OBJEXT): {$(VPATH)}vmtc.inc
vm_backtrace.$(OBJEXT): $(CCAN_DIR)/check_type/check_type.h
vm_backtrace.$(OBJEXT): $(CCAN_DIR)/container_of/container_of.h
@ -15270,6 +15516,8 @@ vm_dump.$(OBJEXT): {$(VPATH)}method.h
vm_dump.$(OBJEXT): {$(VPATH)}missing.h
vm_dump.$(OBJEXT): {$(VPATH)}node.h
vm_dump.$(OBJEXT): {$(VPATH)}procstat_vm.c
vm_dump.$(OBJEXT): {$(VPATH)}ractor.h
vm_dump.$(OBJEXT): {$(VPATH)}ractor_pub.h
vm_dump.$(OBJEXT): {$(VPATH)}ruby_assert.h
vm_dump.$(OBJEXT): {$(VPATH)}ruby_atomic.h
vm_dump.$(OBJEXT): {$(VPATH)}st.h
@ -15277,8 +15525,205 @@ vm_dump.$(OBJEXT): {$(VPATH)}subst.h
vm_dump.$(OBJEXT): {$(VPATH)}thread_$(THREAD_MODEL).h
vm_dump.$(OBJEXT): {$(VPATH)}thread_native.h
vm_dump.$(OBJEXT): {$(VPATH)}vm_core.h
vm_dump.$(OBJEXT): {$(VPATH)}vm_debug.h
vm_dump.$(OBJEXT): {$(VPATH)}vm_dump.c
vm_dump.$(OBJEXT): {$(VPATH)}vm_opts.h
vm_sync.$(OBJEXT): $(CCAN_DIR)/check_type/check_type.h
vm_sync.$(OBJEXT): $(CCAN_DIR)/container_of/container_of.h
vm_sync.$(OBJEXT): $(CCAN_DIR)/list/list.h
vm_sync.$(OBJEXT): $(CCAN_DIR)/str/str.h
vm_sync.$(OBJEXT): $(hdrdir)/ruby.h
vm_sync.$(OBJEXT): $(hdrdir)/ruby/ruby.h
vm_sync.$(OBJEXT): $(top_srcdir)/internal/array.h
vm_sync.$(OBJEXT): $(top_srcdir)/internal/compilers.h
vm_sync.$(OBJEXT): $(top_srcdir)/internal/gc.h
vm_sync.$(OBJEXT): $(top_srcdir)/internal/imemo.h
vm_sync.$(OBJEXT): $(top_srcdir)/internal/serial.h
vm_sync.$(OBJEXT): $(top_srcdir)/internal/static_assert.h
vm_sync.$(OBJEXT): $(top_srcdir)/internal/vm.h
vm_sync.$(OBJEXT): {$(VPATH)}addr2line.h
vm_sync.$(OBJEXT): {$(VPATH)}assert.h
vm_sync.$(OBJEXT): {$(VPATH)}backward/2/assume.h
vm_sync.$(OBJEXT): {$(VPATH)}backward/2/attributes.h
vm_sync.$(OBJEXT): {$(VPATH)}backward/2/bool.h
vm_sync.$(OBJEXT): {$(VPATH)}backward/2/gcc_version_since.h
vm_sync.$(OBJEXT): {$(VPATH)}backward/2/inttypes.h
vm_sync.$(OBJEXT): {$(VPATH)}backward/2/limits.h
vm_sync.$(OBJEXT): {$(VPATH)}backward/2/long_long.h
vm_sync.$(OBJEXT): {$(VPATH)}backward/2/stdalign.h
vm_sync.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h
vm_sync.$(OBJEXT): {$(VPATH)}config.h
vm_sync.$(OBJEXT): {$(VPATH)}constant.h
vm_sync.$(OBJEXT): {$(VPATH)}defines.h
vm_sync.$(OBJEXT): {$(VPATH)}gc.h
vm_sync.$(OBJEXT): {$(VPATH)}id.h
vm_sync.$(OBJEXT): {$(VPATH)}id_table.h
vm_sync.$(OBJEXT): {$(VPATH)}intern.h
vm_sync.$(OBJEXT): {$(VPATH)}internal.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/anyargs.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/arithmetic.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/arithmetic/char.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/arithmetic/double.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/arithmetic/fixnum.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/arithmetic/gid_t.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/arithmetic/int.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/arithmetic/intptr_t.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/arithmetic/long.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/arithmetic/long_long.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/arithmetic/mode_t.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/arithmetic/off_t.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/arithmetic/pid_t.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/arithmetic/short.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/arithmetic/size_t.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/arithmetic/st_data_t.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/arithmetic/uid_t.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/array.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/assume.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/attr/alloc_size.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/attr/artificial.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/attr/cold.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/attr/const.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/attr/constexpr.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/attr/deprecated.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/attr/diagnose_if.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/attr/enum_extensibility.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/attr/error.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/attr/flag_enum.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/attr/forceinline.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/attr/format.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/attr/maybe_unused.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/attr/noalias.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/attr/nodiscard.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/attr/noexcept.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/attr/noinline.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/attr/nonnull.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/attr/noreturn.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/attr/pure.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/attr/restrict.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/attr/returns_nonnull.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/attr/warning.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/attr/weakref.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/cast.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/compiler_is.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/compiler_is/apple.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/compiler_is/clang.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/compiler_is/gcc.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/compiler_is/intel.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/compiler_is/msvc.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/compiler_is/sunpro.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/compiler_since.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/compilers.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/config.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/constant_p.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/core.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/core/rarray.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/core/rbasic.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/core/rbignum.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/core/rclass.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/core/rdata.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/core/rfile.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/core/rhash.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/core/robject.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/core/rregexp.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/core/rstring.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/core/rstruct.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/core/rtypeddata.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/ctype.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/dllexport.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/dosish.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/error.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/eval.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/event.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/fl_type.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/gc.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/glob.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/globals.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/has/attribute.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/has/builtin.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/has/c_attribute.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/has/cpp_attribute.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/has/declspec_attribute.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/has/extension.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/has/feature.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/has/warning.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/imemo.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/array.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/bignum.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/class.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/compar.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/complex.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/cont.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/dir.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/enum.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/enumerator.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/error.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/eval.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/file.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/gc.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/hash.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/io.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/load.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/marshal.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/numeric.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/object.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/parse.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/proc.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/process.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/random.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/range.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/rational.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/re.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/ruby.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/select.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/select/largesize.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/signal.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/sprintf.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/string.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/struct.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/thread.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/time.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/variable.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/vm.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/interpreter.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/iterator.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/memory.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/method.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/module.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/newobj.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/rgengc.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/scan_args.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/serial.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/special_consts.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/static_assert.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/stdalign.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/stdbool.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/symbol.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/token_paste.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/value.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/value_type.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/variable.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/vm.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/warning_push.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/warnings.h
vm_sync.$(OBJEXT): {$(VPATH)}internal/xmalloc.h
vm_sync.$(OBJEXT): {$(VPATH)}iseq.h
vm_sync.$(OBJEXT): {$(VPATH)}method.h
vm_sync.$(OBJEXT): {$(VPATH)}missing.h
vm_sync.$(OBJEXT): {$(VPATH)}node.h
vm_sync.$(OBJEXT): {$(VPATH)}procstat_vm.c
vm_sync.$(OBJEXT): {$(VPATH)}ractor.h
vm_sync.$(OBJEXT): {$(VPATH)}ractor_pub.h
vm_sync.$(OBJEXT): {$(VPATH)}ruby_assert.h
vm_sync.$(OBJEXT): {$(VPATH)}ruby_atomic.h
vm_sync.$(OBJEXT): {$(VPATH)}st.h
vm_sync.$(OBJEXT): {$(VPATH)}subst.h
vm_sync.$(OBJEXT): {$(VPATH)}thread_$(THREAD_MODEL).h
vm_sync.$(OBJEXT): {$(VPATH)}thread_native.h
vm_sync.$(OBJEXT): {$(VPATH)}vm_core.h
vm_sync.$(OBJEXT): {$(VPATH)}vm_debug.h
vm_sync.$(OBJEXT): {$(VPATH)}vm_opts.h
vm_sync.$(OBJEXT): {$(VPATH)}vm_sync.c
vm_sync.$(OBJEXT): {$(VPATH)}vm_sync.h
vm_trace.$(OBJEXT): $(CCAN_DIR)/check_type/check_type.h
vm_trace.$(OBJEXT): $(CCAN_DIR)/container_of/container_of.h
vm_trace.$(OBJEXT): $(CCAN_DIR)/list/list.h


@ -1600,6 +1600,16 @@ iseq_block_param_id_p(const rb_iseq_t *iseq, ID id, int *pidx, int *plevel)
}
}
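// Mark the `level` iseqs between this one and the scope that defines the
// variable as accessing outer local variables; the Ractor block isolation
// added in this commit (see doc/ractor.md) relies on this flag to reject
// blocks that capture outer variables.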
static void
check_access_outer_variables(const rb_iseq_t *iseq, int level)
{
// set access_outer_variables
for (int i=0; i<level; i++) {
iseq->body->access_outer_variables = TRUE;
iseq = iseq->body->parent_iseq;
}
}
static void
iseq_add_getlocal(rb_iseq_t *iseq, LINK_ANCHOR *const seq, int line, int idx, int level)
{
@ -1609,6 +1619,7 @@ iseq_add_getlocal(rb_iseq_t *iseq, LINK_ANCHOR *const seq, int line, int idx, in
else {
ADD_INSN2(seq, line, getlocal, INT2FIX((idx) + VM_ENV_DATA_SIZE - 1), INT2FIX(level));
}
check_access_outer_variables(iseq, level);
}
static void
@ -1620,6 +1631,7 @@ iseq_add_setlocal(rb_iseq_t *iseq, LINK_ANCHOR *const seq, int line, int idx, in
else {
ADD_INSN2(seq, line, setlocal, INT2FIX((idx) + VM_ENV_DATA_SIZE - 1), INT2FIX(level));
}
check_access_outer_variables(iseq, level);
}
@ -8222,6 +8234,8 @@ iseq_compile_each0(rb_iseq_t *iseq, LINK_ANCHOR *const ret, const NODE *node, in
if (popped) {
ADD_INSN(ret, line, pop);
}
iseq->body->access_outer_variables = TRUE;
break;
}
case NODE_LVAR:{
@ -8680,7 +8694,8 @@ iseq_compile_each0(rb_iseq_t *iseq, LINK_ANCHOR *const ret, const NODE *node, in
VALUE flag = INT2FIX(excl);
const NODE *b = node->nd_beg;
const NODE *e = node->nd_end;
if (optimizable_range_item_p(b) && optimizable_range_item_p(e)) {
// TODO: Ractor can not use cached Range objects
if (0 && optimizable_range_item_p(b) && optimizable_range_item_p(e)) {
if (!popped) {
VALUE bv = nd_type(b) == NODE_LIT ? b->nd_lit : Qnil;
VALUE ev = nd_type(e) == NODE_LIT ? e->nd_lit : Qnil;

cont.c

@ -28,6 +28,7 @@
#include "mjit.h"
#include "vm_core.h"
#include "id_table.h"
#include "ractor.h"
static const int DEBUG = 0;
@ -808,14 +809,15 @@ static inline void
ec_switch(rb_thread_t *th, rb_fiber_t *fiber)
{
rb_execution_context_t *ec = &fiber->cont.saved_ec;
ruby_current_execution_context_ptr = th->ec = ec;
rb_ractor_set_current_ec(th->ractor, th->ec = ec);
// ruby_current_execution_context_ptr = th->ec = ec;
/*
* timer-thread may set trap interrupt on previous th->ec at any time;
* ensure we do not delay (or lose) the trap interrupt handling.
*/
if (th->vm->main_thread == th && rb_signal_buff_size() > 0) {
if (th->vm->ractor.main_thread == th &&
rb_signal_buff_size() > 0) {
RUBY_VM_SET_TRAP_INTERRUPT(ec);
}
@ -1873,7 +1875,7 @@ rb_fiber_start(void)
enum ruby_tag_type state;
int need_interrupt = TRUE;
VM_ASSERT(th->ec == ruby_current_execution_context_ptr);
VM_ASSERT(th->ec == GET_EC());
VM_ASSERT(FIBER_RESUMED_P(fiber));
if (fiber->blocking) {
@ -1964,13 +1966,15 @@ rb_threadptr_root_fiber_release(rb_thread_t *th)
/* ignore. A root fiber object will free th->ec */
}
else {
rb_execution_context_t *ec = GET_EC();
VM_ASSERT(th->ec->fiber_ptr->cont.type == FIBER_CONTEXT);
VM_ASSERT(th->ec->fiber_ptr->cont.self == 0);
fiber_free(th->ec->fiber_ptr);
if (th->ec == ruby_current_execution_context_ptr) {
ruby_current_execution_context_ptr = NULL;
if (th->ec == ec) {
rb_ractor_set_current_ec(th->ractor, NULL);
}
fiber_free(th->ec->fiber_ptr);
th->ec = NULL;
}
}


@ -26,6 +26,7 @@
#include "vm_debug.h"
#include "vm_callinfo.h"
#include "ruby/thread_native.h"
#include "ractor.h"
/* This is the only place struct RIMemo is actually used */
struct RIMemo {
@ -422,7 +423,6 @@ ruby_debug_log(const char *file, int line, const char *func_name, const char *fm
len += r;
}
#if 0 // not yet
// ractor information
if (GET_VM()->ractor.cnt > 1) {
rb_ractor_t *cr = GET_RACTOR();
@ -433,7 +433,6 @@ ruby_debug_log(const char *file, int line, const char *func_name, const char *fm
len += r;
}
}
#endif
// thread information
if (!rb_thread_alone()) {

doc/ractor.md

@ -0,0 +1,883 @@
# Ractor - Ruby's Actor-like concurrent abstraction
Ractor is designed to provide a parallel execution feature for Ruby without thread-safety concerns.
## Summary
### Multiple Ractors in an interpreter process
You can make multiple Ractors and they run in parallel (a small sketch follows this list).
* Ractors run in parallel.
* The interpreter starts with the first Ractor (called the *main Ractor*).
* If the main Ractor terminates, all Ractors receive a terminate request, just as all running Threads are asked to terminate when the main thread (the first invoked Thread) exits.
* Each Ractor has 1 or more Threads.
* Threads in a Ractor share a Ractor-wide global lock like the GIL (GVL in MRI terminology), so they can't run in parallel (without releasing the GVL explicitly at the C level).
* The overhead of creating a Ractor is similar to the overhead of creating one Thread.
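A minimal sketch of two Ractors running in parallel, with their results collected by `Ractor#take` (the workloads are only illustrative):
```ruby
# Each Ractor runs its block in parallel with the main Ractor.
r1 = Ractor.new { (1..1_000_000).sum }
r2 = Ractor.new { (1..1_000_000).select(&:even?).size }
# Ractor#take waits for each block's return value.
p r1.take #=> 500000500000
p r2.take #=> 500000
```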
### Limited sharing
Ractors don't share everything, unlike threads (see the sketch after this list).
* Most objects are *Unshareable objects*, so you don't need to care about the thread-safety problems caused by sharing.
* Some objects are *Shareable objects*.
  * Immutable objects: frozen objects which don't refer to unshareable objects.
    * `i = 123`: `i` is an immutable object.
    * `s = "str".freeze`: `s` is an immutable object.
    * `a = [1, [2], 3].freeze`: `a` is not an immutable object because `a` refers to the unshareable object `[2]` (which is not frozen).
  * Class/Module objects
  * Special shareable objects
    * The Ractor object itself.
    * And more...
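A short sketch of this rule, echoing objects through a Ractor and comparing object identity (it mirrors the bootstrap tests in this commit):
```ruby
echo = Ractor.new do
  while v = Ractor.recv
    Ractor.yield v
  end
end
frozen  = 'frozen'.freeze
mutable = 'mutable'.dup
echo << frozen
frozen.object_id  == echo.take.object_id  #=> true  (shareable: passed by reference)
echo << mutable
mutable.object_id == echo.take.object_id  #=> false (unshareable: passed by copy)
```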
### Two types of communication between Ractors
Ractors communicate with each other and synchronize their execution by exchanging messages. There are two message exchange protocols: push type (message passing) and pull type (a combined sketch follows this list).
* Push type message passing: the `Ractor#send(obj)` and `Ractor.recv()` pair.
  * The sender Ractor passes `obj` to the receiver Ractor.
  * The sender knows the destination Ractor (the receiver of `r.send(obj)`) and the receiver does not know the sender (it accepts all messages from any Ractor).
  * The receiver has an infinite queue and the sender enqueues the message, so the sender doesn't block to put a message.
  * This type is based on the actor model.
* Pull type communication: the `Ractor.yield(obj)` and `Ractor#take()` pair.
  * The sender Ractor declares that it yields `obj` and the receiver Ractor takes it.
  * The sender doesn't know the destination Ractor, and the receiver knows the sender (the receiver of `r.take`).
  * The sender or the receiver will block if there is no other side.
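A combined sketch of both protocols (the names are only illustrative):
```ruby
# Push type: the sender knows the receiver; send never blocks.
r = Ractor.new do
  msg = Ractor.recv   # dequeue from this Ractor's own incoming queue
  msg.upcase          # the block's return value goes to the outgoing port
end
r.send 'ping'
# Pull type: the receiver knows the sender; take blocks until a value is yielded.
r.take #=> "PING"
```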
### Copy & Move semantics to send messages
To send unshareable objects as messages, objects are copied or moved (see the sketch after this list).
* Copy: uses deep copy (like dRuby).
* Move: moves membership.
  * The sender can not access the moved object after moving it.
  * This guarantees that only one Ractor can access the object.
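A small sketch of the difference, using `move: true` (it mirrors the move examples later in this document):
```ruby
r = Ractor.new do
  Ractor.recv << ' world'   # the receiver now owns the moved String
end
s = 'hello'
r.send s, move: true        # move instead of copy
r.take    #=> "hello world"
s << '!'  # raises Ractor::MovedError: `s` was moved to r
```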
### Thread-safety
Ractor helps to write a thread-safe program, but you can still write thread-unsafe programs with Ractors.
* GOOD: Sharing limitation
  * Most objects are unshareable, so you can't write data-racy or race-conditional programs with them.
  * Shareable objects are protected by the interpreter or by a locking mechanism.
* BAD: Class/Module can violate this assumption
  * To keep compatibility with old behavior, classes and modules can still introduce data races and so on.
  * Ruby programmers should take care when they modify class/module objects in multi-Ractor programs.
* BAD: Ractor can't solve all thread-safety problems
  * There are several blocking operations (waiting for send, waiting for yield and waiting for take), so you can write a program which has deadlock and livelock issues (see the sketch below).
  * Some kinds of shareable objects can introduce transactions (STM, for example). However, misusing transactions will generate inconsistent state.
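A hedged sketch of the deadlock risk mentioned above; do not expect it to finish, since each side waits for the other forever:
```ruby
r = Ractor.new do
  Ractor.recv   # blocks: the main Ractor never sends anything
end
r.take          # blocks: r never yields nor returns, so this never completes
```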
Without Ractor, we need to trace all state mutations to debug thread-safety issues.
With Ractor, you can concentrate on the suspicious code.
## Creation and termination
### `Ractor.new`
* `Ractor.new do expr end` generates another Ractor.
```ruby
# Ractor.new with a block creates new Ractor
r = Ractor.new do
# This block will be run in parallel
end
# You can name a Ractor with `name:` argument.
r = Ractor.new name: 'test-name' do
end
# and Ractor#name returns its name.
r.name #=> 'test-name'
```
### Given block isolation
The Ractor executes the given `expr` in the given block.
The given block will be isolated from the outer scope by `Proc#isolate`.
```ruby
# To prevent sharing unshareable objects between ractors,
# the block's outer variables, `self` and other information are isolated.
# The given block will be isolated by the `Proc#isolate` method.
# `Proc#isolate` is called at Ractor creation time (when `Ractor.new` is called)
# and it can cause an error if the block accesses outer variables.
begin
a = true
r = Ractor.new do
a #=> ArgumentError because this block accesses `a`.
end
r.take # see later
rescue ArgumentError
end
```
* The `self` of the given block is the `Ractor` object itself.
```ruby
r = Ractor.new do
self.object_id
end
r.take == self.object_id #=> false
```
Arguments passed to `Ractor.new()` become block parameters for the given block. However, the interpreter does not pass the parameter object references; it sends them as messages (see below for details).
```ruby
r = Ractor.new 'ok' do |msg|
msg #=> 'ok'
end
r.take #=> 'ok'
```
```ruby
# almost similar to the last example
r = Ractor.new do
msg = Ractor.recv
msg
end
r.send 'ok'
r.take #=> 'ok'
```
### An execution result of the given block
The return value of the given block becomes an outgoing message (see below for details).
```ruby
r = Ractor.new do
'ok'
end
r.take #=> 'ok'
```
```ruby
# almost similar to the last example
r = Ractor.new do
Ractor.yield 'ok'
end
r.take #=> 'ok'
```
An error in the given block will be propagated to the receiver of its outgoing message.
```ruby
r = Ractor.new do
raise 'ok' # the exception will be transferred to the receiver
end
begin
r.take
rescue Ractor::RemoteError => e
e.cause.class #=> RuntimeError
e.cause.message #=> 'ok'
e.ractor #=> r
end
```
## Communication between Ractors
Communication between Ractors is achieved by sending and receiving messages.
* (1) Message sending/receiving
  * (1-1) push type send/recv (the sender knows the receiver), similar to the actor model.
  * (1-2) pull type yield/take (the receiver knows the sender).
* (2) Using shareable container objects (not implemented yet)
Users can control blocking on (1), but should not control blocking on (2) (only manage it as a critical section).
* (1-1) send/recv (push type)
  * `Ractor#send(obj)` (`Ractor#<<(obj)` is an alias) sends a message to the Ractor's incoming port. The incoming port is connected to an infinite-size incoming queue, so `Ractor#send` will never block.
  * `Ractor.recv` dequeues a message from its own incoming queue. If the incoming queue is empty, the `Ractor.recv` call will block.
* (1-2) yield/take (pull type)
  * `Ractor.yield(obj)` sends a message via the outgoing port to a Ractor which is calling `Ractor#take`. If no Ractor is waiting for it, `Ractor.yield(obj)` will block. If multiple Ractors are waiting for it (with `Ractor#take`), only one Ractor can receive the message.
  * `Ractor#take` receives a message which is yielded by `Ractor.yield(obj)` from the specified Ractor. If the Ractor has not called `Ractor.yield` yet, the `Ractor#take` call will block.
* `Ractor.select()` can wait for the success of `take`, `yield` and `recv` (see the sketch after this list).
* You can close the incoming port or the outgoing port.
  * You can close them with `Ractor#close_incoming` and `Ractor#close_outgoing`.
  * If the incoming port is closed for a Ractor, you can't `send` to the Ractor. If `Ractor.recv` is blocked on the closed incoming port, then it will raise an exception.
  * If the outgoing port is closed for a Ractor, you can't call `Ractor#take` and `Ractor.yield` on the Ractor. If `Ractor#take` is blocked on the Ractor, then it will raise an exception.
  * When a Ractor is terminated, the Ractor's ports are closed.
* There are three ways to send an object as a message:
  * (1) Send a reference: for a shareable object, only a reference to the object is sent (fast).
  * (2) Copy an object: an unshareable object is sent by deep-copying it (slow). Note that you can not send an object which does not support deep copy. The current implementation uses the Marshal protocol to make the deep copy.
  * (3) Move an object: an unshareable object reference is sent together with its membership. The sender Ractor can not access moved objects anymore (doing so raises an exception). The current implementation creates a new object as the moved object for the receiver Ractor and copies the references of the sent object into it.
* You can choose "Copy" or "Move" with the `move:` keyword of `Ractor#send(obj)` and `Ractor.yield(obj)` (the default is "Copy").
### Sending/Receiving ports
Each Ractor has an _incoming port_ and an _outgoing port_. The incoming port is connected to an infinite-sized incoming queue.
```
                          Ractor r
                  +-------------------------------------------+
                  | incoming                         outgoing |
                  | port                                 port |
   r.send(obj) -->*->[incoming queue]     Ractor.yield(obj) ->*-> r.take
                  |                 |                          |
                  |                 v                          |
                  |            Ractor.recv                     |
                  +-------------------------------------------+

Connection example: r2.send obj on r1, Ractor.recv on r2
  +----+     +----+
  * r1 |---->* r2 *
  +----+     +----+

Connection example: Ractor.yield(obj) on r1, r1.take on r2
  +----+     +----+
  * r1 *---->* r2 *
  +----+     +----+

Connection example: Ractor.yield(obj) on r1 and r2,
                    and waiting for both simultaneously by Ractor.select(r1, r2)
  +----+
  * r1 *------+
  +----+      |
              +----> Ractor.select(r1, r2)
  +----+      |
  * r2 *------+
  +----+
```
```ruby
r = Ractor.new do
msg = Ractor.recv # Receive from r's incoming queue
msg # send back msg as block return value
end
r.send 'ok' # Send 'ok' to r's incoming port -> incoming queue
r.take # Receive from r's outgoing port
```
```ruby
# The actual argument 'ok' for `Ractor.new()` will be sent to the created Ractor.
r = Ractor.new 'ok' do |msg|
# Values for formal parameters will be received from incoming queue.
# Similar to: msg = Ractor.recv
msg # Return value of the given block will be sent via outgoing port
end
# receive from r's outgoing port.
r.take #=> 'ok'
```
### Wait for multiple Ractors with `Ractor.select`
You can wait for multiple Ractors' `yield` with `Ractor.select(*ractors)`.
The return value of `Ractor.select()` is `[r, msg]`, where `r` is the yielding Ractor and `msg` is the yielded message.
Wait for a single Ractor (same as `Ractor#take`):
```ruby
r1 = Ractor.new{'r1'}
r, obj = Ractor.select(r1)
r == r1 and obj == 'r1' #=> true
```
Wait for two ractors:
```ruby
r1 = Ractor.new{'r1'}
r2 = Ractor.new{'r2'}
rs = [r1, r2]
as = []
# Wait for r1 or r2's Ractor.yield
r, obj = Ractor.select(*rs)
rs.delete(r)
as << obj
# Second try (now rs contains only the remaining, not-yet-taken ractor)
r, obj = Ractor.select(*rs)
rs.delete(r)
as << obj
as.sort == ['r1', 'r2'] #=> true
```
Complex example:
```ruby
pipe = Ractor.new do
loop do
Ractor.yield Ractor.recv
end
end
RN = 10
rs = RN.times.map{|i|
Ractor.new pipe, i do |pipe, i|
msg = pipe.take
msg # ping-pong
end
}
RN.times{|i|
pipe << i
}
RN.times.map{
r, n = Ractor.select(*rs)
rs.delete r
n
}.sort #=> [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
```
Multiple Ractors can send to one Ractor.
```ruby
# Create 10 ractors and they send objects to pipe ractor.
# The pipe ractor yields the received objects.
pipe = Ractor.new do
loop do
Ractor.yield Ractor.recv
end
end
RN = 10
rs = RN.times.map{|i|
Ractor.new pipe, i do |pipe, i|
pipe << i
end
}
RN.times.map{
pipe.take
}.sort #=> [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
```
TODO: The current `Ractor.select()` has the same issue as `select(2)`, so this interface should be refined.
TODO: Go's `select` syntax uses a round-robin technique for fair scheduling. Currently `Ractor.select()` doesn't use it.
### Closing Ractor's ports
* `Ractor#close_incoming` and `Ractor#close_outgoing` close the incoming/outgoing ports (similar to `Queue#close`).
* `Ractor#close_incoming`
  * `r.send(obj)` where `r`'s incoming port is closed will raise an exception.
  * When the incoming queue is empty and the incoming port is closed, `Ractor.recv` raises an exception. If the incoming queue is not empty, it dequeues an object.
* `Ractor#close_outgoing`
  * `Ractor.yield` on a Ractor which closed its outgoing port will raise an exception.
  * `Ractor#take` for a Ractor which closed its outgoing port will raise an exception. If `Ractor#take` is already blocking, it will also raise an exception.
* When a Ractor terminates, the ports are closed automatically.
  * The return value of the Ractor's block will be yielded as if by `Ractor.yield(ret_val)`, even though the implementation terminates the underlying native thread.
Example (try to take from closed Ractor):
```ruby
r = Ractor.new do
'finish'
end
r.take # success (will return 'finish')
begin
o = r.take # try to take from closed Ractor
rescue Ractor::ClosedError
'ok'
else
"ng: #{o}"
end
```
Example (try to send to closed (terminated) Ractor):
```ruby
r = Ractor.new do
end
r.take # wait terminate
begin
r.send(1)
rescue Ractor::ClosedError
'ok'
else
'ng'
end
```
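For completeness, here is a minimal sketch of `Ractor#close_incoming` itself, assuming the behavior listed above: a message that is already queued can still be received, but a later `send` raises.
```ruby
r = Ractor.new do
  msg = Ractor.recv      # 'queued' can still be dequeued after closing
  begin
    Ractor.recv          # the queue is now empty and the incoming port is closed
  rescue Ractor::ClosedError
    msg + ' / recv failed'
  end
end
r.send 'queued'
r.close_incoming
begin
  r.send 'more'          # the incoming port is closed, so this raises
rescue Ractor::ClosedError
  p :send_rejected
end
p r.take #=> "queued / recv failed"
```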
When multiple Ractors are blocking on `Ractor.yield()`, `Ractor#close_outgoing` cancels all of them by raising an exception (`Ractor::ClosedError`).
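The following is a minimal sketch assuming that cancellation behavior: the created Ractor blocks on `Ractor.yield` because nobody takes from it, and closing its outgoing port raises `Ractor::ClosedError` in that blocking call.
```ruby
r = Ractor.new Ractor.current do |main|
  begin
    Ractor.yield :never_taken   # blocks: no one takes from the outgoing port
  rescue Ractor::ClosedError
    main.send 'yield cancelled' # report back via the main Ractor's incoming port
  end
end
sleep 0.1          # give r time to block on Ractor.yield
r.close_outgoing   # cancel the blocking Ractor.yield
p Ractor.recv      #=> "yield cancelled"
```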
### Send a message by copying
`Ractor#send(obj)` and `Ractor.yield(obj)` copy `obj` deeply if `obj` is an unshareable object.
```ruby
obj = 'str'.dup
r = Ractor.new obj do |msg|
# return received msg's object_id
msg.object_id
end
obj.object_id == r.take #=> false
```
The current implementation uses the Marshal protocol (similar to dRuby). We cannot send objects that Marshal does not support.
```ruby
obj = Thread.new{}
begin
Ractor.new obj do |msg|
msg
end
rescue TypeError => e
e.message #=> no _dump_data is defined for class Thread
else
'ng' # unreachable here
end
```
### Send a message by moving
`Ractor#send(obj, move: true)` or `Ractor.yield(obj, move: true)` moves `obj` to the destination Ractor.
If the source Ractor touches the moved object (for example, calls a method on it like `obj.foo()`), it is an error.
```ruby
# move with Ractor#send
r = Ractor.new do
obj = Ractor.recv
obj << ' world'
end
str = 'hello'
r.send str, move: true
modified = r.take #=> 'hello world'
# str is moved, and accessing str from this Ractor is prohibited
begin
# Error because it touches moved str.
str << ' exception' # raise Ractor::MovedError
rescue Ractor::MovedError
modified #=> 'hello world'
else
raise 'unreachable'
end
```
```ruby
# move with Ractor.yield
r = Ractor.new do
obj = 'hello'
Ractor.yield obj, move: true
obj << 'world' # raise Ractor::MovedError
end
str = r.take
begin
r.take
rescue Ractor::RemoteError
p str #=> "hello"
end
```
For now, only `T_FILE`, `T_STRING` and `T_ARRAY` objects are supported.
* `T_FILE` (`IO`, `File`): supports sending an accepted socket, etc.
* `T_STRING` (`String`): supports sending a huge string without copying (fast).
* `T_ARRAY` (`Array`): supports sending a huge Array without re-allocating the array's buffer. However, all objects referred to by the array must also be moved, so it is not so fast.
To prohibit access to moved objects, a _class replacement_ technique is used.
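A minimal sketch of the observable effect under the semantics described above: after moving, any method call on the source-side reference raises `Ractor::MovedError`.
```ruby
r = Ractor.new { Ractor.recv }
ary = [1, 2, 3]
r.send ary, move: true
begin
  ary.size               # touches the moved object
rescue Ractor::MovedError
  p :moved               #=> :moved
end
p r.take #=> [1, 2, 3]
```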
### Shareable objects
The following objects are shareable.
* Immutable objects
  * Small integers, some symbols, `true`, `false`, `nil` (a.k.a. `SPECIAL_CONST_P()` objects internally)
  * Frozen native objects
    * Numeric objects: `Float`, `Complex`, `Rational`, big integers (`T_BIGNUM` internally)
    * All Symbols.
    * Frozen `String` and `Regexp` objects (which do not have instance variables)
  * In the future, "immutable" objects (frozen and referring only to shareable objects) will be supported (TODO: introduce an `immutable` flag for objects?)
* Class and Module objects (`T_CLASS`, `T_MODULE` and `T_ICLASS` internally)
* `Ractor` and other objects which take care of synchronization.
Implementation: shareable objects (`RVALUE`s) now have the `FL_SHAREABLE` flag. This flag can be added lazily.
```ruby
r = Ractor.new do
while v = Ractor.recv
Ractor.yield v
end
end
class C
end
shareable_objects = [1, :sym, 'xyzzy'.to_sym, 'frozen'.freeze, 1+2r, 3+4i, /regexp/, C]
shareable_objects.map{|o|
r << o
o2 = r.take
[o, o.object_id == o2.object_id]
}
#=> [[1, true], [:sym, true], [:xyzzy, true], ["frozen", true], [(3/1), true], [(3+4i), true], [/regexp/, true], [C, true]]
unshareable_objects = ['mutable str'.dup, [:array], {hash: true}].map{|o|
r << o
o2 = r.take
[o, o.object_id == o2.object_id]
}
#=> [["mutable str", false], [[:array], false], [{:hash=>true}, false]]
```
## Language changes to isolate unshareable objects between Ractors
To isolate unshareable objects between Ractors, we introduce additional language semantics for multi-Ractor programs.
Note that without using Ractors, these additional semantics do not apply (the language remains 100% compatible with Ruby 2).
### Global variables
Only the main Ractor (the Ractor created at interpreter startup) can access global variables.
```ruby
$gv = 1
r = Ractor.new do
$gv
end
begin
r.take
rescue Ractor::RemoteError => e
e.cause.message #=> 'can not access global variables from non-main Ractors'
end
```
### Instance variables of shareable objects
Only the main Ractor can access instance variables of shareable objects.
```ruby
class C
@iv = 'str'
end
r = Ractor.new do
class C
p @iv
end
end
begin
r.take
rescue => e
e.class #=> RuntimeError
end
```
```ruby
shared = Ractor.new{}
shared.instance_variable_set(:@iv, 'str')
r = Ractor.new shared do |shared|
p shared.instance_variable_get(:@iv)
end
begin
r.take
rescue Ractor::RemoteError => e
e.cause.message #=> can not access instance variables of shareable objects from non-main Ractors
end
```
Note that accessing instance variables of class/module objects is also prohibited from non-main Ractors.
### Class variables
Only the main Ractor can access class variables.
```ruby
class C
@@cv = 'str'
end
r = Ractor.new do
class C
p @@cv
end
end
begin
r.take
rescue => e
e.class #=> RuntimeError
end
```
### Constants
Only the main Ractor can read constants which refer to unshareable objects.
```ruby
class C
CONST = 'str'
end
r = Ractor.new do
C::CONST
end
begin
r.take
rescue => e
e.class #=> NameError
end
```
Only the main Ractor can define constants which refer to unshareable objects.
```ruby
class C
end
r = Ractor.new do
C::CONST = 'str'
end
begin
r.take
rescue => e
e.class #=> NameError
end
```
## Implementation note
* Each Ractor has its own threads; this means each Ractor has at least one native thread.
* Each Ractor has its own ID (`rb_ractor_t::id`).
* In debug mode, all unshareable objects are labeled with the creating Ractor's ID, and the VM checks it to detect unshareable-object leaks (an object accessed from a different Ractor).
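A small way to observe this bookkeeping from Ruby is `Ractor.count` (defined in ractor.rb in this commit); the exact numbers below assume no other ractors are running.
```ruby
p Ractor.count            #=> 1, only the main Ractor exists
r = Ractor.new { Ractor.recv }
p Ractor.count            #=> 2, the new Ractor runs on its own native thread
r.send :done
r.take                    # wait for r to finish
```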
## Examples
### Traditional Ring example in Actor-model
```ruby
RN = 1000
CR = Ractor.current
r = Ractor.new do
p Ractor.recv
CR << :fin
end
RN.times{
Ractor.new r do |next_r|
next_r << Ractor.recv
end
}
p :setup_ok
r << 1
p Ractor.recv
```
### Fork-join
```ruby
def fib n
if n < 2
1
else
fib(n-2) + fib(n-1)
end
end
RN = 10
rs = (1..RN).map do |i|
Ractor.new i do |i|
[i, fib(i)]
end
end
until rs.empty?
r, v = Ractor.select(*rs)
rs.delete r
p answer: v
end
```
### Worker pool
```ruby
require 'prime'
pipe = Ractor.new do
loop do
Ractor.yield Ractor.recv
end
end
N = 1000
RN = 10
workers = (1..RN).map do
Ractor.new pipe do |pipe|
while n = pipe.take
Ractor.yield [n, n.prime?]
end
end
end
(1..N).each{|i|
pipe << i
}
pp (1..N).map{
_r, (n, b) = Ractor.select(*workers)
[n, b]
}.sort_by{|(n, b)| n}
```
### Pipeline
```ruby
# pipeline with yield/take
r1 = Ractor.new do
'r1'
end
r2 = Ractor.new r1 do |r1|
r1.take + 'r2'
end
r3 = Ractor.new r2 do |r2|
r2.take + 'r3'
end
p r3.take #=> 'r1r2r3'
```
```ruby
# pipeline with send/recv
r3 = Ractor.new Ractor.current do |cr|
cr.send Ractor.recv + 'r3'
end
r2 = Ractor.new r3 do |r3|
r3.send Ractor.recv + 'r2'
end
r1 = Ractor.new r2 do |r2|
r2.send Ractor.recv + 'r1'
end
r1 << 'r0'
p Ractor.recv #=> "r0r1r2r3"
```
### Supervise
```ruby
# ring example again
r = Ractor.current
(1..10).map{|i|
r = Ractor.new r, i do |r, i|
r.send Ractor.recv + "r#{i}"
end
}
r.send "r0"
p Ractor.recv #=> "r0r10r9r8r7r6r5r4r3r2r1"
```
```ruby
# ring example with an error
r = Ractor.current
rs = (1..10).map{|i|
r = Ractor.new r, i do |r, i|
loop do
msg = Ractor.recv
raise if /e/ =~ msg
r.send msg + "r#{i}"
end
end
}
r.send "r0"
p Ractor.recv #=> "r0r10r9r8r7r6r5r4r3r2r1"
r.send "r0"
p Ractor.select(*rs, Ractor.current) #=> [:recv, "r0r10r9r8r7r6r5r4r3r2r1"]
r.send "e0"
p Ractor.select(*rs, Ractor.current)
#=>
#<Thread:0x000056262de28bd8 run> terminated with exception (report_on_exception is true):
Traceback (most recent call last):
2: from /home/ko1/src/ruby/trunk/test.rb:7:in `block (2 levels) in <main>'
1: from /home/ko1/src/ruby/trunk/test.rb:7:in `loop'
/home/ko1/src/ruby/trunk/test.rb:9:in `block (3 levels) in <main>': unhandled exception
Traceback (most recent call last):
2: from /home/ko1/src/ruby/trunk/test.rb:7:in `block (2 levels) in <main>'
1: from /home/ko1/src/ruby/trunk/test.rb:7:in `loop'
/home/ko1/src/ruby/trunk/test.rb:9:in `block (3 levels) in <main>': unhandled exception
1: from /home/ko1/src/ruby/trunk/test.rb:21:in `<main>'
<internal:ractor>:69:in `select': thrown by remote Ractor. (Ractor::RemoteError)
```
```ruby
# resend non-error message
r = Ractor.current
rs = (1..10).map{|i|
r = Ractor.new r, i do |r, i|
loop do
msg = Ractor.recv
raise if /e/ =~ msg
r.send msg + "r#{i}"
end
end
}
r.send "r0"
p Ractor.recv #=> "r0r10r9r8r7r6r5r4r3r2r1"
r.send "r0"
p Ractor.select(*rs, Ractor.current)
#=> [:recv, "r0r10r9r8r7r6r5r4r3r2r1"]
msg = 'e0'
begin
r.send msg
p Ractor.select(*rs, Ractor.current)
rescue Ractor::RemoteError
msg = 'r0'
retry
end
#=> <internal:ractor>:100:in `send': The incoming-port is already closed (Ractor::ClosedError)
# because r == rs[-1] is terminated.
```
```ruby
# ring example with supervisor and re-start
def make_ractor r, i
Ractor.new r, i do |r, i|
loop do
msg = Ractor.recv
raise if /e/ =~ msg
r.send msg + "r#{i}"
end
end
end
r = Ractor.current
rs = (1..10).map{|i|
r = make_ractor(r, i)
}
msg = 'e0' # error causing message
begin
r.send msg
p Ractor.select(*rs, Ractor.current)
rescue Ractor::RemoteError
r = rs[-1] = make_ractor(rs[-2], rs.size-1)
msg = 'x0'
retry
end
#=> [:recv, "x0r9r9r8r7r6r5r4r3r2r1"]
```

3 eval.c
@ -35,6 +35,7 @@
#include "probes_helper.h"
#include "ruby/vm.h"
#include "vm_core.h"
#include "ractor.h"
NORETURN(void rb_raise_jump(VALUE, VALUE));
void rb_ec_clear_current_thread_trace_func(const rb_execution_context_t *ec);
@ -227,7 +228,7 @@ rb_ec_cleanup(rb_execution_context_t *ec, volatile int ex)
th->status = THREAD_KILLED;
errs[0] = ec->errinfo;
SAVE_ROOT_JMPBUF(th, rb_thread_terminate_all());
SAVE_ROOT_JMPBUF(th, rb_ractor_terminate_all());
}
else {
switch (step) {

@ -52,6 +52,20 @@ ripper.E: ripper.c
ripper.o: $(RUBY_EXTCONF_H)
ripper.o: $(arch_hdrdir)/ruby/config.h
ripper.o: $(hdrdir)/ruby.h
ripper.o: $(hdrdir)/ruby/assert.h
ripper.o: $(hdrdir)/ruby/backward.h
ripper.o: $(hdrdir)/ruby/backward/2/assume.h
ripper.o: $(hdrdir)/ruby/backward/2/attributes.h
ripper.o: $(hdrdir)/ruby/backward/2/bool.h
ripper.o: $(hdrdir)/ruby/backward/2/gcc_version_since.h
ripper.o: $(hdrdir)/ruby/backward/2/inttypes.h
ripper.o: $(hdrdir)/ruby/backward/2/limits.h
ripper.o: $(hdrdir)/ruby/backward/2/long_long.h
ripper.o: $(hdrdir)/ruby/backward/2/stdalign.h
ripper.o: $(hdrdir)/ruby/backward/2/stdarg.h
ripper.o: $(hdrdir)/ruby/defines.h
ripper.o: $(hdrdir)/ruby/encoding.h
ripper.o: $(hdrdir)/ruby/intern.h
ripper.o: $(hdrdir)/ruby/internal/anyargs.h
ripper.o: $(hdrdir)/ruby/internal/arithmetic.h
ripper.o: $(hdrdir)/ruby/internal/arithmetic/char.h
@ -192,20 +206,6 @@ ripper.o: $(hdrdir)/ruby/internal/value_type.h
ripper.o: $(hdrdir)/ruby/internal/variable.h
ripper.o: $(hdrdir)/ruby/internal/warning_push.h
ripper.o: $(hdrdir)/ruby/internal/xmalloc.h
ripper.o: $(hdrdir)/ruby/assert.h
ripper.o: $(hdrdir)/ruby/backward.h
ripper.o: $(hdrdir)/ruby/backward/2/assume.h
ripper.o: $(hdrdir)/ruby/backward/2/attributes.h
ripper.o: $(hdrdir)/ruby/backward/2/bool.h
ripper.o: $(hdrdir)/ruby/backward/2/gcc_version_since.h
ripper.o: $(hdrdir)/ruby/backward/2/inttypes.h
ripper.o: $(hdrdir)/ruby/backward/2/limits.h
ripper.o: $(hdrdir)/ruby/backward/2/long_long.h
ripper.o: $(hdrdir)/ruby/backward/2/stdalign.h
ripper.o: $(hdrdir)/ruby/backward/2/stdarg.h
ripper.o: $(hdrdir)/ruby/defines.h
ripper.o: $(hdrdir)/ruby/encoding.h
ripper.o: $(hdrdir)/ruby/intern.h
ripper.o: $(hdrdir)/ruby/io.h
ripper.o: $(hdrdir)/ruby/missing.h
ripper.o: $(hdrdir)/ruby/onigmo.h
@ -244,6 +244,7 @@ ripper.o: $(top_srcdir)/internal/variable.h
ripper.o: $(top_srcdir)/internal/vm.h
ripper.o: $(top_srcdir)/internal/warnings.h
ripper.o: $(top_srcdir)/node.h
ripper.o: $(top_srcdir)/ractor_pub.h
ripper.o: $(top_srcdir)/regenc.h
ripper.o: $(top_srcdir)/ruby_assert.h
ripper.o: $(top_srcdir)/symbol.h

349 gc.c
@ -101,7 +101,9 @@
#include "symbol.h"
#include "transient_heap.h"
#include "vm_core.h"
#include "vm_sync.h"
#include "vm_callinfo.h"
#include "ractor.h"
#include "builtin.h"
@ -402,7 +404,7 @@ int ruby_rgengc_debug;
* 2: enable profiling for each types
*/
#ifndef RGENGC_PROFILE
#define RGENGC_PROFILE 0
#define RGENGC_PROFILE 1
#endif
/* RGENGC_ESTIMATE_OLDMALLOC
@ -884,7 +886,6 @@ VALUE *ruby_initial_gc_stress_ptr = &ruby_initial_gc_stress;
#define heap_pages_deferred_final objspace->heap_pages.deferred_final
#define heap_eden (&objspace->eden_heap)
#define heap_tomb (&objspace->tomb_heap)
#define dont_gc objspace->flags.dont_gc
#define during_gc objspace->flags.during_gc
#define finalizing objspace->atomic_flags.finalizing
#define finalizer_table objspace->finalizer_table
@ -897,6 +898,18 @@ VALUE *ruby_initial_gc_stress_ptr = &ruby_initial_gc_stress;
#define stress_to_class 0
#endif
#if 0
#define dont_gc_on() (fprintf(stderr, "dont_gc_on@%s:%d\n", __FILE__, __LINE__), objspace->flags.dont_gc = 1)
#define dont_gc_off() (fprintf(stderr, "dont_gc_off@%s:%d\n", __FILE__, __LINE__), objspace->flags.dont_gc = 0)
#define dont_gc_set(b) (fprintf(stderr, "dont_gc_set(%d)@%s:%d\n", __FILE__, __LINE__), (int)b), objspace->flags.dont_gc = (b))
#define dont_gc_val() (objspace->flags.dont_gc)
#else
#define dont_gc_on() (objspace->flags.dont_gc = 1)
#define dont_gc_off() (objspace->flags.dont_gc = 0)
#define dont_gc_set(b) (((int)b), objspace->flags.dont_gc = (b))
#define dont_gc_val() (objspace->flags.dont_gc)
#endif
static inline enum gc_mode
gc_mode_verify(enum gc_mode mode)
{
@ -984,8 +997,8 @@ static int garbage_collect(rb_objspace_t *, int reason);
static int gc_start(rb_objspace_t *objspace, int reason);
static void gc_rest(rb_objspace_t *objspace);
static inline void gc_enter(rb_objspace_t *objspace, const char *event);
static inline void gc_exit(rb_objspace_t *objspace, const char *event);
static inline void gc_enter(rb_objspace_t *objspace, const char *event, unsigned int *lock_lev);
static inline void gc_exit(rb_objspace_t *objspace, const char *event, unsigned int *lock_lev);
static void gc_marks(rb_objspace_t *objspace, int full_mark);
static void gc_marks_start(rb_objspace_t *objspace, int full);
@ -1233,6 +1246,7 @@ check_rvalue_consistency_force(const VALUE obj, int terminate)
goto skip;
}
}
bp();
fprintf(stderr, "check_rvalue_consistency: %p is not a Ruby object.\n", (void *)obj);
err++;
skip:
@ -1561,7 +1575,7 @@ rb_objspace_alloc(void)
malloc_limit = gc_params.malloc_limit_min;
list_head_init(&objspace->eden_heap.pages);
list_head_init(&objspace->tomb_heap.pages);
dont_gc = TRUE;
dont_gc_on();
return objspace;
}
@ -2032,7 +2046,7 @@ heap_get_freeobj_head(rb_objspace_t *objspace, rb_heap_t *heap)
{
RVALUE *p = heap->freelist;
if (LIKELY(p != NULL)) {
heap->freelist = p->as.free.next;
heap->freelist = p->as.free.next;
}
asan_unpoison_object((VALUE)p, true);
return (VALUE)p;
@ -2108,6 +2122,10 @@ newobj_init(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, int wb_prote
};
MEMCPY(RANY(obj), &buf, RVALUE, 1);
#if RACTOR_CHECK_MODE
rb_ractor_setup_belonging(obj);
#endif
#if RGENGC_CHECK_MODE
GC_ASSERT(RVALUE_MARKED(obj) == FALSE);
GC_ASSERT(RVALUE_MARKING(obj) == FALSE);
@ -2115,58 +2133,57 @@ newobj_init(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, int wb_prote
GC_ASSERT(RVALUE_WB_UNPROTECTED(obj) == FALSE);
if (flags & FL_PROMOTED1) {
if (RVALUE_AGE(obj) != 2) rb_bug("newobj: %s of age (%d) != 2.", obj_info(obj), RVALUE_AGE(obj));
if (RVALUE_AGE(obj) != 2) rb_bug("newobj: %s of age (%d) != 2.", obj_info(obj), RVALUE_AGE(obj));
}
else {
if (RVALUE_AGE(obj) > 0) rb_bug("newobj: %s of age (%d) > 0.", obj_info(obj), RVALUE_AGE(obj));
if (RVALUE_AGE(obj) > 0) rb_bug("newobj: %s of age (%d) > 0.", obj_info(obj), RVALUE_AGE(obj));
}
if (rgengc_remembered(objspace, (VALUE)obj)) rb_bug("newobj: %s is remembered.", obj_info(obj));
#endif
if (UNLIKELY(wb_protected == FALSE)) {
MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
}
#if RGENGC_PROFILE
if (wb_protected) {
objspace->profile.total_generated_normal_object_count++;
objspace->profile.total_generated_normal_object_count++;
#if RGENGC_PROFILE >= 2
objspace->profile.generated_normal_object_count_types[BUILTIN_TYPE(obj)]++;
objspace->profile.generated_normal_object_count_types[BUILTIN_TYPE(obj)]++;
#endif
}
else {
objspace->profile.total_generated_shady_object_count++;
objspace->profile.total_generated_shady_object_count++;
#if RGENGC_PROFILE >= 2
objspace->profile.generated_shady_object_count_types[BUILTIN_TYPE(obj)]++;
objspace->profile.generated_shady_object_count_types[BUILTIN_TYPE(obj)]++;
#endif
}
#endif
objspace->total_allocated_objects++;
#if GC_DEBUG
RANY(obj)->file = rb_source_location_cstr(&RANY(obj)->line);
GC_ASSERT(!SPECIAL_CONST_P(obj)); /* check alignment */
#endif
objspace->total_allocated_objects++;
gc_report(5, objspace, "newobj: %s\n", obj_info(obj));
#if RGENGC_OLD_NEWOBJ_CHECK > 0
{
static int newobj_cnt = RGENGC_OLD_NEWOBJ_CHECK;
static int newobj_cnt = RGENGC_OLD_NEWOBJ_CHECK;
if (!is_incremental_marking(objspace) &&
flags & FL_WB_PROTECTED && /* do not promote WB unprotected objects */
! RB_TYPE_P(obj, T_ARRAY)) { /* array.c assumes that allocated objects are new */
if (--newobj_cnt == 0) {
newobj_cnt = RGENGC_OLD_NEWOBJ_CHECK;
if (!is_incremental_marking(objspace) &&
flags & FL_WB_PROTECTED && /* do not promote WB unprotected objects */
! RB_TYPE_P(obj, T_ARRAY)) { /* array.c assumes that allocated objects are new */
if (--newobj_cnt == 0) {
newobj_cnt = RGENGC_OLD_NEWOBJ_CHECK;
gc_mark_set(objspace, obj);
RVALUE_AGE_SET_OLD(objspace, obj);
gc_mark_set(objspace, obj);
RVALUE_AGE_SET_OLD(objspace, obj);
rb_gc_writebarrier_remember(obj);
}
}
rb_gc_writebarrier_remember(obj);
}
}
}
#endif
check_rvalue_consistency(obj);
@ -2179,20 +2196,21 @@ newobj_slowpath(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, rb_objsp
VALUE obj;
if (UNLIKELY(during_gc || ruby_gc_stressful)) {
if (during_gc) {
dont_gc = 1;
during_gc = 0;
rb_bug("object allocation during garbage collection phase");
}
if (during_gc) {
dont_gc_on();
during_gc = 0;
rb_bug("object allocation during garbage collection phase");
}
if (ruby_gc_stressful) {
if (!garbage_collect(objspace, GPR_FLAG_NEWOBJ)) {
rb_memerror();
}
}
if (ruby_gc_stressful) {
if (!garbage_collect(objspace, GPR_FLAG_NEWOBJ)) {
rb_memerror();
}
}
}
obj = heap_get_freeobj(objspace, heap_eden);
newobj_init(klass, flags, v1, v2, v3, wb_protected, objspace, obj);
gc_event_hook(objspace, RUBY_INTERNAL_EVENT_NEWOBJ, obj);
return obj;
@ -2219,30 +2237,36 @@ newobj_of(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, int wb_protect
rb_objspace_t *objspace = &rb_objspace;
VALUE obj;
RB_DEBUG_COUNTER_INC(obj_newobj);
(void)RB_DEBUG_COUNTER_INC_IF(obj_newobj_wb_unprotected, !wb_protected);
RB_VM_LOCK_ENTER();
{
RB_DEBUG_COUNTER_INC(obj_newobj);
(void)RB_DEBUG_COUNTER_INC_IF(obj_newobj_wb_unprotected, !wb_protected);
#if GC_DEBUG_STRESS_TO_CLASS
if (UNLIKELY(stress_to_class)) {
long i, cnt = RARRAY_LEN(stress_to_class);
for (i = 0; i < cnt; ++i) {
if (klass == RARRAY_AREF(stress_to_class, i)) rb_memerror();
if (UNLIKELY(stress_to_class)) {
long i, cnt = RARRAY_LEN(stress_to_class);
for (i = 0; i < cnt; ++i) {
if (klass == RARRAY_AREF(stress_to_class, i)) rb_memerror();
}
}
#endif
if (!(during_gc ||
ruby_gc_stressful ||
gc_event_hook_available_p(objspace)) &&
(obj = heap_get_freeobj_head(objspace, heap_eden)) != Qfalse) {
newobj_init(klass, flags, v1, v2, v3, wb_protected, objspace, obj);
}
else {
RB_DEBUG_COUNTER_INC(obj_newobj_slowpath);
obj = wb_protected ?
newobj_slowpath_wb_protected(klass, flags, v1, v2, v3, objspace) :
newobj_slowpath_wb_unprotected(klass, flags, v1, v2, v3, objspace);
}
}
#endif
if (!(during_gc ||
ruby_gc_stressful ||
gc_event_hook_available_p(objspace)) &&
(obj = heap_get_freeobj_head(objspace, heap_eden)) != Qfalse) {
return newobj_init(klass, flags, v1, v2, v3, wb_protected, objspace, obj);
}
else {
RB_DEBUG_COUNTER_INC(obj_newobj_slowpath);
RB_VM_LOCK_LEAVE();
return wb_protected ?
newobj_slowpath_wb_protected(klass, flags, v1, v2, v3, objspace) :
newobj_slowpath_wb_unprotected(klass, flags, v1, v2, v3, objspace);
}
return obj;
}
VALUE
@ -2273,6 +2297,18 @@ rb_newobj_of(VALUE klass, VALUE flags)
return newobj_of(klass, flags & ~FL_WB_PROTECTED, 0, 0, 0, flags & FL_WB_PROTECTED);
}
VALUE
rb_newobj_with(VALUE src)
{
VALUE klass = RBASIC_CLASS(src);
VALUE flags = RBASIC(src)->flags;
VALUE v1 = RANY(src)->as.values.v1;
VALUE v2 = RANY(src)->as.values.v2;
VALUE v3 = RANY(src)->as.values.v3;
return newobj_of(klass, flags & ~FL_WB_PROTECTED, v1, v2, v3, flags & FL_WB_PROTECTED);
}
#define UNEXPECTED_NODE(func) \
rb_bug(#func"(): GC does not handle T_NODE 0x%x(%p) 0x%"PRIxVALUE, \
BUILTIN_TYPE(obj), (void*)(obj), RBASIC(obj)->flags)
@ -3597,10 +3633,11 @@ rb_objspace_call_finalizer(rb_objspace_t *objspace)
}
/* prohibit GC because force T_DATA finalizers can break an object graph consistency */
dont_gc = 1;
dont_gc_on();
/* running data/file finalizers are part of garbage collection */
gc_enter(objspace, "rb_objspace_call_finalizer");
unsigned int lock_lev;
gc_enter(objspace, "rb_objspace_call_finalizer", &lock_lev);
/* run data/file object's finalizers */
for (i = 0; i < heap_allocated_pages; i++) {
@ -3642,7 +3679,7 @@ rb_objspace_call_finalizer(rb_objspace_t *objspace)
}
}
gc_exit(objspace, "rb_objspace_call_finalizer");
gc_exit(objspace, "rb_objspace_call_finalizer", &lock_lev);
if (heap_pages_deferred_final) {
finalize_list(objspace, heap_pages_deferred_final);
@ -4529,15 +4566,16 @@ gc_sweep_rest(rb_objspace_t *objspace)
static void
gc_sweep_continue(rb_objspace_t *objspace, rb_heap_t *heap)
{
GC_ASSERT(dont_gc == FALSE);
GC_ASSERT(dont_gc_val() == FALSE);
if (!GC_ENABLE_LAZY_SWEEP) return;
gc_enter(objspace, "sweep_continue");
unsigned int lock_lev;
gc_enter(objspace, "sweep_continue", &lock_lev);
if (objspace->rgengc.need_major_gc == GPR_FLAG_NONE && heap_increment(objspace, heap)) {
gc_report(3, objspace, "gc_sweep_continue: success heap_increment().\n");
}
gc_sweep_step(objspace, heap);
gc_exit(objspace, "sweep_continue");
gc_exit(objspace, "sweep_continue", &lock_lev);
}
static void
@ -5944,8 +5982,8 @@ objspace_allrefs(rb_objspace_t *objspace)
struct allrefs data;
struct mark_func_data_struct mfd;
VALUE obj;
int prev_dont_gc = dont_gc;
dont_gc = TRUE;
int prev_dont_gc = dont_gc_val();
dont_gc_on();
data.objspace = objspace;
data.references = st_init_numtable();
@ -5966,7 +6004,7 @@ objspace_allrefs(rb_objspace_t *objspace)
}
free_stack_chunks(&data.mark_stack);
dont_gc = prev_dont_gc;
dont_gc_set(prev_dont_gc);
return data.references;
}
@ -6288,8 +6326,8 @@ gc_verify_heap_pages(rb_objspace_t *objspace)
static VALUE
gc_verify_internal_consistency_m(VALUE dummy)
{
ASSERT_vm_locking();
gc_verify_internal_consistency(&rb_objspace);
return Qnil;
}
@ -6639,10 +6677,11 @@ gc_marks_rest(rb_objspace_t *objspace)
static void
gc_marks_continue(rb_objspace_t *objspace, rb_heap_t *heap)
{
GC_ASSERT(dont_gc == FALSE);
GC_ASSERT(dont_gc_val() == FALSE);
#if GC_ENABLE_INCREMENTAL_MARK
gc_enter(objspace, "marks_continue");
unsigned int lock_lev;
gc_enter(objspace, "marks_continue", &lock_lev);
PUSH_MARK_FUNC_DATA(NULL);
{
@ -6674,7 +6713,7 @@ gc_marks_continue(rb_objspace_t *objspace, rb_heap_t *heap)
}
POP_MARK_FUNC_DATA();
gc_exit(objspace, "marks_continue");
gc_exit(objspace, "marks_continue", &lock_lev);
#endif
}
@ -6973,21 +7012,50 @@ void
rb_gc_writebarrier(VALUE a, VALUE b)
{
rb_objspace_t *objspace = &rb_objspace;
bool retry;
if (RGENGC_CHECK_MODE && SPECIAL_CONST_P(a)) rb_bug("rb_gc_writebarrier: a is special const");
if (RGENGC_CHECK_MODE && SPECIAL_CONST_P(b)) rb_bug("rb_gc_writebarrier: b is special const");
retry_:
retry = false;
if (!is_incremental_marking(objspace)) {
if (!RVALUE_OLD_P(a) || RVALUE_OLD_P(b)) {
return;
}
else {
gc_writebarrier_generational(a, b, objspace);
}
if (!RVALUE_OLD_P(a) || RVALUE_OLD_P(b)) {
// do nothing
}
else {
RB_VM_LOCK_ENTER(); // can change GC state
{
if (!is_incremental_marking(objspace)) {
if (!RVALUE_OLD_P(a) || RVALUE_OLD_P(b)) {
// do nothing
}
else {
gc_writebarrier_generational(a, b, objspace);
}
}
else {
retry = true;
}
}
RB_VM_LOCK_LEAVE();
}
}
else { /* slow path */
gc_writebarrier_incremental(a, b, objspace);
RB_VM_LOCK_ENTER(); // can change GC state
{
if (is_incremental_marking(objspace)) {
gc_writebarrier_incremental(a, b, objspace);
}
else {
retry = true;
}
}
RB_VM_LOCK_LEAVE();
}
if (retry) goto retry_;
return;
}
void
@ -7154,46 +7222,49 @@ void
rb_gc_force_recycle(VALUE obj)
{
rb_objspace_t *objspace = &rb_objspace;
RB_VM_LOCK_ENTER();
{
int is_old = RVALUE_OLD_P(obj);
int is_old = RVALUE_OLD_P(obj);
gc_report(2, objspace, "rb_gc_force_recycle: %s\n", obj_info(obj));
gc_report(2, objspace, "rb_gc_force_recycle: %s\n", obj_info(obj));
if (is_old) {
if (RVALUE_MARKED(obj)) {
objspace->rgengc.old_objects--;
}
}
CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), obj);
CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
if (is_old) {
if (RVALUE_MARKED(obj)) {
objspace->rgengc.old_objects--;
}
}
CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), obj);
CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
#if GC_ENABLE_INCREMENTAL_MARK
if (is_incremental_marking(objspace)) {
if (MARKED_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj)) {
invalidate_mark_stack(&objspace->mark_stack, obj);
CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
}
CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj);
}
else {
if (is_incremental_marking(objspace)) {
if (MARKED_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj)) {
invalidate_mark_stack(&objspace->mark_stack, obj);
CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
}
CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj);
}
else {
#endif
if (is_old || !GET_HEAP_PAGE(obj)->flags.before_sweep) {
CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj);
}
CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
if (is_old || !GET_HEAP_PAGE(obj)->flags.before_sweep) {
CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj);
}
CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
#if GC_ENABLE_INCREMENTAL_MARK
}
}
#endif
objspace->profile.total_freed_objects++;
objspace->profile.total_freed_objects++;
heap_page_add_freeobj(objspace, GET_HEAP_PAGE(obj), obj);
heap_page_add_freeobj(objspace, GET_HEAP_PAGE(obj), obj);
/* Disable counting swept_slots because there are no meaning.
* if (!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(p), p)) {
* objspace->heap.swept_slots++;
* }
*/
/* Disable counting swept_slots because there are no meaning.
* if (!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(p), p)) {
* objspace->heap.swept_slots++;
* }
*/
}
RB_VM_LOCK_LEAVE();
}
#ifndef MARK_OBJECT_ARY_BUCKET_SIZE
@ -7281,7 +7352,7 @@ heap_ready_to_gc(rb_objspace_t *objspace, rb_heap_t *heap)
static int
ready_to_gc(rb_objspace_t *objspace)
{
if (dont_gc || during_gc || ruby_disable_gc) {
if (dont_gc_val() || during_gc || ruby_disable_gc) {
heap_ready_to_gc(objspace, heap_eden);
return FALSE;
}
@ -7361,17 +7432,25 @@ gc_reset_malloc_info(rb_objspace_t *objspace)
static int
garbage_collect(rb_objspace_t *objspace, int reason)
{
int ret;
RB_VM_LOCK_ENTER();
{
#if GC_PROFILE_MORE_DETAIL
objspace->profile.prepare_time = getrusage_time();
objspace->profile.prepare_time = getrusage_time();
#endif
gc_rest(objspace);
gc_rest(objspace);
#if GC_PROFILE_MORE_DETAIL
objspace->profile.prepare_time = getrusage_time() - objspace->profile.prepare_time;
objspace->profile.prepare_time = getrusage_time() - objspace->profile.prepare_time;
#endif
return gc_start(objspace, reason);
ret = gc_start(objspace, reason);
}
RB_VM_LOCK_LEAVE();
return ret;
}
static int
@ -7389,12 +7468,14 @@ gc_start(rb_objspace_t *objspace, int reason)
GC_ASSERT(gc_mode(objspace) == gc_mode_none);
GC_ASSERT(!is_lazy_sweeping(heap_eden));
GC_ASSERT(!is_incremental_marking(objspace));
unsigned int lock_lev;
gc_enter(objspace, "gc_start", &lock_lev);
#if RGENGC_CHECK_MODE >= 2
gc_verify_internal_consistency(objspace);
#endif
gc_enter(objspace, "gc_start");
if (ruby_gc_stressful) {
int flag = FIXNUM_P(ruby_gc_stress_mode) ? FIX2INT(ruby_gc_stress_mode) : 0;
@ -7478,7 +7559,7 @@ gc_start(rb_objspace_t *objspace, int reason)
}
gc_prof_timer_stop(objspace);
gc_exit(objspace, "gc_start");
gc_exit(objspace, "gc_start", &lock_lev);
return TRUE;
}
@ -7489,7 +7570,8 @@ gc_rest(rb_objspace_t *objspace)
int sweeping = is_lazy_sweeping(heap_eden);
if (marking || sweeping) {
gc_enter(objspace, "gc_rest");
unsigned int lock_lev;
gc_enter(objspace, "gc_rest", &lock_lev);
if (RGENGC_CHECK_MODE >= 2) gc_verify_internal_consistency(objspace);
@ -7501,7 +7583,7 @@ gc_rest(rb_objspace_t *objspace)
if (is_lazy_sweeping(heap_eden)) {
gc_sweep_rest(objspace);
}
gc_exit(objspace, "gc_rest");
gc_exit(objspace, "gc_rest", &lock_lev);
}
}
@ -7587,30 +7669,38 @@ gc_record(rb_objspace_t *objspace, int direction, const char *event)
#endif /* PRINT_ENTER_EXIT_TICK */
static inline void
gc_enter(rb_objspace_t *objspace, const char *event)
gc_enter(rb_objspace_t *objspace, const char *event, unsigned int *lock_lev)
{
// stop other ractors
RB_VM_LOCK_ENTER_LEV(lock_lev);
rb_vm_barrier();
GC_ASSERT(during_gc == 0);
if (RGENGC_CHECK_MODE >= 3) gc_verify_internal_consistency(objspace);
mjit_gc_start_hook();
during_gc = TRUE;
RUBY_DEBUG_LOG("%s (%s)", event, gc_current_status(objspace));
gc_report(1, objspace, "gc_enter: %s [%s]\n", event, gc_current_status(objspace));
gc_record(objspace, 0, event);
gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_ENTER, 0); /* TODO: which parameter should be passed? */
}
static inline void
gc_exit(rb_objspace_t *objspace, const char *event)
gc_exit(rb_objspace_t *objspace, const char *event, unsigned int *lock_lev)
{
GC_ASSERT(during_gc != 0);
gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_EXIT, 0); /* TODO: which parameter should be passsed? */
gc_record(objspace, 1, event);
RUBY_DEBUG_LOG("%s (%s)", event, gc_current_status(objspace));
gc_report(1, objspace, "gc_exit: %s [%s]\n", event, gc_current_status(objspace));
during_gc = FALSE;
mjit_gc_exit_hook();
RB_VM_LOCK_LEAVE_LEV(lock_lev);
}
static void *
@ -7623,7 +7713,7 @@ gc_with_gvl(void *ptr)
static int
garbage_collect_with_gvl(rb_objspace_t *objspace, int reason)
{
if (dont_gc) return TRUE;
if (dont_gc_val()) return TRUE;
if (ruby_thread_has_gvl_p()) {
return garbage_collect(objspace, reason);
}
@ -8647,7 +8737,7 @@ static VALUE
rb_gc_compact(rb_execution_context_t *ec, VALUE self)
{
rb_objspace_t *objspace = &rb_objspace;
if (dont_gc) return Qnil;
if (dont_gc_val()) return Qnil;
gc_compact(objspace, FALSE, FALSE, FALSE);
return gc_compact_stats(objspace);
@ -9349,9 +9439,9 @@ rb_gc_enable(void)
VALUE
rb_objspace_gc_enable(rb_objspace_t *objspace)
{
int old = dont_gc;
int old = dont_gc_val();
dont_gc = FALSE;
dont_gc_off();
return old ? Qtrue : Qfalse;
}
@ -9371,8 +9461,8 @@ rb_gc_disable_no_rest(void)
static VALUE
gc_disable_no_rest(rb_objspace_t *objspace)
{
int old = dont_gc;
dont_gc = TRUE;
int old = dont_gc_val();
dont_gc_on();
return old ? Qtrue : Qfalse;
}
@ -9742,7 +9832,10 @@ rb_memerror(void)
sleep(60);
}
if (during_gc) gc_exit(objspace, "rb_memerror");
if (during_gc) {
// TODO: OMG!! How to implement it?
gc_exit(objspace, "rb_memerror", NULL);
}
exc = nomem_error;
if (!exc ||
@ -9869,7 +9962,7 @@ objspace_malloc_increase(rb_objspace_t *objspace, void *mem, size_t new_size, si
if (type == MEMOP_TYPE_MALLOC) {
retry:
if (malloc_increase > malloc_limit && ruby_native_thread_p() && !dont_gc) {
if (malloc_increase > malloc_limit && ruby_native_thread_p() && !dont_gc_val()) {
if (ruby_thread_has_gvl_p() && is_lazy_sweeping(heap_eden)) {
gc_rest(objspace); /* gc_rest can reduce malloc_increase */
goto retry;
@ -11607,6 +11700,8 @@ rb_raw_iseq_info(char *buff, const int buff_size, const rb_iseq_t *iseq)
}
}
bool rb_ractor_p(VALUE rv);
const char *
rb_raw_obj_info(char *buff, const int buff_size, VALUE obj)
{
@ -11750,6 +11845,10 @@ rb_raw_obj_info(char *buff, const int buff_size, VALUE obj)
(iseq = vm_block_iseq(block)) != NULL) {
rb_raw_iseq_info(BUFF_ARGS, iseq);
}
else if (rb_ractor_p(obj)) {
rb_ractor_t *r = (void *)DATA_PTR(obj);
APPENDF((BUFF_ARGS, "r:%d", r->id));
}
else {
const char * const type_name = rb_objspace_data_type_name(obj);
if (type_name) {

7 gc.h
@ -16,6 +16,13 @@ NOINLINE(void rb_gc_set_stack_end(VALUE **stack_end_p));
#define USE_CONSERVATIVE_STACK_END
#endif
#define RB_GC_SAVE_MACHINE_CONTEXT(th) \
do { \
FLUSH_REGISTER_WINDOWS; \
setjmp((th)->ec->machine.regs); \
SET_MACHINE_STACK_END(&(th)->ec->machine.stack_end); \
} while (0)
/* for GC debug */
#ifndef RUBY_MARK_FREE_DEBUG

@ -165,6 +165,7 @@ ruby_fl_type {
RUBY_FL_PROMOTED = RUBY_FL_PROMOTED0 | RUBY_FL_PROMOTED1,
RUBY_FL_FINALIZE = (1<<7),
RUBY_FL_TAINT = (1<<8),
RUBY_FL_SHAREABLE = (1<<8),
RUBY_FL_UNTRUSTED = RUBY_FL_TAINT,
RUBY_FL_SEEN_OBJ_ID = (1<<9),
RUBY_FL_EXIVAR = (1<<10),

@ -60,6 +60,7 @@ rb_call_inits(void)
CALL(Math);
CALL(GC);
CALL(Enumerator);
CALL(Ractor);
CALL(VM);
CALL(ISeq);
CALL(Thread);
@ -82,6 +83,7 @@ rb_call_builtin_inits(void)
{
#define BUILTIN(n) CALL(builtin_##n)
BUILTIN(gc);
BUILTIN(ractor);
BUILTIN(integer);
BUILTIN(io);
BUILTIN(dir);

@ -32,6 +32,7 @@ NORETURN(VALUE rb_mod_const_missing(VALUE,VALUE));
rb_gvar_getter_t *rb_gvar_getter_function_of(ID);
rb_gvar_setter_t *rb_gvar_setter_function_of(ID);
void rb_gvar_readonly_setter(VALUE v, ID id, VALUE *_);
void rb_gvar_ractor_local(const char *name);
static inline bool ROBJ_TRANSIENT_P(VALUE obj);
static inline void ROBJ_TRANSIENT_SET(VALUE obj);
static inline void ROBJ_TRANSIENT_UNSET(VALUE obj);

146 io.c
@ -132,6 +132,7 @@
#include "ruby/thread.h"
#include "ruby/util.h"
#include "ruby_atomic.h"
#include "ractor_pub.h"
#if !USE_POLL
# include "vm_core.h"
@ -1478,7 +1479,7 @@ io_binwrite(VALUE str, const char *ptr, long len, rb_io_t *fptr, int nosync)
fptr->wbuf.len = 0;
fptr->wbuf.capa = IO_WBUF_CAPA_MIN;
fptr->wbuf.ptr = ALLOC_N(char, fptr->wbuf.capa);
fptr->write_lock = rb_mutex_new();
fptr->write_lock = rb_mutex_new();
rb_mutex_allow_trap(fptr->write_lock, 1);
}
if ((!nosync && (fptr->mode & (FMODE_SYNC|FMODE_TTY))) ||
@ -1491,7 +1492,7 @@ io_binwrite(VALUE str, const char *ptr, long len, rb_io_t *fptr, int nosync)
arg.ptr = ptr + offset;
arg.length = n;
if (fptr->write_lock) {
r = rb_mutex_synchronize(fptr->write_lock, io_binwrite_string, (VALUE)&arg);
r = rb_mutex_synchronize(fptr->write_lock, io_binwrite_string, (VALUE)&arg);
}
else {
r = io_binwrite_string((VALUE)&arg);
@ -1877,7 +1878,7 @@ static VALUE
rb_io_writev(VALUE io, int argc, const VALUE *argv)
{
if (argc > 1 && rb_obj_method_arity(io, id_write) == 1) {
if (io != rb_stderr && RTEST(ruby_verbose)) {
if (io != rb_ractor_stderr() && RTEST(ruby_verbose)) {
VALUE klass = CLASS_OF(io);
char sep = FL_TEST(klass, FL_SINGLETON) ? (klass = io, '.') : '#';
rb_warning("%+"PRIsVALUE"%c""write is outdated interface"
@ -4291,11 +4292,12 @@ rb_io_getbyte(VALUE io)
GetOpenFile(io, fptr);
rb_io_check_byte_readable(fptr);
READ_CHECK(fptr);
if (fptr->fd == 0 && (fptr->mode & FMODE_TTY) && RB_TYPE_P(rb_stdout, T_FILE)) {
VALUE r_stdout = rb_ractor_stdout();
if (fptr->fd == 0 && (fptr->mode & FMODE_TTY) && RB_TYPE_P(r_stdout, T_FILE)) {
rb_io_t *ofp;
GetOpenFile(rb_stdout, ofp);
GetOpenFile(r_stdout, ofp);
if (ofp->mode & FMODE_TTY) {
rb_io_flush(rb_stdout);
rb_io_flush(r_stdout);
}
}
if (io_fillbuf(fptr) < 0) {
@ -7034,8 +7036,8 @@ popen_finish(VALUE port, VALUE klass)
/* child */
if (rb_block_given_p()) {
rb_yield(Qnil);
rb_io_flush(rb_stdout);
rb_io_flush(rb_stderr);
rb_io_flush(rb_ractor_stdout());
rb_io_flush(rb_ractor_stderr());
_exit(0);
}
return Qnil;
@ -7624,7 +7626,7 @@ rb_f_printf(int argc, VALUE *argv, VALUE _)
if (argc == 0) return Qnil;
if (RB_TYPE_P(argv[0], T_STRING)) {
out = rb_stdout;
out = rb_ractor_stdout();
}
else {
out = argv[0];
@ -7724,7 +7726,7 @@ rb_io_print(int argc, const VALUE *argv, VALUE out)
static VALUE
rb_f_print(int argc, const VALUE *argv, VALUE _)
{
rb_io_print(argc, argv, rb_stdout);
rb_io_print(argc, argv, rb_ractor_stdout());
return Qnil;
}
@ -7775,10 +7777,11 @@ rb_io_putc(VALUE io, VALUE ch)
static VALUE
rb_f_putc(VALUE recv, VALUE ch)
{
if (recv == rb_stdout) {
VALUE r_stdout = rb_ractor_stdout();
if (recv == r_stdout) {
return rb_io_putc(recv, ch);
}
return rb_funcallv(rb_stdout, rb_intern("putc"), 1, &ch);
return rb_funcallv(r_stdout, rb_intern("putc"), 1, &ch);
}
@ -7889,10 +7892,11 @@ rb_io_puts(int argc, const VALUE *argv, VALUE out)
static VALUE
rb_f_puts(int argc, VALUE *argv, VALUE recv)
{
if (recv == rb_stdout) {
VALUE r_stdout = rb_ractor_stdout();
if (recv == r_stdout) {
return rb_io_puts(argc, argv, recv);
}
return rb_funcallv(rb_stdout, rb_intern("puts"), argc, argv);
return rb_funcallv(r_stdout, rb_intern("puts"), argc, argv);
}
static VALUE
@ -7901,12 +7905,13 @@ rb_p_write(VALUE str)
VALUE args[2];
args[0] = str;
args[1] = rb_default_rs;
if (RB_TYPE_P(rb_stdout, T_FILE) &&
rb_method_basic_definition_p(CLASS_OF(rb_stdout), id_write)) {
io_writev(2, args, rb_stdout);
VALUE r_stdout = rb_ractor_stdout();
if (RB_TYPE_P(r_stdout, T_FILE) &&
rb_method_basic_definition_p(CLASS_OF(r_stdout), id_write)) {
io_writev(2, args, r_stdout);
}
else {
rb_io_writev(rb_stdout, 2, args);
rb_io_writev(r_stdout, 2, args);
}
return Qnil;
}
@ -7928,8 +7933,9 @@ rb_p_result(int argc, const VALUE *argv)
else if (argc > 1) {
ret = rb_ary_new4(argc, argv);
}
if (RB_TYPE_P(rb_stdout, T_FILE)) {
rb_io_flush(rb_stdout);
VALUE r_stdout = rb_ractor_stdout();
if (RB_TYPE_P(r_stdout, T_FILE)) {
rb_io_flush(r_stdout);
}
return ret;
}
@ -7992,7 +7998,7 @@ rb_obj_display(int argc, VALUE *argv, VALUE self)
{
VALUE out;
out = (!rb_check_arity(argc, 0, 1) ? rb_stdout : argv[0]);
out = (!rb_check_arity(argc, 0, 1) ? rb_ractor_stdout() : argv[0]);
rb_io_write(out, self);
return Qnil;
@ -8001,7 +8007,7 @@ rb_obj_display(int argc, VALUE *argv, VALUE self)
static int
rb_stderr_to_original_p(void)
{
return (rb_stderr == orig_stderr || RFILE(orig_stderr)->fptr->fd < 0);
return (rb_ractor_stderr() == orig_stderr || RFILE(orig_stderr)->fptr->fd < 0);
}
void
@ -8019,7 +8025,7 @@ rb_write_error2(const char *mesg, long len)
}
}
else {
rb_io_write(rb_stderr, rb_str_new(mesg, len));
rb_io_write(rb_ractor_stderr(), rb_str_new(mesg, len));
}
}
@ -8047,7 +8053,7 @@ rb_write_error_str(VALUE mesg)
}
else {
/* may unlock GVL, and */
rb_io_write(rb_stderr, mesg);
rb_io_write(rb_ractor_stderr(), mesg);
}
}
@ -8070,10 +8076,41 @@ must_respond_to(ID mid, VALUE val, ID id)
}
static void
stdout_setter(VALUE val, ID id, VALUE *variable)
stdin_setter(VALUE val, ID id, VALUE *ptr)
{
rb_ractor_stdin_set(val);
}
static VALUE
stdin_getter(ID id, VALUE *ptr)
{
return rb_ractor_stdin();
}
static void
stdout_setter(VALUE val, ID id, VALUE *ptr)
{
must_respond_to(id_write, val, id);
*variable = val;
rb_ractor_stdout_set(val);
}
static VALUE
stdout_getter(ID id, VALUE *ptr)
{
return rb_ractor_stdout();
}
static void
stderr_setter(VALUE val, ID id, VALUE *ptr)
{
must_respond_to(id_write, val, id);
rb_ractor_stderr_set(val);
}
static VALUE
stderr_getter(ID id, VALUE *ptr)
{
return rb_ractor_stderr();
}
static VALUE
@ -8125,6 +8162,24 @@ prep_stdio(FILE *f, int fmode, VALUE klass, const char *path)
return io;
}
VALUE
rb_io_prep_stdin(void)
{
return prep_stdio(stdin, FMODE_READABLE, rb_cIO, "<STDIN>");
}
VALUE
rb_io_prep_stdout(void)
{
return prep_stdio(stdout, FMODE_WRITABLE|FMODE_SIGNAL_ON_EPIPE, rb_cIO, "<STDOUT>");
}
VALUE
rb_io_prep_stderr(void)
{
return prep_stdio(stderr, FMODE_WRITABLE|FMODE_SYNC, rb_cIO, "<STDERR>");
}
FILE *
rb_io_stdio_file(rb_io_t *fptr)
{
@ -8707,8 +8762,10 @@ argf_next_argv(VALUE argf)
int stdout_binmode = 0;
int fmode;
if (RB_TYPE_P(rb_stdout, T_FILE)) {
GetOpenFile(rb_stdout, fptr);
VALUE r_stdout = rb_ractor_stdout();
if (RB_TYPE_P(r_stdout, T_FILE)) {
GetOpenFile(r_stdout, fptr);
if (fptr->mode & FMODE_BINMODE)
stdout_binmode = 1;
}
@ -8759,8 +8816,8 @@ argf_next_argv(VALUE argf)
VALUE str;
int fw;
if (RB_TYPE_P(rb_stdout, T_FILE) && rb_stdout != orig_stdout) {
rb_io_close(rb_stdout);
if (RB_TYPE_P(r_stdout, T_FILE) && r_stdout != orig_stdout) {
rb_io_close(r_stdout);
}
fstat(fr, &st);
str = filename;
@ -8829,7 +8886,7 @@ argf_next_argv(VALUE argf)
}
#endif
write_io = prep_io(fw, FMODE_WRITABLE, rb_cFile, fn);
rb_stdout = write_io;
rb_ractor_stdout_set(write_io);
if (stdout_binmode) rb_io_binmode(rb_stdout);
}
fmode = FMODE_READABLE;
@ -8869,7 +8926,7 @@ argf_next_argv(VALUE argf)
ARGF.filename = rb_str_new2("-");
if (ARGF.inplace) {
rb_warn("Can't do inplace edit for stdio");
rb_stdout = orig_stdout;
rb_ractor_stdout_set(orig_stdout);
}
}
if (ARGF.init_p == -1) ARGF.init_p = 1;
@ -13500,13 +13557,24 @@ Init_IO(void)
rb_define_method(rb_cIO, "autoclose?", rb_io_autoclose_p, 0);
rb_define_method(rb_cIO, "autoclose=", rb_io_set_autoclose, 1);
rb_define_variable("$stdin", &rb_stdin);
rb_stdin = prep_stdio(stdin, FMODE_READABLE, rb_cIO, "<STDIN>");
rb_define_hooked_variable("$stdout", &rb_stdout, 0, stdout_setter);
rb_stdout = prep_stdio(stdout, FMODE_WRITABLE|FMODE_SIGNAL_ON_EPIPE, rb_cIO, "<STDOUT>");
rb_define_hooked_variable("$stderr", &rb_stderr, 0, stdout_setter);
rb_stderr = prep_stdio(stderr, FMODE_WRITABLE|FMODE_SYNC, rb_cIO, "<STDERR>");
rb_define_hooked_variable("$>", &rb_stdout, 0, stdout_setter);
rb_define_virtual_variable("$stdin", stdin_getter, stdin_setter);
rb_define_virtual_variable("$stdout", stdout_getter, stdout_setter);
rb_define_virtual_variable("$>", stdout_getter, stdout_setter);
rb_define_virtual_variable("$stderr", stderr_getter, stderr_setter);
rb_gvar_ractor_local("$stdin");
rb_gvar_ractor_local("$stdout");
rb_gvar_ractor_local("$>");
rb_gvar_ractor_local("$stderr");
rb_stdin = rb_io_prep_stdin();
rb_stdout = rb_io_prep_stdout();
rb_stderr = rb_io_prep_stderr();
rb_global_variable(&rb_stdin);
rb_global_variable(&rb_stdout);
rb_global_variable(&rb_stderr);
orig_stdout = rb_stdout;
orig_stderr = rb_stderr;

11 mjit.c
@ -309,8 +309,8 @@ mark_ec_units(rb_execution_context_t *ec)
static void
unload_units(void)
{
rb_vm_t *vm = GET_THREAD()->vm;
rb_thread_t *th = NULL;
//rb_vm_t *vm = GET_THREAD()->vm;
//rb_thread_t *th = NULL;
struct rb_mjit_unit *unit = 0, *next, *worst;
struct mjit_cont *cont;
int delete_num, units_num = active_units.length;
@ -329,9 +329,10 @@ unload_units(void)
assert(unit->iseq != NULL && unit->handle != NULL);
unit->used_code_p = FALSE;
}
list_for_each(&vm->living_threads, th, vmlt_node) {
mark_ec_units(th->ec);
}
// TODO
//list_for_each(&vm->living_threads, th, lt_node) {
// mark_ec_units(th->ec);
//}
for (cont = first_cont; cont != NULL; cont = cont->next) {
mark_ec_units(cont->ec);
}

@ -58,6 +58,7 @@ struct lex_context {
#include "ruby/st.h"
#include "ruby/util.h"
#include "symbol.h"
#include "ractor_pub.h"
#define AREF(ary, i) RARRAY_AREF(ary, i)
@ -10514,8 +10515,8 @@ rb_parser_fatal(struct parser_params *p, const char *fmt, ...)
rb_str_resize(mesg, 0);
append_bitstack_value(p->cmdarg_stack, mesg);
compile_error(p, "cmdarg_stack: %"PRIsVALUE, mesg);
if (p->debug_output == rb_stdout)
p->debug_output = rb_stderr;
if (p->debug_output == rb_ractor_stdout())
p->debug_output = rb_ractor_stderr();
p->debug = TRUE;
}
@ -12554,7 +12555,7 @@ parser_initialize(struct parser_params *p)
p->error_buffer = Qfalse;
#endif
p->debug_buffer = Qnil;
p->debug_output = rb_stdout;
p->debug_output = rb_ractor_stdout();
p->enc = rb_utf8_encoding();
}

@ -112,6 +112,7 @@ int initgroups(const char *, rb_gid_t);
#include "ruby/thread.h"
#include "ruby/util.h"
#include "vm_core.h"
#include "ractor_pub.h"
/* define system APIs */
#ifdef _WIN32
@ -4342,7 +4343,7 @@ rb_f_abort(int argc, const VALUE *argv)
args[1] = args[0] = argv[0];
StringValue(args[0]);
rb_io_puts(1, args, rb_stderr);
rb_io_puts(1, args, rb_ractor_stderr());
args[0] = INT2NUM(EXIT_FAILURE);
rb_exc_raise(rb_class_new_instance(2, args, rb_eSystemExit));
}

1877 ractor.c (new file; diff not shown because of its size)
269 ractor.h (new file)
@ -0,0 +1,269 @@
#include "ruby/ruby.h"
#include "vm_core.h"
#include "id_table.h"
#include "vm_debug.h"
#include "ractor_pub.h"
#ifndef RACTOR_CHECK_MODE
#define RACTOR_CHECK_MODE (0 || VM_CHECK_MODE || RUBY_DEBUG)
#endif
enum rb_ractor_basket_type {
basket_type_none,
basket_type_shareable,
basket_type_copy_marshal,
basket_type_copy_custom,
basket_type_move,
basket_type_exception,
};
struct rb_ractor_basket {
enum rb_ractor_basket_type type;
VALUE v;
VALUE sender;
};
struct rb_ractor_queue {
struct rb_ractor_basket *baskets;
int cnt;
int size;
};
struct rb_ractor_waiting_list {
int cnt;
int size;
rb_ractor_t **ractors;
};
struct rb_ractor_struct {
// ractor lock
rb_nativethread_lock_t lock;
#if RACTOR_CHECK_MODE > 0
VALUE locked_by;
#endif
// communication
struct rb_ractor_queue incoming_queue;
bool incoming_port_closed;
bool outgoing_port_closed;
struct rb_ractor_waiting_list taking_ractors;
struct ractor_wait {
enum ractor_wait_status {
wait_none = 0x00,
wait_recving = 0x01,
wait_taking = 0x02,
wait_yielding = 0x04,
} status;
enum ractor_wakeup_status {
wakeup_none,
wakeup_by_send,
wakeup_by_yield,
wakeup_by_take,
wakeup_by_close,
wakeup_by_interrupt,
wakeup_by_retry,
} wakeup_status;
struct rb_ractor_basket taken_basket;
struct rb_ractor_basket yielded_basket;
rb_nativethread_cond_t cond;
} wait;
// vm wide barrier synchronization
rb_nativethread_cond_t barrier_wait_cond;
// thread management
struct {
struct list_head set;
unsigned int cnt;
unsigned int blocking_cnt;
unsigned int sleeper;
rb_global_vm_lock_t gvl;
rb_execution_context_t *running_ec;
rb_thread_t *main;
} threads;
VALUE thgroup_default;
// identity
VALUE self;
uint32_t id;
VALUE name;
VALUE loc;
// created
// | ready to run
// ====================== inserted to vm->ractor
// v
// blocking <---+ all threads are blocking
// | |
// v |
// running -----+
// | all threads are terminated.
// ====================== removed from vm->ractor
// v
// terminated
//
// status is protected by VM lock (global state)
enum ractor_status {
ractor_created,
ractor_running,
ractor_blocking,
ractor_terminated,
} status_;
struct list_node vmlr_node;
VALUE r_stdin;
VALUE r_stdout;
VALUE r_stderr;
}; // rb_ractor_t is defined in vm_core.h
rb_ractor_t *rb_ractor_main_alloc(void);
void rb_ractor_main_setup(rb_vm_t *vm, rb_ractor_t *main_ractor, rb_thread_t *main_thread);
VALUE rb_ractor_self(const rb_ractor_t *g);
void rb_ractor_atexit(rb_execution_context_t *ec, VALUE result);
void rb_ractor_atexit_exception(rb_execution_context_t *ec);
void rb_ractor_teardown(rb_execution_context_t *ec);
void rb_ractor_recv_parameters(rb_execution_context_t *ec, rb_ractor_t *g, int len, VALUE *ptr);
void rb_ractor_send_parameters(rb_execution_context_t *ec, rb_ractor_t *g, VALUE args);
VALUE rb_thread_create_ractor(rb_ractor_t *g, VALUE args, VALUE proc); // defined in thread.c
rb_global_vm_lock_t *rb_ractor_gvl(rb_ractor_t *);
int rb_ractor_living_thread_num(const rb_ractor_t *);
VALUE rb_ractor_thread_list(rb_ractor_t *r);
void rb_ractor_living_threads_init(rb_ractor_t *r);
void rb_ractor_living_threads_insert(rb_ractor_t *r, rb_thread_t *th);
void rb_ractor_living_threads_remove(rb_ractor_t *r, rb_thread_t *th);
void rb_ractor_blocking_threads_inc(rb_ractor_t *r, const char *file, int line); // TODO: file, line only for RUBY_DEBUG_LOG
void rb_ractor_blocking_threads_dec(rb_ractor_t *r, const char *file, int line); // TODO: file, line only for RUBY_DEBUG_LOG
void rb_ractor_vm_barrier_interrupt_running_thread(rb_ractor_t *r);
void rb_ractor_terminate_interrupt_main_thread(rb_ractor_t *r);
void rb_ractor_terminate_all(void);
static inline bool
rb_ractor_status_p(rb_ractor_t *r, enum ractor_status status)
{
return r->status_ == status;
}
static inline void
rb_ractor_sleeper_threads_inc(rb_ractor_t *r)
{
r->threads.sleeper++;
}
static inline void
rb_ractor_sleeper_threads_dec(rb_ractor_t *r)
{
r->threads.sleeper--;
}
static inline void
rb_ractor_sleeper_threads_clear(rb_ractor_t *r)
{
r->threads.sleeper = 0;
}
static inline int
rb_ractor_sleeper_thread_num(rb_ractor_t *r)
{
return r->threads.sleeper;
}
static inline void
rb_ractor_thread_switch(rb_ractor_t *cr, rb_thread_t *th)
{
if (cr->threads.running_ec != th->ec) {
if (0) fprintf(stderr, "rb_ractor_thread_switch ec:%p->%p\n",
(void *)cr->threads.running_ec, (void *)th->ec);
}
else {
return;
}
if (cr->threads.running_ec != th->ec) {
th->running_time_us = 0;
}
cr->threads.running_ec = th->ec;
VM_ASSERT(cr == GET_RACTOR());
}
static inline void
rb_ractor_set_current_ec(rb_ractor_t *cr, rb_execution_context_t *ec)
{
native_tls_set(ruby_current_ec_key, ec);
if (cr->threads.running_ec != ec) {
if (0) fprintf(stderr, "rb_ractor_set_current_ec ec:%p->%p\n",
(void *)cr->threads.running_ec, (void *)ec);
}
else {
VM_ASSERT(0); // should be different
}
cr->threads.running_ec = ec;
}
void rb_vm_ractor_blocking_cnt_inc(rb_vm_t *vm, rb_ractor_t *cr, const char *file, int line);
void rb_vm_ractor_blocking_cnt_dec(rb_vm_t *vm, rb_ractor_t *cr, const char *file, int line);
uint32_t rb_ractor_id(const rb_ractor_t *r);
#if RACTOR_CHECK_MODE > 0
uint32_t rb_ractor_current_id(void);
static inline void
rb_ractor_setup_belonging_to(VALUE obj, uint32_t rid)
{
VALUE flags = RBASIC(obj)->flags & 0xffffffff; // 4B
RBASIC(obj)->flags = flags | ((VALUE)rid << 32);
}
static inline void
rb_ractor_setup_belonging(VALUE obj)
{
rb_ractor_setup_belonging_to(obj, rb_ractor_current_id());
}
static inline uint32_t
rb_ractor_belonging(VALUE obj)
{
if (rb_ractor_shareable_p(obj)) {
return 0;
}
else {
return RBASIC(obj)->flags >> 32;
}
}
static inline VALUE
rb_ractor_confirm_belonging(VALUE obj)
{
uint32_t id = rb_ractor_belonging(obj);
if (id == 0) {
if (!rb_ractor_shareable_p(obj)) {
rp(obj);
rb_bug("id == 0 but not shareable");
}
}
else if (id != rb_ractor_current_id()) {
rb_bug("rb_ractor_confirm_belonging object-ractor id:%u, current-ractor id:%u", id, rb_ractor_current_id());
}
return obj;
}
#else
#define rb_ractor_confirm_belonging(obj) obj
#endif

162 ractor.rb (new file)
@ -0,0 +1,162 @@
class Ractor
# Create a new Ractor with args and a block.
# args are passed via incoming channel.
# A block (Proc) will be isolated (it can't access outer variables).
#
# A ractor has default two channels:
# an incoming channel and an outgoing channel.
#
# Other ractors send objects to the ractor via the incoming channel and
# the ractor receives them.
# The ractor sends objects via the outgoing channel and other ractors can
# receive them.
#
# The result of the block is sent via the outgoing channel
# and other ractors can receive it as well.
#
# r = Ractor.new do
# Ractor.recv # recv via r's mailbox => 1
# Ractor.recv # recv via r's mailbox => 2
# Ractor.yield 3 # yield a message (3) and wait for taking by another ractor.
# 'ok' # the return value will be yielded.
# # and r's incoming/outgoing ports are closed automatically.
# end
# r.send 1 # send a message (1) into r's mailbox.
# r << 2 # << is an alias of `send`.
# p r.take # take a message from r's outgoing port #=> 3
# p r.take # => 'ok'
# p r.take # raise Ractor::ClosedError
#
# other options:
# name: Ractor's name
#
def self.new *args, name: nil, &block
b = block # TODO: builtin bug
raise ArgumentError, "must be called with a block" unless block
loc = caller_locations(1, 1).first
loc = "#{loc.path}:#{loc.lineno}"
__builtin_ractor_create(loc, name, args, b)
end
# return current Ractor
def self.current
__builtin_cexpr! %q{
rb_ec_ractor_ptr(ec)->self
}
end
def self.count
__builtin_cexpr! %q{
ULONG2NUM(GET_VM()->ractor.cnt);
}
end
# Multiplex multiple Ractor communications.
#
# r, obj = Ractor.select(r1, r2)
# #=> wait for taking from r1 or r2
# # returned obj is a taken object from Ractor r
#
# r, obj = Ractor.select(r1, r2, Ractor.current)
# #=> wait for taking from r1 or r2
# # or recv from incoming queue
# # If recv is succeed, then obj is received value
# # and r is :recv (Ractor.current)
#
# r, obj = Ractor.select(r1, r2, Ractor.current, yield_value: obj)
# #=> wait for taking from r1 or r2
# # or recv from incoming queue
# # or yield (Ractor.yield) obj
# # If yield is succeed, then obj is nil
# # and r is :yield
#
def self.select *ractors, yield_value: yield_unspecified = true, move: false
__builtin_cstmt! %q{
const VALUE *rs = RARRAY_CONST_PTR_TRANSIENT(ractors);
VALUE rv;
VALUE v = ractor_select(ec, rs, RARRAY_LENINT(ractors),
yield_unspecified == Qtrue ? Qundef : yield_value,
(bool)RTEST(move) ? true : false, &rv);
return rb_ary_new_from_args(2, rv, v);
}
end
# Receive an incoming message from Ractor's incoming queue.
def self.recv
__builtin_cexpr! %q{
ractor_recv(ec, rb_ec_ractor_ptr(ec))
}
end
private def recv
__builtin_cexpr! %q{
// TODO: check current actor
ractor_recv(ec, RACTOR_PTR(self))
}
end
# Send a message to a Ractor's incoming queue.
#
# # Example:
# r = Ractor.new do
# p Ractor.recv #=> 'ok'
# end
# r.send 'ok' # send to r's incoming queue.
def send obj, move: false
__builtin_cexpr! %q{
ractor_send(ec, RACTOR_PTR(self), obj, move)
}
end
# yield a message to the ractor's outgoing port.
def self.yield obj, move: false
__builtin_cexpr! %q{
ractor_yield(ec, rb_ec_ractor_ptr(ec), obj, move)
}
end
# Take a message from ractor's outgoing port.
#
# Example:
# r = Ractor.new{ 'ok' }
# p r.take #=> 'ok'
def take
__builtin_cexpr! %q{
ractor_take(ec, RACTOR_PTR(self))
}
end
alias << send
def inspect
loc = __builtin_cexpr! %q{ RACTOR_PTR(self)->loc }
name = __builtin_cexpr! %q{ RACTOR_PTR(self)->name }
id = __builtin_cexpr! %q{ INT2FIX(RACTOR_PTR(self)->id) }
"#<Ractor:##{id}#{name ? ' '+name : ''}#{loc ? " " + loc : ''}>"
end
def name
__builtin_cexpr! %q{ RACTOR_PTR(self)->name }
end
class RemoteError
attr_reader :ractor
end
def close_incoming
__builtin_cexpr! %q{
ractor_close_incoming(ec, RACTOR_PTR(self));
}
end
def close_outgoing
__builtin_cexpr! %q{
ractor_close_outgoing(ec, RACTOR_PTR(self));
}
end
def close
close_incoming
close_outgoing
end
end

33 ractor_pub.h (new file)
@ -0,0 +1,33 @@
int rb_ractor_main_p(void);
bool rb_ractor_shareable_p_continue(VALUE obj);
#define RB_OBJ_SHAREABLE_P(obj) FL_TEST_RAW((obj), RUBY_FL_SHAREABLE)
// TODO: deep frozen
static inline bool
rb_ractor_shareable_p(VALUE obj)
{
if (SPECIAL_CONST_P(obj)) {
return true;
}
else if (RB_OBJ_SHAREABLE_P(obj)) {
return true;
}
else {
return rb_ractor_shareable_p_continue(obj);
}
}
RUBY_SYMBOL_EXPORT_BEGIN
VALUE rb_ractor_stdin(void);
VALUE rb_ractor_stdout(void);
VALUE rb_ractor_stderr(void);
void rb_ractor_stdin_set(VALUE);
void rb_ractor_stdout_set(VALUE);
void rb_ractor_stderr_set(VALUE);
RUBY_SYMBOL_EXPORT_END

@ -8,7 +8,6 @@
* modify this file, provided that the conditions mentioned in the
* file COPYING are met. Consult the file for details.
*/
#include "ruby/assert.h"
#undef assert
#define assert RUBY_ASSERT_NDEBUG

@ -448,7 +448,7 @@ rb_f_kill(int argc, const VALUE *argv)
}
}
else {
const rb_pid_t self = (GET_THREAD() == GET_VM()->main_thread) ? getpid() : -1;
const rb_pid_t self = (GET_THREAD() == GET_VM()->ractor.main_thread) ? getpid() : -1;
int wakeup = 0;
for (i=1; i<argc; i++) {
@ -495,7 +495,7 @@ rb_f_kill(int argc, const VALUE *argv)
}
}
if (wakeup) {
rb_threadptr_check_signal(GET_VM()->main_thread);
rb_threadptr_check_signal(GET_VM()->ractor.main_thread);
}
}
rb_thread_execute_interrupts(rb_thread_current());

575 thread.c (diff not shown because of its size)
@ -122,26 +122,14 @@ static struct {
};
#endif
void rb_native_mutex_lock(rb_nativethread_lock_t *lock);
void rb_native_mutex_unlock(rb_nativethread_lock_t *lock);
static int native_mutex_trylock(rb_nativethread_lock_t *lock);
void rb_native_mutex_initialize(rb_nativethread_lock_t *lock);
void rb_native_mutex_destroy(rb_nativethread_lock_t *lock);
void rb_native_cond_signal(rb_nativethread_cond_t *cond);
void rb_native_cond_broadcast(rb_nativethread_cond_t *cond);
void rb_native_cond_wait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex);
void rb_native_cond_initialize(rb_nativethread_cond_t *cond);
void rb_native_cond_destroy(rb_nativethread_cond_t *cond);
static void clear_thread_cache_altstack(void);
static void ubf_wakeup_all_threads(void);
static int ubf_threads_empty(void);
static int native_cond_timedwait(rb_nativethread_cond_t *, pthread_mutex_t *,
const rb_hrtime_t *abs);
static const rb_hrtime_t *sigwait_timeout(rb_thread_t *, int sigwait_fd,
const rb_hrtime_t *,
int *drained_p);
static void ubf_timer_disarm(void);
static void threadptr_trap_interrupt(rb_thread_t *);
static void clear_thread_cache_altstack(void);
static void ubf_wakeup_all_threads(void);
static int ubf_threads_empty(void);
#define TIMER_THREAD_CREATED_P() (signal_self_pipe.owner_process == getpid())
@ -180,17 +168,18 @@ static const void *const condattr_monotonic = NULL;
#define TIME_QUANTUM_NSEC (TIME_QUANTUM_USEC * 1000)
static rb_hrtime_t native_cond_timeout(rb_nativethread_cond_t *, rb_hrtime_t);
static int native_cond_timedwait(rb_nativethread_cond_t *cond, pthread_mutex_t *mutex, const rb_hrtime_t *abs);
/*
* Designate the next gvl.timer thread, favor the last thread in
* the waitq since it will be in waitq longest
*/
static int
designate_timer_thread(rb_vm_t *vm)
designate_timer_thread(rb_global_vm_lock_t *gvl)
{
native_thread_data_t *last;
last = list_tail(&vm->gvl.waitq, native_thread_data_t, node.ubf);
last = list_tail(&gvl->waitq, native_thread_data_t, node.ubf);
if (last) {
rb_native_cond_signal(&last->cond.gvlq);
return TRUE;
@ -203,29 +192,30 @@ designate_timer_thread(rb_vm_t *vm)
* periodically. Continue on old timeout if it expired.
*/
static void
do_gvl_timer(rb_vm_t *vm, rb_thread_t *th)
do_gvl_timer(rb_global_vm_lock_t *gvl, rb_thread_t *th)
{
static rb_hrtime_t abs;
native_thread_data_t *nd = &th->native_thread_data;
vm->gvl.timer = th;
gvl->timer = th;
/* take over wakeups from UBF_TIMER */
ubf_timer_disarm();
if (vm->gvl.timer_err == ETIMEDOUT) {
if (gvl->timer_err == ETIMEDOUT) {
abs = native_cond_timeout(&nd->cond.gvlq, TIME_QUANTUM_NSEC);
}
vm->gvl.timer_err = native_cond_timedwait(&nd->cond.gvlq, &vm->gvl.lock, &abs);
gvl->timer_err = native_cond_timedwait(&nd->cond.gvlq, &gvl->lock, &abs);
ubf_wakeup_all_threads();
ruby_sigchld_handler(vm);
ruby_sigchld_handler(GET_VM());
if (UNLIKELY(rb_signal_buff_size())) {
if (th == vm->main_thread) {
if (th == GET_VM()->ractor.main_thread) {
RUBY_VM_SET_TRAP_INTERRUPT(th->ec);
}
else {
threadptr_trap_interrupt(vm->main_thread);
threadptr_trap_interrupt(GET_VM()->ractor.main_thread);
}
}
@ -233,77 +223,77 @@ do_gvl_timer(rb_vm_t *vm, rb_thread_t *th)
* Timeslice. Warning: the process may fork while this
* thread is contending for GVL:
*/
if (vm->gvl.owner) timer_thread_function();
vm->gvl.timer = 0;
if (gvl->owner) timer_thread_function(gvl->owner->ec);
gvl->timer = 0;
}
static void
gvl_acquire_common(rb_vm_t *vm, rb_thread_t *th)
gvl_acquire_common(rb_global_vm_lock_t *gvl, rb_thread_t *th)
{
if (vm->gvl.owner) {
if (gvl->owner) {
native_thread_data_t *nd = &th->native_thread_data;
VM_ASSERT(th->unblock.func == 0 &&
"we must not be in ubf_list and GVL waitq at the same time");
list_add_tail(&vm->gvl.waitq, &nd->node.gvl);
list_add_tail(&gvl->waitq, &nd->node.gvl);
do {
if (!vm->gvl.timer) {
do_gvl_timer(vm, th);
if (!gvl->timer) {
do_gvl_timer(gvl, th);
}
else {
rb_native_cond_wait(&nd->cond.gvlq, &vm->gvl.lock);
rb_native_cond_wait(&nd->cond.gvlq, &gvl->lock);
}
} while (vm->gvl.owner);
} while (gvl->owner);
list_del_init(&nd->node.gvl);
if (vm->gvl.need_yield) {
vm->gvl.need_yield = 0;
rb_native_cond_signal(&vm->gvl.switch_cond);
if (gvl->need_yield) {
gvl->need_yield = 0;
rb_native_cond_signal(&gvl->switch_cond);
}
}
else { /* reset timer if uncontended */
vm->gvl.timer_err = ETIMEDOUT;
gvl->timer_err = ETIMEDOUT;
}
vm->gvl.owner = th;
if (!vm->gvl.timer) {
if (!designate_timer_thread(vm) && !ubf_threads_empty()) {
gvl->owner = th;
if (!gvl->timer) {
if (!designate_timer_thread(gvl) && !ubf_threads_empty()) {
rb_thread_wakeup_timer_thread(-1);
}
}
}
static void
gvl_acquire(rb_vm_t *vm, rb_thread_t *th)
gvl_acquire(rb_global_vm_lock_t *gvl, rb_thread_t *th)
{
rb_native_mutex_lock(&vm->gvl.lock);
gvl_acquire_common(vm, th);
rb_native_mutex_unlock(&vm->gvl.lock);
rb_native_mutex_lock(&gvl->lock);
gvl_acquire_common(gvl, th);
rb_native_mutex_unlock(&gvl->lock);
}
static const native_thread_data_t *
gvl_release_common(rb_vm_t *vm)
gvl_release_common(rb_global_vm_lock_t *gvl)
{
native_thread_data_t *next;
vm->gvl.owner = 0;
next = list_top(&vm->gvl.waitq, native_thread_data_t, node.ubf);
gvl->owner = 0;
next = list_top(&gvl->waitq, native_thread_data_t, node.ubf);
if (next) rb_native_cond_signal(&next->cond.gvlq);
return next;
}
static void
gvl_release(rb_vm_t *vm)
gvl_release(rb_global_vm_lock_t *gvl)
{
rb_native_mutex_lock(&vm->gvl.lock);
gvl_release_common(vm);
rb_native_mutex_unlock(&vm->gvl.lock);
rb_native_mutex_lock(&gvl->lock);
gvl_release_common(gvl);
rb_native_mutex_unlock(&gvl->lock);
}
static void
gvl_yield(rb_vm_t *vm, rb_thread_t *th)
gvl_yield(rb_global_vm_lock_t *gvl, rb_thread_t *th)
{
const native_thread_data_t *next;
@ -312,49 +302,49 @@ gvl_yield(rb_vm_t *vm, rb_thread_t *th)
* (perhaps looping in io_close_fptr) so we kick them:
*/
ubf_wakeup_all_threads();
rb_native_mutex_lock(&vm->gvl.lock);
next = gvl_release_common(vm);
rb_native_mutex_lock(&gvl->lock);
next = gvl_release_common(gvl);
/* An another thread is processing GVL yield. */
if (UNLIKELY(vm->gvl.wait_yield)) {
while (vm->gvl.wait_yield)
rb_native_cond_wait(&vm->gvl.switch_wait_cond, &vm->gvl.lock);
if (UNLIKELY(gvl->wait_yield)) {
while (gvl->wait_yield)
rb_native_cond_wait(&gvl->switch_wait_cond, &gvl->lock);
}
else if (next) {
/* Wait until another thread task takes GVL. */
vm->gvl.need_yield = 1;
vm->gvl.wait_yield = 1;
while (vm->gvl.need_yield)
rb_native_cond_wait(&vm->gvl.switch_cond, &vm->gvl.lock);
vm->gvl.wait_yield = 0;
rb_native_cond_broadcast(&vm->gvl.switch_wait_cond);
gvl->need_yield = 1;
gvl->wait_yield = 1;
while (gvl->need_yield)
rb_native_cond_wait(&gvl->switch_cond, &gvl->lock);
gvl->wait_yield = 0;
rb_native_cond_broadcast(&gvl->switch_wait_cond);
}
else {
rb_native_mutex_unlock(&vm->gvl.lock);
rb_native_mutex_unlock(&gvl->lock);
native_thread_yield();
rb_native_mutex_lock(&vm->gvl.lock);
rb_native_cond_broadcast(&vm->gvl.switch_wait_cond);
rb_native_mutex_lock(&gvl->lock);
rb_native_cond_broadcast(&gvl->switch_wait_cond);
}
gvl_acquire_common(vm, th);
rb_native_mutex_unlock(&vm->gvl.lock);
gvl_acquire_common(gvl, th);
rb_native_mutex_unlock(&gvl->lock);
}
static void
gvl_init(rb_vm_t *vm)
void
rb_gvl_init(rb_global_vm_lock_t *gvl)
{
rb_native_mutex_initialize(&vm->gvl.lock);
rb_native_cond_initialize(&vm->gvl.switch_cond);
rb_native_cond_initialize(&vm->gvl.switch_wait_cond);
list_head_init(&vm->gvl.waitq);
vm->gvl.owner = 0;
vm->gvl.timer = 0;
vm->gvl.timer_err = ETIMEDOUT;
vm->gvl.need_yield = 0;
vm->gvl.wait_yield = 0;
rb_native_mutex_initialize(&gvl->lock);
rb_native_cond_initialize(&gvl->switch_cond);
rb_native_cond_initialize(&gvl->switch_wait_cond);
list_head_init(&gvl->waitq);
gvl->owner = 0;
gvl->timer = 0;
gvl->timer_err = ETIMEDOUT;
gvl->need_yield = 0;
gvl->wait_yield = 0;
}
static void
gvl_destroy(rb_vm_t *vm)
gvl_destroy(rb_global_vm_lock_t *gvl)
{
/*
* only called once at VM shutdown (not atfork), another thread
@ -362,9 +352,9 @@ gvl_destroy(rb_vm_t *vm)
* the end of thread_start_func_2
*/
if (0) {
rb_native_cond_destroy(&vm->gvl.switch_wait_cond);
rb_native_cond_destroy(&vm->gvl.switch_cond);
rb_native_mutex_destroy(&vm->gvl.lock);
rb_native_cond_destroy(&gvl->switch_wait_cond);
rb_native_cond_destroy(&gvl->switch_cond);
rb_native_mutex_destroy(&gvl->lock);
}
clear_thread_cache_altstack();
}
@ -372,11 +362,11 @@ gvl_destroy(rb_vm_t *vm)
#if defined(HAVE_WORKING_FORK)
static void thread_cache_reset(void);
static void
gvl_atfork(rb_vm_t *vm)
gvl_atfork(rb_global_vm_lock_t *gvl)
{
thread_cache_reset();
gvl_init(vm);
gvl_acquire(vm, GET_THREAD());
rb_gvl_init(gvl);
gvl_acquire(gvl, GET_THREAD());
}
#endif
@ -415,8 +405,8 @@ rb_native_mutex_unlock(pthread_mutex_t *lock)
}
}
static inline int
native_mutex_trylock(pthread_mutex_t *lock)
int
rb_native_mutex_trylock(pthread_mutex_t *lock)
{
int r;
mutex_debug("trylock", lock);
@ -513,8 +503,7 @@ rb_native_cond_wait(rb_nativethread_cond_t *cond, pthread_mutex_t *mutex)
}
static int
native_cond_timedwait(rb_nativethread_cond_t *cond, pthread_mutex_t *mutex,
const rb_hrtime_t *abs)
native_cond_timedwait(rb_nativethread_cond_t *cond, pthread_mutex_t *mutex, const rb_hrtime_t *abs)
{
int r;
struct timespec ts;
@ -526,16 +515,24 @@ native_cond_timedwait(rb_nativethread_cond_t *cond, pthread_mutex_t *mutex,
* Let's hide it from arch generic code.
*/
do {
r = pthread_cond_timedwait(cond, mutex, rb_hrtime2timespec(&ts, abs));
rb_hrtime2timespec(&ts, abs);
r = pthread_cond_timedwait(cond, mutex, &ts);
} while (r == EINTR);
if (r != 0 && r != ETIMEDOUT) {
rb_bug_errno("pthread_cond_timedwait", r);
rb_bug_errno("pthread_cond_timedwait", r);
}
return r;
}
void
rb_native_cond_timedwait(rb_nativethread_cond_t *cond, pthread_mutex_t *mutex, unsigned long msec)
{
rb_hrtime_t hrmsec = native_cond_timeout(cond, RB_HRTIME_PER_MSEC * msec);
native_cond_timedwait(cond, mutex, &hrmsec);
}
static rb_hrtime_t
native_cond_timeout(rb_nativethread_cond_t *cond, const rb_hrtime_t rel)
{
@ -570,6 +567,9 @@ ruby_thread_from_native(void)
static int
ruby_thread_set_native(rb_thread_t *th)
{
if (th && th->ec) {
rb_ractor_set_current_ec(th->ractor, th->ec);
}
return pthread_setspecific(ruby_native_thread_key, th) == 0;
}
@ -587,8 +587,14 @@ Init_native_thread(rb_thread_t *th)
if (r) condattr_monotonic = NULL;
}
#endif
pthread_key_create(&ruby_native_thread_key, 0);
if (pthread_key_create(&ruby_native_thread_key, 0) == EAGAIN) {
rb_bug("pthread_key_create failed (ruby_native_thread_key)");
}
if (pthread_key_create(&ruby_current_ec_key, 0) == EAGAIN) {
rb_bug("pthread_key_create failed (ruby_current_ec_key)");
}
th->thread_id = pthread_self();
ruby_thread_set_native(th);
fill_thread_id_str(th);
native_thread_init(th);
posix_signal(SIGVTALRM, null_func);
@ -605,7 +611,6 @@ native_thread_init(rb_thread_t *th)
rb_native_cond_initialize(&nd->cond.gvlq);
if (&nd->cond.gvlq != &nd->cond.intr)
rb_native_cond_initialize(&nd->cond.intr);
ruby_thread_set_native(th);
}
#ifndef USE_THREAD_CACHE
@ -1116,7 +1121,7 @@ native_thread_create(rb_thread_t *th)
# endif
CHECK_ERR(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED));
err = pthread_create(&th->thread_id, &attr, thread_start_func_1, th);
err = pthread_create(&th->thread_id, &attr, thread_start_func_1, th);
thread_debug("create: %p (%d)\n", (void *)th, err);
/* should be done in the created thread */
fill_thread_id_str(th);
@ -1207,7 +1212,7 @@ native_cond_sleep(rb_thread_t *th, rb_hrtime_t *rel)
}
end = native_cond_timeout(cond, *rel);
native_cond_timedwait(cond, lock, &end);
native_cond_timedwait(cond, lock, &end);
}
}
th->unblock.func = 0;
@ -1277,7 +1282,7 @@ static void
ubf_select(void *ptr)
{
rb_thread_t *th = (rb_thread_t *)ptr;
rb_vm_t *vm = th->vm;
rb_global_vm_lock_t *gvl = rb_ractor_gvl(th->ractor);
const rb_thread_t *cur = ruby_thread_from_native(); /* may be 0 */
register_ubf_list(th);
@ -1292,17 +1297,17 @@ ubf_select(void *ptr)
* sigwait_th thread, otherwise we can deadlock with a thread
* in unblock_function_clear.
*/
if (cur != vm->gvl.timer && cur != sigwait_th) {
if (cur != gvl->timer && cur != sigwait_th) {
/*
* Double-checked locking above was to prevent nested locking
* by the SAME thread. We use trylock here to prevent deadlocks
* between DIFFERENT threads
*/
if (native_mutex_trylock(&vm->gvl.lock) == 0) {
if (!vm->gvl.timer) {
if (rb_native_mutex_trylock(&gvl->lock) == 0) {
if (!gvl->timer) {
rb_thread_wakeup_timer_thread(-1);
}
rb_native_mutex_unlock(&vm->gvl.lock);
rb_native_mutex_unlock(&gvl->lock);
}
}
@ -1471,7 +1476,7 @@ rb_thread_wakeup_timer_thread(int sig)
* on heap for maximum safety (and startup/shutdown speed)
*/
if (!vm) return;
mth = vm->main_thread;
mth = vm->ractor.main_thread;
if (!mth || system_working <= 0) return;
/* this relies on GC for grace period before cont_free */
@ -2063,12 +2068,12 @@ ubf_ppoll_sleep(void *ignore)
*/
#define GVL_UNLOCK_BEGIN_YIELD(th) do { \
const native_thread_data_t *next; \
rb_vm_t *vm = th->vm; \
rb_global_vm_lock_t *gvl = rb_ractor_gvl(th->ractor); \
RB_GC_SAVE_MACHINE_CONTEXT(th); \
rb_native_mutex_lock(&vm->gvl.lock); \
next = gvl_release_common(vm); \
rb_native_mutex_unlock(&vm->gvl.lock); \
if (!next && vm_living_thread_num(vm) > 1) { \
rb_native_mutex_lock(&gvl->lock); \
next = gvl_release_common(gvl); \
rb_native_mutex_unlock(&gvl->lock); \
if (!next && rb_ractor_living_thread_num(th->ractor) > 1) { \
native_thread_yield(); \
}
@ -2117,6 +2122,7 @@ static void
native_sleep(rb_thread_t *th, rb_hrtime_t *rel)
{
int sigwait_fd = rb_sigwait_fd_get(th);
rb_ractor_blocking_threads_inc(th->ractor, __FILE__, __LINE__);
if (sigwait_fd >= 0) {
rb_native_mutex_lock(&th->interrupt_lock);
@ -2136,12 +2142,14 @@ native_sleep(rb_thread_t *th, rb_hrtime_t *rel)
rb_sigwait_fd_put(th, sigwait_fd);
rb_sigwait_fd_migrate(th->vm);
}
else if (th == th->vm->main_thread) { /* always able to handle signals */
else if (th == th->vm->ractor.main_thread) { /* always able to handle signals */
native_ppoll_sleep(th, rel);
}
else {
native_cond_sleep(th, rel);
}
rb_ractor_blocking_threads_dec(th->ractor, __FILE__, __LINE__);
}
#if UBF_TIMER == UBF_TIMER_PTHREAD
@ -2149,7 +2157,7 @@ static void *
timer_pthread_fn(void *p)
{
rb_vm_t *vm = p;
pthread_t main_thread_id = vm->main_thread->thread_id;
pthread_t main_thread_id = vm->ractor.main_thread->thread_id;
struct pollfd pfd;
int timeout = -1;
int ccp;


@ -39,6 +39,18 @@ typedef struct native_thread_data_struct {
} cond;
} native_thread_data_t;
void rb_native_mutex_lock(rb_nativethread_lock_t *lock);
int rb_native_mutex_trylock(rb_nativethread_lock_t *lock);
void rb_native_mutex_unlock(rb_nativethread_lock_t *lock);
void rb_native_mutex_initialize(rb_nativethread_lock_t *lock);
void rb_native_mutex_destroy(rb_nativethread_lock_t *lock);
void rb_native_cond_signal(rb_nativethread_cond_t *cond);
void rb_native_cond_broadcast(rb_nativethread_cond_t *cond);
void rb_native_cond_wait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex);
void rb_native_cond_timedwait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex, unsigned long msec);
void rb_native_cond_initialize(rb_nativethread_cond_t *cond);
void rb_native_cond_destroy(rb_nativethread_cond_t *cond);
#undef except
#undef try
#undef leave
@ -71,4 +83,24 @@ typedef struct rb_global_vm_lock_struct {
int wait_yield;
} rb_global_vm_lock_t;
typedef pthread_key_t native_tls_key_t;
static inline void *
native_tls_get(native_tls_key_t key)
{
void *ptr = pthread_getspecific(key);
if (UNLIKELY(ptr == NULL)) {
rb_bug("pthread_getspecific returns NULL");
}
return ptr;
}
static inline void
native_tls_set(native_tls_key_t key, void *ptr)
{
if (UNLIKELY(pthread_setspecific(key, ptr) != 0)) {
rb_bug("pthread_setspecific error");
}
}
#endif /* RUBY_THREAD_PTHREAD_H */


@ -264,13 +264,13 @@ do_mutex_lock(VALUE self, int interruptible_p)
th->status = THREAD_STOPPED_FOREVER;
th->locking_mutex = self;
th->vm->sleeper++;
rb_ractor_sleeper_threads_inc(th->ractor);
/*
* Carefully! while some contended threads are in native_sleep(),
* vm->sleeper is unstable value. we have to avoid both deadlock
* ractor->sleeper is unstable value. we have to avoid both deadlock
* and busy loop.
*/
if ((vm_living_thread_num(th->vm) == th->vm->sleeper) &&
if ((rb_ractor_living_thread_num(th->ractor) == rb_ractor_sleeper_thread_num(th->ractor)) &&
!patrol_thread) {
timeout = &rel;
patrol_thread = th;
@ -289,17 +289,18 @@ do_mutex_lock(VALUE self, int interruptible_p)
th->locking_mutex = Qfalse;
if (mutex->th && timeout && !RUBY_VM_INTERRUPTED(th->ec)) {
rb_check_deadlock(th->vm);
rb_check_deadlock(th->ractor);
}
if (th->status == THREAD_STOPPED_FOREVER) {
th->status = prev_status;
}
th->vm->sleeper--;
rb_ractor_sleeper_threads_dec(th->ractor);
if (interruptible_p) {
/* release mutex before checking for interrupts...as interrupt checking
* code might call rb_raise() */
if (mutex->th == th) mutex->th = 0;
RUBY_VM_CHECK_INTS_BLOCKING(th->ec); /* may release mutex */
if (!mutex->th) {
mutex->th = th;


@ -28,8 +28,6 @@
static volatile DWORD ruby_native_thread_key = TLS_OUT_OF_INDEXES;
static int w32_wait_events(HANDLE *events, int count, DWORD timeout, rb_thread_t *th);
void rb_native_mutex_lock(rb_nativethread_lock_t *lock);
void rb_native_mutex_unlock(rb_nativethread_lock_t *lock);
static void
w32_error(const char *func)
@ -97,38 +95,38 @@ w32_mutex_create(void)
#define GVL_DEBUG 0
static void
gvl_acquire(rb_vm_t *vm, rb_thread_t *th)
gvl_acquire(rb_global_vm_lock_t *gvl, rb_thread_t *th)
{
w32_mutex_lock(vm->gvl.lock);
w32_mutex_lock(gvl->lock);
if (GVL_DEBUG) fprintf(stderr, "gvl acquire (%p): acquire\n", th);
}
static void
gvl_release(rb_vm_t *vm)
gvl_release(rb_global_vm_lock_t *gvl)
{
ReleaseMutex(vm->gvl.lock);
ReleaseMutex(gvl->lock);
}
static void
gvl_yield(rb_vm_t *vm, rb_thread_t *th)
gvl_yield(rb_global_vm_lock_t *gvl, rb_thread_t *th)
{
gvl_release(th->vm);
gvl_release(gvl);
native_thread_yield();
gvl_acquire(vm, th);
gvl_acquire(gvl, th);
}
static void
gvl_init(rb_vm_t *vm)
void
rb_gvl_init(rb_global_vm_lock_t *gvl)
{
if (GVL_DEBUG) fprintf(stderr, "gvl init\n");
vm->gvl.lock = w32_mutex_create();
gvl->lock = w32_mutex_create();
}
static void
gvl_destroy(rb_vm_t *vm)
gvl_destroy(rb_global_vm_lock_t *gvl)
{
if (GVL_DEBUG) fprintf(stderr, "gvl destroy\n");
CloseHandle(vm->gvl.lock);
CloseHandle(gvl->lock);
}
static rb_thread_t *
@ -140,13 +138,21 @@ ruby_thread_from_native(void)
static int
ruby_thread_set_native(rb_thread_t *th)
{
if (th && th->ec) {
rb_ractor_set_current_ec(th->ractor, th->ec);
}
return TlsSetValue(ruby_native_thread_key, th);
}
void
Init_native_thread(rb_thread_t *th)
{
ruby_native_thread_key = TlsAlloc();
if ((ruby_current_ec_key = TlsAlloc()) == TLS_OUT_OF_INDEXES) {
rb_bug("TlsAlloc() for ruby_current_ec_key fails");
}
if ((ruby_native_thread_key = TlsAlloc()) == TLS_OUT_OF_INDEXES) {
rb_bug("TlsAlloc() for ruby_native_thread_key fails");
}
ruby_thread_set_native(th);
DuplicateHandle(GetCurrentProcess(),
GetCurrentThread(),
@ -458,7 +464,6 @@ rb_native_cond_wait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex)
native_cond_timedwait_ms(cond, mutex, INFINITE);
}
#if 0
static unsigned long
abs_timespec_to_timeout_ms(const struct timespec *ts)
{
@ -487,6 +492,19 @@ native_cond_timedwait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mute
return native_cond_timedwait_ms(cond, mutex, timeout_ms);
}
static struct timespec native_cond_timeout(rb_nativethread_cond_t *cond, struct timespec timeout_rel);
void
rb_native_cond_timedwait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex, unsigned long msec)
{
struct timespec rel = {
.tv_sec = msec / 1000,
.tv_nsec = (msec % 1000) * 1000 * 1000,
};
struct timespec ts = native_cond_timeout(cond, rel);
native_cond_timedwait(cond, mutex, &ts);
}
static struct timespec
native_cond_timeout(rb_nativethread_cond_t *cond, struct timespec timeout_rel)
{
@ -516,7 +534,6 @@ native_cond_timeout(rb_nativethread_cond_t *cond, struct timespec timeout_rel)
return timeout;
}
#endif
void
rb_native_cond_initialize(rb_nativethread_cond_t *cond)
@ -694,9 +711,13 @@ timer_thread_func(void *dummy)
rb_w32_set_thread_description(GetCurrentThread(), L"ruby-timer-thread");
while (WaitForSingleObject(timer_thread.lock, TIME_QUANTUM_USEC/1000) ==
WAIT_TIMEOUT) {
timer_thread_function();
rb_execution_context_t *running_ec = vm->ractor.main_ractor->threads.running_ec;
if (running_ec) {
timer_thread_function(running_ec);
}
ruby_sigchld_handler(vm); /* probably no-op */
rb_threadptr_check_signal(vm->main_thread);
rb_threadptr_check_signal(vm->ractor.main_thread);
}
thread_debug("timer killed\n");
return 0;


@ -32,4 +32,35 @@ typedef struct rb_global_vm_lock_struct {
HANDLE lock;
} rb_global_vm_lock_t;
typedef DWORD native_tls_key_t; // TLS index
static inline void *
native_tls_get(native_tls_key_t key)
{
void *ptr = TlsGetValue(key);
if (UNLIKELY(ptr == NULL)) {
rb_bug("TlsGetValue() returns NULL");
}
return ptr;
}
static inline void
native_tls_set(native_tls_key_t key, void *ptr)
{
if (UNLIKELY(TlsSetValue(key, ptr) == 0)) {
rb_bug("TlsSetValue() error");
}
}
void rb_native_mutex_lock(rb_nativethread_lock_t *lock);
void rb_native_mutex_unlock(rb_nativethread_lock_t *lock);
void rb_native_mutex_initialize(rb_nativethread_lock_t *lock);
void rb_native_mutex_destroy(rb_nativethread_lock_t *lock);
void rb_native_cond_signal(rb_nativethread_cond_t *cond);
void rb_native_cond_broadcast(rb_nativethread_cond_t *cond);
void rb_native_cond_wait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex);
void rb_native_cond_timedwait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex, unsigned long msec);
void rb_native_cond_initialize(rb_nativethread_cond_t *cond);
void rb_native_cond_destroy(rb_nativethread_cond_t *cond);
#endif /* RUBY_THREAD_WIN32_H */


@ -82,7 +82,7 @@
% # JIT: cache hit path of vm_getivar, or cancel JIT (recompile it without any ivar optimization)
fprintf(f, " struct gen_ivtbl *ivtbl;\n");
fprintf(f, " VALUE val;\n");
fprintf(f, " if (LIKELY(FL_TEST_RAW(obj, FL_EXIVAR) && ic_serial == RCLASS_SERIAL(RBASIC(obj)->klass) && st_lookup(rb_ivar_generic_ivtbl(), (st_data_t)obj, (st_data_t *)&ivtbl) && index < ivtbl->numiv && (val = ivtbl->ivptr[index]) != Qundef)) {\n");
fprintf(f, " if (LIKELY(FL_TEST_RAW(obj, FL_EXIVAR) && ic_serial == RCLASS_SERIAL(RBASIC(obj)->klass) && st_lookup(rb_ivar_generic_ivtbl(obj), (st_data_t)obj, (st_data_t *)&ivtbl) && index < ivtbl->numiv && (val = ivtbl->ivptr[index]) != Qundef)) {\n");
fprintf(f, " stack[%d] = val;\n", b->stack_size);
fprintf(f, " }\n");
fprintf(f, " else {\n");


@ -20,6 +20,7 @@
#include "ruby_assert.h"
#include "transient_heap.h"
#include "vm_debug.h"
#include "vm_sync.h"
#if USE_TRANSIENT_HEAP /* USE_TRANSIENT_HEAP */
/*
@ -364,68 +365,76 @@ transient_heap_allocatable_header(struct transient_heap* theap, size_t size)
void *
rb_transient_heap_alloc(VALUE obj, size_t req_size)
{
struct transient_heap* theap = transient_heap_get();
size_t size = ROUND_UP(req_size + sizeof(struct transient_alloc_header), TRANSIENT_HEAP_ALLOC_ALIGN);
void *ret;
TH_ASSERT(RB_TYPE_P(obj, T_ARRAY) ||
RB_TYPE_P(obj, T_OBJECT) ||
RB_TYPE_P(obj, T_STRUCT) ||
RB_TYPE_P(obj, T_HASH)); /* supported types */
RB_VM_LOCK_ENTER();
{
struct transient_heap* theap = transient_heap_get();
size_t size = ROUND_UP(req_size + sizeof(struct transient_alloc_header), TRANSIENT_HEAP_ALLOC_ALIGN);
if (size > TRANSIENT_HEAP_ALLOC_MAX) {
if (TRANSIENT_HEAP_DEBUG >= 3) fprintf(stderr, "rb_transient_heap_alloc: [too big: %ld] %s\n", (long)size, rb_obj_info(obj));
return NULL;
}
TH_ASSERT(RB_TYPE_P(obj, T_ARRAY) ||
RB_TYPE_P(obj, T_OBJECT) ||
RB_TYPE_P(obj, T_STRUCT) ||
RB_TYPE_P(obj, T_HASH)); /* supported types */
if (size > TRANSIENT_HEAP_ALLOC_MAX) {
if (TRANSIENT_HEAP_DEBUG >= 3) fprintf(stderr, "rb_transient_heap_alloc: [too big: %ld] %s\n", (long)size, rb_obj_info(obj));
ret = NULL;
}
#if TRANSIENT_HEAP_DEBUG_DONT_PROMOTE == 0
else if (RB_OBJ_PROMOTED_RAW(obj)) {
if (TRANSIENT_HEAP_DEBUG >= 3) fprintf(stderr, "rb_transient_heap_alloc: [promoted object] %s\n", rb_obj_info(obj));
return NULL;
}
else if (RB_OBJ_PROMOTED_RAW(obj)) {
if (TRANSIENT_HEAP_DEBUG >= 3) fprintf(stderr, "rb_transient_heap_alloc: [promoted object] %s\n", rb_obj_info(obj));
ret = NULL;
}
#else
else if (RBASIC_CLASS(obj) == 0) {
if (TRANSIENT_HEAP_DEBUG >= 3) fprintf(stderr, "rb_transient_heap_alloc: [hidden object] %s\n", rb_obj_info(obj));
return NULL;
}
else if (RBASIC_CLASS(obj) == 0) {
if (TRANSIENT_HEAP_DEBUG >= 3) fprintf(stderr, "rb_transient_heap_alloc: [hidden object] %s\n", rb_obj_info(obj));
ret = NULL;
}
#endif
else {
struct transient_alloc_header *header = transient_heap_allocatable_header(theap, size);
if (header) {
void *ptr;
else {
struct transient_alloc_header *header = transient_heap_allocatable_header(theap, size);
if (header) {
void *ptr;
/* header is poisoned to prevent buffer overflow, should
* unpoison first... */
asan_unpoison_memory_region(header, sizeof *header, true);
/* header is poisoned to prevent buffer overflow, should
* unpoison first... */
asan_unpoison_memory_region(header, sizeof *header, true);
header->size = size;
header->magic = TRANSIENT_HEAP_ALLOC_MAGIC;
header->next_marked_index = TRANSIENT_HEAP_ALLOC_MARKING_FREE;
header->obj = obj; /* TODO: can we eliminate it? */
header->size = size;
header->magic = TRANSIENT_HEAP_ALLOC_MAGIC;
header->next_marked_index = TRANSIENT_HEAP_ALLOC_MARKING_FREE;
header->obj = obj; /* TODO: can we eliminate it? */
/* header is fixed; shall poison again */
asan_poison_memory_region(header, sizeof *header);
ptr = header + 1;
/* header is fixed; shall poison again */
asan_poison_memory_region(header, sizeof *header);
ptr = header + 1;
theap->total_objects++; /* statistics */
theap->total_objects++; /* statistics */
#if TRANSIENT_HEAP_DEBUG_DONT_PROMOTE
if (RB_OBJ_PROMOTED_RAW(obj)) {
transient_heap_promote_add(theap, obj);
}
if (RB_OBJ_PROMOTED_RAW(obj)) {
transient_heap_promote_add(theap, obj);
}
#endif
if (TRANSIENT_HEAP_DEBUG >= 3) fprintf(stderr, "rb_transient_heap_alloc: header:%p ptr:%p size:%d obj:%s\n", (void *)header, ptr, (int)size, rb_obj_info(obj));
if (TRANSIENT_HEAP_DEBUG >= 3) fprintf(stderr, "rb_transient_heap_alloc: header:%p ptr:%p size:%d obj:%s\n", (void *)header, ptr, (int)size, rb_obj_info(obj));
RB_DEBUG_COUNTER_INC(theap_alloc);
RB_DEBUG_COUNTER_INC(theap_alloc);
/* ptr is set up; OK to unpoison. */
asan_unpoison_memory_region(ptr, size - sizeof *header, true);
return ptr;
}
else {
if (TRANSIENT_HEAP_DEBUG >= 3) fprintf(stderr, "rb_transient_heap_alloc: [no enough space: %ld] %s\n", (long)size, rb_obj_info(obj));
RB_DEBUG_COUNTER_INC(theap_alloc_fail);
return NULL;
/* ptr is set up; OK to unpoison. */
asan_unpoison_memory_region(ptr, size - sizeof *header, true);
ret = ptr;
}
else {
if (TRANSIENT_HEAP_DEBUG >= 3) fprintf(stderr, "rb_transient_heap_alloc: [no enough space: %ld] %s\n", (long)size, rb_obj_info(obj));
RB_DEBUG_COUNTER_INC(theap_alloc_fail);
ret = NULL;
}
}
}
RB_VM_LOCK_LEAVE();
return ret;
}
void
@ -534,6 +543,8 @@ alloc_header_to_block(struct transient_heap *theap, struct transient_alloc_heade
void
rb_transient_heap_mark(VALUE obj, const void *ptr)
{
ASSERT_vm_locking();
struct transient_alloc_header *header = ptr_to_alloc_header(ptr);
asan_unpoison_memory_region(header, sizeof *header, false);
if (header->magic != TRANSIENT_HEAP_ALLOC_MAGIC) rb_bug("rb_transient_heap_mark: wrong header, %s (%p)", rb_obj_info(obj), ptr);
@ -645,6 +656,8 @@ transient_heap_promote_add(struct transient_heap* theap, VALUE obj)
void
rb_transient_heap_promote(VALUE obj)
{
ASSERT_vm_locking();
if (transient_heap_ptr(obj, FALSE)) {
struct transient_heap* theap = transient_heap_get();
transient_heap_promote_add(theap, obj);
@ -663,6 +676,8 @@ alloc_header(struct transient_heap_block* block, int index)
static void
transient_heap_reset(void)
{
ASSERT_vm_locking();
struct transient_heap* theap = transient_heap_get();
struct transient_heap_block* block;
@ -759,53 +774,61 @@ transient_heap_update_status(struct transient_heap* theap, enum transient_heap_s
static void
transient_heap_evacuate(void *dmy)
{
struct transient_heap* theap = transient_heap_get();
RB_VM_LOCK_ENTER();
{
struct transient_heap* theap = transient_heap_get();
if (theap->status == transient_heap_marking) {
if (TRANSIENT_HEAP_DEBUG >= 1) fprintf(stderr, "!! transient_heap_evacuate: skip while transient_heap_marking\n");
}
else {
VALUE gc_disabled = rb_gc_disable_no_rest();
struct transient_heap_block* block;
if (theap->status == transient_heap_marking) {
if (TRANSIENT_HEAP_DEBUG >= 1) fprintf(stderr, "!! transient_heap_evacuate: skip while transient_heap_marking\n");
}
else {
VALUE gc_disabled = rb_gc_disable_no_rest();
struct transient_heap_block* block;
if (TRANSIENT_HEAP_DEBUG >= 1) {
int i;
fprintf(stderr, "!! transient_heap_evacuate start total_blocks:%d\n", theap->total_blocks);
if (TRANSIENT_HEAP_DEBUG >= 4) {
for (i=0; i<theap->promoted_objects_index; i++) fprintf(stderr, "%4d %s\n", i, rb_obj_info(theap->promoted_objects[i]));
RUBY_DEBUG_LOG("start gc_disabled:%d", RTEST(gc_disabled));
if (TRANSIENT_HEAP_DEBUG >= 1) {
int i;
fprintf(stderr, "!! transient_heap_evacuate start total_blocks:%d\n", theap->total_blocks);
if (TRANSIENT_HEAP_DEBUG >= 4) {
for (i=0; i<theap->promoted_objects_index; i++) fprintf(stderr, "%4d %s\n", i, rb_obj_info(theap->promoted_objects[i]));
}
}
}
if (TRANSIENT_HEAP_DEBUG >= 2) transient_heap_dump(theap);
if (TRANSIENT_HEAP_DEBUG >= 2) transient_heap_dump(theap);
TH_ASSERT(theap->status == transient_heap_none);
transient_heap_update_status(theap, transient_heap_escaping);
TH_ASSERT(theap->status == transient_heap_none);
transient_heap_update_status(theap, transient_heap_escaping);
/* evacuate from marked blocks */
block = theap->marked_blocks;
while (block) {
transient_heap_block_evacuate(theap, block);
block = block->info.next_block;
}
/* evacuate from marked blocks */
block = theap->marked_blocks;
while (block) {
transient_heap_block_evacuate(theap, block);
block = block->info.next_block;
}
/* evacuate from using blocks
/* evacuate from using blocks
only affect incremental marking */
block = theap->using_blocks;
while (block) {
transient_heap_block_evacuate(theap, block);
block = block->info.next_block;
block = theap->using_blocks;
while (block) {
transient_heap_block_evacuate(theap, block);
block = block->info.next_block;
}
/* all objects in marked_objects are escaped. */
transient_heap_reset();
if (TRANSIENT_HEAP_DEBUG > 0) {
fprintf(stderr, "!! transient_heap_evacuate end total_blocks:%d\n", theap->total_blocks);
}
transient_heap_verify(theap);
transient_heap_update_status(theap, transient_heap_none);
if (gc_disabled != Qtrue) rb_gc_enable();
RUBY_DEBUG_LOG("finish", 0);
}
/* all objects in marked_objects are escaped. */
transient_heap_reset();
if (TRANSIENT_HEAP_DEBUG > 0) {
fprintf(stderr, "!! transient_heap_evacuate end total_blocks:%d\n", theap->total_blocks);
}
transient_heap_verify(theap);
transient_heap_update_status(theap, transient_heap_none);
if (gc_disabled != Qtrue) rb_gc_enable();
}
RB_VM_LOCK_LEAVE();
}
static void
@ -875,6 +898,8 @@ transient_heap_blocks_update_refs(struct transient_heap* theap, struct transient
void
rb_transient_heap_update_references(void)
{
ASSERT_vm_locking();
struct transient_heap* theap = transient_heap_get();
int i;
@ -890,6 +915,7 @@ rb_transient_heap_update_references(void)
void
rb_transient_heap_start_marking(int full_marking)
{
ASSERT_vm_locking();
RUBY_DEBUG_LOG("full?:%d", full_marking);
struct transient_heap* theap = transient_heap_get();
@ -940,6 +966,7 @@ rb_transient_heap_start_marking(int full_marking)
void
rb_transient_heap_finish_marking(void)
{
ASSERT_vm_locking();
RUBY_DEBUG_LOG("", 0);
struct transient_heap* theap = transient_heap_get();


@ -36,6 +36,7 @@
#include "transient_heap.h"
#include "variable.h"
#include "vm_core.h"
#include "ractor_pub.h"
typedef void rb_gvar_compact_t(void *var);
@ -46,7 +47,7 @@ static VALUE autoload_featuremap; /* feature => autoload_i */
static void check_before_mod_set(VALUE, ID, VALUE, const char *);
static void setup_const_entry(rb_const_entry_t *, VALUE, VALUE, rb_const_flag_t);
static VALUE rb_const_search(VALUE klass, ID id, int exclude, int recurse, int visibility);
static st_table *generic_iv_tbl;
static st_table *generic_iv_tbl_;
struct ivar_update {
union {
@ -61,7 +62,7 @@ void
Init_var_tables(void)
{
rb_global_tbl = rb_id_table_create(0);
generic_iv_tbl = st_init_numtable();
generic_iv_tbl_ = st_init_numtable();
autoload = rb_intern_const("__autoload__");
/* __classpath__: fully qualified class path */
classpath = rb_intern_const("__classpath__");
@ -329,28 +330,37 @@ struct rb_global_variable {
struct rb_global_entry {
struct rb_global_variable *var;
ID id;
bool ractor_local;
};
static struct rb_id_table *
global_tbl(void)
{
return rb_global_tbl;
}
static struct rb_global_entry*
rb_find_global_entry(ID id)
{
struct rb_global_entry *entry;
VALUE data;
if (!rb_id_table_lookup(global_tbl(), id, &data)) {
return NULL;
if (!rb_id_table_lookup(rb_global_tbl, id, &data)) {
entry = NULL;
}
entry = (struct rb_global_entry *)data;
ASSUME(entry != NULL);
else {
entry = (struct rb_global_entry *)data;
RUBY_ASSERT(entry != NULL);
}
if (UNLIKELY(!rb_ractor_main_p()) && (!entry || !entry->ractor_local)) {
rb_raise(rb_eRuntimeError, "can not access global variables %s from non-main Ractors", rb_id2name(id));
}
return entry;
}
void
rb_gvar_ractor_local(const char *name)
{
struct rb_global_entry *entry = rb_find_global_entry(rb_intern(name));
entry->ractor_local = true;
}
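For illustration, a hypothetical Ruby-level consequence of the guard in rb_find_global_entry above (not part of the diff; the variable name is made up and the message text is taken from the raise):

  $answer = 42
  r = Ractor.new do
    $answer                    # global variable access from a non-main Ractor
  rescue RuntimeError => e
    e.message
  end
  p r.take                     # expected: "can not access global variables $answer from non-main Ractors"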
static void
rb_gvar_undef_compactor(void *var)
{
@ -366,6 +376,7 @@ rb_global_entry(ID id)
var = ALLOC(struct rb_global_variable);
entry->id = id;
entry->var = var;
entry->ractor_local = false;
var->counter = 1;
var->data = 0;
var->getter = rb_gvar_undef_getter;
@ -375,7 +386,7 @@ rb_global_entry(ID id)
var->block_trace = 0;
var->trace = 0;
rb_id_table_insert(global_tbl(), id, (VALUE)entry);
rb_id_table_insert(rb_global_tbl, id, (VALUE)entry);
}
return entry;
}
@ -502,8 +513,9 @@ update_global_entry(VALUE v, void *ignored)
void
rb_gc_update_global_tbl(void)
{
if (rb_global_tbl)
if (rb_global_tbl) {
rb_id_table_foreach_values(rb_global_tbl, update_global_entry, 0);
}
}
static ID
@ -646,18 +658,17 @@ rb_f_untrace_var(int argc, const VALUE *argv)
ID id;
struct rb_global_entry *entry;
struct trace_var *trace;
VALUE data;
rb_scan_args(argc, argv, "11", &var, &cmd);
id = rb_check_id(&var);
if (!id) {
rb_name_error_str(var, "undefined global variable %"PRIsVALUE"", QUOTE(var));
}
if (!rb_id_table_lookup(global_tbl(), id, &data)) {
if ((entry = rb_find_global_entry(id)) == NULL) {
rb_name_error(id, "undefined global variable %"PRIsVALUE"", QUOTE_ID(id));
}
trace = (entry = (struct rb_global_entry *)data)->var->trace;
trace = entry->var->trace;
if (NIL_P(cmd)) {
VALUE ary = rb_ary_new();
@ -801,7 +812,11 @@ rb_f_global_variables(void)
VALUE ary = rb_ary_new();
VALUE sym, backref = rb_backref_get();
rb_id_table_foreach(global_tbl(), gvar_i, (void *)ary);
if (!rb_ractor_main_p()) {
rb_raise(rb_eRuntimeError, "can not access global variables from non-main Ractors");
}
rb_id_table_foreach(rb_global_tbl, gvar_i, (void *)ary);
if (!NIL_P(backref)) {
char buf[2];
int i, nmatch = rb_match_count(backref);
@ -828,7 +843,11 @@ rb_alias_variable(ID name1, ID name2)
{
struct rb_global_entry *entry1, *entry2;
VALUE data1;
struct rb_id_table *gtbl = global_tbl();
struct rb_id_table *gtbl = rb_global_tbl;
if (!rb_ractor_main_p()) {
rb_raise(rb_eRuntimeError, "can not access global variables from non-main Ractors");
}
entry2 = rb_global_entry(name2);
if (!rb_id_table_lookup(gtbl, name1, &data1)) {
@ -859,30 +878,61 @@ rb_alias_variable(ID name1, ID name2)
entry1->var = entry2->var;
}
static void
IVAR_ACCESSOR_SHOULD_BE_MAIN_RACTOR(ID id)
{
if (UNLIKELY(!rb_ractor_main_p())) {
if (rb_is_instance_id(id)) { // check only normal ivars
rb_raise(rb_eRuntimeError, "can not access instance variables of classes/modules from non-main Ractors");
}
}
}
#define CVAR_ACCESSOR_SHOULD_BE_MAIN_RACTOR() \
if (UNLIKELY(!rb_ractor_main_p())) { \
rb_raise(rb_eRuntimeError, "can not access class variables from non-main Ractors"); \
}
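A hypothetical sketch of what these two guards are meant to reject (illustration only; the class and names are made up, and it assumes class objects themselves remain accessible from other Ractors):

  class Config
    @table = {}                # instance variable of the class object itself
    @@count = 0                # class variable
  end

  r = Ractor.new do
    Config.instance_variable_get(:@table)   # expected: RuntimeError (ivar of a class/module)
  rescue RuntimeError => e
    e.message
  end
  p r.take

  r = Ractor.new do
    Config.class_variable_get(:@@count)     # expected: RuntimeError (class variable)
  rescue RuntimeError => e
    e.message
  end
  p r.take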
static inline struct st_table *
generic_ivtbl(VALUE obj, ID id, bool force_check_ractor)
{
if ((force_check_ractor || rb_is_instance_id(id)) && // not internal ID
UNLIKELY(rb_ractor_shareable_p(obj) && !rb_ractor_main_p())) {
rb_raise(rb_eRuntimeError, "can not access instance variables of shareable objects from non-main Ractors");
}
return generic_iv_tbl_;
}
static inline struct st_table *
generic_ivtbl_no_ractor_check(VALUE obj)
{
return generic_ivtbl(obj, 0, false);
}
MJIT_FUNC_EXPORTED struct st_table *
rb_ivar_generic_ivtbl(VALUE obj)
{
return generic_ivtbl(obj, 0, true);
}
static int
gen_ivtbl_get(VALUE obj, struct gen_ivtbl **ivtbl)
gen_ivtbl_get(VALUE obj, ID id, struct gen_ivtbl **ivtbl)
{
st_data_t data;
if (st_lookup(generic_iv_tbl, (st_data_t)obj, &data)) {
if (st_lookup(generic_ivtbl(obj, id, false), (st_data_t)obj, &data)) {
*ivtbl = (struct gen_ivtbl *)data;
return 1;
}
return 0;
}
MJIT_FUNC_EXPORTED struct st_table *
rb_ivar_generic_ivtbl(void)
{
return generic_iv_tbl;
}
static VALUE
generic_ivar_delete(VALUE obj, ID id, VALUE undef)
{
struct gen_ivtbl *ivtbl;
if (gen_ivtbl_get(obj, &ivtbl)) {
if (gen_ivtbl_get(obj, id, &ivtbl)) {
st_table *iv_index_tbl = RCLASS_IV_INDEX_TBL(rb_obj_class(obj));
st_data_t index;
@ -903,7 +953,7 @@ generic_ivar_get(VALUE obj, ID id, VALUE undef)
{
struct gen_ivtbl *ivtbl;
if (gen_ivtbl_get(obj, &ivtbl)) {
if (gen_ivtbl_get(obj, id, &ivtbl)) {
st_table *iv_index_tbl = RCLASS_IV_INDEX_TBL(rb_obj_class(obj));
st_data_t index;
@ -993,7 +1043,7 @@ generic_ivar_defined(VALUE obj, ID id)
if (!iv_index_tbl) return Qfalse;
if (!st_lookup(iv_index_tbl, (st_data_t)id, &index)) return Qfalse;
if (!gen_ivtbl_get(obj, &ivtbl)) return Qfalse;
if (!gen_ivtbl_get(obj, id, &ivtbl)) return Qfalse;
if ((index < ivtbl->numiv) && (ivtbl->ivptr[index] != Qundef))
return Qtrue;
@ -1011,7 +1061,7 @@ generic_ivar_remove(VALUE obj, ID id, VALUE *valp)
if (!iv_index_tbl) return 0;
if (!st_lookup(iv_index_tbl, key, &index)) return 0;
if (!gen_ivtbl_get(obj, &ivtbl)) return 0;
if (!gen_ivtbl_get(obj, id, &ivtbl)) return 0;
if (index < ivtbl->numiv) {
if (ivtbl->ivptr[index] != Qundef) {
@ -1038,7 +1088,7 @@ rb_mark_generic_ivar(VALUE obj)
{
struct gen_ivtbl *ivtbl;
if (gen_ivtbl_get(obj, &ivtbl)) {
if (gen_ivtbl_get(obj, 0, &ivtbl)) {
gen_ivtbl_mark(ivtbl);
}
}
@ -1049,8 +1099,8 @@ rb_mv_generic_ivar(VALUE rsrc, VALUE dst)
st_data_t key = (st_data_t)rsrc;
struct gen_ivtbl *ivtbl;
if (st_delete(generic_iv_tbl, &key, (st_data_t *)&ivtbl))
st_insert(generic_iv_tbl, (st_data_t)dst, (st_data_t)ivtbl);
if (st_delete(generic_ivtbl_no_ractor_check(rsrc), &key, (st_data_t *)&ivtbl))
st_insert(generic_ivtbl_no_ractor_check(dst), (st_data_t)dst, (st_data_t)ivtbl);
}
void
@ -1059,7 +1109,7 @@ rb_free_generic_ivar(VALUE obj)
st_data_t key = (st_data_t)obj;
struct gen_ivtbl *ivtbl;
if (st_delete(generic_iv_tbl, &key, (st_data_t *)&ivtbl))
if (st_delete(generic_ivtbl_no_ractor_check(obj), &key, (st_data_t *)&ivtbl))
xfree(ivtbl);
}
@ -1068,7 +1118,7 @@ rb_generic_ivar_memsize(VALUE obj)
{
struct gen_ivtbl *ivtbl;
if (gen_ivtbl_get(obj, &ivtbl))
if (gen_ivtbl_get(obj, 0, &ivtbl))
return gen_ivtbl_bytes(ivtbl->numiv);
return 0;
}
@ -1111,6 +1161,7 @@ rb_ivar_lookup(VALUE obj, ID id, VALUE undef)
break;
case T_CLASS:
case T_MODULE:
IVAR_ACCESSOR_SHOULD_BE_MAIN_RACTOR(id);
if (RCLASS_IV_TBL(obj) &&
st_lookup(RCLASS_IV_TBL(obj), (st_data_t)id, &index))
return (VALUE)index;
@ -1167,6 +1218,7 @@ rb_ivar_delete(VALUE obj, ID id, VALUE undef)
break;
case T_CLASS:
case T_MODULE:
IVAR_ACCESSOR_SHOULD_BE_MAIN_RACTOR(id);
if (RCLASS_IV_TBL(obj) &&
st_delete(RCLASS_IV_TBL(obj), (st_data_t *)&id, &index))
return (VALUE)index;
@ -1223,7 +1275,7 @@ generic_ivar_set(VALUE obj, ID id, VALUE val)
ivup.iv_extended = 0;
ivup.u.iv_index_tbl = iv_index_tbl_make(obj);
iv_index_tbl_extend(&ivup, id);
st_update(generic_iv_tbl, (st_data_t)obj, generic_ivar_update,
st_update(generic_ivtbl(obj, id, false), (st_data_t)obj, generic_ivar_update,
(st_data_t)&ivup);
ivup.u.ivtbl->ivptr[ivup.index] = val;
@ -1347,6 +1399,7 @@ ivar_set(VALUE obj, ID id, VALUE val)
break;
case T_CLASS:
case T_MODULE:
IVAR_ACCESSOR_SHOULD_BE_MAIN_RACTOR(id);
if (!RCLASS_IV_TBL(obj)) RCLASS_IV_TBL(obj) = st_init_numtable();
rb_class_ivar_set(obj, id, val);
break;
@ -1393,6 +1446,7 @@ rb_ivar_defined(VALUE obj, ID id)
break;
case T_CLASS:
case T_MODULE:
IVAR_ACCESSOR_SHOULD_BE_MAIN_RACTOR(id);
if (RCLASS_IV_TBL(obj) && st_is_member(RCLASS_IV_TBL(obj), (st_data_t)id))
return Qtrue;
break;
@ -1469,7 +1523,7 @@ gen_ivar_each(VALUE obj, rb_ivar_foreach_callback_func *func, st_data_t arg)
st_table *iv_index_tbl = RCLASS_IV_INDEX_TBL(rb_obj_class(obj));
if (!iv_index_tbl) return;
if (!gen_ivtbl_get(obj, &data.ivtbl)) return;
if (!gen_ivtbl_get(obj, 0, &data.ivtbl)) return;
data.func = (int (*)(ID key, VALUE val, st_data_t arg))func;
data.arg = arg;
@ -1513,14 +1567,14 @@ rb_copy_generic_ivar(VALUE clone, VALUE obj)
if (!FL_TEST(obj, FL_EXIVAR)) {
goto clear;
}
if (gen_ivtbl_get(obj, &ivtbl)) {
if (gen_ivtbl_get(obj, 0, &ivtbl)) {
struct givar_copy c;
uint32_t i;
if (gen_ivtbl_count(ivtbl) == 0)
goto clear;
if (gen_ivtbl_get(clone, &c.ivtbl)) {
if (gen_ivtbl_get(clone, 0, &c.ivtbl)) {
for (i = 0; i < c.ivtbl->numiv; i++)
c.ivtbl->ivptr[i] = Qundef;
}
@ -1536,7 +1590,8 @@ rb_copy_generic_ivar(VALUE clone, VALUE obj)
* c.ivtbl may change in gen_ivar_copy due to realloc,
* no need to free
*/
st_insert(generic_iv_tbl, (st_data_t)clone, (st_data_t)c.ivtbl);
generic_ivtbl_no_ractor_check(clone);
st_insert(generic_ivtbl_no_ractor_check(obj), (st_data_t)clone, (st_data_t)c.ivtbl);
}
return;
@ -1557,6 +1612,7 @@ rb_ivar_foreach(VALUE obj, rb_ivar_foreach_callback_func *func, st_data_t arg)
break;
case T_CLASS:
case T_MODULE:
IVAR_ACCESSOR_SHOULD_BE_MAIN_RACTOR(0);
if (RCLASS_IV_TBL(obj)) {
st_foreach_safe(RCLASS_IV_TBL(obj), func, arg);
}
@ -1599,7 +1655,7 @@ rb_ivar_count(VALUE obj)
if (FL_TEST(obj, FL_EXIVAR)) {
struct gen_ivtbl *ivtbl;
if (gen_ivtbl_get(obj, &ivtbl)) {
if (gen_ivtbl_get(obj, 0, &ivtbl)) {
return gen_ivtbl_count(ivtbl);
}
}
@ -1720,6 +1776,7 @@ rb_obj_remove_instance_variable(VALUE obj, VALUE name)
break;
case T_CLASS:
case T_MODULE:
IVAR_ACCESSOR_SHOULD_BE_MAIN_RACTOR(id);
n = id;
if (RCLASS_IV_TBL(obj) && st_delete(RCLASS_IV_TBL(obj), &n, &v)) {
return (VALUE)v;
@ -2383,7 +2440,14 @@ static VALUE
rb_const_get_0(VALUE klass, ID id, int exclude, int recurse, int visibility)
{
VALUE c = rb_const_search(klass, id, exclude, recurse, visibility);
if (c != Qundef) return c;
if (c != Qundef) {
if (UNLIKELY(!rb_ractor_main_p())) {
if (!rb_ractor_shareable_p(c)) {
rb_raise(rb_eNameError, "can not access non-sharable objects in constant %"PRIsVALUE"::%s by non-main Ractor.", rb_class_path(klass), rb_id2name(id));
}
}
return c;
}
return rb_const_missing(klass, ID2SYM(id));
}
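A hypothetical Ruby-level effect of this check (illustration only; the constant names are made up, and shareability of the referenced object, not of the constant itself, decides the outcome):

  LIMIT = 100                   # Integer: special constant, shareable
  CONF  = { 'mode' => 'fast' }  # plain Hash: not shareable

  p Ractor.new { LIMIT }.take   #=> 100

  r = Ractor.new do
    CONF                        # expected: NameError from rb_const_get_0 / vm_get_ev_const
  rescue NameError => e
    "raised: #{e.class}"
  end
  p r.take                      #=> "raised: NameError"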
@ -2824,6 +2888,10 @@ rb_const_set(VALUE klass, ID id, VALUE val)
QUOTE_ID(id));
}
if (!rb_ractor_shareable_p(val) && !rb_ractor_main_p()) {
rb_raise(rb_eNameError, "can not set constants with non-shareable objects by non-main Ractors");
}
check_before_mod_set(klass, id, val, "constant");
if (!tbl) {
RCLASS_CONST_TBL(klass) = tbl = rb_id_table_create(0);
@ -3141,6 +3209,7 @@ cvar_overtaken(VALUE front, VALUE target, ID id)
}
#define CVAR_LOOKUP(v,r) do {\
CVAR_ACCESSOR_SHOULD_BE_MAIN_RACTOR(); \
if (cvar_lookup_at(klass, id, (v))) {r;}\
CVAR_FOREACH_ANCESTORS(klass, v, r);\
} while(0)


@ -16,6 +16,6 @@ struct gen_ivtbl {
VALUE ivptr[FLEX_ARY_LEN];
};
struct st_table *rb_ivar_generic_ivtbl(void);
struct st_table *rb_ivar_generic_ivtbl(VALUE obj);
#endif /* RUBY_TOPLEVEL_VARIABLE_H */

vm.c (124 lines changed)

@ -1,6 +1,6 @@
/**********************************************************************
vm.c -
Vm.c -
$Author$
@ -34,6 +34,8 @@
#include "vm_debug.h"
#include "vm_exec.h"
#include "vm_insnhelper.h"
#include "ractor.h"
#include "vm_sync.h"
#include "builtin.h"
@ -376,7 +378,7 @@ VALUE rb_block_param_proxy;
#define ruby_vm_redefined_flag GET_VM()->redefined_flag
VALUE ruby_vm_const_missing_count = 0;
rb_vm_t *ruby_current_vm_ptr = NULL;
rb_execution_context_t *ruby_current_execution_context_ptr = NULL;
native_tls_key_t ruby_current_ec_key;
rb_event_flag_t ruby_vm_event_flags;
rb_event_flag_t ruby_vm_event_enabled_global_flags;
@ -398,6 +400,8 @@ static const struct rb_callcache vm_empty_cc = {
static void thread_free(void *ptr);
//
void
rb_vm_inc_const_missing_count(void)
{
@ -568,7 +572,6 @@ rb_vm_get_binding_creatable_next_cfp(const rb_execution_context_t *ec, const rb_
MJIT_FUNC_EXPORTED rb_control_frame_t *
rb_vm_get_ruby_level_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
{
if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) bp();
while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
if (VM_FRAME_RUBYFRAME_P(cfp)) {
return (rb_control_frame_t *)cfp;
@ -944,6 +947,27 @@ rb_proc_dup(VALUE self)
return procval;
}
VALUE
rb_proc_isolate_bang(VALUE self)
{
// check accesses
const rb_iseq_t *iseq = vm_proc_iseq(self);
if (iseq && iseq->body->access_outer_variables) {
rb_raise(rb_eArgError, "can not isolate a Proc because it can accesses outer variables.");
}
rb_proc_t *proc = (rb_proc_t *)RTYPEDDATA_DATA(self);
proc->is_isolated = TRUE;
return self;
}
VALUE
rb_proc_isolate(VALUE self)
{
VALUE dst = rb_proc_dup(self);
rb_proc_isolate_bang(dst);
return dst;
}
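As a hypothetical illustration of why iseq->body->access_outer_variables and rb_proc_isolate exist (not part of the diff; it assumes Ractor.new isolates the block it receives, which is how ractor.c is expected to call this API):

  x = 10
  begin
    Ractor.new { x + 1 }               # the block reads the outer local `x`
  rescue ArgumentError => e
    p e.message                        # expected: "can not isolate a Proc because ..."
  end

  p Ractor.new(10) {|x| x + 1 }.take   #=> 11 (pass values as block arguments instead)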
MJIT_FUNC_EXPORTED VALUE
rb_vm_make_proc_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass, int8_t is_lambda)
@ -1283,6 +1307,20 @@ rb_vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc,
}
}
VALUE
rb_vm_invoke_proc_with_self(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self,
int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler)
{
vm_block_handler_verify(passed_block_handler);
if (proc->is_from_method) {
return rb_vm_invoke_bmethod(ec, proc, self, argc, argv, kw_splat, passed_block_handler, NULL);
}
else {
return vm_invoke_proc(ec, proc, self, argc, argv, kw_splat, passed_block_handler);
}
}
/* special variable */
static rb_control_frame_t *
@ -2257,17 +2295,9 @@ rb_vm_update_references(void *ptr)
{
if (ptr) {
rb_vm_t *vm = ptr;
rb_thread_t *th = 0;
rb_gc_update_tbl_refs(vm->frozen_strings);
list_for_each(&vm->living_threads, th, vmlt_node) {
th->self = rb_gc_location(th->self);
}
vm->thgroup_default = rb_gc_location(vm->thgroup_default);
vm->mark_object_ary = rb_gc_location(vm->mark_object_ary);
vm->load_path = rb_gc_location(vm->load_path);
vm->load_path_snapshot = rb_gc_location(vm->load_path_snapshot);
@ -2294,14 +2324,17 @@ rb_vm_mark(void *ptr)
RUBY_GC_INFO("-------------------------------------------------\n");
if (ptr) {
rb_vm_t *vm = ptr;
rb_thread_t *th = 0;
rb_ractor_t *r;
long i, len;
const VALUE *obj_ary;
list_for_each(&vm->living_threads, th, vmlt_node) {
rb_gc_mark_movable(th->self);
}
rb_gc_mark_movable(vm->thgroup_default);
list_for_each(&vm->ractor.set, r, vmlr_node) {
// ractor.set only contains blocking or running ractors
VM_ASSERT(rb_ractor_status_p(r, ractor_blocking) ||
rb_ractor_status_p(r, ractor_running));
rb_gc_mark(rb_ractor_self(r));
}
rb_gc_mark_movable(vm->mark_object_ary);
len = RARRAY_LEN(vm->mark_object_ary);
@ -2379,10 +2412,11 @@ ruby_vm_destruct(rb_vm_t *vm)
RUBY_FREE_ENTER("vm");
if (vm) {
rb_thread_t *th = vm->main_thread;
rb_thread_t *th = vm->ractor.main_thread;
struct rb_objspace *objspace = vm->objspace;
vm->main_thread = 0;
if (th) {
vm->ractor.main_thread = NULL;
if (th) {
rb_fiber_reset_root_local_storage(th);
thread_free(th);
}
@ -2397,7 +2431,6 @@ ruby_vm_destruct(rb_vm_t *vm)
st_free_table(vm->frozen_strings);
vm->frozen_strings = 0;
}
rb_vm_gvl_destroy(vm);
RB_ALTSTACK_FREE(vm->main_altstack);
if (objspace) {
rb_objspace_free(objspace);
@ -2416,7 +2449,8 @@ vm_memsize(const void *ptr)
const rb_vm_t *vmobj = ptr;
size_t size = sizeof(rb_vm_t);
size += vmobj->living_thread_num * sizeof(rb_thread_t);
// TODO
// size += vmobj->ractor_num * sizeof(rb_ractor_t);
if (vmobj->defined_strings) {
size += DEFINED_EXPR * sizeof(VALUE);
@ -2573,6 +2607,7 @@ rb_execution_context_mark(const rb_execution_context_t *ec)
rb_control_frame_t *cfp = ec->cfp;
rb_control_frame_t *limit_cfp = (void *)(ec->vm_stack + ec->vm_stack_size);
VM_ASSERT(sp == ec->cfp->sp);
rb_gc_mark_vm_stack_values((long)(sp - p), p);
while (cfp != limit_cfp) {
@ -2640,6 +2675,7 @@ thread_mark(void *ptr)
/* mark ruby objects */
switch (th->invoke_type) {
case thread_invoke_type_proc:
case thread_invoke_type_ractor_proc:
RUBY_MARK_UNLESS_NULL(th->invoke_arg.proc.proc);
RUBY_MARK_UNLESS_NULL(th->invoke_arg.proc.args);
break;
@ -2650,6 +2686,7 @@ thread_mark(void *ptr)
break;
}
rb_gc_mark(rb_ractor_self(th->ractor));
RUBY_MARK_UNLESS_NULL(th->thgroup);
RUBY_MARK_UNLESS_NULL(th->value);
RUBY_MARK_UNLESS_NULL(th->pending_interrupt_queue);
@ -2685,8 +2722,8 @@ thread_free(void *ptr)
rb_threadptr_root_fiber_release(th);
if (th->vm && th->vm->main_thread == th) {
RUBY_GC_INFO("main thread\n");
if (th->vm && th->vm->ractor.main_thread == th) {
RUBY_GC_INFO("MRI main thread\n");
}
else {
ruby_xfree(ptr);
@ -2815,15 +2852,17 @@ th_init(rb_thread_t *th, VALUE self)
static VALUE
ruby_thread_init(VALUE self)
{
rb_thread_t *th = rb_thread_ptr(self);
rb_vm_t *vm = GET_THREAD()->vm;
rb_thread_t *th = GET_THREAD();
rb_thread_t *targe_th = rb_thread_ptr(self);
rb_vm_t *vm = th->vm;
th->vm = vm;
th_init(th, self);
targe_th->vm = vm;
th_init(targe_th, self);
th->top_wrapper = 0;
th->top_self = rb_vm_top_self();
th->ec->root_svar = Qfalse;
targe_th->top_wrapper = 0;
targe_th->top_self = rb_vm_top_self();
targe_th->ec->root_svar = Qfalse;
targe_th->ractor = th->ractor;
return self;
}
@ -3341,23 +3380,21 @@ Init_VM(void)
VALUE filename = rb_fstring_lit("<main>");
const rb_iseq_t *iseq = rb_iseq_new(0, filename, filename, Qnil, 0, ISEQ_TYPE_TOP);
// Ractor setup
rb_ractor_main_setup(vm, th->ractor, th);
/* create vm object */
vm->self = TypedData_Wrap_Struct(rb_cRubyVM, &vm_data_type, vm);
/* create main thread */
th->self = TypedData_Wrap_Struct(rb_cThread, &thread_data_type, th);
vm->main_thread = th;
vm->running_thread = th;
th->self = TypedData_Wrap_Struct(rb_cThread, &thread_data_type, th);
vm->ractor.main_thread = th;
vm->ractor.main_ractor = th->ractor;
th->vm = vm;
th->top_wrapper = 0;
th->top_self = rb_vm_top_self();
rb_thread_set_current(th);
rb_vm_living_threads_insert(vm, th);
rb_gc_register_mark_object((VALUE)iseq);
rb_gc_register_mark_object((VALUE)iseq);
th->ec->cfp->iseq = iseq;
th->ec->cfp->pc = iseq->body->iseq_encoded;
th->ec->cfp->self = th->top_self;
@ -3385,7 +3422,7 @@ Init_VM(void)
void
rb_vm_set_progname(VALUE filename)
{
rb_thread_t *th = GET_VM()->main_thread;
rb_thread_t *th = GET_VM()->ractor.main_thread;
rb_control_frame_t *cfp = (void *)(th->ec->vm_stack + th->ec->vm_stack_size);
--cfp;
@ -3413,8 +3450,13 @@ Init_BareVM(void)
Init_native_thread(th);
th->vm = vm;
th_init(th, 0);
rb_thread_set_current_raw(th);
vm->ractor.main_ractor = th->ractor = rb_ractor_main_alloc();
rb_ractor_set_current_ec(th->ractor, th->ec);
ruby_thread_init_stack(th);
rb_native_mutex_initialize(&vm->ractor.sync.lock);
rb_native_cond_initialize(&vm->ractor.sync.barrier_cond);
rb_native_cond_initialize(&vm->ractor.sync.terminate_cond);
}
void

vm_core.h (119 lines changed)

@ -419,6 +419,7 @@ struct rb_iseq_constant_body {
char catch_except_p; /* If a frame of this ISeq may catch exception, set TRUE */
bool builtin_inline_p; // This ISeq's builtin func is safe to be inlined by MJIT
char access_outer_variables;
#if USE_MJIT
/* The following fields are MJIT related info. */
@ -554,12 +555,30 @@ typedef const struct rb_builtin_function *RB_BUILTIN;
typedef struct rb_vm_struct {
VALUE self;
rb_global_vm_lock_t gvl;
struct {
struct list_head set;
unsigned int cnt;
unsigned int blocking_cnt;
struct rb_thread_struct *main_thread;
struct rb_ractor_struct *main_ractor;
struct rb_thread_struct *main_thread; // == vm->ractor.main_ractor->threads.main
/* persists across uncontended GVL release/acquire for time slice */
const struct rb_thread_struct *running_thread;
struct {
// monitor
rb_nativethread_lock_t lock;
struct rb_ractor_struct *lock_owner;
unsigned int lock_rec;
// barrier
bool barrier_waiting;
unsigned int barrier_cnt;
rb_nativethread_cond_t barrier_cond;
// join at exit
rb_nativethread_cond_t terminate_cond;
bool terminate_waiting;
} sync;
} ractor;
#ifdef USE_SIGALTSTACK
void *main_altstack;
@ -570,9 +589,6 @@ typedef struct rb_vm_struct {
struct list_head waiting_pids; /* PID > 0: <=> struct waitpid_state */
struct list_head waiting_grps; /* PID <= 0: <=> struct waitpid_state */
struct list_head waiting_fds; /* <=> struct waiting_fd */
struct list_head living_threads;
VALUE thgroup_default;
int living_thread_num;
/* set in single-threaded processes only: */
volatile int ubf_async_safe;
@ -580,9 +596,7 @@ typedef struct rb_vm_struct {
unsigned int running: 1;
unsigned int thread_abort_on_exception: 1;
unsigned int thread_report_on_exception: 1;
unsigned int safe_level_: 1;
int sleeper;
/* object management */
VALUE mark_object_ary;
@ -890,9 +904,12 @@ void rb_ec_initialize_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t
// @param ec the execution context to update.
void rb_ec_clear_vm_stack(rb_execution_context_t *ec);
typedef struct rb_ractor_struct rb_ractor_t;
typedef struct rb_thread_struct {
struct list_node vmlt_node;
struct list_node lt_node; // managed by a ractor
VALUE self;
rb_ractor_t *ractor;
rb_vm_t *vm;
rb_execution_context_t *ec;
@ -955,9 +972,10 @@ typedef struct rb_thread_struct {
} func;
} invoke_arg;
enum {
enum thread_invoke_type {
thread_invoke_type_none = 0,
thread_invoke_type_proc,
thread_invoke_type_ractor_proc,
thread_invoke_type_func
} invoke_type;
@ -1039,8 +1057,12 @@ typedef struct {
const struct rb_block block;
unsigned int is_from_method: 1; /* bool */
unsigned int is_lambda: 1; /* bool */
unsigned int is_isolated: 1; /* bool */
} rb_proc_t;
VALUE rb_proc_isolate(VALUE self);
VALUE rb_proc_isolate_bang(VALUE self);
typedef struct {
VALUE flags; /* imemo header */
rb_iseq_t *iseq;
@ -1628,11 +1650,12 @@ VALUE rb_vm_env_local_variables(const rb_env_t *env);
const rb_env_t *rb_vm_env_prev_env(const rb_env_t *env);
const VALUE *rb_binding_add_dynavars(VALUE bindval, rb_binding_t *bind, int dyncount, const ID *dynvars);
void rb_vm_inc_const_missing_count(void);
void rb_vm_gvl_destroy(rb_vm_t *vm);
VALUE rb_vm_call_kw(rb_execution_context_t *ec, VALUE recv, VALUE id, int argc,
const VALUE *argv, const rb_callable_method_entry_t *me, int kw_splat);
MJIT_STATIC void rb_vm_pop_frame(rb_execution_context_t *ec);
void rb_gvl_destroy(rb_global_vm_lock_t *gvl);
void rb_thread_start_timer_thread(void);
void rb_thread_stop_timer_thread(void);
void rb_thread_reset_timer_thread(void);
@ -1645,22 +1668,7 @@ rb_vm_living_threads_init(rb_vm_t *vm)
list_head_init(&vm->waiting_pids);
list_head_init(&vm->workqueue);
list_head_init(&vm->waiting_grps);
list_head_init(&vm->living_threads);
vm->living_thread_num = 0;
}
static inline void
rb_vm_living_threads_insert(rb_vm_t *vm, rb_thread_t *th)
{
list_add_tail(&vm->living_threads, &th->vmlt_node);
vm->living_thread_num++;
}
static inline void
rb_vm_living_threads_remove(rb_vm_t *vm, rb_thread_t *th)
{
list_del(&th->vmlt_node);
vm->living_thread_num--;
list_head_init(&vm->ractor.set);
}
typedef int rb_backtrace_iter_func(void *, VALUE, int, VALUE);
@ -1700,20 +1708,24 @@ MJIT_STATIC const rb_callable_method_entry_t *rb_vm_frame_method_entry(const rb_
VALUE rb_catch_protect(VALUE t, rb_block_call_func *func, VALUE data, enum ruby_tag_type *stateptr);
rb_execution_context_t *rb_vm_main_ractor_ec(rb_vm_t *vm); // ractor.c
/* for thread */
#if RUBY_VM_THREAD_MODEL == 2
RUBY_SYMBOL_EXPORT_BEGIN
RUBY_EXTERN rb_vm_t *ruby_current_vm_ptr;
RUBY_EXTERN rb_execution_context_t *ruby_current_execution_context_ptr;
RUBY_EXTERN rb_event_flag_t ruby_vm_event_flags;
RUBY_EXTERN rb_event_flag_t ruby_vm_event_enabled_global_flags;
RUBY_EXTERN unsigned int ruby_vm_event_local_num;
RUBY_EXTERN native_tls_key_t ruby_current_ec_key;
RUBY_SYMBOL_EXPORT_END
#define GET_VM() rb_current_vm()
#define GET_RACTOR() rb_current_ractor()
#define GET_THREAD() rb_current_thread()
#define GET_EC() rb_current_execution_context()
@ -1723,6 +1735,19 @@ rb_ec_thread_ptr(const rb_execution_context_t *ec)
return ec->thread_ptr;
}
static inline rb_ractor_t *
rb_ec_ractor_ptr(const rb_execution_context_t *ec)
{
const rb_thread_t *th = rb_ec_thread_ptr(ec);
if (th) {
VM_ASSERT(th->ractor != NULL);
return th->ractor;
}
else {
return NULL;
}
}
static inline rb_vm_t *
rb_ec_vm_ptr(const rb_execution_context_t *ec)
{
@ -1738,7 +1763,9 @@ rb_ec_vm_ptr(const rb_execution_context_t *ec)
static inline rb_execution_context_t *
rb_current_execution_context(void)
{
return ruby_current_execution_context_ptr;
rb_execution_context_t *ec = native_tls_get(ruby_current_ec_key);
VM_ASSERT(ec != NULL);
return ec;
}
static inline rb_thread_t *
@ -1748,33 +1775,27 @@ rb_current_thread(void)
return rb_ec_thread_ptr(ec);
}
static inline rb_ractor_t *
rb_current_ractor(void)
{
const rb_execution_context_t *ec = GET_EC();
return rb_ec_ractor_ptr(ec);
}
static inline rb_vm_t *
rb_current_vm(void)
{
#if 0 // TODO: reconsider the assertions
VM_ASSERT(ruby_current_vm_ptr == NULL ||
ruby_current_execution_context_ptr == NULL ||
rb_ec_thread_ptr(GET_EC()) == NULL ||
rb_ec_thread_ptr(GET_EC())->status == THREAD_KILLED ||
rb_ec_vm_ptr(GET_EC()) == ruby_current_vm_ptr);
#endif
return ruby_current_vm_ptr;
}
static inline void
rb_thread_set_current_raw(const rb_thread_t *th)
{
ruby_current_execution_context_ptr = th->ec;
}
static inline void
rb_thread_set_current(rb_thread_t *th)
{
if (th->vm->running_thread != th) {
th->running_time_us = 0;
}
rb_thread_set_current_raw(th);
th->vm->running_thread = th;
}
#else
#error "unsupported thread model"
#endif
@ -1783,13 +1804,17 @@ enum {
TIMER_INTERRUPT_MASK = 0x01,
PENDING_INTERRUPT_MASK = 0x02,
POSTPONED_JOB_INTERRUPT_MASK = 0x04,
TRAP_INTERRUPT_MASK = 0x08
TRAP_INTERRUPT_MASK = 0x08,
TERMINATE_INTERRUPT_MASK = 0x10,
VM_BARRIER_INTERRUPT_MASK = 0x20,
};
#define RUBY_VM_SET_TIMER_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, TIMER_INTERRUPT_MASK)
#define RUBY_VM_SET_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, PENDING_INTERRUPT_MASK)
#define RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, POSTPONED_JOB_INTERRUPT_MASK)
#define RUBY_VM_SET_TRAP_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, TRAP_INTERRUPT_MASK)
#define RUBY_VM_SET_TERMINATE_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, TERMINATE_INTERRUPT_MASK)
#define RUBY_VM_SET_VM_BARRIER_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, VM_BARRIER_INTERRUPT_MASK)
#define RUBY_VM_INTERRUPTED(ec) ((ec)->interrupt_flag & ~(ec)->interrupt_mask & \
(PENDING_INTERRUPT_MASK|TRAP_INTERRUPT_MASK))
#define RUBY_VM_INTERRUPTED_ANY(ec) ((ec)->interrupt_flag & ~(ec)->interrupt_mask)
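Note on the vm_core.h hunks above: the current execution context is now fetched from a native TLS key (ruby_current_ec_key) instead of a process-global pointer, and two interrupt bits (TERMINATE, VM_BARRIER) are added. A minimal sketch of how these pieces could be used together; the function name example_request_barrier is a hypothetical placeholder, not code from this commit, and it assumes the declarations shown above are visible via vm_core.h.

#include "vm_core.h"

static void
example_request_barrier(rb_execution_context_t *ec)
{
    /* Each thread resolves its own EC -> thread -> ractor chain through TLS,
     * so this works the same from any ractor's thread. */
    rb_ractor_t *r = rb_ec_ractor_ptr(ec);   /* may be NULL very early in boot */

    if (r != NULL) {
        /* The new bits extend the existing ATOMIC_OR-based interrupt flags;
         * setting them asks the target EC to stop at its next check point. */
        RUBY_VM_SET_VM_BARRIER_INTERRUPT(ec);
        RUBY_VM_SET_TERMINATE_INTERRUPT(ec);
    }
}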


@ -35,6 +35,7 @@
#include "internal/vm.h"
#include "iseq.h"
#include "vm_core.h"
#include "ractor.h"
#define MAX_POSBUF 128
@ -1092,12 +1093,13 @@ const char *ruby_fill_thread_id_string(rb_nativethread_id_t thid, rb_thread_id_s
void
rb_vmdebug_stack_dump_all_threads(void)
{
rb_vm_t *vm = GET_VM();
rb_thread_t *th = NULL;
rb_ractor_t *r = GET_RACTOR();
list_for_each(&vm->living_threads, th, vmlt_node) {
// TODO: now it only shows current ractor
list_for_each(&r->threads.set, th, lt_node) {
#ifdef NON_SCALAR_THREAD_ID
rb_thread_id_string_t buf;
rb_thread_id_string_t buf;
ruby_fill_thread_id_string(th->thread_id, buf);
fprintf(stderr, "th: %p, native_id: %s\n", th, buf);
#else


@ -985,7 +985,13 @@ vm_get_ev_const(rb_execution_context_t *ec, VALUE orig_klass, ID id, bool allow_
if (is_defined) {
return 1;
}
else {
else {
if (UNLIKELY(!rb_ractor_main_p())) {
if (!rb_ractor_shareable_p(val)) {
rb_raise(rb_eNameError,
"can not access non-sharable objects in constant %"PRIsVALUE"::%s by non-main ractor.", rb_class_path(klass), rb_id2name(id));
}
}
return val;
}
}
@ -1084,7 +1090,7 @@ vm_getivar(VALUE obj, ID id, IVC ic, const struct rb_callcache *cc, int is_attr)
else if (FL_TEST_RAW(obj, FL_EXIVAR)) {
struct gen_ivtbl *ivtbl;
if (LIKELY(st_lookup(rb_ivar_generic_ivtbl(), (st_data_t)obj, (st_data_t *)&ivtbl)) &&
if (LIKELY(st_lookup(rb_ivar_generic_ivtbl(obj), (st_data_t)obj, (st_data_t *)&ivtbl)) &&
LIKELY(index < ivtbl->numiv)) {
val = ivtbl->ivptr[index];
}
@ -1106,8 +1112,7 @@ vm_getivar(VALUE obj, ID id, IVC ic, const struct rb_callcache *cc, int is_attr)
}
else if (FL_TEST_RAW(obj, FL_EXIVAR)) {
struct gen_ivtbl *ivtbl;
if (LIKELY(st_lookup(rb_ivar_generic_ivtbl(), (st_data_t)obj, (st_data_t *)&ivtbl))) {
if (LIKELY(st_lookup(rb_ivar_generic_ivtbl(obj), (st_data_t)obj, (st_data_t *)&ivtbl))) {
numiv = ivtbl->numiv;
ivptr = ivtbl->ivptr;
iv_index_tbl = RCLASS_IV_INDEX_TBL(rb_obj_class(obj));
@ -1634,26 +1639,30 @@ vm_search_cc(VALUE klass, const struct rb_callinfo *ci)
MJIT_FUNC_EXPORTED void
rb_vm_search_method_slowpath(VALUE cd_owner, struct rb_call_data *cd, VALUE klass)
{
const struct rb_callcache *cc = vm_search_cc(klass, cd->ci);
RB_VM_LOCK_ENTER();
{
const struct rb_callcache *cc = vm_search_cc(klass, cd->ci);
VM_ASSERT(cc);
VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
VM_ASSERT(cc);
VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
if (! cd_owner) {
cd->cc = cc;
}
else if (cc == &vm_empty_cc) {
cd->cc = cc;
}
else {
VM_ASSERT(vm_cc_markable(cc));
RB_OBJ_WRITE(cd_owner, &cd->cc, cc);
}
if (! cd_owner) {
cd->cc = cc;
}
else if (cc == &vm_empty_cc) {
cd->cc = cc;
}
else {
VM_ASSERT(vm_cc_markable(cc));
RB_OBJ_WRITE(cd_owner, &cd->cc, cc);
}
VM_ASSERT(cc == vm_cc_empty() || cc->klass == klass);
VM_ASSERT(cc == vm_cc_empty() || callable_method_entry_p(vm_cc_cme(cc)));
VM_ASSERT(cc == vm_cc_empty() || !METHOD_ENTRY_INVALIDATED(vm_cc_cme(cc)));
VM_ASSERT(cc == vm_cc_empty() || vm_cc_cme(cc)->called_id == vm_ci_mid(cd->ci));
VM_ASSERT(cc == vm_cc_empty() || cc->klass == klass);
VM_ASSERT(cc == vm_cc_empty() || callable_method_entry_p(vm_cc_cme(cc)));
VM_ASSERT(cc == vm_cc_empty() || !METHOD_ENTRY_INVALIDATED(vm_cc_cme(cc)));
VM_ASSERT(cc == vm_cc_empty() || vm_cc_cme(cc)->called_id == vm_ci_mid(cd->ci));
}
RB_VM_LOCK_LEAVE();
}
#endif
@ -4297,7 +4306,8 @@ vm_opt_newarray_min(rb_num_t num, const VALUE *ptr)
static int
vm_ic_hit_p(IC ic, const VALUE *reg_ep)
{
if (ic->ic_serial == GET_GLOBAL_CONSTANT_STATE()) {
if (ic->ic_serial == GET_GLOBAL_CONSTANT_STATE() &&
rb_ractor_main_p()) {
return (ic->ic_cref == NULL || // no need to check CREF
ic->ic_cref == vm_get_cref(reg_ep));
}
@ -5023,6 +5033,7 @@ Init_vm_stack_canary(void)
{
/* This has to be called _after_ our PRNG is properly set up. */
int n = ruby_fill_random_bytes(&vm_stack_canary, sizeof vm_stack_canary, false);
vm_stack_canary |= 0x01; // valid VALUE (Fixnum)
vm_stack_canary_was_born = true;
VM_ASSERT(n == 0);


@ -93,7 +93,7 @@ enum vm_regan_acttype {
#define SET_SP(x) (VM_REG_SP = (COLLECT_USAGE_REGISTER_HELPER(SP, SET, (x))))
#define INC_SP(x) (VM_REG_SP += (COLLECT_USAGE_REGISTER_HELPER(SP, SET, (x))))
#define DEC_SP(x) (VM_REG_SP -= (COLLECT_USAGE_REGISTER_HELPER(SP, SET, (x))))
#define SET_SV(x) (*GET_SP() = (x))
#define SET_SV(x) (*GET_SP() = rb_ractor_confirm_belonging(x))
/* set current stack value as x */
/* instruction sequence C struct */
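With this change every value pushed onto the VM stack via SET_SV passes through rb_ractor_confirm_belonging. A hedged sketch of what such a belonging check conceptually does when RACTOR_CHECK_MODE is enabled; the helpers ractor_id_of() and current_ractor_id() are illustrative assumptions, and the real check is defined with the ractor sources in this commit, not in this hunk.

static inline VALUE
confirm_belonging_sketch(VALUE obj)
{
#if RACTOR_CHECK_MODE
    if (!SPECIAL_CONST_P(obj) &&
        ractor_id_of(obj) != 0 &&                  /* 0: shareable or untagged */
        ractor_id_of(obj) != current_ractor_id()) {
        rb_bug("object belongs to another ractor");
    }
#endif
    return obj; /* SET_SV stores the returned value unchanged */
}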

vm_sync.c (new file, 250 lines)

@ -0,0 +1,250 @@
#include "vm_core.h"
#include "vm_sync.h"
#include "ractor.h"
#include "vm_debug.h"
#include "gc.h"
static bool vm_barrier_finish_p(rb_vm_t *vm);
static bool
vm_locked(rb_vm_t *vm)
{
return vm->ractor.sync.lock_owner == GET_RACTOR();
}
#if VM_CHECK_MODE > 0
void
ASSERT_vm_locking(void)
{
if (rb_multi_ractor_p()) {
rb_vm_t *vm = GET_VM();
VM_ASSERT(vm_locked(vm));
}
}
#endif
#if VM_CHECK_MODE > 0
void
ASSERT_vm_unlocking(void)
{
if (rb_multi_ractor_p()) {
rb_vm_t *vm = GET_VM();
VM_ASSERT(!vm_locked(vm));
}
}
#endif
bool
rb_vm_locked_p(void)
{
return vm_locked(GET_VM());
}
static void
vm_lock_enter(rb_vm_t *vm, bool locked, unsigned int *lev APPEND_LOCATION_ARGS)
{
if (locked) {
ASSERT_vm_locking();
}
else {
rb_ractor_t *cr = GET_RACTOR();
#if RACTOR_CHECK_MODE
// locking ractor and acquire VM lock will cause deadlock
VM_ASSERT(cr->locked_by != cr->self);
#endif
// lock
rb_native_mutex_lock(&vm->ractor.sync.lock);
VM_ASSERT(vm->ractor.sync.lock_owner == NULL);
vm->ractor.sync.lock_owner = cr;
// barrier
while (vm->ractor.sync.barrier_waiting) {
unsigned int barrier_cnt = vm->ractor.sync.barrier_cnt;
rb_thread_t *th = GET_THREAD();
bool running;
RB_GC_SAVE_MACHINE_CONTEXT(th);
if (rb_ractor_status_p(cr, ractor_running)) {
rb_vm_ractor_blocking_cnt_inc(vm, cr, __FILE__, __LINE__);
running = true;
}
else {
running = false;
}
VM_ASSERT(rb_ractor_status_p(cr, ractor_blocking));
if (vm_barrier_finish_p(vm)) {
RUBY_DEBUG_LOG("wakeup barrier owner", 0);
rb_native_cond_signal(&vm->ractor.sync.barrier_cond);
}
else {
RUBY_DEBUG_LOG("wait for barrier finish", 0);
}
// wait for restart
while (barrier_cnt == vm->ractor.sync.barrier_cnt) {
vm->ractor.sync.lock_owner = NULL;
rb_native_cond_wait(&cr->barrier_wait_cond, &vm->ractor.sync.lock);
VM_ASSERT(vm->ractor.sync.lock_owner == NULL);
vm->ractor.sync.lock_owner = cr;
}
RUBY_DEBUG_LOG("barrier is released. Acquire vm_lock", 0);
if (running) {
rb_vm_ractor_blocking_cnt_dec(vm, cr, __FILE__, __LINE__);
}
}
VM_ASSERT(vm->ractor.sync.lock_rec == 0);
VM_ASSERT(vm->ractor.sync.lock_owner == cr);
}
vm->ractor.sync.lock_rec++;
*lev = vm->ractor.sync.lock_rec;
RUBY_DEBUG_LOG2(file, line, "rec:%u owner:%d", vm->ractor.sync.lock_rec, rb_ractor_id(vm->ractor.sync.lock_owner));
}
static void
vm_lock_leave(rb_vm_t *vm, unsigned int *lev APPEND_LOCATION_ARGS)
{
RUBY_DEBUG_LOG2(file, line, "rec:%u owner:%d", vm->ractor.sync.lock_rec, rb_ractor_id(vm->ractor.sync.lock_owner));
ASSERT_vm_locking();
VM_ASSERT(vm->ractor.sync.lock_rec > 0);
VM_ASSERT(vm->ractor.sync.lock_rec == *lev);
vm->ractor.sync.lock_rec--;
if (vm->ractor.sync.lock_rec == 0) {
vm->ractor.sync.lock_owner = NULL;
rb_native_mutex_unlock(&vm->ractor.sync.lock);
}
}
void
rb_vm_lock_enter_body(unsigned int *lev APPEND_LOCATION_ARGS)
{
rb_vm_t *vm = GET_VM();
vm_lock_enter(vm, vm_locked(vm), lev APPEND_LOCATION_PARAMS);
}
void
rb_vm_lock_leave_body(unsigned int *lev APPEND_LOCATION_ARGS)
{
vm_lock_leave(GET_VM(), lev APPEND_LOCATION_PARAMS);
}
void
rb_vm_lock_body(LOCATION_ARGS)
{
rb_vm_t *vm = GET_VM();
ASSERT_vm_unlocking();
vm_lock_enter(vm, false, &vm->ractor.sync.lock_rec APPEND_LOCATION_PARAMS);
}
void
rb_vm_unlock_body(LOCATION_ARGS)
{
rb_vm_t *vm = GET_VM();
ASSERT_vm_locking();
VM_ASSERT(vm->ractor.sync.lock_rec == 1);
vm_lock_leave(vm, &vm->ractor.sync.lock_rec APPEND_LOCATION_PARAMS);
}
static void
vm_cond_wait(rb_vm_t *vm, rb_nativethread_cond_t *cond, unsigned long msec)
{
ASSERT_vm_locking();
unsigned int lock_rec = vm->ractor.sync.lock_rec;
rb_ractor_t *cr = vm->ractor.sync.lock_owner;
vm->ractor.sync.lock_rec = 0;
vm->ractor.sync.lock_owner = NULL;
if (msec > 0) {
rb_native_cond_timedwait(cond, &vm->ractor.sync.lock, msec);
}
else {
rb_native_cond_wait(cond, &vm->ractor.sync.lock);
}
vm->ractor.sync.lock_rec = lock_rec;
vm->ractor.sync.lock_owner = cr;
}
void
rb_vm_cond_wait(rb_vm_t *vm, rb_nativethread_cond_t *cond)
{
vm_cond_wait(vm, cond, 0);
}
void
rb_vm_cond_timedwait(rb_vm_t *vm, rb_nativethread_cond_t *cond, unsigned long msec)
{
vm_cond_wait(vm, cond, msec);
}
static bool
vm_barrier_finish_p(rb_vm_t *vm)
{
RUBY_DEBUG_LOG("cnt:%u living:%u blocking:%u",
vm->ractor.sync.barrier_cnt,
vm->ractor.cnt,
vm->ractor.blocking_cnt);
VM_ASSERT(vm->ractor.blocking_cnt <= vm->ractor.cnt);
return vm->ractor.blocking_cnt == vm->ractor.cnt;
}
void
rb_vm_barrier(void)
{
if (!rb_multi_ractor_p()) {
// no other ractors
return;
}
else {
rb_vm_t *vm = GET_VM();
VM_ASSERT(vm->ractor.sync.barrier_waiting == false);
ASSERT_vm_locking();
rb_ractor_t *cr = vm->ractor.sync.lock_owner;
VM_ASSERT(cr == GET_RACTOR());
VM_ASSERT(rb_ractor_status_p(cr, ractor_running));
vm->ractor.sync.barrier_waiting = true;
RUBY_DEBUG_LOG("barrier start. cnt:%u living:%u blocking:%u",
vm->ractor.sync.barrier_cnt,
vm->ractor.cnt,
vm->ractor.blocking_cnt);
rb_vm_ractor_blocking_cnt_inc(vm, cr, __FILE__, __LINE__);
// send signal
rb_ractor_t *r;
list_for_each(&vm->ractor.set, r, vmlr_node) {
if (r != cr) {
rb_ractor_vm_barrier_interrupt_running_thread(r);
}
}
// wait
while (!vm_barrier_finish_p(vm)) {
rb_vm_cond_wait(vm, &vm->ractor.sync.barrier_cond);
}
RUBY_DEBUG_LOG("cnt:%u barrier success", vm->ractor.sync.barrier_cnt);
rb_vm_ractor_blocking_cnt_dec(vm, cr, __FILE__, __LINE__);
vm->ractor.sync.barrier_waiting = false;
vm->ractor.sync.barrier_cnt++;
list_for_each(&vm->ractor.set, r, vmlr_node) {
rb_native_cond_signal(&r->barrier_wait_cond);
}
}
}
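A hedged usage sketch for the new barrier, derived only from the assertions in rb_vm_barrier() above (it must run while the VM lock is held) and from the RB_VM_LOCK_ENTER/LEAVE pattern already used in vm_insnhelper.c. The helper stop_the_world_work() is a hypothetical placeholder.

#include "vm_sync.h"

static void
do_global_work_sketch(void)
{
    RB_VM_LOCK_ENTER();
    {
        /* Returns once every other ractor has reached a blocking point. */
        rb_vm_barrier();

        /* The waiting loop in vm_lock_enter() suggests other ractors cannot
         * proceed until this lock is released, so VM-global state can be
         * touched here. */
        stop_the_world_work();
    }
    RB_VM_LOCK_LEAVE();
}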

vm_sync.h (new file, 96 lines)

@ -0,0 +1,96 @@
#ifndef RUBY_VM_SYNC_H
#define RUBY_VM_SYNC_H
#include "vm_core.h"
#include "vm_debug.h"
#if USE_RUBY_DEBUG_LOG
#define LOCATION_ARGS const char *file, int line
#define LOCATION_PARAMS file, line
#define APPEND_LOCATION_ARGS , const char *file, int line
#define APPEND_LOCATION_PARAMS , file, line
#else
#define LOCATION_ARGS void
#define LOCATION_PARAMS
#define APPEND_LOCATION_ARGS
#define APPEND_LOCATION_PARAMS
#endif
bool rb_vm_locked_p(void);
void rb_vm_lock_body(LOCATION_ARGS);
void rb_vm_unlock_body(LOCATION_ARGS);
void rb_vm_lock_enter_body(unsigned int *lev APPEND_LOCATION_ARGS);
void rb_vm_lock_leave_body(unsigned int *lev APPEND_LOCATION_ARGS);
void rb_vm_barrier(void);
void rb_vm_cond_wait(rb_vm_t *vm, rb_nativethread_cond_t *cond);
void rb_vm_cond_timedwait(rb_vm_t *vm, rb_nativethread_cond_t *cond, unsigned long msec);
extern bool ruby_multi_ractor;
static inline bool
rb_multi_ractor_p(void)
{
if (LIKELY(!ruby_multi_ractor)) {
// 0 on boot time.
VM_ASSERT(GET_VM()->ractor.cnt <= 1);
return false;
}
else {
// multi-ractor mode can run ractor.cnt == 1
return true;
}
}
static inline void
rb_vm_lock(const char *file, int line)
{
if (rb_multi_ractor_p()) {
rb_vm_lock_body(LOCATION_PARAMS);
}
}
static inline void
rb_vm_unlock(const char *file, int line)
{
if (rb_multi_ractor_p()) {
rb_vm_unlock_body(LOCATION_PARAMS);
}
}
static inline void
rb_vm_lock_enter(unsigned int *lev, const char *file, int line)
{
if (rb_multi_ractor_p()) {
rb_vm_lock_enter_body(lev APPEND_LOCATION_PARAMS);
}
}
static inline void
rb_vm_lock_leave(unsigned int *lev, const char *file, int line)
{
if (rb_multi_ractor_p()) {
rb_vm_lock_leave_body(lev APPEND_LOCATION_PARAMS);
}
}
#define RB_VM_LOCKED_P() rb_vm_locked_p()
#define RB_VM_LOCK() rb_vm_lock(__FILE__, __LINE__)
#define RB_VM_UNLOCK() rb_vm_unlock(__FILE__, __LINE__)
#define RB_VM_LOCK_ENTER_LEV(levp) rb_vm_lock_enter(levp, __FILE__, __LINE__);
#define RB_VM_LOCK_LEAVE_LEV(levp) rb_vm_lock_leave(levp, __FILE__, __LINE__);
#define RB_VM_LOCK_ENTER() { unsigned int _lev; RB_VM_LOCK_ENTER_LEV(&_lev);
#define RB_VM_LOCK_LEAVE() RB_VM_LOCK_LEAVE_LEV(&_lev); }
#if VM_CHECK_MODE > 0
void ASSERT_vm_locking(void);
void ASSERT_vm_unlocking(void);
#else
#define ASSERT_vm_locking()
#define ASSERT_vm_unlocking()
#endif
#endif // RUBY_VM_SYNC_H
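The RB_VM_LOCK_ENTER()/RB_VM_LOCK_LEAVE() pair above deliberately opens and closes a brace, so an unmatched pair fails to compile, and the hidden _lev lets vm_lock_leave() assert the recursion level it restores. A rough, illustrative expansion (debug-log arguments elided), not code from this header:

{
    unsigned int _lev;
    rb_vm_lock_enter(&_lev, __FILE__, __LINE__);   /* no-op unless multi-ractor */

    /* ... critical section ... */

    rb_vm_lock_leave(&_lev, __FILE__, __LINE__);   /* asserts lock_rec == _lev */
}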


@ -1653,7 +1653,8 @@ rb_workqueue_register(unsigned flags, rb_postponed_job_func_t func, void *data)
list_add_tail(&vm->workqueue, &wq_job->jnode);
rb_nativethread_lock_unlock(&vm->workqueue_lock);
RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(GET_EC());
// TODO: current implementation affects only main ractor
RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(rb_vm_main_ractor_ec(vm));
return TRUE;
}
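With this change the postponed-job interrupt raised by rb_workqueue_register is delivered to the main ractor's EC rather than the caller's. A hedged usage sketch; my_async_job and my_data are hypothetical, and the sketch assumes the internal declarations of rb_workqueue_register and rb_postponed_job_func_t (ruby/debug.h) are visible.

static void
my_async_job(void *data)
{
    /* Runs later on the main ractor, since the interrupt is now set on
     * rb_vm_main_ractor_ec(vm) as shown in the hunk above. */
}

static void
enqueue_example(void *my_data)
{
    if (!rb_workqueue_register(0, my_async_job, my_data)) {
        /* registration failed (allocation); fall back or report */
    }
}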