/************************************************

  enumerator.c - provides Enumerator class

  $Author$

  Copyright (C) 2001-2003 Akinori MUSHA

  $Idaemons: /home/cvs/rb/enumerator/enumerator.c,v 1.1.1.1 2001/07/15 10:12:48 knu Exp $
  $RoughId: enumerator.c,v 1.6 2003/07/27 11:03:24 nobu Exp $
  $Id$

************************************************/

#include "ruby/internal/config.h"

#ifdef HAVE_FLOAT_H
#include <float.h>
#endif

#include "id.h"
#include "internal.h"
#include "internal/class.h"
#include "internal/enumerator.h"
#include "internal/error.h"
#include "internal/hash.h"
#include "internal/imemo.h"
#include "internal/numeric.h"
#include "internal/range.h"
#include "internal/rational.h"
#include "ruby/ruby.h"

/*
 * Document-class: Enumerator
 *
 * A class which allows both internal and external iteration.
 *
 * An Enumerator can be created by the following methods.
 * - Object#to_enum
 * - Object#enum_for
 * - Enumerator.new
 *
 * Most methods have two forms: a block form where the contents
 * are evaluated for each item in the enumeration, and a non-block form
 * which returns a new Enumerator wrapping the iteration.
 *
 *   enumerator = %w(one two three).each
 *   puts enumerator.class # => Enumerator
 *
 *   enumerator.each_with_object("foo") do |item, obj|
 *     puts "#{obj}: #{item}"
 *   end
 *
 *   # foo: one
 *   # foo: two
 *   # foo: three
 *
 *   enum_with_obj = enumerator.each_with_object("foo")
 *   puts enum_with_obj.class # => Enumerator
 *
 *   enum_with_obj.each do |item, obj|
 *     puts "#{obj}: #{item}"
 *   end
 *
 *   # foo: one
 *   # foo: two
 *   # foo: three
 *
 * This allows you to chain Enumerators together.  For example, you
 * can map a list's elements to strings containing the index
 * and the element as a string via:
 *
 *   puts %w[foo bar baz].map.with_index { |w, i| "#{i}:#{w}" }
 *   # => ["0:foo", "1:bar", "2:baz"]
 *
 * == External Iteration
 *
 * An Enumerator can also be used as an external iterator.
 * For example, Enumerator#next returns the next value of the iterator
 * or raises StopIteration if the Enumerator is at the end.
 *
 *   e = [1,2,3].each   # returns an enumerator object.
 *   puts e.next   # => 1
 *   puts e.next   # => 2
 *   puts e.next   # => 3
 *   puts e.next   # raises StopIteration
 *
 * +next+, +next_values+, +peek+, and +peek_values+ are the only methods
 * which use external iteration (and Array#zip(Enumerable-not-Array) which uses +next+ internally).
 *
 * These methods do not affect other internal enumeration methods,
 * unless the underlying iteration method itself has side-effects, e.g. IO#each_line.
 *
 * FrozenError will be raised if these methods are called against a frozen enumerator.
 * Since +rewind+ and +feed+ also change state for external iteration,
 * these methods may raise FrozenError too.
 *
 * External iteration differs *significantly* from internal iteration
 * due to using a Fiber:
 * - The Fiber adds some overhead compared to internal enumeration.
 * - The stacktrace will only include the stack from the Enumerator, not above.
 * - Fiber-local variables are *not* inherited inside the Enumerator Fiber,
 *   which instead starts with no Fiber-local variables.
 * - Fiber storage variables *are* inherited and are designed
 *   to handle Enumerator Fibers. Assigning to a Fiber storage variable
 *   only affects the current Fiber, so if you want to change state
 *   in the caller Fiber of the Enumerator Fiber, you need to use an
 *   extra indirection (e.g., use some object in the Fiber storage
 *   variable and mutate some ivar of it).
 *
 * Concretely:
 *
 *   Thread.current[:fiber_local] = 1
 *   Fiber[:storage_var] = 1
 *   e = Enumerator.new do |y|
 *     p Thread.current[:fiber_local] # for external iteration: nil, for internal iteration: 1
 *     p Fiber[:storage_var] # => 1, inherited
 *     Fiber[:storage_var] += 1
 *     y << 42
 *   end
 *
 *   p e.next # => 42
 *   p Fiber[:storage_var] # => 1 (it ran in a different Fiber)
 *
 *   e.each { p _1 }
 *   p Fiber[:storage_var] # => 2 (it ran in the same Fiber/"stack" as the current Fiber)
 *
 * == Convert External Iteration to Internal Iteration
 *
 * You can use an external iterator to implement an internal iterator as follows:
 *
 *   def ext_each(e)
 *     while true
 *       begin
 *         vs = e.next_values
 *       rescue StopIteration
 *         return $!.result
 *       end
 *       y = yield(*vs)
 *       e.feed y
 *     end
 *   end
 *
 *   o = Object.new
 *
 *   def o.each
 *     puts yield
 *     puts yield(1)
 *     puts yield(1, 2)
 *     3
 *   end
 *
 *   # use o.each as an internal iterator directly.
 *   puts o.each {|*x| puts x; [:b, *x] }
 *   # => [], [:b], [1], [:b, 1], [1, 2], [:b, 1, 2], 3
 *
 *   # convert o.each to an external iterator for
 *   # implementing an internal iterator.
 *   puts ext_each(o.to_enum) {|*x| puts x; [:b, *x] }
 *   # => [], [:b], [1], [:b, 1], [1, 2], [:b, 1, 2], 3
 *
 */
VALUE rb_cEnumerator;
static VALUE rb_cLazy;
static ID id_rewind, id_new, id_to_enum, id_each_entry;
static ID id_next, id_result, id_receiver, id_arguments, id_memo, id_method, id_force;
static ID id_begin, id_end, id_step, id_exclude_end;
static VALUE sym_each, sym_cycle, sym_yield;

static VALUE lazy_use_super_method;

extern ID ruby_static_id_cause;

#define id_call idCall
#define id_cause ruby_static_id_cause
#define id_each idEach
#define id_eqq idEqq
#define id_initialize idInitialize
#define id_size idSize

VALUE rb_eStopIteration;

struct enumerator {
    VALUE obj;
    ID meth;
    VALUE args;
    VALUE fib;
    VALUE dst;
    VALUE lookahead;
    VALUE feedvalue;
    VALUE stop_exc;
    VALUE size;
    VALUE procs;
    rb_enumerator_size_func *size_fn;
    int kw_splat;
};

RUBY_REFERENCES(enumerator_refs) = {
    RUBY_REF_EDGE(struct enumerator, obj),
    RUBY_REF_EDGE(struct enumerator, args),
    RUBY_REF_EDGE(struct enumerator, fib),
    RUBY_REF_EDGE(struct enumerator, dst),
    RUBY_REF_EDGE(struct enumerator, lookahead),
    RUBY_REF_EDGE(struct enumerator, feedvalue),
    RUBY_REF_EDGE(struct enumerator, stop_exc),
    RUBY_REF_EDGE(struct enumerator, size),
    RUBY_REF_EDGE(struct enumerator, procs),
    RUBY_REF_END
};

static VALUE rb_cGenerator, rb_cYielder, rb_cEnumProducer;

struct generator {
    VALUE proc;
    VALUE obj;
};

struct yielder {
    VALUE proc;
};

struct producer {
    VALUE init;
    VALUE proc;
};

typedef struct MEMO *lazyenum_proc_func(VALUE, struct MEMO *, VALUE, long);
typedef VALUE lazyenum_size_func(VALUE, VALUE);
typedef int lazyenum_precheck_func(VALUE proc_entry);
typedef struct {
    lazyenum_proc_func *proc;
    lazyenum_size_func *size;
    lazyenum_precheck_func *precheck;
} lazyenum_funcs;

struct proc_entry {
    VALUE proc;
    VALUE memo;
    const lazyenum_funcs *fn;
};

static VALUE generator_allocate(VALUE klass);
static VALUE generator_init(VALUE obj, VALUE proc);

static VALUE rb_cEnumChain;

struct enum_chain {
    VALUE enums;
    long pos;
};

static VALUE rb_cEnumProduct;

struct enum_product {
    VALUE enums;
};

VALUE rb_cArithSeq;

static const rb_data_type_t enumerator_data_type = {
    "enumerator",
    {
        RUBY_REFS_LIST_PTR(enumerator_refs),
        RUBY_TYPED_DEFAULT_FREE,
        NULL, // Nothing allocated externally, so don't need a memsize function
        NULL,
    },
    0, NULL, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_DECL_MARKING | RUBY_TYPED_EMBEDDABLE
};

static struct enumerator *
enumerator_ptr(VALUE obj)
{
    struct enumerator *ptr;

    TypedData_Get_Struct(obj, struct enumerator, &enumerator_data_type, ptr);
    if (!ptr || UNDEF_P(ptr->obj)) {
        rb_raise(rb_eArgError, "uninitialized enumerator");
    }
    return ptr;
}

static void
proc_entry_mark(void *p)
{
    struct proc_entry *ptr = p;
    rb_gc_mark_movable(ptr->proc);
    rb_gc_mark_movable(ptr->memo);
}

static void
proc_entry_compact(void *p)
{
    struct proc_entry *ptr = p;
    ptr->proc = rb_gc_location(ptr->proc);
    ptr->memo = rb_gc_location(ptr->memo);
}

static const rb_data_type_t proc_entry_data_type = {
    "proc_entry",
    {
        proc_entry_mark,
        RUBY_TYPED_DEFAULT_FREE,
        NULL, // Nothing allocated externally, so don't need a memsize function
        proc_entry_compact,
    },
    0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_EMBEDDABLE
};

static struct proc_entry *
proc_entry_ptr(VALUE proc_entry)
{
    struct proc_entry *ptr;

    TypedData_Get_Struct(proc_entry, struct proc_entry, &proc_entry_data_type, ptr);

    return ptr;
}

/*
 * call-seq:
 *   obj.to_enum(method = :each, *args)                 -> enum
 *   obj.enum_for(method = :each, *args)                -> enum
 *   obj.to_enum(method = :each, *args) {|*args| block} -> enum
 *   obj.enum_for(method = :each, *args){|*args| block} -> enum
 *
 * Creates a new Enumerator which will enumerate by calling +method+ on
 * +obj+, passing +args+ if any.  What was _yielded_ by the method becomes
 * the values of the enumerator.
 *
 * If a block is given, it will be used to calculate the size of
 * the enumerator without the need to iterate it (see Enumerator#size).
 *
 * === Examples
 *
 *   str = "xyz"
 *
 *   enum = str.enum_for(:each_byte)
 *   enum.each { |b| puts b }
 *   # => 120
 *   # => 121
 *   # => 122
 *
 *   # protect an array from being modified by some_method
 *   a = [1, 2, 3]
 *   some_method(a.to_enum)
 *
 *   # String#split in block form is more memory-efficient:
 *   very_large_string.split("|") { |chunk| return chunk if chunk.include?('DATE') }
 *   # This could be rewritten more idiomatically with to_enum:
 *   very_large_string.to_enum(:split, "|").lazy.grep(/DATE/).first
 *
 * It is typical to call to_enum when defining methods for
 * a generic Enumerable, in case no block is passed.
 *
 * Here is such an example, with parameter passing and a sizing block:
 *
 *   module Enumerable
 *     # a generic method to repeat the values of any enumerable
 *     def repeat(n)
 *       raise ArgumentError, "#{n} is negative!" if n < 0
 *       unless block_given?
 *         return to_enum(__method__, n) do # __method__ is :repeat here
 *           sz = size     # Call size and multiply by n...
 *           sz * n if sz  # but return nil if size itself is nil
 *         end
 *       end
 *       each do |*val|
 *         n.times { yield *val }
 *       end
 *     end
 *   end
 *
 *   %i[hello world].repeat(2) { |w| puts w }
 *     # => Prints 'hello', 'hello', 'world', 'world'
 *   enum = (1..14).repeat(3)
 *     # => returns an Enumerator when called without a block
 *   enum.first(4) # => [1, 1, 1, 2]
 *   enum.size # => 42
 */
static VALUE
obj_to_enum(int argc, VALUE *argv, VALUE obj)
{
    VALUE enumerator, meth = sym_each;

    if (argc > 0) {
        --argc;
        meth = *argv++;
    }
    enumerator = rb_enumeratorize_with_size(obj, meth, argc, argv, 0);
    if (rb_block_given_p()) {
        RB_OBJ_WRITE(enumerator, &enumerator_ptr(enumerator)->size, rb_block_proc());
    }
    return enumerator;
}

static VALUE
enumerator_allocate(VALUE klass)
{
    struct enumerator *ptr;
    VALUE enum_obj;

    enum_obj = TypedData_Make_Struct(klass, struct enumerator, &enumerator_data_type, ptr);
    ptr->obj = Qundef;

    return enum_obj;
}

static VALUE
enumerator_init(VALUE enum_obj, VALUE obj, VALUE meth, int argc, const VALUE *argv, rb_enumerator_size_func *size_fn, VALUE size, int kw_splat)
{
    struct enumerator *ptr;

    rb_check_frozen(enum_obj);
    TypedData_Get_Struct(enum_obj, struct enumerator, &enumerator_data_type, ptr);

    if (!ptr) {
        rb_raise(rb_eArgError, "unallocated enumerator");
    }

    RB_OBJ_WRITE(enum_obj, &ptr->obj, obj);
    ptr->meth = rb_to_id(meth);
    if (argc) RB_OBJ_WRITE(enum_obj, &ptr->args, rb_ary_new4(argc, argv));
    ptr->fib = 0;
    ptr->dst = Qnil;
    ptr->lookahead = Qundef;
    ptr->feedvalue = Qundef;
    ptr->stop_exc = Qfalse;
    RB_OBJ_WRITE(enum_obj, &ptr->size, size);
    ptr->size_fn = size_fn;
    ptr->kw_splat = kw_splat;

    return enum_obj;
}

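/*
 * Normalize the size argument accepted by Enumerator.new: nil, a callable
 * object, and Float::INFINITY pass through unchanged; anything else must
 * convert to an Integer.
 */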
static VALUE
convert_to_feasible_size_value(VALUE obj)
{
    if (NIL_P(obj)) {
        return obj;
    }
    else if (rb_respond_to(obj, id_call)) {
        return obj;
    }
    else if (RB_FLOAT_TYPE_P(obj) && RFLOAT_VALUE(obj) == HUGE_VAL) {
        return obj;
    }
    else {
        return rb_to_int(obj);
    }
}

/*
 * call-seq:
 *   Enumerator.new(size = nil) { |yielder| ... }
 *
 * Creates a new Enumerator object, which can be used as an
 * Enumerable.
 *
 * Iteration is defined by the given block, in
 * which a "yielder" object, given as block parameter, can be used to
 * yield a value by calling the +yield+ method (aliased as <code><<</code>):
 *
 *   fib = Enumerator.new do |y|
 *     a = b = 1
 *     loop do
 *       y << a
 *       a, b = b, a + b
 *     end
 *   end
 *
 *   fib.take(10) # => [1, 1, 2, 3, 5, 8, 13, 21, 34, 55]
 *
 * The optional parameter can be used to specify how to calculate the size
 * in a lazy fashion (see Enumerator#size).  It can either be a value or
 * a callable object.
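 *
 * For example, a sketch of an infinite enumerator whose size is declared
 * up front (the names here are purely illustrative):
 *
 *   squares = Enumerator.new(Float::INFINITY) do |y|
 *     n = 0
 *     loop { y << n * n; n += 1 }
 *   end
 *
 *   squares.first(5) # => [0, 1, 4, 9, 16]
 *   squares.size     # => Float::INFINITY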
 */
static VALUE
enumerator_initialize(int argc, VALUE *argv, VALUE obj)
{
    VALUE iter = rb_block_proc();
    VALUE recv = generator_init(generator_allocate(rb_cGenerator), iter);
    VALUE arg0 = rb_check_arity(argc, 0, 1) ? argv[0] : Qnil;
    VALUE size = convert_to_feasible_size_value(arg0);

    return enumerator_init(obj, recv, sym_each, 0, 0, 0, size, false);
}

/* :nodoc: */
static VALUE
enumerator_init_copy(VALUE obj, VALUE orig)
{
    struct enumerator *ptr0, *ptr1;

    if (!OBJ_INIT_COPY(obj, orig)) return obj;
    ptr0 = enumerator_ptr(orig);
    if (ptr0->fib) {
        /* Fibers cannot be copied */
        rb_raise(rb_eTypeError, "can't copy execution context");
    }

    TypedData_Get_Struct(obj, struct enumerator, &enumerator_data_type, ptr1);

    if (!ptr1) {
        rb_raise(rb_eArgError, "unallocated enumerator");
    }

    RB_OBJ_WRITE(obj, &ptr1->obj, ptr0->obj);
    ptr1->meth = ptr0->meth;
    RB_OBJ_WRITE(obj, &ptr1->args, ptr0->args);
    ptr1->fib = 0;
    ptr1->lookahead = Qundef;
    ptr1->feedvalue = Qundef;
    RB_OBJ_WRITE(obj, &ptr1->size, ptr0->size);
    ptr1->size_fn = ptr0->size_fn;

    return obj;
}

/*
 * For backwards compatibility; use rb_enumeratorize_with_size
 */
VALUE
rb_enumeratorize(VALUE obj, VALUE meth, int argc, const VALUE *argv)
{
    return rb_enumeratorize_with_size(obj, meth, argc, argv, 0);
}

static VALUE lazy_to_enum_i(VALUE self, VALUE meth, int argc, const VALUE *argv, rb_enumerator_size_func *size_fn, int kw_splat);
static int lazy_precheck(VALUE procs);

VALUE
rb_enumeratorize_with_size_kw(VALUE obj, VALUE meth, int argc, const VALUE *argv, rb_enumerator_size_func *size_fn, int kw_splat)
{
    VALUE base_class = rb_cEnumerator;

    if (RTEST(rb_obj_is_kind_of(obj, rb_cLazy))) {
        base_class = rb_cLazy;
    }
    else if (RTEST(rb_obj_is_kind_of(obj, rb_cEnumChain))) {
        obj = enumerator_init(enumerator_allocate(rb_cEnumerator), obj, sym_each, 0, 0, 0, Qnil, false);
    }

    return enumerator_init(enumerator_allocate(base_class),
                           obj, meth, argc, argv, size_fn, Qnil, kw_splat);
}

VALUE
rb_enumeratorize_with_size(VALUE obj, VALUE meth, int argc, const VALUE *argv, rb_enumerator_size_func *size_fn)
{
    return rb_enumeratorize_with_size_kw(obj, meth, argc, argv, size_fn, rb_keyword_given_p());
}

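/*
 * Call the wrapped object's iteration method (e->meth) with the stored
 * arguments and keyword-splat flag.  +func+, when non-NULL, receives each
 * yielded value; enumerator_each passes NULL here so that the caller's
 * block receives the values instead.
 */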
static VALUE
enumerator_block_call(VALUE obj, rb_block_call_func *func, VALUE arg)
{
    int argc = 0;
    const VALUE *argv = 0;
    const struct enumerator *e = enumerator_ptr(obj);
    ID meth = e->meth;

    VALUE args = e->args;
    if (args) {
        argc = RARRAY_LENINT(args);
        argv = RARRAY_CONST_PTR(args);
    }

    VALUE ret = rb_block_call_kw(e->obj, meth, argc, argv, func, arg, e->kw_splat);

    RB_GC_GUARD(args);

    return ret;
}

/*
 * call-seq:
 *   enum.each { |elm| block }                    -> obj
 *   enum.each                                    -> enum
 *   enum.each(*appending_args) { |elm| block }   -> obj
 *   enum.each(*appending_args)                   -> an_enumerator
 *
 * Iterates over the block according to how this Enumerator was constructed.
 * If no block and no arguments are given, returns self.
 *
 * === Examples
 *
 *   "Hello, world!".scan(/\w+/)                     #=> ["Hello", "world"]
 *   "Hello, world!".to_enum(:scan, /\w+/).to_a      #=> ["Hello", "world"]
 *   "Hello, world!".to_enum(:scan).each(/\w+/).to_a #=> ["Hello", "world"]
 *
 *   obj = Object.new
 *
 *   def obj.each_arg(a, b=:b, *rest)
 *     yield a
 *     yield b
 *     yield rest
 *     :method_returned
 *   end
 *
 *   enum = obj.to_enum :each_arg, :a, :x
 *
 *   enum.each.to_a                  #=> [:a, :x, []]
 *   enum.each.equal?(enum)          #=> true
 *   enum.each { |elm| elm }         #=> :method_returned
 *
 *   enum.each(:y, :z).to_a          #=> [:a, :x, [:y, :z]]
 *   enum.each(:y, :z).equal?(enum)  #=> false
 *   enum.each(:y, :z) { |elm| elm } #=> :method_returned
 *
 */
static VALUE
enumerator_each(int argc, VALUE *argv, VALUE obj)
{
    struct enumerator *e = enumerator_ptr(obj);

    if (argc > 0) {
        VALUE args = (e = enumerator_ptr(obj = rb_obj_dup(obj)))->args;
        if (args) {
#if SIZEOF_INT < SIZEOF_LONG
            /* check int range overflow */
            rb_long2int(RARRAY_LEN(args) + argc);
#endif
            args = rb_ary_dup(args);
            rb_ary_cat(args, argv, argc);
        }
        else {
            args = rb_ary_new4(argc, argv);
        }
        RB_OBJ_WRITE(obj, &e->args, args);
        e->size = Qnil;
        e->size_fn = 0;
    }
    if (!rb_block_given_p()) return obj;

    if (!lazy_precheck(e->procs)) return Qnil;

    return enumerator_block_call(obj, 0, obj);
}

static VALUE
enumerator_with_index_i(RB_BLOCK_CALL_FUNC_ARGLIST(val, m))
{
    struct MEMO *memo = (struct MEMO *)m;
    VALUE idx = memo->v1;
    MEMO_V1_SET(memo, rb_int_succ(idx));

    if (argc <= 1)
        return rb_yield_values(2, val, idx);

    return rb_yield_values(2, rb_ary_new4(argc, argv), idx);
}

static VALUE
enumerator_size(VALUE obj);

static VALUE
enumerator_enum_size(VALUE obj, VALUE args, VALUE eobj)
{
    return enumerator_size(obj);
}

/*
 * call-seq:
 *   e.with_index(offset = 0) {|(*args), idx| ... }
 *   e.with_index(offset = 0)
 *
 * Iterates the given block for each element with an index, which
 * starts from +offset+.  If no block is given, returns a new Enumerator
 * that includes the index, starting from +offset+.
 *
 * +offset+:: the starting index to use
 *
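 * A minimal illustration (the offsets shown simply follow the behavior
 * described above):
 *
 *   e = %w[a b c].each.with_index(10)
 *   e.to_a #=> [["a", 10], ["b", 11], ["c", 12]]
 *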
 */
static VALUE
enumerator_with_index(int argc, VALUE *argv, VALUE obj)
{
    VALUE memo;

    rb_check_arity(argc, 0, 1);
    RETURN_SIZED_ENUMERATOR(obj, argc, argv, enumerator_enum_size);
    memo = (!argc || NIL_P(memo = argv[0])) ? INT2FIX(0) : rb_to_int(memo);
    return enumerator_block_call(obj, enumerator_with_index_i, (VALUE)MEMO_NEW(memo, 0, 0));
}

/*
 * call-seq:
 *   e.each_with_index {|(*args), idx| ... }
 *   e.each_with_index
 *
 * Same as Enumerator#with_index(0), i.e. there is no starting offset.
 *
 * If no block is given, a new Enumerator is returned that includes the index.
 *
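 * For illustration (equivalent to calling with_index with its default
 * offset):
 *
 *   %w[a b c].each.each_with_index.to_a #=> [["a", 0], ["b", 1], ["c", 2]]
 *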
 */
static VALUE
enumerator_each_with_index(VALUE obj)
{
    return enumerator_with_index(0, NULL, obj);
}

static VALUE
enumerator_with_object_i(RB_BLOCK_CALL_FUNC_ARGLIST(val, memo))
{
    if (argc <= 1)
        return rb_yield_values(2, val, memo);

    return rb_yield_values(2, rb_ary_new4(argc, argv), memo);
}

/*
 * call-seq:
 *   e.each_with_object(obj) {|(*args), obj| ... }
 *   e.each_with_object(obj)
 *   e.with_object(obj) {|(*args), obj| ... }
 *   e.with_object(obj)
 *
 * Iterates the given block for each element with an arbitrary object, +obj+,
 * and returns +obj+.
 *
 * If no block is given, returns a new Enumerator.
 *
 * === Example
 *
 *   to_three = Enumerator.new do |y|
 *     3.times do |x|
 *       y << x
 *     end
 *   end
 *
 *   to_three_with_string = to_three.with_object("foo")
 *   to_three_with_string.each do |x,string|
 *     puts "#{string}: #{x}"
 *   end
 *
 *   # => foo: 0
 *   # => foo: 1
 *   # => foo: 2
 */
static VALUE
enumerator_with_object(VALUE obj, VALUE memo)
{
    RETURN_SIZED_ENUMERATOR(obj, 1, &memo, enumerator_enum_size);
    enumerator_block_call(obj, enumerator_with_object_i, memo);

    return memo;
}

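/*
 * External iteration (Enumerator#next and friends) is driven by a Fiber:
 * next_i is the Fiber body and runs the underlying #each with next_ii as
 * the block, next_ii hands each set of yielded values back to the caller
 * via rb_fiber_yield (returning any Enumerator#feed value), next_init
 * (re)creates the Fiber, and get_next_values resumes it, turning the end
 * of iteration into a StopIteration exception.
 */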
static VALUE
next_ii(RB_BLOCK_CALL_FUNC_ARGLIST(i, obj))
{
    struct enumerator *e = enumerator_ptr(obj);
    VALUE feedvalue = Qnil;
    VALUE args = rb_ary_new4(argc, argv);
    rb_fiber_yield(1, &args);
    if (!UNDEF_P(e->feedvalue)) {
        feedvalue = e->feedvalue;
        e->feedvalue = Qundef;
    }
    return feedvalue;
}

static VALUE
next_i(RB_BLOCK_CALL_FUNC_ARGLIST(_, obj))
{
    struct enumerator *e = enumerator_ptr(obj);
    VALUE nil = Qnil;
    VALUE result;

    result = rb_block_call(obj, id_each, 0, 0, next_ii, obj);
    RB_OBJ_WRITE(obj, &e->stop_exc, rb_exc_new2(rb_eStopIteration, "iteration reached an end"));
    rb_ivar_set(e->stop_exc, id_result, result);
    return rb_fiber_yield(1, &nil);
}

static void
next_init(VALUE obj, struct enumerator *e)
{
    VALUE curr = rb_fiber_current();
    RB_OBJ_WRITE(obj, &e->dst, curr);
    RB_OBJ_WRITE(obj, &e->fib, rb_fiber_new(next_i, obj));
    e->lookahead = Qundef;
}

static VALUE
get_next_values(VALUE obj, struct enumerator *e)
{
    VALUE curr, vs;

    if (e->stop_exc) {
        VALUE exc = e->stop_exc;
        VALUE result = rb_attr_get(exc, id_result);
        VALUE mesg = rb_attr_get(exc, idMesg);
        if (!NIL_P(mesg)) mesg = rb_str_dup(mesg);
        VALUE stop_exc = rb_exc_new_str(rb_eStopIteration, mesg);
        rb_ivar_set(stop_exc, id_cause, exc);
        rb_ivar_set(stop_exc, id_result, result);
        rb_exc_raise(stop_exc);
    }

    curr = rb_fiber_current();

    if (!e->fib || !rb_fiber_alive_p(e->fib)) {
        next_init(obj, e);
    }

    vs = rb_fiber_resume(e->fib, 1, &curr);
    if (e->stop_exc) {
        e->fib = 0;
        e->dst = Qnil;
        e->lookahead = Qundef;
        e->feedvalue = Qundef;
        rb_exc_raise(e->stop_exc);
    }
    return vs;
}

/*
 * call-seq:
 *   e.next_values -> array
 *
 * Returns the next object in the enumerator as an array, and moves the
 * internal position forward.  When the position reaches the end,
 * StopIteration is raised.
 *
 * See class-level notes about external iterators.
 *
 * This method can be used to distinguish <code>yield</code> and <code>yield
 * nil</code>.
 *
 * === Example
 *
 *   o = Object.new
 *   def o.each
 *     yield
 *     yield 1
 *     yield 1, 2
 *     yield nil
 *     yield [1, 2]
 *   end
 *   e = o.to_enum
 *   p e.next_values
 *   p e.next_values
 *   p e.next_values
 *   p e.next_values
 *   p e.next_values
 *   e = o.to_enum
 *   p e.next
 *   p e.next
 *   p e.next
 *   p e.next
 *   p e.next
 *
 *   ## yield args       next_values      next
 *   #  yield            []               nil
 *   #  yield 1          [1]              1
 *   #  yield 1, 2       [1, 2]           [1, 2]
 *   #  yield nil        [nil]            nil
 *   #  yield [1, 2]     [[1, 2]]         [1, 2]
 *
 */

static VALUE
enumerator_next_values(VALUE obj)
{
    struct enumerator *e = enumerator_ptr(obj);
    VALUE vs;

    rb_check_frozen(obj);

    if (!UNDEF_P(e->lookahead)) {
        vs = e->lookahead;
        e->lookahead = Qundef;
        return vs;
    }

    return get_next_values(obj, e);
}

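/*
 * Collapse an array of yielded values into the single value returned by
 * Enumerator#next/#peek: [] becomes nil, [x] becomes x, and longer arrays
 * are returned as-is (duplicated when +dup+ is set, as #peek requests).
 */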
static VALUE
ary2sv(VALUE args, int dup)
{
    if (!RB_TYPE_P(args, T_ARRAY))
        return args;

    switch (RARRAY_LEN(args)) {
      case 0:
        return Qnil;

      case 1:
        return RARRAY_AREF(args, 0);

      default:
        if (dup)
            return rb_ary_dup(args);
        return args;
    }
}

/*
 * call-seq:
 *   e.next -> object
 *
 * Returns the next object in the enumerator, and moves the internal position
 * forward.  When the position reaches the end, StopIteration is raised.
 *
 * === Example
 *
 *   a = [1,2,3]
 *   e = a.to_enum
 *   p e.next   #=> 1
 *   p e.next   #=> 2
 *   p e.next   #=> 3
 *   p e.next   #raises StopIteration
 *
 * See class-level notes about external iterators.
 *
 */

static VALUE
enumerator_next(VALUE obj)
{
    VALUE vs = enumerator_next_values(obj);
    return ary2sv(vs, 0);
}

static VALUE
enumerator_peek_values(VALUE obj)
{
    struct enumerator *e = enumerator_ptr(obj);

    rb_check_frozen(obj);

    if (UNDEF_P(e->lookahead)) {
        RB_OBJ_WRITE(obj, &e->lookahead, get_next_values(obj, e));
    }

    return e->lookahead;
}

/*
 * call-seq:
 *   e.peek_values -> array
 *
 * Returns the next object as an array, similar to Enumerator#next_values, but
 * doesn't move the internal position forward.  If the position is already at
 * the end, StopIteration is raised.
 *
 * See class-level notes about external iterators.
 *
 * === Example
 *
 *   o = Object.new
 *   def o.each
 *     yield
 *     yield 1
 *     yield 1, 2
 *   end
 *   e = o.to_enum
 *   p e.peek_values    #=> []
 *   e.next
 *   p e.peek_values    #=> [1]
 *   p e.peek_values    #=> [1]
 *   e.next
 *   p e.peek_values    #=> [1, 2]
 *   e.next
 *   p e.peek_values    # raises StopIteration
 *
 */

static VALUE
enumerator_peek_values_m(VALUE obj)
{
    return rb_ary_dup(enumerator_peek_values(obj));
}

/*
 * call-seq:
 *   e.peek -> object
 *
 * Returns the next object in the enumerator, but doesn't move the internal
 * position forward.  If the position is already at the end, StopIteration
 * is raised.
 *
 * See class-level notes about external iterators.
 *
 * === Example
 *
 *   a = [1,2,3]
 *   e = a.to_enum
 *   p e.next   #=> 1
 *   p e.peek   #=> 2
 *   p e.peek   #=> 2
 *   p e.peek   #=> 2
 *   p e.next   #=> 2
 *   p e.next   #=> 3
 *   p e.peek   #raises StopIteration
 *
 */

static VALUE
enumerator_peek(VALUE obj)
{
    VALUE vs = enumerator_peek_values(obj);
    return ary2sv(vs, 1);
}

/*
 * call-seq:
 *   e.feed obj   -> nil
 *
 * Sets the value to be returned by the next yield inside +e+.
 *
 * If the value is not set, the yield returns nil.
 *
 * This value is cleared after being yielded.
 *
 *   # Array#map passes the array's elements to "yield" and collects the
 *   # results of "yield" as an array.
 *   # The following example shows that "next" returns the passed elements and
 *   # values passed to "feed" are collected as an array which can be
 *   # obtained by StopIteration#result.
 *   e = [1,2,3].map
 *   p e.next           #=> 1
 *   e.feed "a"
 *   p e.next           #=> 2
 *   e.feed "b"
 *   p e.next           #=> 3
 *   e.feed "c"
 *   begin
 *     e.next
 *   rescue StopIteration
 *     p $!.result      #=> ["a", "b", "c"]
 *   end
 *
 *   o = Object.new
 *   def o.each
 *     x = yield         # (2) blocks
 *     p x               # (5) => "foo"
 *     x = yield         # (6) blocks
 *     p x               # (8) => nil
 *     x = yield         # (9) blocks
 *     p x               # not reached w/o another e.next
 *   end
 *
 *   e = o.to_enum
 *   e.next              # (1)
 *   e.feed "foo"        # (3)
 *   e.next              # (4)
 *   e.next              # (7)
 *   # (10)
 */

static VALUE
enumerator_feed(VALUE obj, VALUE v)
{
    struct enumerator *e = enumerator_ptr(obj);

    rb_check_frozen(obj);

    if (!UNDEF_P(e->feedvalue)) {
        rb_raise(rb_eTypeError, "feed value already set");
    }
    RB_OBJ_WRITE(obj, &e->feedvalue, v);

    return Qnil;
}

/*
 * call-seq:
 *   e.rewind   -> e
 *
 * Rewinds the enumeration sequence to the beginning.
 *
 * If the enclosed object responds to a "rewind" method, it is called.
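 *
 * A small illustration (assuming the wrapped object can be re-iterated,
 * as an Array can):
 *
 *   e = [1, 2, 3].each
 *   p e.next   #=> 1
 *   p e.next   #=> 2
 *   e.rewind
 *   p e.next   #=> 1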
 */

static VALUE
enumerator_rewind(VALUE obj)
{
    struct enumerator *e = enumerator_ptr(obj);

    rb_check_frozen(obj);

    rb_check_funcall(e->obj, id_rewind, 0, 0);

    e->fib = 0;
    e->dst = Qnil;
    e->lookahead = Qundef;
    e->feedvalue = Qundef;
    e->stop_exc = Qfalse;
    return obj;
}

static struct generator *generator_ptr(VALUE obj);
static VALUE append_method(VALUE obj, VALUE str, ID default_method, VALUE default_args);

static VALUE
inspect_enumerator(VALUE obj, VALUE dummy, int recur)
{
    struct enumerator *e;
    VALUE eobj, str, cname;

    TypedData_Get_Struct(obj, struct enumerator, &enumerator_data_type, e);

    cname = rb_obj_class(obj);

    if (!e || UNDEF_P(e->obj)) {
        return rb_sprintf("#<%"PRIsVALUE": uninitialized>", rb_class_path(cname));
    }

    if (recur) {
        str = rb_sprintf("#<%"PRIsVALUE": ...>", rb_class_path(cname));
        return str;
    }

    if (e->procs) {
        long i;

        eobj = generator_ptr(e->obj)->obj;
        /* In case procs chained enumerator traversing all proc entries manually */
        if (rb_obj_class(eobj) == cname) {
            str = rb_inspect(eobj);
        }
        else {
            str = rb_sprintf("#<%"PRIsVALUE": %+"PRIsVALUE">", rb_class_path(cname), eobj);
        }
        for (i = 0; i < RARRAY_LEN(e->procs); i++) {
            str = rb_sprintf("#<%"PRIsVALUE": %"PRIsVALUE, cname, str);
            append_method(RARRAY_AREF(e->procs, i), str, e->meth, e->args);
            rb_str_buf_cat2(str, ">");
        }
        return str;
    }

    eobj = rb_attr_get(obj, id_receiver);
    if (NIL_P(eobj)) {
        eobj = e->obj;
    }

    /* (1..100).each_cons(2) => "#<Enumerator: 1..100:each_cons(2)>" */
    str = rb_sprintf("#<%"PRIsVALUE": %+"PRIsVALUE, rb_class_path(cname), eobj);
    append_method(obj, str, e->meth, e->args);

    rb_str_buf_cat2(str, ">");

    return str;
}

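/*
 * Helpers for enumerator_inspect: key_symbol_p and kwd_append are
 * rb_hash_foreach callbacks used to detect and format a trailing
 * keyword-argument hash, and append_method appends ":meth(arg, ...)" to
 * the inspect string, using the receiver's stored method/arguments
 * attributes when present and the supplied defaults otherwise.
 */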
static int
key_symbol_p(VALUE key, VALUE val, VALUE arg)
{
    if (SYMBOL_P(key)) return ST_CONTINUE;
    *(int *)arg = FALSE;
    return ST_STOP;
}

static int
kwd_append(VALUE key, VALUE val, VALUE str)
{
    if (!SYMBOL_P(key)) rb_raise(rb_eRuntimeError, "non-symbol key inserted");
    rb_str_catf(str, "% "PRIsVALUE": %"PRIsVALUE", ", key, val);
    return ST_CONTINUE;
}

static VALUE
append_method(VALUE obj, VALUE str, ID default_method, VALUE default_args)
{
    VALUE method, eargs;

    method = rb_attr_get(obj, id_method);
    if (method != Qfalse) {
        if (!NIL_P(method)) {
            Check_Type(method, T_SYMBOL);
            method = rb_sym2str(method);
        }
        else {
            method = rb_id2str(default_method);
        }
        rb_str_buf_cat2(str, ":");
        rb_str_buf_append(str, method);
    }

    eargs = rb_attr_get(obj, id_arguments);
    if (NIL_P(eargs)) {
        eargs = default_args;
    }
    if (eargs != Qfalse) {
        long argc = RARRAY_LEN(eargs);
        const VALUE *argv = RARRAY_CONST_PTR(eargs); /* WB: no new reference */

        if (argc > 0) {
            VALUE kwds = Qnil;

            rb_str_buf_cat2(str, "(");

            if (RB_TYPE_P(argv[argc-1], T_HASH) && !RHASH_EMPTY_P(argv[argc-1])) {
                int all_key = TRUE;
                rb_hash_foreach(argv[argc-1], key_symbol_p, (VALUE)&all_key);
                if (all_key) kwds = argv[--argc];
            }

            while (argc--) {
                VALUE arg = *argv++;

                rb_str_append(str, rb_inspect(arg));
                rb_str_buf_cat2(str, ", ");
            }
            if (!NIL_P(kwds)) {
                rb_hash_foreach(kwds, kwd_append, str);
            }
            rb_str_set_len(str, RSTRING_LEN(str)-2);
            rb_str_buf_cat2(str, ")");
        }
    }

    return str;
}

/*
|
|
|
|
* call-seq:
|
2010-05-18 01:07:33 +04:00
|
|
|
* e.inspect -> string
|
2008-12-04 05:44:38 +03:00
|
|
|
*
|
2011-05-23 03:33:21 +04:00
|
|
|
* Creates a printable version of <i>e</i>.
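*
*  For example:
*
*    (1..100).each_cons(2).inspect  #=> "#<Enumerator: 1..100:each_cons(2)>"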
|
2008-12-04 05:44:38 +03:00
|
|
|
*/
|
|
|
|
|
|
|
|
static VALUE
|
|
|
|
enumerator_inspect(VALUE obj)
|
|
|
|
{
|
|
|
|
return rb_exec_recursive(inspect_enumerator, obj, 0);
|
|
|
|
}
|
|
|
|
|
2012-11-06 21:10:06 +04:00
|
|
|
/*
|
|
|
|
* call-seq:
|
|
|
|
* e.size -> int, Float::INFINITY or nil
|
|
|
|
*
|
|
|
|
* Returns the size of the enumerator, or +nil+ if it can't be calculated lazily.
|
|
|
|
*
|
|
|
|
* (1..100).to_a.permutation(4).size # => 94109400
|
|
|
|
* loop.size # => Float::INFINITY
|
|
|
|
* (1..100).drop_while.size # => nil
|
|
|
|
*/
|
|
|
|
|
|
|
|
static VALUE
|
|
|
|
enumerator_size(VALUE obj)
|
|
|
|
{
|
|
|
|
struct enumerator *e = enumerator_ptr(obj);
|
2013-08-27 11:56:52 +04:00
|
|
|
int argc = 0;
|
|
|
|
const VALUE *argv = NULL;
|
|
|
|
VALUE size;
|
2012-11-06 21:10:06 +04:00
|
|
|
|
2016-09-19 04:36:56 +03:00
|
|
|
if (e->procs) {
|
|
|
|
struct generator *g = generator_ptr(e->obj);
|
|
|
|
VALUE receiver = rb_check_funcall(g->obj, id_size, 0, 0);
|
|
|
|
long i = 0;
|
2022-07-21 19:23:58 +03:00
|
|
|
|
2016-09-19 04:36:56 +03:00
|
|
|
for (i = 0; i < RARRAY_LEN(e->procs); i++) {
|
|
|
|
VALUE proc = RARRAY_AREF(e->procs, i);
|
|
|
|
struct proc_entry *entry = proc_entry_ptr(proc);
|
2017-10-18 07:35:25 +03:00
|
|
|
lazyenum_size_func *size_fn = entry->fn->size;
|
|
|
|
if (!size_fn) {
|
2016-09-19 04:36:56 +03:00
|
|
|
return Qnil;
|
|
|
|
}
|
2017-10-18 07:35:25 +03:00
|
|
|
receiver = (*size_fn)(proc, receiver);
|
2016-09-19 04:36:56 +03:00
|
|
|
}
|
|
|
|
return receiver;
|
|
|
|
}
|
|
|
|
|
2012-11-06 21:10:06 +04:00
|
|
|
if (e->size_fn) {
|
2013-02-05 07:49:41 +04:00
|
|
|
return (*e->size_fn)(e->obj, e->args, obj);
|
2012-11-06 21:10:06 +04:00
|
|
|
}
|
2013-08-27 11:56:52 +04:00
|
|
|
if (e->args) {
|
|
|
|
argc = (int)RARRAY_LEN(e->args);
|
2013-09-25 12:24:34 +04:00
|
|
|
argv = RARRAY_CONST_PTR(e->args);
|
2012-11-06 21:10:06 +04:00
|
|
|
}
|
2019-09-18 22:59:01 +03:00
|
|
|
size = rb_check_funcall_kw(e->size, id_call, argc, argv, e->kw_splat);
|
2022-11-15 07:24:08 +03:00
|
|
|
if (!UNDEF_P(size)) return size;
|
2012-11-06 21:10:06 +04:00
|
|
|
return e->size;
|
|
|
|
}
|
|
|
|
|
2008-08-26 09:42:12 +04:00
|
|
|
/*
|
|
|
|
* Yielder
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
yielder_mark(void *p)
|
|
|
|
{
|
|
|
|
struct yielder *ptr = p;
|
2019-08-13 00:00:34 +03:00
|
|
|
rb_gc_mark_movable(ptr->proc);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
yielder_compact(void *p)
|
|
|
|
{
|
|
|
|
struct yielder *ptr = p;
|
|
|
|
ptr->proc = rb_gc_location(ptr->proc);
|
2008-08-26 09:42:12 +04:00
|
|
|
}
|
|
|
|
|
2009-09-09 06:46:00 +04:00
|
|
|
static const rb_data_type_t yielder_data_type = {
|
|
|
|
"yielder",
|
2010-07-18 11:31:54 +04:00
|
|
|
{
|
|
|
|
yielder_mark,
|
2023-11-22 12:20:47 +03:00
|
|
|
RUBY_TYPED_DEFAULT_FREE,
|
2023-11-22 12:19:46 +03:00
|
|
|
NULL,
|
2019-08-13 17:00:56 +03:00
|
|
|
yielder_compact,
|
2010-07-18 11:31:54 +04:00
|
|
|
},
|
2023-11-22 18:45:50 +03:00
|
|
|
0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_EMBEDDABLE
|
2009-09-09 06:46:00 +04:00
|
|
|
};
|
|
|
|
|
2008-08-26 09:42:12 +04:00
|
|
|
static struct yielder *
|
|
|
|
yielder_ptr(VALUE obj)
|
|
|
|
{
|
|
|
|
struct yielder *ptr;
|
|
|
|
|
2009-09-09 06:46:00 +04:00
|
|
|
TypedData_Get_Struct(obj, struct yielder, &yielder_data_type, ptr);
|
2022-11-15 07:24:08 +03:00
|
|
|
if (!ptr || UNDEF_P(ptr->proc)) {
|
2008-08-26 09:42:12 +04:00
|
|
|
rb_raise(rb_eArgError, "uninitialized yielder");
|
|
|
|
}
|
|
|
|
return ptr;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* :nodoc: */
|
|
|
|
static VALUE
|
|
|
|
yielder_allocate(VALUE klass)
|
|
|
|
{
|
|
|
|
struct yielder *ptr;
|
|
|
|
VALUE obj;
|
|
|
|
|
2009-09-09 06:46:00 +04:00
|
|
|
obj = TypedData_Make_Struct(klass, struct yielder, &yielder_data_type, ptr);
|
2008-08-26 09:42:12 +04:00
|
|
|
ptr->proc = Qundef;
|
|
|
|
|
|
|
|
return obj;
|
|
|
|
}
|
|
|
|
|
|
|
|
static VALUE
|
|
|
|
yielder_init(VALUE obj, VALUE proc)
|
|
|
|
{
|
|
|
|
struct yielder *ptr;
|
|
|
|
|
2009-09-09 06:46:00 +04:00
|
|
|
TypedData_Get_Struct(obj, struct yielder, &yielder_data_type, ptr);
|
2008-08-26 09:42:12 +04:00
|
|
|
|
|
|
|
if (!ptr) {
|
|
|
|
rb_raise(rb_eArgError, "unallocated yielder");
|
|
|
|
}
|
|
|
|
|
2023-11-22 18:45:50 +03:00
|
|
|
RB_OBJ_WRITE(obj, &ptr->proc, proc);
|
2008-08-26 09:42:12 +04:00
|
|
|
|
|
|
|
return obj;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* :nodoc: */
|
|
|
|
static VALUE
|
|
|
|
yielder_initialize(VALUE obj)
|
|
|
|
{
|
|
|
|
rb_need_block();
|
|
|
|
|
|
|
|
return yielder_init(obj, rb_block_proc());
|
|
|
|
}
|
|
|
|
|
|
|
|
/* :nodoc: */
|
|
|
|
static VALUE
|
|
|
|
yielder_yield(VALUE obj, VALUE args)
|
|
|
|
{
|
|
|
|
struct yielder *ptr = yielder_ptr(obj);
|
|
|
|
|
2019-09-27 03:25:54 +03:00
|
|
|
return rb_proc_call_kw(ptr->proc, args, RB_PASS_CALLED_KEYWORDS);
|
2008-08-26 09:42:12 +04:00
|
|
|
}
|
|
|
|
|
2009-11-11 19:32:34 +03:00
|
|
|
/* :nodoc: */
|
2014-05-24 10:05:35 +04:00
|
|
|
static VALUE
|
2018-09-18 11:49:40 +03:00
|
|
|
yielder_yield_push(VALUE obj, VALUE arg)
|
2009-11-11 19:32:34 +03:00
|
|
|
{
|
2018-09-18 11:49:40 +03:00
|
|
|
struct yielder *ptr = yielder_ptr(obj);
|
|
|
|
|
|
|
|
rb_proc_call_with_block(ptr->proc, 1, &arg, Qnil);
|
|
|
|
|
2009-11-11 19:32:34 +03:00
|
|
|
return obj;
|
|
|
|
}
|
|
|
|
|
2019-03-11 12:49:14 +03:00
|
|
|
/*
|
2020-05-27 09:46:42 +03:00
|
|
|
* Returns a Proc object that takes arguments and yields them.
|
2019-03-11 12:49:14 +03:00
|
|
|
*
|
|
|
|
* This method is implemented so that a Yielder object can be directly
|
|
|
|
* passed to another method as a block argument.
|
|
|
|
*
|
|
|
|
* enum = Enumerator.new { |y|
|
|
|
|
* Dir.glob("*.rb") { |file|
|
|
|
|
* File.open(file) { |f| f.each_line(&y) }
|
|
|
|
* }
|
|
|
|
* }
|
|
|
|
*/
|
|
|
|
static VALUE
|
|
|
|
yielder_to_proc(VALUE obj)
|
|
|
|
{
|
2019-05-15 06:41:31 +03:00
|
|
|
VALUE method = rb_obj_method(obj, sym_yield);
|
2019-03-11 12:49:14 +03:00
|
|
|
|
2019-08-01 10:40:03 +03:00
|
|
|
return rb_funcall(method, idTo_proc, 0);
|
2019-03-11 12:49:14 +03:00
|
|
|
}
|
|
|
|
|
2008-08-26 09:42:12 +04:00
|
|
|
static VALUE
|
2013-11-29 11:59:14 +04:00
|
|
|
yielder_yield_i(RB_BLOCK_CALL_FUNC_ARGLIST(obj, memo))
|
2008-08-26 09:42:12 +04:00
|
|
|
{
|
2019-09-27 03:25:54 +03:00
|
|
|
return rb_yield_values_kw(argc, argv, RB_PASS_CALLED_KEYWORDS);
|
2008-08-26 09:42:12 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
static VALUE
|
|
|
|
yielder_new(void)
|
|
|
|
{
|
2009-07-13 20:07:43 +04:00
|
|
|
return yielder_init(yielder_allocate(rb_cYielder), rb_proc_new(yielder_yield_i, 0));
|
2008-08-26 09:42:12 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Generator
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
generator_mark(void *p)
|
|
|
|
{
|
|
|
|
struct generator *ptr = p;
|
2019-08-13 00:00:34 +03:00
|
|
|
rb_gc_mark_movable(ptr->proc);
|
|
|
|
rb_gc_mark_movable(ptr->obj);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
generator_compact(void *p)
|
|
|
|
{
|
|
|
|
struct generator *ptr = p;
|
|
|
|
ptr->proc = rb_gc_location(ptr->proc);
|
|
|
|
ptr->obj = rb_gc_location(ptr->obj);
|
2008-08-26 09:42:12 +04:00
|
|
|
}
|
|
|
|
|
2009-09-09 06:46:00 +04:00
|
|
|
static const rb_data_type_t generator_data_type = {
|
|
|
|
"generator",
|
2010-07-18 11:31:54 +04:00
|
|
|
{
|
|
|
|
generator_mark,
|
2023-11-22 12:20:47 +03:00
|
|
|
RUBY_TYPED_DEFAULT_FREE,
|
2023-11-22 12:19:46 +03:00
|
|
|
NULL,
|
2019-08-13 17:00:56 +03:00
|
|
|
generator_compact,
|
2010-07-18 11:31:54 +04:00
|
|
|
},
|
2023-11-22 18:49:28 +03:00
|
|
|
0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_EMBEDDABLE
|
2009-09-09 06:46:00 +04:00
|
|
|
};
|
|
|
|
|
2008-08-26 09:42:12 +04:00
|
|
|
static struct generator *
|
|
|
|
generator_ptr(VALUE obj)
|
|
|
|
{
|
|
|
|
struct generator *ptr;
|
|
|
|
|
2009-09-09 06:46:00 +04:00
|
|
|
TypedData_Get_Struct(obj, struct generator, &generator_data_type, ptr);
|
2022-11-15 07:24:08 +03:00
|
|
|
if (!ptr || UNDEF_P(ptr->proc)) {
|
2008-08-26 09:42:12 +04:00
|
|
|
rb_raise(rb_eArgError, "uninitialized generator");
|
|
|
|
}
|
|
|
|
return ptr;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* :nodoc: */
|
|
|
|
static VALUE
|
|
|
|
generator_allocate(VALUE klass)
|
|
|
|
{
|
|
|
|
struct generator *ptr;
|
|
|
|
VALUE obj;
|
|
|
|
|
2009-09-09 06:46:00 +04:00
|
|
|
obj = TypedData_Make_Struct(klass, struct generator, &generator_data_type, ptr);
|
2008-08-26 09:42:12 +04:00
|
|
|
ptr->proc = Qundef;
|
|
|
|
|
|
|
|
return obj;
|
|
|
|
}
|
|
|
|
|
|
|
|
static VALUE
|
|
|
|
generator_init(VALUE obj, VALUE proc)
|
|
|
|
{
|
|
|
|
struct generator *ptr;
|
|
|
|
|
2013-07-29 16:06:42 +04:00
|
|
|
rb_check_frozen(obj);
|
2009-09-09 06:46:00 +04:00
|
|
|
TypedData_Get_Struct(obj, struct generator, &generator_data_type, ptr);
|
2008-08-26 09:42:12 +04:00
|
|
|
|
|
|
|
if (!ptr) {
|
|
|
|
rb_raise(rb_eArgError, "unallocated generator");
|
|
|
|
}
|
|
|
|
|
2023-11-22 18:49:28 +03:00
|
|
|
RB_OBJ_WRITE(obj, &ptr->proc, proc);
|
2008-08-26 09:42:12 +04:00
|
|
|
|
|
|
|
return obj;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* :nodoc: */
|
|
|
|
static VALUE
|
|
|
|
generator_initialize(int argc, VALUE *argv, VALUE obj)
|
|
|
|
{
|
|
|
|
VALUE proc;
|
|
|
|
|
|
|
|
if (argc == 0) {
|
|
|
|
rb_need_block();
|
|
|
|
|
|
|
|
proc = rb_block_proc();
|
2012-03-08 19:26:01 +04:00
|
|
|
}
|
|
|
|
else {
|
2008-08-26 09:42:12 +04:00
|
|
|
rb_scan_args(argc, argv, "1", &proc);
|
|
|
|
|
|
|
|
if (!rb_obj_is_proc(proc))
|
|
|
|
rb_raise(rb_eTypeError,
|
2015-09-28 05:40:46 +03:00
|
|
|
"wrong argument type %"PRIsVALUE" (expected Proc)",
|
|
|
|
rb_obj_class(proc));
|
2008-08-26 09:42:12 +04:00
|
|
|
|
|
|
|
if (rb_block_given_p()) {
|
|
|
|
rb_warn("given block not used");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return generator_init(obj, proc);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* :nodoc: */
|
|
|
|
static VALUE
|
|
|
|
generator_init_copy(VALUE obj, VALUE orig)
|
|
|
|
{
|
|
|
|
struct generator *ptr0, *ptr1;
|
|
|
|
|
2012-06-05 15:13:18 +04:00
|
|
|
if (!OBJ_INIT_COPY(obj, orig)) return obj;
|
|
|
|
|
2008-08-26 09:42:12 +04:00
|
|
|
ptr0 = generator_ptr(orig);
|
|
|
|
|
2009-09-09 06:46:00 +04:00
|
|
|
TypedData_Get_Struct(obj, struct generator, &generator_data_type, ptr1);
|
2008-08-26 09:42:12 +04:00
|
|
|
|
|
|
|
if (!ptr1) {
|
|
|
|
rb_raise(rb_eArgError, "unallocated generator");
|
|
|
|
}
|
|
|
|
|
2023-11-22 18:49:28 +03:00
|
|
|
RB_OBJ_WRITE(obj, &ptr1->proc, ptr0->proc);
|
2008-08-26 09:42:12 +04:00
|
|
|
|
|
|
|
return obj;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* :nodoc: */
|
|
|
|
static VALUE
|
2012-03-08 19:26:01 +04:00
|
|
|
generator_each(int argc, VALUE *argv, VALUE obj)
|
2008-08-26 09:42:12 +04:00
|
|
|
{
|
|
|
|
struct generator *ptr = generator_ptr(obj);
|
2012-03-08 19:26:01 +04:00
|
|
|
VALUE args = rb_ary_new2(argc + 1);
|
2008-08-26 09:42:12 +04:00
|
|
|
|
2012-03-08 19:26:01 +04:00
|
|
|
rb_ary_push(args, yielder_new());
|
|
|
|
if (argc > 0) {
|
|
|
|
rb_ary_cat(args, argv, argc);
|
|
|
|
}
|
2008-08-26 09:42:12 +04:00
|
|
|
|
2019-09-26 19:10:42 +03:00
|
|
|
return rb_proc_call_kw(ptr->proc, args, RB_PASS_CALLED_KEYWORDS);
|
2009-08-19 20:36:00 +04:00
|
|
|
}
|
2008-08-26 09:42:12 +04:00
|
|
|
|
2012-03-08 19:30:28 +04:00
|
|
|
/* Lazy Enumerator methods */
|
2012-11-06 21:15:59 +04:00
|
|
|
static VALUE
|
2013-02-05 07:49:41 +04:00
|
|
|
enum_size(VALUE self)
|
2012-11-06 21:15:59 +04:00
|
|
|
{
|
2013-02-05 07:49:41 +04:00
|
|
|
VALUE r = rb_check_funcall(self, id_size, 0, 0);
|
2022-11-15 07:24:08 +03:00
|
|
|
return UNDEF_P(r) ? Qnil : r;
|
2012-11-06 21:15:59 +04:00
|
|
|
}
|
|
|
|
|
2013-06-28 08:26:21 +04:00
|
|
|
static VALUE
|
|
|
|
lazyenum_size(VALUE self, VALUE args, VALUE eobj)
|
|
|
|
{
|
|
|
|
return enum_size(self);
|
|
|
|
}
|
|
|
|
|
2020-07-22 03:52:50 +03:00
|
|
|
#define lazy_receiver_size lazy_map_size
|
2012-11-06 21:15:59 +04:00
|
|
|
|
2012-03-08 19:30:28 +04:00
|
|
|
static VALUE
|
2013-11-29 11:59:14 +04:00
|
|
|
lazy_init_iterator(RB_BLOCK_CALL_FUNC_ARGLIST(val, m))
|
2012-03-08 19:30:28 +04:00
|
|
|
{
|
2012-03-15 18:20:27 +04:00
|
|
|
VALUE result;
|
|
|
|
if (argc == 1) {
|
|
|
|
VALUE args[2];
|
|
|
|
args[0] = m;
|
|
|
|
args[1] = val;
|
|
|
|
result = rb_yield_values2(2, args);
|
|
|
|
}
|
|
|
|
else {
|
|
|
|
VALUE args;
|
|
|
|
int len = rb_long2int((long)argc + 1);
|
2015-12-15 18:32:28 +03:00
|
|
|
VALUE *nargv = ALLOCV_N(VALUE, args, len);
|
2012-03-15 18:20:27 +04:00
|
|
|
|
2015-12-15 18:32:28 +03:00
|
|
|
nargv[0] = m;
|
2012-03-15 18:20:27 +04:00
|
|
|
if (argc > 0) {
|
2015-12-15 18:32:28 +03:00
|
|
|
MEMCPY(nargv + 1, argv, VALUE, argc);
|
2012-03-15 18:20:27 +04:00
|
|
|
}
|
2015-12-15 18:32:28 +03:00
|
|
|
result = rb_yield_values2(len, nargv);
|
|
|
|
ALLOCV_END(args);
|
2012-03-15 18:20:27 +04:00
|
|
|
}
|
2022-11-15 07:24:08 +03:00
|
|
|
if (UNDEF_P(result)) rb_iter_break();
|
2012-03-15 18:20:27 +04:00
|
|
|
return Qnil;
|
2012-03-08 19:30:28 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
static VALUE
|
2013-11-29 11:59:14 +04:00
|
|
|
lazy_init_block_i(RB_BLOCK_CALL_FUNC_ARGLIST(val, m))
|
2012-03-08 19:30:28 +04:00
|
|
|
{
|
2012-03-15 18:20:27 +04:00
|
|
|
rb_block_call(m, id_each, argc-1, argv+1, lazy_init_iterator, val);
|
|
|
|
return Qnil;
|
2012-03-08 19:30:28 +04:00
|
|
|
}
|
|
|
|
|
2016-09-19 04:36:56 +03:00
|
|
|
#define memo_value v2
|
|
|
|
#define memo_flags u3.state
|
|
|
|
#define LAZY_MEMO_BREAK 1
|
|
|
|
#define LAZY_MEMO_PACKED 2
|
|
|
|
#define LAZY_MEMO_BREAK_P(memo) ((memo)->memo_flags & LAZY_MEMO_BREAK)
|
|
|
|
#define LAZY_MEMO_PACKED_P(memo) ((memo)->memo_flags & LAZY_MEMO_PACKED)
|
|
|
|
#define LAZY_MEMO_SET_BREAK(memo) ((memo)->memo_flags |= LAZY_MEMO_BREAK)
|
2020-07-21 18:58:48 +03:00
|
|
|
#define LAZY_MEMO_RESET_BREAK(memo) ((memo)->memo_flags &= ~LAZY_MEMO_BREAK)
|
2016-09-19 04:36:56 +03:00
|
|
|
#define LAZY_MEMO_SET_VALUE(memo, value) MEMO_V2_SET(memo, value)
|
2017-06-10 13:26:32 +03:00
|
|
|
#define LAZY_MEMO_SET_PACKED(memo) ((memo)->memo_flags |= LAZY_MEMO_PACKED)
|
|
|
|
#define LAZY_MEMO_RESET_PACKED(memo) ((memo)->memo_flags &= ~LAZY_MEMO_PACKED)
|
2016-09-19 04:36:56 +03:00
|
|
|
|
2020-07-21 03:11:20 +03:00
|
|
|
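/* Core of the lazy pipeline: each value yielded by the source is wrapped in
 * a MEMO and passed through the chained proc entries in order.  An entry's
 * ->proc callback may rewrite the value, return 0 to drop it, or set
 * LAZY_MEMO_BREAK to stop enumeration; surviving values are pushed to the
 * yielder. */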
static VALUE lazy_yielder_result(struct MEMO *result, VALUE yielder, VALUE procs_array, VALUE memos, long i);
|
|
|
|
|
2016-09-19 04:36:56 +03:00
|
|
|
static VALUE
|
2019-08-26 07:30:04 +03:00
|
|
|
lazy_init_yielder(RB_BLOCK_CALL_FUNC_ARGLIST(_, m))
|
2016-09-19 04:36:56 +03:00
|
|
|
{
|
|
|
|
VALUE yielder = RARRAY_AREF(m, 0);
|
|
|
|
VALUE procs_array = RARRAY_AREF(m, 1);
|
|
|
|
VALUE memos = rb_attr_get(yielder, id_memo);
|
|
|
|
struct MEMO *result;
|
|
|
|
|
2020-07-21 03:11:20 +03:00
|
|
|
result = MEMO_NEW(m, rb_enum_values_pack(argc, argv),
|
2016-09-19 04:36:56 +03:00
|
|
|
argc > 1 ? LAZY_MEMO_PACKED : 0);
|
2020-07-21 03:11:20 +03:00
|
|
|
return lazy_yielder_result(result, yielder, procs_array, memos, 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
static VALUE
|
|
|
|
lazy_yielder_yield(struct MEMO *result, long memo_index, int argc, const VALUE *argv)
|
|
|
|
{
|
|
|
|
VALUE m = result->v1;
|
|
|
|
VALUE yielder = RARRAY_AREF(m, 0);
|
|
|
|
VALUE procs_array = RARRAY_AREF(m, 1);
|
|
|
|
VALUE memos = rb_attr_get(yielder, id_memo);
|
|
|
|
LAZY_MEMO_SET_VALUE(result, rb_enum_values_pack(argc, argv));
|
|
|
|
if (argc > 1)
|
|
|
|
LAZY_MEMO_SET_PACKED(result);
|
|
|
|
else
|
|
|
|
LAZY_MEMO_RESET_PACKED(result);
|
|
|
|
return lazy_yielder_result(result, yielder, procs_array, memos, memo_index);
|
|
|
|
}
|
|
|
|
|
|
|
|
static VALUE
|
|
|
|
lazy_yielder_result(struct MEMO *result, VALUE yielder, VALUE procs_array, VALUE memos, long i)
|
|
|
|
{
|
|
|
|
int cont = 1;
|
2016-09-19 04:36:56 +03:00
|
|
|
|
2020-07-21 03:11:20 +03:00
|
|
|
for (; i < RARRAY_LEN(procs_array); i++) {
|
2016-09-19 04:36:56 +03:00
|
|
|
VALUE proc = RARRAY_AREF(procs_array, i);
|
|
|
|
struct proc_entry *entry = proc_entry_ptr(proc);
|
|
|
|
if (!(*entry->fn->proc)(proc, result, memos, i)) {
|
|
|
|
cont = 0;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (cont) {
|
2018-09-18 11:49:40 +03:00
|
|
|
rb_funcall2(yielder, idLTLT, 1, &(result->memo_value));
|
2016-09-19 04:36:56 +03:00
|
|
|
}
|
|
|
|
if (LAZY_MEMO_BREAK_P(result)) {
|
|
|
|
rb_iter_break();
|
|
|
|
}
|
|
|
|
return result->memo_value;
|
|
|
|
}
|
|
|
|
|
|
|
|
static VALUE
|
2019-08-26 07:30:04 +03:00
|
|
|
lazy_init_block(RB_BLOCK_CALL_FUNC_ARGLIST(val, m))
|
2016-09-19 04:36:56 +03:00
|
|
|
{
|
|
|
|
VALUE procs = RARRAY_AREF(m, 1);
|
|
|
|
|
|
|
|
rb_ivar_set(val, id_memo, rb_ary_new2(RARRAY_LEN(procs)));
|
|
|
|
rb_block_call(RARRAY_AREF(m, 0), id_each, 0, 0,
|
|
|
|
lazy_init_yielder, rb_ary_new3(2, val, procs));
|
|
|
|
return Qnil;
|
|
|
|
}
|
|
|
|
|
|
|
|
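/* Builds the Generator backing a lazy enumerator: its block iterates the
 * original receiver and feeds every element through +procs+ (the chained
 * lazy operations) via lazy_init_yielder. */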
static VALUE
|
|
|
|
lazy_generator_init(VALUE enumerator, VALUE procs)
|
|
|
|
{
|
|
|
|
VALUE generator;
|
|
|
|
VALUE obj;
|
|
|
|
struct generator *gen_ptr;
|
|
|
|
struct enumerator *e = enumerator_ptr(enumerator);
|
|
|
|
|
|
|
|
if (RARRAY_LEN(procs) > 0) {
|
|
|
|
struct generator *old_gen_ptr = generator_ptr(e->obj);
|
|
|
|
obj = old_gen_ptr->obj;
|
|
|
|
}
|
|
|
|
else {
|
|
|
|
obj = enumerator;
|
|
|
|
}
|
|
|
|
|
|
|
|
generator = generator_allocate(rb_cGenerator);
|
|
|
|
|
|
|
|
rb_block_call(generator, id_initialize, 0, 0,
|
|
|
|
lazy_init_block, rb_ary_new3(2, obj, procs));
|
|
|
|
|
|
|
|
gen_ptr = generator_ptr(generator);
|
2023-11-22 18:49:28 +03:00
|
|
|
RB_OBJ_WRITE(generator, &gen_ptr->obj, obj);
|
2016-09-19 04:36:56 +03:00
|
|
|
|
|
|
|
return generator;
|
|
|
|
}
|
|
|
|
|
2022-08-25 07:42:35 +03:00
|
|
|
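/* Runs the optional precheck hook of each chained entry; returns FALSE when
 * one of them reports that the chain cannot yield anything (e.g. take(0)),
 * so enumeration can be skipped altogether. */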
static int
|
|
|
|
lazy_precheck(VALUE procs)
|
|
|
|
{
|
|
|
|
if (RTEST(procs)) {
|
|
|
|
long num_procs = RARRAY_LEN(procs), i = num_procs;
|
|
|
|
while (i-- > 0) {
|
|
|
|
VALUE proc = RARRAY_AREF(procs, i);
|
|
|
|
struct proc_entry *entry = proc_entry_ptr(proc);
|
|
|
|
lazyenum_precheck_func *precheck = entry->fn->precheck;
|
|
|
|
if (precheck && !precheck(proc)) return FALSE;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return TRUE;
|
|
|
|
}
|
|
|
|
|
2019-03-21 02:32:11 +03:00
|
|
|
/*
|
|
|
|
* Document-class: Enumerator::Lazy
|
2019-05-18 14:04:04 +03:00
|
|
|
*
|
2019-03-21 02:32:11 +03:00
|
|
|
* Enumerator::Lazy is a special type of Enumerator that allows constructing
|
|
|
|
* chains of operations without evaluating them immediately, and evaluating
|
|
|
|
* values on an as-needed basis. In order to do so, it redefines most of the Enumerable
|
|
|
|
* methods so that they just construct another lazy enumerator.
|
|
|
|
*
|
2019-05-18 14:04:04 +03:00
|
|
|
* Enumerator::Lazy can be constructed from any Enumerable with the
|
|
|
|
* Enumerable#lazy method.
|
2019-03-21 02:32:11 +03:00
|
|
|
*
|
|
|
|
* lazy = (1..Float::INFINITY).lazy.select(&:odd?).drop(10).take_while { |i| i < 30 }
|
|
|
|
* # => #<Enumerator::Lazy: #<Enumerator::Lazy: #<Enumerator::Lazy: #<Enumerator::Lazy: 1..Infinity>:select>:drop(10)>:take_while>
|
|
|
|
*
|
2019-05-18 14:04:04 +03:00
|
|
|
* The real enumeration is performed when any non-redefined Enumerable method
|
|
|
|
* is called, like Enumerable#first or Enumerable#to_a (the latter is aliased
|
|
|
|
* as #force for more semantic code):
|
2019-03-21 02:32:11 +03:00
|
|
|
*
|
|
|
|
* lazy.first(2)
|
|
|
|
* #=> [21, 23]
|
|
|
|
*
|
|
|
|
* lazy.force
|
|
|
|
* #=> [21, 23, 25, 27, 29]
|
|
|
|
*
|
2019-05-18 14:04:04 +03:00
|
|
|
* Note that most Enumerable methods that can be called with or without
|
|
|
|
* a block will, on Enumerator::Lazy, always require a block:
|
2019-03-21 02:32:11 +03:00
|
|
|
*
|
|
|
|
* [1, 2, 3].map #=> #<Enumerator: [1, 2, 3]:map>
|
|
|
|
* [1, 2, 3].lazy.map # ArgumentError: tried to call lazy map without a block
|
|
|
|
*
|
2019-05-18 14:04:04 +03:00
|
|
|
* This class allows idiomatic calculations on long or infinite sequences, as well
|
2019-03-21 02:32:11 +03:00
|
|
|
* as chaining of calculations without constructing intermediate arrays.
|
|
|
|
*
|
2019-05-18 14:04:04 +03:00
|
|
|
* Example for working with a slowly calculated sequence:
|
2019-03-21 02:32:11 +03:00
|
|
|
*
|
|
|
|
* require 'open-uri'
|
|
|
|
*
|
|
|
|
* # This will fetch all URLs before selecting
|
|
|
|
* # necessary data
|
2021-01-08 17:52:35 +03:00
|
|
|
* URLS.map { |u| JSON.parse(URI.open(u).read) }
|
2019-09-04 10:07:40 +03:00
|
|
|
* .select { |data| data.key?('stats') }
|
|
|
|
* .first(5)
|
2019-03-21 02:32:11 +03:00
|
|
|
*
|
|
|
|
* # This will fetch URLs one-by-one, only until
|
|
|
|
* # there is enough data to satisfy the condition
|
2021-01-08 17:52:35 +03:00
|
|
|
* URLS.lazy.map { |u| JSON.parse(URI.open(u).read) }
|
2019-09-04 10:07:40 +03:00
|
|
|
* .select { |data| data.key?('stats') }
|
|
|
|
* .first(5)
|
|
|
|
*
|
|
|
|
* Ending a chain with ".eager" generates a non-lazy enumerator, which
|
|
|
|
* is suitable for returning or passing to another method that expects
|
|
|
|
* a normal enumerator.
|
|
|
|
*
|
|
|
|
* def active_items
|
|
|
|
* groups
|
|
|
|
* .lazy
|
|
|
|
* .flat_map(&:items)
|
|
|
|
* .reject(&:disabled)
|
|
|
|
* .eager
|
|
|
|
* end
|
|
|
|
*
|
|
|
|
* # This works lazily; if a checked item is found, it stops
|
|
|
|
* # iteration and does not look into remaining groups.
|
|
|
|
* first_checked = active_items.find(&:checked)
|
|
|
|
*
|
|
|
|
* # This returns an array of items like a normal enumerator does.
|
|
|
|
* all_checked = active_items.select(&:checked)
|
2019-03-21 02:32:11 +03:00
|
|
|
*
|
|
|
|
*/
|
|
|
|
|
2013-02-05 07:49:41 +04:00
|
|
|
/*
|
|
|
|
* call-seq:
|
2019-12-22 21:46:24 +03:00
|
|
|
* Lazy.new(obj, size=nil) { |yielder, *values| block }
|
2013-02-05 07:49:41 +04:00
|
|
|
*
|
|
|
|
* Creates a new Lazy enumerator. When the enumerator is actually enumerated
|
|
|
|
* (e.g. by calling #force), +obj+ will be enumerated and each value passed
|
|
|
|
* to the given block. The block can yield values back using +yielder+.
|
2019-03-21 02:32:11 +03:00
|
|
|
* For example, to create a "filter+map" enumerator:
|
2013-02-05 07:49:41 +04:00
|
|
|
*
|
2019-03-21 02:32:11 +03:00
|
|
|
* def filter_map(sequence)
|
|
|
|
* Lazy.new(sequence) do |yielder, *values|
|
|
|
|
* result = yield *values
|
|
|
|
* yielder << result if result
|
2013-02-05 07:49:41 +04:00
|
|
|
* end
|
|
|
|
* end
|
|
|
|
*
|
2019-03-21 02:32:11 +03:00
|
|
|
* filter_map(1..Float::INFINITY) {|i| i*i if i.even?}.first(5)
|
|
|
|
* #=> [4, 16, 36, 64, 100]
|
2013-02-05 07:49:41 +04:00
|
|
|
*/
|
2012-03-08 19:30:28 +04:00
|
|
|
static VALUE
|
2012-03-15 06:00:30 +04:00
|
|
|
lazy_initialize(int argc, VALUE *argv, VALUE self)
|
2012-03-08 19:30:28 +04:00
|
|
|
{
|
2013-02-05 07:49:41 +04:00
|
|
|
VALUE obj, size = Qnil;
|
2012-03-08 19:35:05 +04:00
|
|
|
VALUE generator;
|
2012-03-08 19:30:28 +04:00
|
|
|
|
2013-02-05 07:49:41 +04:00
|
|
|
rb_check_arity(argc, 1, 2);
|
|
|
|
if (!rb_block_given_p()) {
|
|
|
|
rb_raise(rb_eArgError, "tried to call lazy new without a block");
|
2012-03-15 06:00:30 +04:00
|
|
|
}
|
2013-02-05 07:49:41 +04:00
|
|
|
obj = argv[0];
|
|
|
|
if (argc > 1) {
|
|
|
|
size = argv[1];
|
2012-03-15 06:00:30 +04:00
|
|
|
}
|
2012-03-08 19:30:28 +04:00
|
|
|
generator = generator_allocate(rb_cGenerator);
|
2013-02-05 07:49:41 +04:00
|
|
|
rb_block_call(generator, id_initialize, 0, 0, lazy_init_block_i, obj);
|
2019-09-06 23:33:19 +03:00
|
|
|
enumerator_init(self, generator, sym_each, 0, 0, 0, size, 0);
|
2012-04-03 05:24:51 +04:00
|
|
|
rb_ivar_set(self, id_receiver, obj);
|
2012-03-08 19:30:28 +04:00
|
|
|
|
2012-03-08 19:35:05 +04:00
|
|
|
return self;
|
2012-03-08 19:30:28 +04:00
|
|
|
}
|
|
|
|
|
2018-09-05 23:40:49 +03:00
|
|
|
#if 0 /* for RDoc */
|
|
|
|
/*
|
|
|
|
* call-seq:
|
|
|
|
* lazy.to_a -> array
|
|
|
|
* lazy.force -> array
|
|
|
|
*
|
|
|
|
* Expands +lazy+ enumerator to an array.
|
|
|
|
* See Enumerable#to_a.
|
|
|
|
*/
|
2022-08-06 04:13:20 +03:00
|
|
|
static VALUE
|
|
|
|
lazy_to_a(VALUE self)
|
2018-09-05 23:40:49 +03:00
|
|
|
{
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2016-09-19 04:36:56 +03:00
|
|
|
static void
|
|
|
|
lazy_set_args(VALUE lazy, VALUE args)
|
2012-03-26 14:52:02 +04:00
|
|
|
{
|
|
|
|
ID id = rb_frame_this_func();
|
2012-04-03 05:24:51 +04:00
|
|
|
rb_ivar_set(lazy, id_method, ID2SYM(id));
|
2012-03-26 14:52:02 +04:00
|
|
|
if (NIL_P(args)) {
|
|
|
|
/* Qfalse indicates that the arguments are empty */
|
2012-04-03 05:24:51 +04:00
|
|
|
rb_ivar_set(lazy, id_arguments, Qfalse);
|
2012-03-26 14:52:02 +04:00
|
|
|
}
|
|
|
|
else {
|
2012-04-03 05:24:51 +04:00
|
|
|
rb_ivar_set(lazy, id_arguments, args);
|
2012-03-26 14:52:02 +04:00
|
|
|
}
|
2016-09-19 04:36:56 +03:00
|
|
|
}
|
|
|
|
|
2020-07-22 03:52:50 +03:00
|
|
|
#if 0
|
2016-09-19 04:36:56 +03:00
|
|
|
static VALUE
|
|
|
|
lazy_set_method(VALUE lazy, VALUE args, rb_enumerator_size_func *size_fn)
|
|
|
|
{
|
|
|
|
struct enumerator *e = enumerator_ptr(lazy);
|
|
|
|
lazy_set_args(lazy, args);
|
2012-11-06 21:16:13 +04:00
|
|
|
e->size_fn = size_fn;
|
2012-03-26 14:52:02 +04:00
|
|
|
return lazy;
|
|
|
|
}
|
2020-07-22 03:52:50 +03:00
|
|
|
#endif
|
2012-03-24 19:17:31 +04:00
|
|
|
|
2016-09-19 04:36:56 +03:00
|
|
|
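/* Common helper for the lazy adapters below: records the new operation as a
 * proc_entry, duplicates the existing proc chain, rebuilds the backing
 * generator, and returns a fresh Enumerator::Lazy copy, leaving the
 * receiver untouched. */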
static VALUE
|
|
|
|
lazy_add_method(VALUE obj, int argc, VALUE *argv, VALUE args, VALUE memo,
|
|
|
|
const lazyenum_funcs *fn)
|
|
|
|
{
|
|
|
|
struct enumerator *new_e;
|
|
|
|
VALUE new_obj;
|
|
|
|
VALUE new_generator;
|
|
|
|
VALUE new_procs;
|
|
|
|
struct enumerator *e = enumerator_ptr(obj);
|
|
|
|
struct proc_entry *entry;
|
|
|
|
VALUE entry_obj = TypedData_Make_Struct(rb_cObject, struct proc_entry,
|
|
|
|
&proc_entry_data_type, entry);
|
|
|
|
if (rb_block_given_p()) {
|
2023-11-22 19:11:08 +03:00
|
|
|
RB_OBJ_WRITE(entry_obj, &entry->proc, rb_block_proc());
|
2016-09-19 04:36:56 +03:00
|
|
|
}
|
|
|
|
entry->fn = fn;
|
2023-11-22 19:11:08 +03:00
|
|
|
RB_OBJ_WRITE(entry_obj, &entry->memo, args);
|
2016-09-19 04:36:56 +03:00
|
|
|
|
|
|
|
lazy_set_args(entry_obj, memo);
|
|
|
|
|
|
|
|
new_procs = RTEST(e->procs) ? rb_ary_dup(e->procs) : rb_ary_new();
|
|
|
|
new_generator = lazy_generator_init(obj, new_procs);
|
|
|
|
rb_ary_push(new_procs, entry_obj);
|
|
|
|
|
|
|
|
new_obj = enumerator_init_copy(enumerator_allocate(rb_cLazy), obj);
|
2023-11-20 19:28:36 +03:00
|
|
|
new_e = RTYPEDDATA_GET_DATA(new_obj);
|
2023-11-22 18:00:01 +03:00
|
|
|
RB_OBJ_WRITE(new_obj, &new_e->obj, new_generator);
|
|
|
|
RB_OBJ_WRITE(new_obj, &new_e->procs, new_procs);
|
2016-09-19 04:36:56 +03:00
|
|
|
|
|
|
|
if (argc > 0) {
|
|
|
|
new_e->meth = rb_to_id(*argv++);
|
|
|
|
--argc;
|
|
|
|
}
|
|
|
|
else {
|
|
|
|
new_e->meth = id_each;
|
|
|
|
}
|
2023-11-22 18:00:01 +03:00
|
|
|
|
|
|
|
RB_OBJ_WRITE(new_obj, &new_e->args, rb_ary_new4(argc, argv));
|
|
|
|
|
2016-09-19 04:36:56 +03:00
|
|
|
return new_obj;
|
|
|
|
}
|
|
|
|
|
2012-03-08 19:30:28 +04:00
|
|
|
/*
|
|
|
|
* call-seq:
|
|
|
|
* e.lazy -> lazy_enumerator
|
2012-03-12 19:12:07 +04:00
|
|
|
*
|
2019-05-18 14:04:04 +03:00
|
|
|
* Returns an Enumerator::Lazy, which redefines most Enumerable
|
2019-03-21 02:32:11 +03:00
|
|
|
* methods to postpone enumeration and enumerate values only on an
|
|
|
|
* as-needed basis.
|
2012-03-12 19:12:07 +04:00
|
|
|
*
|
|
|
|
* === Example
|
|
|
|
*
|
2012-03-14 20:00:30 +04:00
|
|
|
* The following program finds Pythagorean triples:
|
2012-03-12 19:12:07 +04:00
|
|
|
*
|
|
|
|
* def pythagorean_triples
|
|
|
|
* (1..Float::INFINITY).lazy.flat_map {|z|
|
|
|
|
* (1..z).flat_map {|x|
|
|
|
|
* (x..z).select {|y|
|
|
|
|
* x**2 + y**2 == z**2
|
|
|
|
* }.map {|y|
|
|
|
|
* [x, y, z]
|
|
|
|
* }
|
|
|
|
* }
|
|
|
|
* }
|
|
|
|
* end
|
2012-03-14 20:00:30 +04:00
|
|
|
* # show first ten Pythagorean triples
|
2012-03-19 10:41:02 +04:00
|
|
|
* p pythagorean_triples.take(10).force # take is lazy, so force is needed
|
|
|
|
* p pythagorean_triples.first(10) # first is eager
|
2012-03-14 20:00:30 +04:00
|
|
|
* # show Pythagorean triples less than 100
|
|
|
|
* p pythagorean_triples.take_while { |*, z| z < 100 }.force
|
2012-03-08 19:30:28 +04:00
|
|
|
*/
|
|
|
|
static VALUE
|
2012-03-08 19:35:05 +04:00
|
|
|
enumerable_lazy(VALUE obj)
|
2012-03-08 19:30:28 +04:00
|
|
|
{
|
2019-10-04 22:51:57 +03:00
|
|
|
VALUE result = lazy_to_enum_i(obj, sym_each, 0, 0, lazyenum_size, rb_keyword_given_p());
|
2012-03-24 19:17:31 +04:00
|
|
|
/* Qfalse indicates that the Enumerator::Lazy has no method name */
|
2012-04-03 05:24:51 +04:00
|
|
|
rb_ivar_set(result, id_method, Qfalse);
|
2012-03-24 19:17:31 +04:00
|
|
|
return result;
|
2012-03-08 19:30:28 +04:00
|
|
|
}
|
|
|
|
|
2013-02-05 07:49:41 +04:00
|
|
|
static VALUE
|
2019-09-30 07:33:59 +03:00
|
|
|
lazy_to_enum_i(VALUE obj, VALUE meth, int argc, const VALUE *argv, rb_enumerator_size_func *size_fn, int kw_splat)
|
2013-02-05 07:49:41 +04:00
|
|
|
{
|
|
|
|
return enumerator_init(enumerator_allocate(rb_cLazy),
|
2019-09-30 07:33:59 +03:00
|
|
|
obj, meth, argc, argv, size_fn, Qnil, kw_splat);
|
2013-02-05 07:49:41 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* call-seq:
|
2019-12-22 21:46:24 +03:00
|
|
|
* lzy.to_enum(method = :each, *args) -> lazy_enum
|
|
|
|
* lzy.enum_for(method = :each, *args) -> lazy_enum
|
|
|
|
* lzy.to_enum(method = :each, *args) {|*args| block } -> lazy_enum
|
|
|
|
* lzy.enum_for(method = :each, *args) {|*args| block } -> lazy_enum
|
2013-02-05 07:49:41 +04:00
|
|
|
*
|
2019-03-21 02:32:11 +03:00
|
|
|
* Similar to Object#to_enum, except it returns a lazy enumerator.
|
2013-02-05 07:49:41 +04:00
|
|
|
* This makes it easy to define Enumerable methods that will
|
|
|
|
* naturally remain lazy if called from a lazy enumerator.
|
|
|
|
*
|
2019-03-21 02:32:11 +03:00
|
|
|
* For example, continuing from the example in Object#to_enum:
|
2013-02-05 07:49:41 +04:00
|
|
|
*
|
2019-08-25 00:05:19 +03:00
|
|
|
* # See Object#to_enum for the definition of repeat
|
2013-02-05 07:49:41 +04:00
|
|
|
* r = 1..Float::INFINITY
|
|
|
|
* r.repeat(2).first(5) # => [1, 1, 2, 2, 3]
|
|
|
|
* r.repeat(2).class # => Enumerator
|
|
|
|
* r.repeat(2).map{|n| n ** 2}.first(5) # => endless loop!
|
|
|
|
* # works naturally on lazy enumerator:
|
|
|
|
* r.lazy.repeat(2).class # => Enumerator::Lazy
|
|
|
|
* r.lazy.repeat(2).map{|n| n ** 2}.first(5) # => [1, 1, 4, 4, 9]
|
|
|
|
*/
|
|
|
|
|
|
|
|
static VALUE
|
|
|
|
lazy_to_enum(int argc, VALUE *argv, VALUE self)
|
|
|
|
{
|
2019-09-02 23:22:26 +03:00
|
|
|
VALUE lazy, meth = sym_each, super_meth;
|
2013-02-05 07:49:41 +04:00
|
|
|
|
|
|
|
if (argc > 0) {
|
|
|
|
--argc;
|
|
|
|
meth = *argv++;
|
|
|
|
}
|
2019-09-02 23:22:26 +03:00
|
|
|
if (RTEST((super_meth = rb_hash_aref(lazy_use_super_method, meth)))) {
|
|
|
|
meth = super_meth;
|
|
|
|
}
|
2019-10-04 22:51:57 +03:00
|
|
|
lazy = lazy_to_enum_i(self, meth, argc, argv, 0, rb_keyword_given_p());
|
2013-02-05 07:49:41 +04:00
|
|
|
if (rb_block_given_p()) {
|
2023-11-22 18:00:01 +03:00
|
|
|
RB_OBJ_WRITE(lazy, &enumerator_ptr(lazy)->size, rb_block_proc());
|
2013-02-05 07:49:41 +04:00
|
|
|
}
|
|
|
|
return lazy;
|
|
|
|
}
|
|
|
|
|
2019-06-05 14:39:21 +03:00
|
|
|
static VALUE
|
|
|
|
lazy_eager_size(VALUE self, VALUE args, VALUE eobj)
|
|
|
|
{
|
|
|
|
return enum_size(self);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* call-seq:
|
|
|
|
* lzy.eager -> enum
|
|
|
|
*
|
|
|
|
* Returns a non-lazy Enumerator converted from the lazy enumerator.
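*
*  For example:
*
*    lazy = (1..Float::INFINITY).lazy.map { |i| i * 2 }
*    lazy.eager.class     #=> Enumerator
*    lazy.eager.first(3)  #=> [2, 4, 6]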
|
|
|
|
*/
|
|
|
|
|
|
|
|
static VALUE
|
|
|
|
lazy_eager(VALUE self)
|
|
|
|
{
|
|
|
|
return enumerator_init(enumerator_allocate(rb_cEnumerator),
|
2019-09-06 23:33:19 +03:00
|
|
|
self, sym_each, 0, 0, lazy_eager_size, Qnil, 0);
|
2019-06-05 14:39:21 +03:00
|
|
|
}
|
|
|
|
|
2012-03-08 19:30:28 +04:00
|
|
|
static VALUE
|
2016-09-19 04:36:56 +03:00
|
|
|
lazyenum_yield(VALUE proc_entry, struct MEMO *result)
|
2012-03-08 19:30:28 +04:00
|
|
|
{
|
2016-09-19 04:36:56 +03:00
|
|
|
struct proc_entry *entry = proc_entry_ptr(proc_entry);
|
|
|
|
return rb_proc_call_with_block(entry->proc, 1, &result->memo_value, Qnil);
|
|
|
|
}
|
2012-03-08 19:30:28 +04:00
|
|
|
|
2016-09-19 04:36:56 +03:00
|
|
|
static VALUE
|
|
|
|
lazyenum_yield_values(VALUE proc_entry, struct MEMO *result)
|
|
|
|
{
|
|
|
|
struct proc_entry *entry = proc_entry_ptr(proc_entry);
|
|
|
|
int argc = 1;
|
|
|
|
const VALUE *argv = &result->memo_value;
|
|
|
|
if (LAZY_MEMO_PACKED_P(result)) {
|
|
|
|
const VALUE args = *argv;
|
|
|
|
argc = RARRAY_LENINT(args);
|
|
|
|
argv = RARRAY_CONST_PTR(args);
|
|
|
|
}
|
|
|
|
return rb_proc_call_with_block(entry->proc, argc, argv, Qnil);
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct MEMO *
|
|
|
|
lazy_map_proc(VALUE proc_entry, struct MEMO *result, VALUE memos, long memo_index)
|
|
|
|
{
|
|
|
|
VALUE value = lazyenum_yield_values(proc_entry, result);
|
|
|
|
LAZY_MEMO_SET_VALUE(result, value);
|
2017-06-10 13:26:32 +03:00
|
|
|
LAZY_MEMO_RESET_PACKED(result);
|
2016-09-19 04:36:56 +03:00
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
static VALUE
|
|
|
|
lazy_map_size(VALUE entry, VALUE receiver)
|
|
|
|
{
|
|
|
|
return receiver;
|
2012-03-08 19:30:28 +04:00
|
|
|
}
|
|
|
|
|
2016-09-19 04:36:56 +03:00
|
|
|
static const lazyenum_funcs lazy_map_funcs = {
|
|
|
|
lazy_map_proc, lazy_map_size,
|
|
|
|
};
|
|
|
|
|
2019-03-21 02:32:11 +03:00
|
|
|
/*
|
|
|
|
* call-seq:
|
|
|
|
* lazy.collect { |obj| block } -> lazy_enumerator
|
|
|
|
* lazy.map { |obj| block } -> lazy_enumerator
|
|
|
|
*
|
|
|
|
* Like Enumerable#map, but chains operation to be lazy-evaluated.
|
|
|
|
*
|
|
|
|
* (1..Float::INFINITY).lazy.map {|i| i**2 }
|
|
|
|
* #=> #<Enumerator::Lazy: #<Enumerator::Lazy: 1..Infinity>:map>
|
|
|
|
* (1..Float::INFINITY).lazy.map {|i| i**2 }.first(3)
|
|
|
|
* #=> [1, 4, 9]
|
|
|
|
*/
|
|
|
|
|
2012-03-08 19:30:28 +04:00
|
|
|
static VALUE
|
|
|
|
lazy_map(VALUE obj)
|
|
|
|
{
|
|
|
|
if (!rb_block_given_p()) {
|
|
|
|
rb_raise(rb_eArgError, "tried to call lazy map without a block");
|
|
|
|
}
|
|
|
|
|
2016-09-19 04:36:56 +03:00
|
|
|
return lazy_add_method(obj, 0, 0, Qnil, Qnil, &lazy_map_funcs);
|
2012-03-08 19:30:28 +04:00
|
|
|
}
|
|
|
|
|
2020-07-21 18:58:48 +03:00
|
|
|
struct flat_map_i_arg {
|
|
|
|
struct MEMO *result;
|
|
|
|
long index;
|
|
|
|
};
|
|
|
|
|
2012-03-14 03:16:37 +04:00
|
|
|
static VALUE
|
2020-07-21 18:58:48 +03:00
|
|
|
lazy_flat_map_i(RB_BLOCK_CALL_FUNC_ARGLIST(i, y))
|
2012-03-14 03:16:37 +04:00
|
|
|
{
|
2020-07-21 18:58:48 +03:00
|
|
|
struct flat_map_i_arg *arg = (struct flat_map_i_arg *)y;
|
2018-09-18 11:49:40 +03:00
|
|
|
|
2020-07-21 18:58:48 +03:00
|
|
|
return lazy_yielder_yield(arg->result, arg->index, argc, argv);
|
2012-03-14 03:16:37 +04:00
|
|
|
}
|
|
|
|
|
2020-07-21 18:58:48 +03:00
|
|
|
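/* flat_map handling of a block result: arrays (or objects convertible via
 * to_ary) have their elements fed back into the rest of the chain one by
 * one, objects responding to both "force" and "each" (i.e. lazy
 * enumerators) are iterated with rb_block_call, and any other value passes
 * through unchanged. */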
static struct MEMO *
|
|
|
|
lazy_flat_map_proc(VALUE proc_entry, struct MEMO *result, VALUE memos, long memo_index)
|
2012-03-19 12:22:29 +04:00
|
|
|
{
|
2020-07-21 18:58:48 +03:00
|
|
|
VALUE value = lazyenum_yield_values(proc_entry, result);
|
|
|
|
VALUE ary = 0;
|
|
|
|
const long proc_index = memo_index + 1;
|
|
|
|
int break_p = LAZY_MEMO_BREAK_P(result);
|
2012-03-19 12:22:29 +04:00
|
|
|
|
2020-07-21 18:58:48 +03:00
|
|
|
if (RB_TYPE_P(value, T_ARRAY)) {
|
|
|
|
ary = value;
|
2012-03-19 12:22:29 +04:00
|
|
|
}
|
2020-07-21 18:58:48 +03:00
|
|
|
else if (rb_respond_to(value, id_force) && rb_respond_to(value, id_each)) {
|
|
|
|
struct flat_map_i_arg arg = {.result = result, .index = proc_index};
|
|
|
|
LAZY_MEMO_RESET_BREAK(result);
|
|
|
|
rb_block_call(value, id_each, 0, 0, lazy_flat_map_i, (VALUE)&arg);
|
|
|
|
if (break_p) LAZY_MEMO_SET_BREAK(result);
|
|
|
|
return 0;
|
2012-03-19 12:22:29 +04:00
|
|
|
}
|
|
|
|
|
2020-07-21 18:58:48 +03:00
|
|
|
if (ary || !NIL_P(ary = rb_check_array_type(value))) {
|
|
|
|
long i;
|
|
|
|
LAZY_MEMO_RESET_BREAK(result);
|
|
|
|
for (i = 0; i + 1 < RARRAY_LEN(ary); i++) {
|
2020-08-14 08:45:23 +03:00
|
|
|
const VALUE argv = RARRAY_AREF(ary, i);
|
|
|
|
lazy_yielder_yield(result, proc_index, 1, &argv);
|
2020-07-21 18:58:48 +03:00
|
|
|
}
|
|
|
|
if (break_p) LAZY_MEMO_SET_BREAK(result);
|
|
|
|
if (i >= RARRAY_LEN(ary)) return 0;
|
|
|
|
value = RARRAY_AREF(ary, i);
|
2012-03-14 03:16:37 +04:00
|
|
|
}
|
2020-07-21 18:58:48 +03:00
|
|
|
LAZY_MEMO_SET_VALUE(result, value);
|
|
|
|
LAZY_MEMO_RESET_PACKED(result);
|
|
|
|
return result;
|
2012-03-09 09:34:41 +04:00
|
|
|
}
|
|
|
|
|
2020-07-21 18:58:48 +03:00
|
|
|
static const lazyenum_funcs lazy_flat_map_funcs = {
|
|
|
|
lazy_flat_map_proc, 0,
|
|
|
|
};
|
|
|
|
|
2013-01-14 12:36:15 +04:00
|
|
|
/*
|
|
|
|
* call-seq:
|
2013-04-12 06:59:07 +04:00
|
|
|
* lazy.collect_concat { |obj| block } -> a_lazy_enumerator
|
2013-01-14 12:36:15 +04:00
|
|
|
* lazy.flat_map { |obj| block } -> a_lazy_enumerator
|
|
|
|
*
|
|
|
|
* Returns a new lazy enumerator with the concatenated results of running
|
2019-12-22 21:46:24 +03:00
|
|
|
* +block+ once for every element in the lazy enumerator.
|
2013-01-14 12:36:15 +04:00
|
|
|
*
|
|
|
|
* ["foo", "bar"].lazy.flat_map {|i| i.each_char.lazy}.force
|
|
|
|
* #=> ["f", "o", "o", "b", "a", "r"]
|
|
|
|
*
|
2019-12-22 21:46:24 +03:00
|
|
|
* A value +x+ returned by +block+ is decomposed if either of
|
2013-01-14 12:36:15 +04:00
|
|
|
* the following conditions is true:
|
|
|
|
*
|
2019-12-22 21:46:24 +03:00
|
|
|
* * +x+ responds to both each and force, which means that
|
|
|
|
* +x+ is a lazy enumerator.
|
|
|
|
* * +x+ is an array or responds to to_ary.
|
2013-01-14 12:36:15 +04:00
|
|
|
*
|
2019-12-22 21:46:24 +03:00
|
|
|
* Otherwise, +x+ is contained as-is in the return value.
|
2013-01-14 12:36:15 +04:00
|
|
|
*
|
|
|
|
* [{a:1}, {b:2}].lazy.flat_map {|i| i}.force
|
|
|
|
* #=> [{:a=>1}, {:b=>2}]
|
|
|
|
*/
|
2012-03-09 09:34:41 +04:00
|
|
|
static VALUE
|
|
|
|
lazy_flat_map(VALUE obj)
|
|
|
|
{
|
|
|
|
if (!rb_block_given_p()) {
|
|
|
|
rb_raise(rb_eArgError, "tried to call lazy flat_map without a block");
|
|
|
|
}
|
|
|
|
|
2020-07-21 18:58:48 +03:00
|
|
|
return lazy_add_method(obj, 0, 0, Qnil, Qnil, &lazy_flat_map_funcs);
|
2012-03-09 09:34:41 +04:00
|
|
|
}
|
2012-03-08 19:30:28 +04:00
|
|
|
|
2016-09-19 04:36:56 +03:00
|
|
|
static struct MEMO *
|
|
|
|
lazy_select_proc(VALUE proc_entry, struct MEMO *result, VALUE memos, long memo_index)
|
2012-03-08 19:30:28 +04:00
|
|
|
{
|
2016-09-19 04:36:56 +03:00
|
|
|
VALUE chain = lazyenum_yield(proc_entry, result);
|
|
|
|
if (!RTEST(chain)) return 0;
|
|
|
|
return result;
|
2012-03-08 19:30:28 +04:00
|
|
|
}
|
|
|
|
|
2016-09-19 04:36:56 +03:00
|
|
|
static const lazyenum_funcs lazy_select_funcs = {
|
|
|
|
lazy_select_proc, 0,
|
|
|
|
};
|
|
|
|
|
2019-03-21 02:32:11 +03:00
|
|
|
/*
|
|
|
|
* call-seq:
|
|
|
|
* lazy.find_all { |obj| block } -> lazy_enumerator
|
|
|
|
* lazy.select { |obj| block } -> lazy_enumerator
|
|
|
|
* lazy.filter { |obj| block } -> lazy_enumerator
|
|
|
|
*
|
|
|
|
* Like Enumerable#select, but chains operation to be lazy-evaluated.
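*
*  For example:
*
*    (1..Float::INFINITY).lazy.select { |i| i % 3 == 0 }.first(3)
*    #=> [3, 6, 9]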
|
|
|
|
*/
|
2012-03-08 19:30:28 +04:00
|
|
|
static VALUE
|
|
|
|
lazy_select(VALUE obj)
|
|
|
|
{
|
|
|
|
if (!rb_block_given_p()) {
|
|
|
|
rb_raise(rb_eArgError, "tried to call lazy select without a block");
|
|
|
|
}
|
|
|
|
|
2016-09-19 04:36:56 +03:00
|
|
|
return lazy_add_method(obj, 0, 0, Qnil, Qnil, &lazy_select_funcs);
|
2012-03-08 19:30:28 +04:00
|
|
|
}
|
|
|
|
|
2019-06-21 11:27:20 +03:00
|
|
|
static struct MEMO *
|
|
|
|
lazy_filter_map_proc(VALUE proc_entry, struct MEMO *result, VALUE memos, long memo_index)
|
|
|
|
{
|
|
|
|
VALUE value = lazyenum_yield_values(proc_entry, result);
|
|
|
|
if (!RTEST(value)) return 0;
|
|
|
|
LAZY_MEMO_SET_VALUE(result, value);
|
|
|
|
LAZY_MEMO_RESET_PACKED(result);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
static const lazyenum_funcs lazy_filter_map_funcs = {
|
|
|
|
lazy_filter_map_proc, 0,
|
|
|
|
};
|
|
|
|
|
|
|
|
/*
|
|
|
|
* call-seq:
|
|
|
|
* lazy.filter_map { |obj| block } -> lazy_enumerator
|
|
|
|
*
|
|
|
|
* Like Enumerable#filter_map, but chains operation to be lazy-evaluated.
|
2019-06-21 11:53:32 +03:00
|
|
|
*
|
2019-12-22 21:46:24 +03:00
|
|
|
* (1..).lazy.filter_map { |i| i * 2 if i.even? }.first(5)
|
|
|
|
* #=> [4, 8, 12, 16, 20]
|
2019-06-21 11:27:20 +03:00
|
|
|
*/
|
|
|
|
|
|
|
|
static VALUE
|
|
|
|
lazy_filter_map(VALUE obj)
|
|
|
|
{
|
|
|
|
if (!rb_block_given_p()) {
|
2019-06-21 11:43:21 +03:00
|
|
|
rb_raise(rb_eArgError, "tried to call lazy filter_map without a block");
|
2019-06-21 11:27:20 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
return lazy_add_method(obj, 0, 0, Qnil, Qnil, &lazy_filter_map_funcs);
|
|
|
|
}
|
|
|
|
|
2016-09-19 04:36:56 +03:00
|
|
|
static struct MEMO *
|
|
|
|
lazy_reject_proc(VALUE proc_entry, struct MEMO *result, VALUE memos, long memo_index)
|
2012-03-08 19:30:28 +04:00
|
|
|
{
|
2016-09-19 04:36:56 +03:00
|
|
|
VALUE chain = lazyenum_yield(proc_entry, result);
|
|
|
|
if (RTEST(chain)) return 0;
|
|
|
|
return result;
|
2012-03-08 19:30:28 +04:00
|
|
|
}
|
|
|
|
|
2016-09-19 04:36:56 +03:00
|
|
|
static const lazyenum_funcs lazy_reject_funcs = {
|
|
|
|
lazy_reject_proc, 0,
|
|
|
|
};
|
|
|
|
|
2019-03-21 02:32:11 +03:00
|
|
|
/*
|
|
|
|
* call-seq:
|
|
|
|
* lazy.reject { |obj| block } -> lazy_enumerator
|
|
|
|
*
|
|
|
|
* Like Enumerable#reject, but chains operation to be lazy-evaluated.
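*
*  For example:
*
*    (1..Float::INFINITY).lazy.reject(&:odd?).first(3)
*    #=> [2, 4, 6]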
|
|
|
|
*/
|
|
|
|
|
2012-03-08 19:30:28 +04:00
|
|
|
static VALUE
|
|
|
|
lazy_reject(VALUE obj)
|
|
|
|
{
|
|
|
|
if (!rb_block_given_p()) {
|
|
|
|
rb_raise(rb_eArgError, "tried to call lazy reject without a block");
|
|
|
|
}
|
|
|
|
|
2016-09-19 04:36:56 +03:00
|
|
|
return lazy_add_method(obj, 0, 0, Qnil, Qnil, &lazy_reject_funcs);
|
2012-03-08 19:30:28 +04:00
|
|
|
}
|
|
|
|
|
2016-09-19 04:36:56 +03:00
|
|
|
static struct MEMO *
|
|
|
|
lazy_grep_proc(VALUE proc_entry, struct MEMO *result, VALUE memos, long memo_index)
|
2012-03-08 19:30:28 +04:00
|
|
|
{
|
2016-09-19 04:36:56 +03:00
|
|
|
struct proc_entry *entry = proc_entry_ptr(proc_entry);
|
|
|
|
VALUE chain = rb_funcall(entry->memo, id_eqq, 1, result->memo_value);
|
|
|
|
if (!RTEST(chain)) return 0;
|
|
|
|
return result;
|
2012-03-15 18:20:27 +04:00
|
|
|
}
|
|
|
|
|
2016-09-19 04:36:56 +03:00
|
|
|
static struct MEMO *
|
|
|
|
lazy_grep_iter_proc(VALUE proc_entry, struct MEMO *result, VALUE memos, long memo_index)
|
2012-03-15 18:20:27 +04:00
|
|
|
{
|
2016-09-19 04:36:56 +03:00
|
|
|
struct proc_entry *entry = proc_entry_ptr(proc_entry);
|
|
|
|
VALUE value, chain = rb_funcall(entry->memo, id_eqq, 1, result->memo_value);
|
2012-03-15 18:20:27 +04:00
|
|
|
|
2016-09-19 04:36:56 +03:00
|
|
|
if (!RTEST(chain)) return 0;
|
|
|
|
value = rb_proc_call_with_block(entry->proc, 1, &(result->memo_value), Qnil);
|
|
|
|
LAZY_MEMO_SET_VALUE(result, value);
|
2017-06-10 13:26:32 +03:00
|
|
|
LAZY_MEMO_RESET_PACKED(result);
|
2016-09-19 04:36:56 +03:00
|
|
|
|
|
|
|
return result;
|
2012-03-08 19:30:28 +04:00
|
|
|
}
|
|
|
|
|
2016-09-19 04:36:56 +03:00
|
|
|
static const lazyenum_funcs lazy_grep_iter_funcs = {
|
|
|
|
lazy_grep_iter_proc, 0,
|
|
|
|
};
|
|
|
|
|
|
|
|
static const lazyenum_funcs lazy_grep_funcs = {
|
|
|
|
lazy_grep_proc, 0,
|
|
|
|
};
|
|
|
|
|
2019-03-21 02:32:11 +03:00
|
|
|
/*
|
|
|
|
* call-seq:
|
|
|
|
* lazy.grep(pattern) -> lazy_enumerator
|
|
|
|
* lazy.grep(pattern) { |obj| block } -> lazy_enumerator
|
|
|
|
*
|
|
|
|
* Like Enumerable#grep, but chains operation to be lazy-evaluated.
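*
*  For example:
*
*    (1..Float::INFINITY).lazy.grep(2..5).first(3)
*    #=> [2, 3, 4]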
|
|
|
|
*/
|
|
|
|
|
2012-03-08 19:30:28 +04:00
|
|
|
static VALUE
|
|
|
|
lazy_grep(VALUE obj, VALUE pattern)
|
|
|
|
{
|
2016-09-19 04:36:56 +03:00
|
|
|
const lazyenum_funcs *const funcs = rb_block_given_p() ?
|
|
|
|
&lazy_grep_iter_funcs : &lazy_grep_funcs;
|
|
|
|
return lazy_add_method(obj, 0, 0, pattern, rb_ary_new3(1, pattern), funcs);
|
2012-03-08 19:30:28 +04:00
|
|
|
}
|
|
|
|
|
2018-08-16 03:58:21 +03:00
|
|
|
static struct MEMO *
|
|
|
|
lazy_grep_v_proc(VALUE proc_entry, struct MEMO *result, VALUE memos, long memo_index)
|
2015-12-08 10:23:43 +03:00
|
|
|
{
|
2018-08-16 03:58:21 +03:00
|
|
|
struct proc_entry *entry = proc_entry_ptr(proc_entry);
|
|
|
|
VALUE chain = rb_funcall(entry->memo, id_eqq, 1, result->memo_value);
|
|
|
|
if (RTEST(chain)) return 0;
|
|
|
|
return result;
|
2015-12-08 10:23:43 +03:00
|
|
|
}
|
|
|
|
|
2018-08-16 03:58:21 +03:00
|
|
|
static struct MEMO *
|
|
|
|
lazy_grep_v_iter_proc(VALUE proc_entry, struct MEMO *result, VALUE memos, long memo_index)
|
2015-12-08 10:23:43 +03:00
|
|
|
{
|
2018-08-16 03:58:21 +03:00
|
|
|
struct proc_entry *entry = proc_entry_ptr(proc_entry);
|
|
|
|
VALUE value, chain = rb_funcall(entry->memo, id_eqq, 1, result->memo_value);
|
2015-12-08 10:23:43 +03:00
|
|
|
|
2018-08-16 03:58:21 +03:00
|
|
|
if (RTEST(chain)) return 0;
|
|
|
|
value = rb_proc_call_with_block(entry->proc, 1, &(result->memo_value), Qnil);
|
|
|
|
LAZY_MEMO_SET_VALUE(result, value);
|
|
|
|
LAZY_MEMO_RESET_PACKED(result);
|
|
|
|
|
|
|
|
return result;
|
2015-12-08 10:23:43 +03:00
|
|
|
}
|
|
|
|
|
2018-08-16 03:58:21 +03:00
|
|
|
static const lazyenum_funcs lazy_grep_v_iter_funcs = {
|
|
|
|
lazy_grep_v_iter_proc, 0,
|
|
|
|
};
|
|
|
|
|
|
|
|
static const lazyenum_funcs lazy_grep_v_funcs = {
|
|
|
|
lazy_grep_v_proc, 0,
|
|
|
|
};
|
|
|
|
|
2019-03-21 02:32:11 +03:00
|
|
|
/*
|
|
|
|
* call-seq:
|
|
|
|
* lazy.grep_v(pattern) -> lazy_enumerator
|
|
|
|
* lazy.grep_v(pattern) { |obj| block } -> lazy_enumerator
|
|
|
|
*
|
|
|
|
* Like Enumerable#grep_v, but chains operation to be lazy-evaluated.
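*
*  For example:
*
*    (1..Float::INFINITY).lazy.grep_v(2..100).first(3)
*    #=> [1, 101, 102]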
|
|
|
|
*/
|
|
|
|
|
2015-12-08 10:23:43 +03:00
|
|
|
static VALUE
|
|
|
|
lazy_grep_v(VALUE obj, VALUE pattern)
|
|
|
|
{
|
2018-08-16 03:58:21 +03:00
|
|
|
const lazyenum_funcs *const funcs = rb_block_given_p() ?
|
|
|
|
&lazy_grep_v_iter_funcs : &lazy_grep_v_funcs;
|
|
|
|
return lazy_add_method(obj, 0, 0, pattern, rb_ary_new3(1, pattern), funcs);
|
2015-12-08 10:23:43 +03:00
|
|
|
}
|
|
|
|
|
2012-03-15 14:14:22 +04:00
|
|
|
static VALUE
|
|
|
|
call_next(VALUE obj)
|
|
|
|
{
|
|
|
|
return rb_funcall(obj, id_next, 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
static VALUE
|
2019-08-26 08:51:00 +03:00
|
|
|
next_stopped(VALUE obj, VALUE _)
|
2012-03-15 14:14:22 +04:00
|
|
|
{
|
|
|
|
return Qnil;
|
|
|
|
}
|
|
|
|
|
2020-07-22 03:52:50 +03:00
|
|
|
static struct MEMO *
|
|
|
|
lazy_zip_arrays_func(VALUE proc_entry, struct MEMO *result, VALUE memos, long memo_index)
|
2013-01-24 11:05:42 +04:00
|
|
|
{
|
2020-07-22 03:52:50 +03:00
|
|
|
struct proc_entry *entry = proc_entry_ptr(proc_entry);
|
|
|
|
VALUE ary, arrays = entry->memo;
|
|
|
|
VALUE memo = rb_ary_entry(memos, memo_index);
|
|
|
|
long i, count = NIL_P(memo) ? 0 : NUM2LONG(memo);
|
2013-01-24 11:05:42 +04:00
|
|
|
|
|
|
|
ary = rb_ary_new2(RARRAY_LEN(arrays) + 1);
|
2020-07-22 03:52:50 +03:00
|
|
|
rb_ary_push(ary, result->memo_value);
|
2013-01-24 11:05:42 +04:00
|
|
|
for (i = 0; i < RARRAY_LEN(arrays); i++) {
|
2013-05-13 13:56:22 +04:00
|
|
|
rb_ary_push(ary, rb_ary_entry(RARRAY_AREF(arrays, i), count));
|
2013-01-24 11:05:42 +04:00
|
|
|
}
|
2020-07-22 03:52:50 +03:00
|
|
|
LAZY_MEMO_SET_VALUE(result, ary);
|
|
|
|
rb_ary_store(memos, memo_index, LONG2NUM(++count));
|
|
|
|
return result;
|
2013-01-24 11:05:42 +04:00
|
|
|
}
|
|
|
|
|
2020-07-22 03:52:50 +03:00
|
|
|
static struct MEMO *
|
|
|
|
lazy_zip_func(VALUE proc_entry, struct MEMO *result, VALUE memos, long memo_index)
|
2012-03-14 03:08:15 +04:00
|
|
|
{
|
2020-07-22 03:52:50 +03:00
|
|
|
struct proc_entry *entry = proc_entry_ptr(proc_entry);
|
|
|
|
VALUE arg = rb_ary_entry(memos, memo_index);
|
|
|
|
VALUE zip_args = entry->memo;
|
|
|
|
VALUE ary, v;
|
2012-03-14 13:41:44 +04:00
|
|
|
long i;
|
2012-03-14 03:08:15 +04:00
|
|
|
|
2013-01-24 10:24:22 +04:00
|
|
|
if (NIL_P(arg)) {
|
|
|
|
arg = rb_ary_new2(RARRAY_LEN(zip_args));
|
|
|
|
for (i = 0; i < RARRAY_LEN(zip_args); i++) {
|
2013-05-13 13:56:22 +04:00
|
|
|
rb_ary_push(arg, rb_funcall(RARRAY_AREF(zip_args, i), id_to_enum, 0));
|
2013-01-24 10:24:22 +04:00
|
|
|
}
|
2020-07-22 03:52:50 +03:00
|
|
|
rb_ary_store(memos, memo_index, arg);
|
2013-01-24 10:24:22 +04:00
|
|
|
}
|
|
|
|
|
2012-03-14 03:08:15 +04:00
|
|
|
ary = rb_ary_new2(RARRAY_LEN(arg) + 1);
|
2020-07-22 03:52:50 +03:00
|
|
|
rb_ary_push(ary, result->memo_value);
|
2012-03-14 03:08:15 +04:00
|
|
|
for (i = 0; i < RARRAY_LEN(arg); i++) {
|
2013-05-13 13:56:22 +04:00
|
|
|
v = rb_rescue2(call_next, RARRAY_AREF(arg, i), next_stopped, 0,
|
2012-03-16 01:30:12 +04:00
|
|
|
rb_eStopIteration, (VALUE)0);
|
2012-03-14 03:08:15 +04:00
|
|
|
rb_ary_push(ary, v);
|
|
|
|
}
|
2020-07-22 03:52:50 +03:00
|
|
|
LAZY_MEMO_SET_VALUE(result, ary);
|
|
|
|
return result;
|
2012-03-14 03:08:15 +04:00
|
|
|
}
|
|
|
|
|
2020-07-22 03:52:50 +03:00
|
|
|
static const lazyenum_funcs lazy_zip_funcs[] = {
|
|
|
|
{lazy_zip_func, lazy_receiver_size,},
|
|
|
|
{lazy_zip_arrays_func, lazy_receiver_size,},
|
|
|
|
};
|
|
|
|
|
2019-03-21 02:32:11 +03:00
|
|
|
/*
|
|
|
|
* call-seq:
|
|
|
|
* lazy.zip(arg, ...) -> lazy_enumerator
|
|
|
|
* lazy.zip(arg, ...) { |arr| block } -> nil
|
|
|
|
*
|
|
|
|
* Like Enumerable#zip, but chains operation to be lazy-evaluated.
|
|
|
|
* However, if a block is given to zip, values are enumerated immediately.
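*
*  For example:
*
*    (1..Float::INFINITY).lazy.zip(21..Float::INFINITY).first(3)
*    #=> [[1, 21], [2, 22], [3, 23]]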
|
|
|
|
*/
|
2012-03-14 03:08:15 +04:00
|
|
|
static VALUE
|
|
|
|
lazy_zip(int argc, VALUE *argv, VALUE obj)
|
|
|
|
{
|
2013-01-24 11:05:42 +04:00
|
|
|
VALUE ary, v;
|
|
|
|
long i;
|
2020-07-22 03:52:50 +03:00
|
|
|
const lazyenum_funcs *funcs = &lazy_zip_funcs[1];
|
2012-03-14 03:08:15 +04:00
|
|
|
|
2012-03-15 13:25:03 +04:00
|
|
|
if (rb_block_given_p()) {
|
|
|
|
return rb_call_super(argc, argv);
|
|
|
|
}
|
2013-01-24 11:05:42 +04:00
|
|
|
|
|
|
|
ary = rb_ary_new2(argc);
|
|
|
|
for (i = 0; i < argc; i++) {
|
|
|
|
v = rb_check_array_type(argv[i]);
|
|
|
|
if (NIL_P(v)) {
|
2013-01-24 11:50:33 +04:00
|
|
|
for (; i < argc; i++) {
|
|
|
|
if (!rb_respond_to(argv[i], id_each)) {
|
2015-09-28 05:40:46 +03:00
|
|
|
rb_raise(rb_eTypeError, "wrong argument type %"PRIsVALUE" (must respond to :each)",
|
|
|
|
rb_obj_class(argv[i]));
|
2013-01-24 11:50:33 +04:00
|
|
|
}
|
|
|
|
}
|
2013-01-24 11:05:42 +04:00
|
|
|
ary = rb_ary_new4(argc, argv);
|
2020-07-22 03:52:50 +03:00
|
|
|
funcs = &lazy_zip_funcs[0];
|
2013-01-24 11:05:42 +04:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
rb_ary_push(ary, v);
|
|
|
|
}
|
2012-03-14 03:08:15 +04:00
|
|
|
|
2020-07-22 03:52:50 +03:00
|
|
|
return lazy_add_method(obj, 0, 0, ary, ary, funcs);
|
2012-03-14 03:08:15 +04:00
|
|
|
}
|
|
|
|
|
2016-09-19 04:36:56 +03:00
|
|
|
static struct MEMO *
|
|
|
|
lazy_take_proc(VALUE proc_entry, struct MEMO *result, VALUE memos, long memo_index)
|
2012-03-14 14:29:25 +04:00
|
|
|
{
|
2013-01-24 10:22:34 +04:00
|
|
|
long remain;
|
2016-09-19 04:36:56 +03:00
|
|
|
struct proc_entry *entry = proc_entry_ptr(proc_entry);
|
|
|
|
VALUE memo = rb_ary_entry(memos, memo_index);
|
|
|
|
|
2013-01-24 10:22:34 +04:00
|
|
|
if (NIL_P(memo)) {
|
2016-09-19 04:36:56 +03:00
|
|
|
memo = entry->memo;
|
2013-01-24 10:22:34 +04:00
|
|
|
}
|
2012-03-14 14:29:25 +04:00
|
|
|
|
2016-09-19 04:36:56 +03:00
|
|
|
remain = NUM2LONG(memo);
|
2022-08-25 07:47:47 +03:00
|
|
|
if (--remain == 0) LAZY_MEMO_SET_BREAK(result);
|
|
|
|
rb_ary_store(memos, memo_index, LONG2NUM(remain));
|
2016-09-19 04:36:56 +03:00
|
|
|
return result;
|
2012-03-14 14:29:25 +04:00
|
|
|
}
|
|
|
|
|
2012-11-06 21:16:29 +04:00
|
|
|
static VALUE
|
2016-09-19 04:36:56 +03:00
|
|
|
lazy_take_size(VALUE entry, VALUE receiver)
|
2012-11-07 02:50:30 +04:00
|
|
|
{
|
2016-09-19 04:36:56 +03:00
|
|
|
long len = NUM2LONG(RARRAY_AREF(rb_ivar_get(entry, id_arguments), 0));
|
2012-11-06 21:16:29 +04:00
|
|
|
if (NIL_P(receiver) || (FIXNUM_P(receiver) && FIX2LONG(receiver) < len))
|
|
|
|
return receiver;
|
|
|
|
return LONG2NUM(len);
|
|
|
|
}
|
|
|
|
|
2022-08-25 07:42:35 +03:00
|
|
|
static int
|
|
|
|
lazy_take_precheck(VALUE proc_entry)
|
|
|
|
{
|
|
|
|
struct proc_entry *entry = proc_entry_ptr(proc_entry);
|
|
|
|
return entry->memo != INT2FIX(0);
|
|
|
|
}
|
|
|
|
|
2016-09-19 04:36:56 +03:00
|
|
|
static const lazyenum_funcs lazy_take_funcs = {
|
2022-08-25 07:42:35 +03:00
|
|
|
lazy_take_proc, lazy_take_size, lazy_take_precheck,
|
2016-09-19 04:36:56 +03:00
|
|
|
};
|
|
|
|
|
2019-03-21 02:32:11 +03:00
|
|
|
/*
|
|
|
|
* call-seq:
|
|
|
|
* lazy.take(n) -> lazy_enumerator
|
|
|
|
*
|
|
|
|
* Like Enumerable#take, but chains operation to be lazy-evaluated.
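*
*  For example:
*
*    (1..Float::INFINITY).lazy.take(3).force
*    #=> [1, 2, 3]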
|
|
|
|
*/
|
|
|
|
|
2012-03-14 14:29:25 +04:00
|
|
|
static VALUE
|
|
|
|
lazy_take(VALUE obj, VALUE n)
|
|
|
|
{
|
|
|
|
long len = NUM2LONG(n);
|
|
|
|
|
|
|
|
if (len < 0) {
|
|
|
|
rb_raise(rb_eArgError, "attempt to take negative size");
|
|
|
|
}
|
2016-09-19 04:36:56 +03:00
|
|
|
|
2022-08-25 07:42:35 +03:00
|
|
|
n = LONG2NUM(len); /* no more conversion */
|
2016-09-19 04:36:56 +03:00
|
|
|
|
2022-08-25 07:42:35 +03:00
|
|
|
return lazy_add_method(obj, 0, 0, n, rb_ary_new3(1, n), &lazy_take_funcs);
|
2012-03-14 14:29:25 +04:00
|
|
|
}
|
|
|
|
|
2016-09-19 04:36:56 +03:00
|
|
|
static struct MEMO *
|
|
|
|
lazy_take_while_proc(VALUE proc_entry, struct MEMO *result, VALUE memos, long memo_index)
|
2012-03-14 17:04:18 +04:00
|
|
|
{
|
2016-09-19 04:36:56 +03:00
|
|
|
VALUE take = lazyenum_yield_values(proc_entry, result);
|
|
|
|
if (!RTEST(take)) {
|
|
|
|
LAZY_MEMO_SET_BREAK(result);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
return result;
|
2012-03-14 17:04:18 +04:00
|
|
|
}
|
|
|
|
|
2016-09-19 04:36:56 +03:00
|
|
|
static const lazyenum_funcs lazy_take_while_funcs = {
|
|
|
|
lazy_take_while_proc, 0,
|
|
|
|
};
|
|
|
|
|
2019-03-21 02:32:11 +03:00
|
|
|
/*
|
|
|
|
* call-seq:
|
|
|
|
* lazy.take_while { |obj| block } -> lazy_enumerator
|
|
|
|
*
|
|
|
|
* Like Enumerable#take_while, but chains operation to be lazy-evaluated.
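*
*  For example:
*
*    (1..Float::INFINITY).lazy.take_while { |i| i < 4 }.force
*    #=> [1, 2, 3]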
|
|
|
|
*/
|
|
|
|
|
2012-03-14 17:04:18 +04:00
|
|
|
static VALUE
|
|
|
|
lazy_take_while(VALUE obj)
|
|
|
|
{
|
2013-01-14 11:42:43 +04:00
|
|
|
if (!rb_block_given_p()) {
|
|
|
|
rb_raise(rb_eArgError, "tried to call lazy take_while without a block");
|
|
|
|
}
|
2016-09-19 04:36:56 +03:00
|
|
|
|
|
|
|
return lazy_add_method(obj, 0, 0, Qnil, Qnil, &lazy_take_while_funcs);
|
2012-03-14 17:04:18 +04:00
|
|
|
}
|
|
|
|
|
2012-11-06 21:16:44 +04:00
|
|
|
static VALUE
|
2016-09-19 04:36:56 +03:00
|
|
|
lazy_drop_size(VALUE proc_entry, VALUE receiver)
|
2012-11-07 02:50:30 +04:00
|
|
|
{
|
2016-09-19 04:36:56 +03:00
|
|
|
long len = NUM2LONG(RARRAY_AREF(rb_ivar_get(proc_entry, id_arguments), 0));
|
2012-11-06 21:16:44 +04:00
|
|
|
if (NIL_P(receiver))
|
|
|
|
return receiver;
|
|
|
|
if (FIXNUM_P(receiver)) {
|
|
|
|
len = FIX2LONG(receiver) - len;
|
|
|
|
return LONG2FIX(len < 0 ? 0 : len);
|
|
|
|
}
|
|
|
|
return rb_funcall(receiver, '-', 1, LONG2NUM(len));
|
|
|
|
}
|
|
|
|
|
2016-09-19 04:36:56 +03:00
|
|
|
static struct MEMO *
|
|
|
|
lazy_drop_proc(VALUE proc_entry, struct MEMO *result, VALUE memos, long memo_index)
|
2012-03-14 17:04:18 +04:00
|
|
|
{
|
2013-01-24 10:23:42 +04:00
|
|
|
long remain;
|
2016-09-19 04:36:56 +03:00
|
|
|
struct proc_entry *entry = proc_entry_ptr(proc_entry);
|
|
|
|
VALUE memo = rb_ary_entry(memos, memo_index);
|
|
|
|
|
2013-01-24 10:23:42 +04:00
|
|
|
if (NIL_P(memo)) {
|
2016-09-19 04:36:56 +03:00
|
|
|
memo = entry->memo;
|
2012-03-14 17:04:18 +04:00
|
|
|
}
|
2016-09-19 04:36:56 +03:00
|
|
|
remain = NUM2LONG(memo);
|
|
|
|
if (remain > 0) {
|
|
|
|
--remain;
|
|
|
|
rb_ary_store(memos, memo_index, LONG2NUM(remain));
|
|
|
|
return 0;
|
2012-03-14 17:04:18 +04:00
|
|
|
}
|
2016-09-19 04:36:56 +03:00
|
|
|
|
|
|
|
return result;
|
2012-03-14 17:04:18 +04:00
|
|
|
}
|
|
|
|
|
2016-09-19 04:36:56 +03:00
|
|
|
static const lazyenum_funcs lazy_drop_funcs = {
|
|
|
|
lazy_drop_proc, lazy_drop_size,
|
|
|
|
};
|
|
|
|
|
2019-03-21 02:32:11 +03:00
|
|
|
/*
|
|
|
|
* call-seq:
|
|
|
|
* lazy.drop(n) -> lazy_enumerator
|
|
|
|
*
|
|
|
|
* Like Enumerable#drop, but chains operation to be lazy-evaluated.
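*
* An illustrative usage sketch (added example; assumes standard
* Enumerator::Lazy behavior):
*
*    (1..Float::INFINITY).lazy.drop(2).first(3)  #=> [3, 4, 5]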
|
|
|
|
*/
|
|
|
|
|
2012-03-14 17:04:18 +04:00
|
|
|
static VALUE
|
|
|
|
lazy_drop(VALUE obj, VALUE n)
|
|
|
|
{
|
|
|
|
long len = NUM2LONG(n);
|
2016-09-19 04:36:56 +03:00
|
|
|
VALUE argv[2];
|
2019-05-15 06:41:31 +03:00
|
|
|
argv[0] = sym_each;
|
2016-09-19 04:36:56 +03:00
|
|
|
argv[1] = n;
|
2012-03-14 17:04:18 +04:00
|
|
|
|
|
|
|
if (len < 0) {
|
|
|
|
rb_raise(rb_eArgError, "attempt to drop negative size");
|
|
|
|
}
|
2016-09-19 04:36:56 +03:00
|
|
|
|
|
|
|
return lazy_add_method(obj, 2, argv, n, rb_ary_new3(1, n), &lazy_drop_funcs);
|
2012-03-14 17:04:18 +04:00
|
|
|
}
|
|
|
|
|
2016-09-19 04:36:56 +03:00
|
|
|
static struct MEMO *
|
|
|
|
lazy_drop_while_proc(VALUE proc_entry, struct MEMO* result, VALUE memos, long memo_index)
|
2012-03-14 17:04:18 +04:00
|
|
|
{
|
2016-09-19 04:36:56 +03:00
|
|
|
struct proc_entry *entry = proc_entry_ptr(proc_entry);
|
|
|
|
VALUE memo = rb_ary_entry(memos, memo_index);
|
|
|
|
|
|
|
|
if (NIL_P(memo)) {
|
|
|
|
memo = entry->memo;
|
2012-03-14 17:04:18 +04:00
|
|
|
}
|
2016-09-19 04:36:56 +03:00
|
|
|
|
|
|
|
if (!RTEST(memo)) {
|
|
|
|
VALUE drop = lazyenum_yield_values(proc_entry, result);
|
|
|
|
if (RTEST(drop)) return 0;
|
|
|
|
rb_ary_store(memos, memo_index, Qtrue);
|
2012-03-14 17:04:18 +04:00
|
|
|
}
|
2016-09-19 04:36:56 +03:00
|
|
|
return result;
|
2012-03-14 17:04:18 +04:00
|
|
|
}
|
|
|
|
|
2016-09-19 04:36:56 +03:00
|
|
|
static const lazyenum_funcs lazy_drop_while_funcs = {
|
|
|
|
lazy_drop_while_proc, 0,
|
|
|
|
};
|
|
|
|
|
2019-03-21 02:32:11 +03:00
|
|
|
/*
|
|
|
|
* call-seq:
|
|
|
|
* lazy.drop_while { |obj| block } -> lazy_enumerator
|
|
|
|
*
|
|
|
|
* Like Enumerable#drop_while, but chains operation to be lazy-evaluated.
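*
* An illustrative usage sketch (added example; assumes standard
* Enumerator::Lazy behavior):
*
*    (1..Float::INFINITY).lazy.drop_while { |i| i < 3 }.first(3)  #=> [3, 4, 5]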
|
|
|
|
*/
|
|
|
|
|
2012-03-14 17:04:18 +04:00
|
|
|
static VALUE
|
|
|
|
lazy_drop_while(VALUE obj)
|
|
|
|
{
|
2013-01-14 11:42:43 +04:00
|
|
|
if (!rb_block_given_p()) {
|
|
|
|
rb_raise(rb_eArgError, "tried to call lazy drop_while without a block");
|
|
|
|
}
|
2016-09-19 04:36:56 +03:00
|
|
|
|
|
|
|
return lazy_add_method(obj, 0, 0, Qfalse, Qnil, &lazy_drop_while_funcs);
|
2012-03-14 17:04:18 +04:00
|
|
|
}
|
|
|
|
|
2018-08-16 03:58:21 +03:00
|
|
|
static int
|
|
|
|
lazy_uniq_check(VALUE chain, VALUE memos, long memo_index)
|
2016-07-20 11:44:08 +03:00
|
|
|
{
|
2018-08-20 18:33:59 +03:00
|
|
|
VALUE hash = rb_ary_entry(memos, memo_index);
|
2018-03-13 04:00:08 +03:00
|
|
|
|
|
|
|
if (NIL_P(hash)) {
|
|
|
|
hash = rb_obj_hide(rb_hash_new());
|
2018-08-16 03:58:21 +03:00
|
|
|
rb_ary_store(memos, memo_index, hash);
|
2018-03-13 04:00:08 +03:00
|
|
|
}
|
|
|
|
|
2018-08-16 03:58:21 +03:00
|
|
|
return rb_hash_add_new_element(hash, chain, Qfalse);
|
2016-07-20 11:44:08 +03:00
|
|
|
}
|
|
|
|
|
2018-08-16 03:58:21 +03:00
|
|
|
static struct MEMO *
|
|
|
|
lazy_uniq_proc(VALUE proc_entry, struct MEMO *result, VALUE memos, long memo_index)
|
2016-07-20 11:44:08 +03:00
|
|
|
{
|
2018-08-16 03:58:21 +03:00
|
|
|
if (lazy_uniq_check(result->memo_value, memos, memo_index)) return 0;
|
|
|
|
return result;
|
2016-07-20 11:44:08 +03:00
|
|
|
}
|
|
|
|
|
2018-08-16 03:58:21 +03:00
|
|
|
static struct MEMO *
|
|
|
|
lazy_uniq_iter_proc(VALUE proc_entry, struct MEMO *result, VALUE memos, long memo_index)
|
2016-07-20 11:44:08 +03:00
|
|
|
{
|
2018-08-16 03:58:21 +03:00
|
|
|
VALUE chain = lazyenum_yield(proc_entry, result);
|
|
|
|
|
|
|
|
if (lazy_uniq_check(chain, memos, memo_index)) return 0;
|
|
|
|
return result;
|
2016-07-20 11:44:08 +03:00
|
|
|
}
|
|
|
|
|
2018-08-16 03:58:21 +03:00
|
|
|
static const lazyenum_funcs lazy_uniq_iter_funcs = {
|
|
|
|
lazy_uniq_iter_proc, 0,
|
|
|
|
};
|
|
|
|
|
|
|
|
static const lazyenum_funcs lazy_uniq_funcs = {
|
|
|
|
lazy_uniq_proc, 0,
|
|
|
|
};
|
|
|
|
|
2019-03-21 02:32:11 +03:00
|
|
|
/*
|
|
|
|
* call-seq:
|
2019-12-22 21:46:24 +03:00
|
|
|
* lazy.uniq -> lazy_enumerator
|
|
|
|
* lazy.uniq { |item| block } -> lazy_enumerator
|
2019-03-21 02:32:11 +03:00
|
|
|
*
|
|
|
|
* Like Enumerable#uniq, but chains operation to be lazy-evaluated.
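*
* An illustrative usage sketch (added example; assumes standard
* Enumerator::Lazy behavior):
*
*    (1..Float::INFINITY).lazy.map { |i| i % 3 }.uniq.first(3)  #=> [1, 2, 0]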
|
|
|
|
*/
|
|
|
|
|
2016-07-20 11:44:08 +03:00
|
|
|
static VALUE
|
|
|
|
lazy_uniq(VALUE obj)
|
|
|
|
{
|
2018-08-16 03:58:21 +03:00
|
|
|
const lazyenum_funcs *const funcs =
|
|
|
|
rb_block_given_p() ? &lazy_uniq_iter_funcs : &lazy_uniq_funcs;
|
|
|
|
return lazy_add_method(obj, 0, 0, Qnil, Qnil, funcs);
|
2016-07-20 11:44:08 +03:00
|
|
|
}
|
|
|
|
|
2020-12-05 14:39:20 +03:00
|
|
|
static struct MEMO *
|
|
|
|
lazy_compact_proc(VALUE proc_entry, struct MEMO *result, VALUE memos, long memo_index)
|
|
|
|
{
|
|
|
|
if (NIL_P(result->memo_value)) return 0;
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
static const lazyenum_funcs lazy_compact_funcs = {
|
|
|
|
lazy_compact_proc, 0,
|
|
|
|
};
|
|
|
|
|
|
|
|
/*
|
|
|
|
* call-seq:
|
|
|
|
* lazy.compact -> lazy_enumerator
|
|
|
|
*
|
|
|
|
* Like Enumerable#compact, but chains operation to be lazy-evaluated.
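*
* An illustrative usage sketch (added example; assumes standard
* Enumerator::Lazy behavior):
*
*    [1, nil, 2, nil, 3].lazy.compact.to_a  #=> [1, 2, 3]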
|
|
|
|
*/
|
|
|
|
|
|
|
|
static VALUE
|
|
|
|
lazy_compact(VALUE obj)
|
|
|
|
{
|
|
|
|
return lazy_add_method(obj, 0, 0, Qnil, Qnil, &lazy_compact_funcs);
|
|
|
|
}
|
|
|
|
|
2019-12-11 00:38:12 +03:00
|
|
|
static struct MEMO *
|
|
|
|
lazy_with_index_proc(VALUE proc_entry, struct MEMO* result, VALUE memos, long memo_index)
|
|
|
|
{
|
|
|
|
struct proc_entry *entry = proc_entry_ptr(proc_entry);
|
|
|
|
VALUE memo = rb_ary_entry(memos, memo_index);
|
|
|
|
VALUE argv[2];
|
|
|
|
|
|
|
|
if (NIL_P(memo)) {
|
|
|
|
memo = entry->memo;
|
|
|
|
}
|
|
|
|
|
|
|
|
argv[0] = result->memo_value;
|
|
|
|
argv[1] = memo;
|
|
|
|
if (entry->proc) {
|
|
|
|
rb_proc_call_with_block(entry->proc, 2, argv, Qnil);
|
|
|
|
LAZY_MEMO_RESET_PACKED(result);
|
2020-05-10 18:24:14 +03:00
|
|
|
}
|
|
|
|
else {
|
2019-12-11 00:38:12 +03:00
|
|
|
LAZY_MEMO_SET_VALUE(result, rb_ary_new_from_values(2, argv));
|
|
|
|
LAZY_MEMO_SET_PACKED(result);
|
|
|
|
}
|
|
|
|
rb_ary_store(memos, memo_index, LONG2NUM(NUM2LONG(memo) + 1));
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2021-05-27 23:29:24 +03:00
|
|
|
static VALUE
|
2021-06-16 16:07:05 +03:00
|
|
|
lazy_with_index_size(VALUE proc, VALUE receiver)
|
|
|
|
{
|
2021-05-27 23:29:24 +03:00
|
|
|
return receiver;
|
|
|
|
}
|
|
|
|
|
2019-12-11 00:38:12 +03:00
|
|
|
static const lazyenum_funcs lazy_with_index_funcs = {
|
2021-05-27 23:29:24 +03:00
|
|
|
lazy_with_index_proc, lazy_with_index_size,
|
2019-12-11 00:38:12 +03:00
|
|
|
};
|
|
|
|
|
|
|
|
/*
|
2019-12-22 21:46:24 +03:00
|
|
|
* call-seq:
|
|
|
|
* lazy.with_index(offset = 0) {|(*args), idx| block }
|
|
|
|
* lazy.with_index(offset = 0)
|
2019-12-11 00:38:12 +03:00
|
|
|
*
|
2021-04-09 20:44:07 +03:00
|
|
|
* If a block is given, returns a lazy enumerator that will call the
|
|
|
|
* given block for each element, passing an index that starts from
|
2019-12-22 21:46:24 +03:00
|
|
|
* +offset+, and that yields the same values (without the index).
|
2019-12-11 00:38:12 +03:00
|
|
|
*
|
2019-12-22 21:46:24 +03:00
|
|
|
* If a block is not given, returns a new lazy enumerator that
|
|
|
|
* includes the index, starting from +offset+.
|
2019-12-11 00:38:12 +03:00
|
|
|
*
|
|
|
|
* +offset+:: the starting index to use
|
|
|
|
*
|
2019-12-22 21:46:24 +03:00
|
|
|
* See Enumerator#with_index.
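*
* An illustrative usage sketch (added example; assumes standard
* Enumerator::Lazy behavior):
*
*    (1..Float::INFINITY).lazy.with_index(10).first(2)  #=> [[1, 10], [2, 11]]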
|
2019-12-11 00:38:12 +03:00
|
|
|
*/
|
|
|
|
static VALUE
|
|
|
|
lazy_with_index(int argc, VALUE *argv, VALUE obj)
|
|
|
|
{
|
|
|
|
VALUE memo;
|
|
|
|
|
|
|
|
rb_scan_args(argc, argv, "01", &memo);
|
|
|
|
if (NIL_P(memo))
|
|
|
|
memo = LONG2NUM(0);
|
|
|
|
|
|
|
|
return lazy_add_method(obj, 0, 0, memo, rb_ary_new_from_values(1, &memo), &lazy_with_index_funcs);
|
|
|
|
}
|
|
|
|
|
2019-03-21 02:32:11 +03:00
|
|
|
#if 0 /* for RDoc */
|
|
|
|
|
|
|
|
/*
|
|
|
|
* call-seq:
|
|
|
|
* lazy.chunk { |elt| ... } -> lazy_enumerator
|
|
|
|
*
|
|
|
|
* Like Enumerable#chunk, but chains operation to be lazy-evaluated.
|
|
|
|
*/
|
2022-08-06 04:13:20 +03:00
|
|
|
static VALUE
|
|
|
|
lazy_chunk(VALUE self)
|
2019-03-21 02:32:11 +03:00
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* call-seq:
|
|
|
|
* lazy.chunk_while {|elt_before, elt_after| bool } -> lazy_enumerator
|
|
|
|
*
|
|
|
|
* Like Enumerable#chunk_while, but chains operation to be lazy-evaluated.
|
|
|
|
*/
|
2022-08-06 04:13:20 +03:00
|
|
|
static VALUE
|
|
|
|
lazy_chunk_while(VALUE self)
|
2019-03-21 02:32:11 +03:00
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* call-seq:
|
|
|
|
* lazy.slice_after(pattern) -> lazy_enumerator
|
|
|
|
* lazy.slice_after { |elt| bool } -> lazy_enumerator
|
|
|
|
*
|
|
|
|
* Like Enumerable#slice_after, but chains operation to be lazy-evaluated.
|
|
|
|
*/
|
2022-08-06 04:13:20 +03:00
|
|
|
static VALUE
|
|
|
|
lazy_slice_after(VALUE self)
|
2019-03-21 02:32:11 +03:00
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* call-seq:
|
|
|
|
* lazy.slice_before(pattern) -> lazy_enumerator
|
|
|
|
* lazy.slice_before { |elt| bool } -> lazy_enumerator
|
|
|
|
*
|
|
|
|
* Like Enumerable#slice_before, but chains operation to be lazy-evaluated.
|
|
|
|
*/
|
2022-08-06 04:13:20 +03:00
|
|
|
static VALUE
|
|
|
|
lazy_slice_before(VALUE self)
|
2019-03-21 02:32:11 +03:00
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* call-seq:
|
|
|
|
* lazy.slice_when {|elt_before, elt_after| bool } -> lazy_enumerator
|
|
|
|
*
|
|
|
|
* Like Enumerable#slice_when, but chains operation to be lazy-evaluated.
|
|
|
|
*/
|
2022-08-06 04:13:20 +03:00
|
|
|
static VALUE
|
|
|
|
lazy_slice_when(VALUE self)
|
2019-03-21 02:32:11 +03:00
|
|
|
{
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2012-03-15 06:00:30 +04:00
|
|
|
static VALUE
|
2013-02-05 07:49:59 +04:00
|
|
|
lazy_super(int argc, VALUE *argv, VALUE lazy)
|
2012-03-15 06:00:30 +04:00
|
|
|
{
|
2013-02-05 07:49:59 +04:00
|
|
|
return enumerable_lazy(rb_call_super(argc, argv));
|
2012-03-15 06:00:30 +04:00
|
|
|
}
|
|
|
|
|
2019-03-21 02:32:11 +03:00
|
|
|
/*
|
|
|
|
* call-seq:
|
|
|
|
* enum.lazy -> lazy_enumerator
|
|
|
|
*
|
2019-05-18 14:04:04 +03:00
|
|
|
* Returns self.
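*
* An illustrative sketch (added example): calling #lazy on an
* already-lazy enumerator returns the very same object.
*
*    lazy = (1..3).lazy
*    lazy.lazy.equal?(lazy)  #=> true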
|
2019-03-21 02:32:11 +03:00
|
|
|
*/
|
|
|
|
|
2012-03-14 03:08:15 +04:00
|
|
|
static VALUE
|
|
|
|
lazy_lazy(VALUE obj)
|
|
|
|
{
|
|
|
|
return obj;
|
|
|
|
}
|
|
|
|
|
2012-03-14 13:26:27 +04:00
|
|
|
/*
|
|
|
|
* Document-class: StopIteration
|
|
|
|
*
|
|
|
|
* Raised to stop the iteration, in particular by Enumerator#next. It is
|
|
|
|
* rescued by Kernel#loop.
|
|
|
|
*
|
|
|
|
* loop do
|
|
|
|
* puts "Hello"
|
|
|
|
* raise StopIteration
|
|
|
|
* puts "World"
|
|
|
|
* end
|
|
|
|
* puts "Done!"
|
|
|
|
*
|
|
|
|
* <em>produces:</em>
|
|
|
|
*
|
|
|
|
* Hello
|
|
|
|
* Done!
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* call-seq:
|
|
|
|
* result -> value
|
|
|
|
*
|
|
|
|
* Returns the return value of the iterator.
|
|
|
|
*
|
|
|
|
* o = Object.new
|
|
|
|
* def o.each
|
|
|
|
* yield 1
|
|
|
|
* yield 2
|
|
|
|
* yield 3
|
|
|
|
* 100
|
|
|
|
* end
|
|
|
|
*
|
|
|
|
* e = o.to_enum
|
|
|
|
*
|
|
|
|
* puts e.next #=> 1
|
|
|
|
* puts e.next #=> 2
|
|
|
|
* puts e.next #=> 3
|
|
|
|
*
|
|
|
|
* begin
|
|
|
|
* e.next
|
|
|
|
* rescue StopIteration => ex
|
|
|
|
* puts ex.result #=> 100
|
|
|
|
* end
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
|
2009-08-19 20:36:00 +04:00
|
|
|
static VALUE
|
|
|
|
stop_result(VALUE self)
|
|
|
|
{
|
2012-03-14 05:35:09 +04:00
|
|
|
return rb_attr_get(self, id_result);
|
2008-08-26 09:42:12 +04:00
|
|
|
}
|
|
|
|
|
2019-08-29 14:05:10 +03:00
|
|
|
/*
|
|
|
|
* Producer
|
|
|
|
*/
|
|
|
|
|
|
|
|
static void
|
|
|
|
producer_mark(void *p)
|
|
|
|
{
|
|
|
|
struct producer *ptr = p;
|
|
|
|
rb_gc_mark_movable(ptr->init);
|
|
|
|
rb_gc_mark_movable(ptr->proc);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
producer_compact(void *p)
|
|
|
|
{
|
|
|
|
struct producer *ptr = p;
|
|
|
|
ptr->init = rb_gc_location(ptr->init);
|
|
|
|
ptr->proc = rb_gc_location(ptr->proc);
|
|
|
|
}
|
|
|
|
|
|
|
|
#define producer_free RUBY_TYPED_DEFAULT_FREE
|
|
|
|
|
|
|
|
static size_t
|
|
|
|
producer_memsize(const void *p)
|
|
|
|
{
|
|
|
|
return sizeof(struct producer);
|
|
|
|
}
|
|
|
|
|
|
|
|
static const rb_data_type_t producer_data_type = {
|
|
|
|
"producer",
|
|
|
|
{
|
|
|
|
producer_mark,
|
|
|
|
producer_free,
|
|
|
|
producer_memsize,
|
|
|
|
producer_compact,
|
|
|
|
},
|
2023-11-22 18:54:32 +03:00
|
|
|
0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_EMBEDDABLE
|
2019-08-29 14:05:10 +03:00
|
|
|
};
|
|
|
|
|
|
|
|
static struct producer *
|
|
|
|
producer_ptr(VALUE obj)
|
|
|
|
{
|
|
|
|
struct producer *ptr;
|
|
|
|
|
|
|
|
TypedData_Get_Struct(obj, struct producer, &producer_data_type, ptr);
|
2022-11-15 07:24:08 +03:00
|
|
|
if (!ptr || UNDEF_P(ptr->proc)) {
|
2019-08-29 14:05:10 +03:00
|
|
|
rb_raise(rb_eArgError, "uninitialized producer");
|
|
|
|
}
|
|
|
|
return ptr;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* :nodoc: */
|
|
|
|
static VALUE
|
|
|
|
producer_allocate(VALUE klass)
|
|
|
|
{
|
|
|
|
struct producer *ptr;
|
|
|
|
VALUE obj;
|
|
|
|
|
|
|
|
obj = TypedData_Make_Struct(klass, struct producer, &producer_data_type, ptr);
|
|
|
|
ptr->init = Qundef;
|
|
|
|
ptr->proc = Qundef;
|
|
|
|
|
|
|
|
return obj;
|
|
|
|
}
|
|
|
|
|
|
|
|
static VALUE
|
|
|
|
producer_init(VALUE obj, VALUE init, VALUE proc)
|
|
|
|
{
|
|
|
|
struct producer *ptr;
|
|
|
|
|
|
|
|
TypedData_Get_Struct(obj, struct producer, &producer_data_type, ptr);
|
|
|
|
|
|
|
|
if (!ptr) {
|
|
|
|
rb_raise(rb_eArgError, "unallocated producer");
|
|
|
|
}
|
|
|
|
|
2023-11-22 18:54:32 +03:00
|
|
|
RB_OBJ_WRITE(obj, &ptr->init, init);
|
|
|
|
RB_OBJ_WRITE(obj, &ptr->proc, proc);
|
2019-08-29 14:05:10 +03:00
|
|
|
|
|
|
|
return obj;
|
|
|
|
}
|
|
|
|
|
|
|
|
static VALUE
|
|
|
|
producer_each_stop(VALUE dummy, VALUE exc)
|
|
|
|
{
|
|
|
|
return rb_attr_get(exc, id_result);
|
|
|
|
}
|
|
|
|
|
2020-05-10 18:24:14 +03:00
|
|
|
NORETURN(static VALUE producer_each_i(VALUE obj));
|
|
|
|
|
2019-08-29 14:05:10 +03:00
|
|
|
static VALUE
|
|
|
|
producer_each_i(VALUE obj)
|
|
|
|
{
|
|
|
|
struct producer *ptr;
|
|
|
|
VALUE init, proc, curr;
|
|
|
|
|
|
|
|
ptr = producer_ptr(obj);
|
|
|
|
init = ptr->init;
|
|
|
|
proc = ptr->proc;
|
|
|
|
|
2022-11-15 07:24:08 +03:00
|
|
|
if (UNDEF_P(init)) {
|
2019-08-29 14:05:10 +03:00
|
|
|
curr = Qnil;
|
2020-05-10 18:24:14 +03:00
|
|
|
}
|
|
|
|
else {
|
2019-08-29 14:05:10 +03:00
|
|
|
rb_yield(init);
|
|
|
|
curr = init;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (;;) {
|
|
|
|
curr = rb_funcall(proc, id_call, 1, curr);
|
|
|
|
rb_yield(curr);
|
|
|
|
}
|
|
|
|
|
2020-05-10 18:24:14 +03:00
|
|
|
UNREACHABLE_RETURN(Qnil);
|
2019-08-29 14:05:10 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/* :nodoc: */
|
|
|
|
static VALUE
|
|
|
|
producer_each(VALUE obj)
|
|
|
|
{
|
|
|
|
rb_need_block();
|
|
|
|
|
|
|
|
return rb_rescue2(producer_each_i, obj, producer_each_stop, (VALUE)0, rb_eStopIteration, (VALUE)0);
|
|
|
|
}
|
|
|
|
|
|
|
|
static VALUE
|
|
|
|
producer_size(VALUE obj, VALUE args, VALUE eobj)
|
|
|
|
{
|
|
|
|
return DBL2NUM(HUGE_VAL);
|
|
|
|
}
|
|
|
|
|
2019-09-12 14:15:03 +03:00
|
|
|
/*
|
|
|
|
* call-seq:
|
2019-12-24 11:03:42 +03:00
|
|
|
* Enumerator.produce(initial = nil) { |prev| block } -> enumerator
|
2019-09-12 14:15:03 +03:00
|
|
|
*
|
|
|
|
* Creates an infinite enumerator from any block, just called over and
|
2019-12-24 11:03:42 +03:00
|
|
|
* over. The result of the previous iteration is passed to the next one.
|
2019-09-12 14:15:03 +03:00
|
|
|
* If +initial+ is provided, it is passed to the first iteration, and
|
|
|
|
* becomes the first element of the enumerator; if it is not provided,
|
2019-12-24 11:03:42 +03:00
|
|
|
* the first iteration receives +nil+, and its result becomes the first
|
2019-09-12 14:15:03 +03:00
|
|
|
* element of the enumerator.
|
|
|
|
*
|
|
|
|
* Raising StopIteration from the block stops an iteration.
|
|
|
|
*
|
|
|
|
* Enumerator.produce(1, &:succ) # => enumerator of 1, 2, 3, 4, ....
|
|
|
|
*
|
|
|
|
* Enumerator.produce { rand(10) } # => infinite random number sequence
|
|
|
|
*
|
|
|
|
* ancestors = Enumerator.produce(node) { |prev| node = prev.parent or raise StopIteration }
|
|
|
|
* enclosing_section = ancestors.find { |n| n.type == :section }
|
2019-10-26 14:02:59 +03:00
|
|
|
*
|
|
|
|
* Using ::produce together with Enumerable methods like Enumerable#detect,
|
2020-05-27 09:37:05 +03:00
|
|
|
* Enumerable#slice_after, Enumerable#take_while can provide Enumerator-based alternatives
|
2019-10-26 14:02:59 +03:00
|
|
|
* for +while+ and +until+ cycles:
|
|
|
|
*
|
|
|
|
* # Find next Tuesday
|
2019-12-24 11:03:42 +03:00
|
|
|
* require "date"
|
2019-10-26 14:02:59 +03:00
|
|
|
* Enumerator.produce(Date.today, &:succ).detect(&:tuesday?)
|
|
|
|
*
|
|
|
|
* # Simple lexer:
|
2019-12-24 11:03:42 +03:00
|
|
|
* require "strscan"
|
|
|
|
* scanner = StringScanner.new("7+38/6")
|
2019-10-26 14:02:59 +03:00
|
|
|
* PATTERN = %r{\d+|[-/+*]}
|
2019-12-24 11:03:42 +03:00
|
|
|
* Enumerator.produce { scanner.scan(PATTERN) }.slice_after { scanner.eos? }.first
|
2019-10-26 14:02:59 +03:00
|
|
|
* # => ["7", "+", "38", "/", "6"]
|
2019-09-12 14:15:03 +03:00
|
|
|
*/
|
2019-08-29 14:05:10 +03:00
|
|
|
static VALUE
|
|
|
|
enumerator_s_produce(int argc, VALUE *argv, VALUE klass)
|
|
|
|
{
|
|
|
|
VALUE init, producer;
|
|
|
|
|
|
|
|
if (!rb_block_given_p()) rb_raise(rb_eArgError, "no block given");
|
|
|
|
|
|
|
|
if (rb_scan_args(argc, argv, "01", &init) == 0) {
|
|
|
|
init = Qundef;
|
|
|
|
}
|
|
|
|
|
|
|
|
producer = producer_init(producer_allocate(rb_cEnumProducer), init, rb_block_proc());
|
|
|
|
|
2019-09-30 07:33:59 +03:00
|
|
|
return rb_enumeratorize_with_size_kw(producer, sym_each, 0, 0, producer_size, RB_NO_KEYWORDS);
|
2019-08-29 14:05:10 +03:00
|
|
|
}
|
|
|
|
|
Implement Enumerator#+ and Enumerable#chain [Feature #15144]
They return an Enumerator::Chain object which is a subclass of
Enumerator, which represents a chain of enumerables that works as a
single enumerator.
```ruby
e = (1..3).chain([4, 5])
e.to_a #=> [1, 2, 3, 4, 5]
e = (1..3).each + [4, 5]
e.to_a #=> [1, 2, 3, 4, 5]
```
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@65949 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2018-11-24 11:38:35 +03:00
|
|
|
/*
|
|
|
|
* Document-class: Enumerator::Chain
|
|
|
|
*
|
|
|
|
* Enumerator::Chain is a subclass of Enumerator, which represents a
|
|
|
|
* chain of enumerables that works as a single enumerator.
|
|
|
|
*
|
|
|
|
* Objects of this type can be created by Enumerable#chain and
|
|
|
|
* Enumerator#+.
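*
* An illustrative sketch (added example, using the constructors named above):
*
*    e = (1..3).each + [4, 5]
*    e.class  #=> Enumerator::Chain
*    e.to_a   #=> [1, 2, 3, 4, 5]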
|
|
|
|
*/
|
|
|
|
|
|
|
|
static void
|
|
|
|
enum_chain_mark(void *p)
|
|
|
|
{
|
|
|
|
struct enum_chain *ptr = p;
|
2019-08-13 00:00:34 +03:00
|
|
|
rb_gc_mark_movable(ptr->enums);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
enum_chain_compact(void *p)
|
|
|
|
{
|
|
|
|
struct enum_chain *ptr = p;
|
|
|
|
ptr->enums = rb_gc_location(ptr->enums);
|
2018-11-24 11:38:35 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
#define enum_chain_free RUBY_TYPED_DEFAULT_FREE
|
|
|
|
|
|
|
|
static size_t
|
|
|
|
enum_chain_memsize(const void *p)
|
|
|
|
{
|
|
|
|
return sizeof(struct enum_chain);
|
|
|
|
}
|
|
|
|
|
|
|
|
static const rb_data_type_t enum_chain_data_type = {
|
|
|
|
"chain",
|
|
|
|
{
|
|
|
|
enum_chain_mark,
|
|
|
|
enum_chain_free,
|
|
|
|
enum_chain_memsize,
|
2019-08-13 00:00:34 +03:00
|
|
|
enum_chain_compact,
|
2018-11-24 11:38:35 +03:00
|
|
|
},
|
|
|
|
0, 0, RUBY_TYPED_FREE_IMMEDIATELY
|
|
|
|
};
|
|
|
|
|
|
|
|
static struct enum_chain *
|
|
|
|
enum_chain_ptr(VALUE obj)
|
|
|
|
{
|
|
|
|
struct enum_chain *ptr;
|
|
|
|
|
|
|
|
TypedData_Get_Struct(obj, struct enum_chain, &enum_chain_data_type, ptr);
|
2022-11-15 07:24:08 +03:00
|
|
|
if (!ptr || UNDEF_P(ptr->enums)) {
|
2018-11-24 11:38:35 +03:00
|
|
|
rb_raise(rb_eArgError, "uninitialized chain");
|
|
|
|
}
|
|
|
|
return ptr;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* :nodoc: */
|
|
|
|
static VALUE
|
|
|
|
enum_chain_allocate(VALUE klass)
|
|
|
|
{
|
|
|
|
struct enum_chain *ptr;
|
|
|
|
VALUE obj;
|
|
|
|
|
|
|
|
obj = TypedData_Make_Struct(klass, struct enum_chain, &enum_chain_data_type, ptr);
|
|
|
|
ptr->enums = Qundef;
|
|
|
|
ptr->pos = -1;
|
|
|
|
|
|
|
|
return obj;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* call-seq:
|
|
|
|
* Enumerator::Chain.new(*enums) -> enum
|
|
|
|
*
|
|
|
|
* Generates a new enumerator object that iterates over the elements
|
|
|
|
* of given enumerable objects in sequence.
|
|
|
|
*
|
|
|
|
* e = Enumerator::Chain.new(1..3, [4, 5])
|
|
|
|
* e.to_a #=> [1, 2, 3, 4, 5]
|
|
|
|
* e.size #=> 5
|
|
|
|
*/
|
|
|
|
static VALUE
|
|
|
|
enum_chain_initialize(VALUE obj, VALUE enums)
|
|
|
|
{
|
|
|
|
struct enum_chain *ptr;
|
|
|
|
|
|
|
|
rb_check_frozen(obj);
|
|
|
|
TypedData_Get_Struct(obj, struct enum_chain, &enum_chain_data_type, ptr);
|
|
|
|
|
|
|
|
if (!ptr) rb_raise(rb_eArgError, "unallocated chain");
|
|
|
|
|
Resize arrays in `rb_ary_freeze` and use it for freezing arrays
While working on a separate issue we found that in some cases
`ary_heap_realloc` was being called on frozen arrays. To fix this, this
change does the following:
1) Updates `rb_ary_freeze` to assert the type is an array, return if
already frozen, and shrink the capacity if it is not embedded, shared
or a shared root.
2) Replaces `rb_obj_freeze` with `rb_ary_freeze` when the object is
always an array.
3) In `ary_heap_realloc`, ensure the new capa is set with
`ARY_SET_CAPA`. Previously the change in capa was not set.
4) Adds an assertion to `ary_heap_realloc` that the array is not frozen.
Some of this work was originally done in
https://github.com/ruby/ruby/pull/2640, referencing this issue
https://bugs.ruby-lang.org/issues/16291. There didn't appear to be any
objections to this PR, it appears to have simply lost traction.
The original PR made changes to arrays and strings at the same time,
this PR only does arrays. Also it was old enough that rather than revive
that branch I've made a new one. I added Lourens as co-author in addtion
to Aaron who helped me with this patch.
The original PR made this change for performance reasons, and while
that's still true for this PR, the goal of this PR is to avoid
calling `ary_heap_realloc` on frozen arrays. The capacity should be
shrunk _before_ the array is frozen, not after.
Co-authored-by: Aaron Patterson <tenderlove@ruby-lang.org>
Co-Authored-By: methodmissing <lourens@methodmissing.com>
2024-06-18 21:52:18 +03:00
|
|
|
ptr->enums = rb_ary_freeze(enums);
|
2018-11-24 11:38:35 +03:00
|
|
|
ptr->pos = -1;
|
|
|
|
|
|
|
|
return obj;
|
|
|
|
}
|
|
|
|
|
2021-03-05 23:25:51 +03:00
|
|
|
static VALUE
|
2021-06-16 16:07:05 +03:00
|
|
|
new_enum_chain(VALUE enums)
|
|
|
|
{
|
2021-03-05 23:25:51 +03:00
|
|
|
long i;
|
|
|
|
VALUE obj = enum_chain_initialize(enum_chain_allocate(rb_cEnumChain), enums);
|
|
|
|
|
|
|
|
for (i = 0; i < RARRAY_LEN(enums); i++) {
|
|
|
|
if (RTEST(rb_obj_is_kind_of(RARRAY_AREF(enums, i), rb_cLazy))) {
|
|
|
|
return enumerable_lazy(obj);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return obj;
|
|
|
|
}
|
|
|
|
|
2018-11-24 11:38:35 +03:00
|
|
|
/* :nodoc: */
|
|
|
|
static VALUE
|
|
|
|
enum_chain_init_copy(VALUE obj, VALUE orig)
|
|
|
|
{
|
|
|
|
struct enum_chain *ptr0, *ptr1;
|
|
|
|
|
|
|
|
if (!OBJ_INIT_COPY(obj, orig)) return obj;
|
|
|
|
ptr0 = enum_chain_ptr(orig);
|
|
|
|
|
|
|
|
TypedData_Get_Struct(obj, struct enum_chain, &enum_chain_data_type, ptr1);
|
|
|
|
|
|
|
|
if (!ptr1) rb_raise(rb_eArgError, "unallocated chain");
|
|
|
|
|
|
|
|
ptr1->enums = ptr0->enums;
|
|
|
|
ptr1->pos = ptr0->pos;
|
|
|
|
|
|
|
|
return obj;
|
|
|
|
}
|
|
|
|
|
|
|
|
static VALUE
|
|
|
|
enum_chain_total_size(VALUE enums)
|
|
|
|
{
|
|
|
|
VALUE total = INT2FIX(0);
|
2018-12-03 06:51:55 +03:00
|
|
|
long i;
|
2018-11-24 11:38:35 +03:00
|
|
|
|
2018-12-03 06:51:55 +03:00
|
|
|
for (i = 0; i < RARRAY_LEN(enums); i++) {
|
|
|
|
VALUE size = enum_size(RARRAY_AREF(enums, i));
|
2018-11-24 11:38:35 +03:00
|
|
|
|
2021-09-11 03:56:59 +03:00
|
|
|
if (NIL_P(size) || (RB_FLOAT_TYPE_P(size) && isinf(NUM2DBL(size)))) {
|
2018-12-03 06:51:55 +03:00
|
|
|
return size;
|
|
|
|
}
|
|
|
|
if (!RB_INTEGER_TYPE_P(size)) {
|
|
|
|
return Qnil;
|
2018-11-24 11:38:35 +03:00
|
|
|
}
|
2018-12-03 06:51:55 +03:00
|
|
|
|
|
|
|
total = rb_funcall(total, '+', 1, size);
|
|
|
|
}
|
2018-11-24 11:38:35 +03:00
|
|
|
|
|
|
|
return total;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* call-seq:
|
2018-11-28 02:57:55 +03:00
|
|
|
* obj.size -> int, Float::INFINITY or nil
|
2018-11-24 11:38:35 +03:00
|
|
|
*
|
|
|
|
* Returns the total size of the enumerator chain calculated by
|
|
|
|
* summing up the size of each enumerable in the chain. If any of the
|
|
|
|
* enumerables reports its size as nil or Float::INFINITY, that value
|
|
|
|
* is returned as the total size.
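*
* An illustrative sketch (added example; relies only on the rule stated above):
*
*    Enumerator::Chain.new(1..3, [4, 5]).size              #=> 5
*    Enumerator::Chain.new(1..3, 1..Float::INFINITY).size  #=> Infinity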
|
|
|
|
*/
|
|
|
|
static VALUE
|
|
|
|
enum_chain_size(VALUE obj)
|
|
|
|
{
|
|
|
|
return enum_chain_total_size(enum_chain_ptr(obj)->enums);
|
|
|
|
}
|
|
|
|
|
|
|
|
static VALUE
|
|
|
|
enum_chain_enum_size(VALUE obj, VALUE args, VALUE eobj)
|
|
|
|
{
|
|
|
|
return enum_chain_size(obj);
|
|
|
|
}
|
|
|
|
|
|
|
|
static VALUE
|
|
|
|
enum_chain_enum_no_size(VALUE obj, VALUE args, VALUE eobj)
|
|
|
|
{
|
|
|
|
return Qnil;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* call-seq:
|
|
|
|
* obj.each(*args) { |...| ... } -> obj
|
|
|
|
* obj.each(*args) -> enumerator
|
|
|
|
*
|
|
|
|
* Iterates over the elements of the first enumerable by calling the
|
|
|
|
* "each" method on it with the given arguments, then proceeds to the
|
|
|
|
* following enumerables in sequence until all of the enumerables are
|
|
|
|
* exhausted.
|
|
|
|
*
|
|
|
|
* If no block is given, returns an enumerator.
|
|
|
|
*/
|
|
|
|
static VALUE
|
|
|
|
enum_chain_each(int argc, VALUE *argv, VALUE obj)
|
|
|
|
{
|
|
|
|
VALUE enums, block;
|
|
|
|
struct enum_chain *objptr;
|
2018-12-03 06:51:55 +03:00
|
|
|
long i;
|
2018-11-24 11:38:35 +03:00
|
|
|
|
|
|
|
RETURN_SIZED_ENUMERATOR(obj, argc, argv, argc > 0 ? enum_chain_enum_no_size : enum_chain_enum_size);
|
|
|
|
|
|
|
|
objptr = enum_chain_ptr(obj);
|
|
|
|
enums = objptr->enums;
|
|
|
|
block = rb_block_proc();
|
|
|
|
|
2018-12-03 06:51:55 +03:00
|
|
|
for (i = 0; i < RARRAY_LEN(enums); i++) {
|
|
|
|
objptr->pos = i;
|
2019-08-29 02:41:39 +03:00
|
|
|
rb_funcall_with_block(RARRAY_AREF(enums, i), id_each, argc, argv, block);
|
2018-12-03 06:51:55 +03:00
|
|
|
}
|
2018-11-24 11:38:35 +03:00
|
|
|
|
|
|
|
return obj;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* call-seq:
|
|
|
|
* obj.rewind -> obj
|
|
|
|
*
|
|
|
|
* Rewinds the enumerator chain by calling the "rewind" method on each
|
|
|
|
* enumerable in reverse order. Each call is performed only if the
|
|
|
|
* enumerable responds to the method.
|
|
|
|
*/
|
|
|
|
static VALUE
|
|
|
|
enum_chain_rewind(VALUE obj)
|
|
|
|
{
|
|
|
|
struct enum_chain *objptr = enum_chain_ptr(obj);
|
|
|
|
VALUE enums = objptr->enums;
|
2018-12-03 06:51:55 +03:00
|
|
|
long i;
|
2018-11-24 11:38:35 +03:00
|
|
|
|
2018-12-03 06:51:55 +03:00
|
|
|
for (i = objptr->pos; 0 <= i && i < RARRAY_LEN(enums); objptr->pos = --i) {
|
|
|
|
rb_check_funcall(RARRAY_AREF(enums, i), id_rewind, 0, 0);
|
|
|
|
}
|
2018-11-24 11:38:35 +03:00
|
|
|
|
|
|
|
return obj;
|
|
|
|
}
|
|
|
|
|
|
|
|
static VALUE
|
|
|
|
inspect_enum_chain(VALUE obj, VALUE dummy, int recur)
|
|
|
|
{
|
|
|
|
VALUE klass = rb_obj_class(obj);
|
|
|
|
struct enum_chain *ptr;
|
|
|
|
|
|
|
|
TypedData_Get_Struct(obj, struct enum_chain, &enum_chain_data_type, ptr);
|
|
|
|
|
2022-11-15 07:24:08 +03:00
|
|
|
if (!ptr || UNDEF_P(ptr->enums)) {
|
2018-11-24 11:38:35 +03:00
|
|
|
return rb_sprintf("#<%"PRIsVALUE": uninitialized>", rb_class_path(klass));
|
|
|
|
}
|
|
|
|
|
|
|
|
if (recur) {
|
|
|
|
return rb_sprintf("#<%"PRIsVALUE": ...>", rb_class_path(klass));
|
|
|
|
}
|
|
|
|
|
|
|
|
return rb_sprintf("#<%"PRIsVALUE": %+"PRIsVALUE">", rb_class_path(klass), ptr->enums);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* call-seq:
|
|
|
|
* obj.inspect -> string
|
|
|
|
*
|
|
|
|
* Returns a printable version of the enumerator chain.
|
|
|
|
*/
|
|
|
|
static VALUE
|
|
|
|
enum_chain_inspect(VALUE obj)
|
|
|
|
{
|
|
|
|
return rb_exec_recursive(inspect_enum_chain, obj, 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* call-seq:
|
|
|
|
* e.chain(*enums) -> enumerator
|
|
|
|
*
|
|
|
|
* Returns an enumerator object generated from this enumerator and
|
|
|
|
* given enumerables.
|
|
|
|
*
|
|
|
|
* e = (1..3).chain([4, 5])
|
|
|
|
* e.to_a #=> [1, 2, 3, 4, 5]
|
|
|
|
*/
|
|
|
|
static VALUE
|
|
|
|
enum_chain(int argc, VALUE *argv, VALUE obj)
|
|
|
|
{
|
|
|
|
VALUE enums = rb_ary_new_from_values(1, &obj);
|
|
|
|
rb_ary_cat(enums, argv, argc);
|
2021-03-05 23:25:51 +03:00
|
|
|
return new_enum_chain(enums);
|
2018-11-24 11:38:35 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* call-seq:
|
|
|
|
* e + enum -> enumerator
|
|
|
|
*
|
|
|
|
* Returns an enumerator object generated from this enumerator and a
|
|
|
|
* given enumerable.
|
|
|
|
*
|
|
|
|
* e = (1..3).each + [4, 5]
|
|
|
|
* e.to_a #=> [1, 2, 3, 4, 5]
|
|
|
|
*/
|
|
|
|
static VALUE
|
|
|
|
enumerator_plus(VALUE obj, VALUE eobj)
|
|
|
|
{
|
2021-03-05 23:25:51 +03:00
|
|
|
return new_enum_chain(rb_ary_new_from_args(2, obj, eobj));
|
2018-11-24 11:38:35 +03:00
|
|
|
}
|
|
|
|
|
2022-07-29 07:56:54 +03:00
|
|
|
/*
|
|
|
|
* Document-class: Enumerator::Product
|
|
|
|
*
|
|
|
|
* Enumerator::Product generates a Cartesian product of any number of
|
|
|
|
* enumerable objects. Iterating over the product of enumerable
|
|
|
|
* objects is roughly equivalent to nested each_entry loops where the
|
|
|
|
* loop for the rightmost object is put innermost.
|
|
|
|
*
|
|
|
|
* innings = Enumerator::Product.new(1..9, ['top', 'bottom'])
|
|
|
|
*
|
|
|
|
* innings.each do |i, h|
|
|
|
|
* p [i, h]
|
|
|
|
* end
|
|
|
|
* # [1, "top"]
|
|
|
|
* # [1, "bottom"]
|
|
|
|
* # [2, "top"]
|
|
|
|
* # [2, "bottom"]
|
|
|
|
* # [3, "top"]
|
|
|
|
* # [3, "bottom"]
|
|
|
|
* # ...
|
|
|
|
* # [9, "top"]
|
|
|
|
* # [9, "bottom"]
|
|
|
|
*
|
|
|
|
* The method used against each enumerable object is `each_entry`
|
|
|
|
* instead of `each` so that the product of N enumerable objects
|
Make product consistently yield an array of N elements instead of N arguments
Inconsistency pointed out by @mame:
```
>> Enumerator.product([1], [2], [3]).to_a
=> [[1, 2, 3]]
>> Enumerator.product([1], [2]).to_a
=> [[1, 2]]
>> Enumerator.product([1]).to_a
=> [1]
>> Enumerator.product().to_a
=> [nil]
```
Got fixed as follows:
```
>> Enumerator.product([1], [2], [3]).to_a
=> [[1, 2, 3]]
>> Enumerator.product([1], [2]).to_a
=> [[1, 2]]
>> Enumerator.product([1]).to_a
=> [[1]]
>> Enumerator.product().to_a
=> [[]]
```
This was due to the nature of the N-argument funcall in Ruby.
2022-12-21 12:19:19 +03:00
|
|
|
* yields an array of exactly N elements in each iteration.
|
2022-07-29 07:56:54 +03:00
|
|
|
*
|
|
|
|
* When no enumerable is given, it calls a given block once, yielding
|
|
|
|
* an empty array.
|
|
|
|
*
|
|
|
|
* Objects of this type can be created by Enumerator.product.
|
|
|
|
*/
|
|
|
|
|
|
|
|
static void
|
|
|
|
enum_product_mark(void *p)
|
|
|
|
{
|
|
|
|
struct enum_product *ptr = p;
|
|
|
|
rb_gc_mark_movable(ptr->enums);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
enum_product_compact(void *p)
|
|
|
|
{
|
|
|
|
struct enum_product *ptr = p;
|
|
|
|
ptr->enums = rb_gc_location(ptr->enums);
|
|
|
|
}
|
|
|
|
|
|
|
|
#define enum_product_free RUBY_TYPED_DEFAULT_FREE
|
|
|
|
|
|
|
|
static size_t
|
|
|
|
enum_product_memsize(const void *p)
|
|
|
|
{
|
|
|
|
return sizeof(struct enum_product);
|
|
|
|
}
|
|
|
|
|
|
|
|
static const rb_data_type_t enum_product_data_type = {
|
|
|
|
"product",
|
|
|
|
{
|
|
|
|
enum_product_mark,
|
|
|
|
enum_product_free,
|
|
|
|
enum_product_memsize,
|
|
|
|
enum_product_compact,
|
|
|
|
},
|
|
|
|
0, 0, RUBY_TYPED_FREE_IMMEDIATELY
|
|
|
|
};
|
|
|
|
|
|
|
|
static struct enum_product *
|
|
|
|
enum_product_ptr(VALUE obj)
|
|
|
|
{
|
|
|
|
struct enum_product *ptr;
|
|
|
|
|
|
|
|
TypedData_Get_Struct(obj, struct enum_product, &enum_product_data_type, ptr);
|
2022-11-15 07:24:08 +03:00
|
|
|
if (!ptr || UNDEF_P(ptr->enums)) {
|
2022-07-29 07:56:54 +03:00
|
|
|
rb_raise(rb_eArgError, "uninitialized product");
|
|
|
|
}
|
|
|
|
return ptr;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* :nodoc: */
|
|
|
|
static VALUE
|
|
|
|
enum_product_allocate(VALUE klass)
|
|
|
|
{
|
|
|
|
struct enum_product *ptr;
|
|
|
|
VALUE obj;
|
|
|
|
|
|
|
|
obj = TypedData_Make_Struct(klass, struct enum_product, &enum_product_data_type, ptr);
|
|
|
|
ptr->enums = Qundef;
|
|
|
|
|
|
|
|
return obj;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* call-seq:
|
|
|
|
* Enumerator::Product.new(*enums) -> enum
|
|
|
|
*
|
|
|
|
* Generates a new enumerator object that generates a Cartesian
|
|
|
|
* product of given enumerable objects.
|
|
|
|
*
|
|
|
|
* e = Enumerator::Product.new(1..3, [4, 5])
|
|
|
|
* e.to_a #=> [[1, 4], [1, 5], [2, 4], [2, 5], [3, 4], [3, 5]]
|
|
|
|
* e.size #=> 6
|
|
|
|
*/
|
|
|
|
static VALUE
|
2022-12-16 07:32:13 +03:00
|
|
|
enum_product_initialize(int argc, VALUE *argv, VALUE obj)
|
2022-07-29 07:56:54 +03:00
|
|
|
{
|
|
|
|
struct enum_product *ptr;
|
2022-12-16 07:32:13 +03:00
|
|
|
VALUE enums = Qnil, options = Qnil;
|
|
|
|
|
|
|
|
rb_scan_args(argc, argv, "*:", &enums, &options);
|
|
|
|
|
|
|
|
if (!NIL_P(options) && !RHASH_EMPTY_P(options)) {
|
|
|
|
rb_exc_raise(rb_keyword_error_new("unknown", rb_hash_keys(options)));
|
|
|
|
}
|
2022-07-29 07:56:54 +03:00
|
|
|
|
|
|
|
rb_check_frozen(obj);
|
|
|
|
TypedData_Get_Struct(obj, struct enum_product, &enum_product_data_type, ptr);
|
|
|
|
|
|
|
|
if (!ptr) rb_raise(rb_eArgError, "unallocated product");
|
|
|
|
|
2024-06-18 21:52:18 +03:00
|
|
|
ptr->enums = rb_ary_freeze(enums);
|
2022-07-29 07:56:54 +03:00
|
|
|
|
|
|
|
return obj;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* :nodoc: */
|
|
|
|
static VALUE
|
|
|
|
enum_product_init_copy(VALUE obj, VALUE orig)
|
|
|
|
{
|
|
|
|
struct enum_product *ptr0, *ptr1;
|
|
|
|
|
|
|
|
if (!OBJ_INIT_COPY(obj, orig)) return obj;
|
|
|
|
ptr0 = enum_product_ptr(orig);
|
|
|
|
|
|
|
|
TypedData_Get_Struct(obj, struct enum_product, &enum_product_data_type, ptr1);
|
|
|
|
|
|
|
|
if (!ptr1) rb_raise(rb_eArgError, "unallocated product");
|
|
|
|
|
|
|
|
ptr1->enums = ptr0->enums;
|
|
|
|
|
|
|
|
return obj;
|
|
|
|
}
|
|
|
|
|
|
|
|
static VALUE
|
|
|
|
enum_product_total_size(VALUE enums)
|
|
|
|
{
|
|
|
|
VALUE total = INT2FIX(1);
|
2024-04-16 10:13:19 +03:00
|
|
|
VALUE sizes = rb_ary_hidden_new(RARRAY_LEN(enums));
|
2022-07-29 07:56:54 +03:00
|
|
|
long i;
|
|
|
|
|
|
|
|
for (i = 0; i < RARRAY_LEN(enums); i++) {
|
|
|
|
VALUE size = enum_size(RARRAY_AREF(enums, i));
|
2024-04-16 10:13:19 +03:00
|
|
|
if (size == INT2FIX(0)) {
|
|
|
|
rb_ary_resize(sizes, 0);
|
|
|
|
return size;
|
|
|
|
}
|
|
|
|
rb_ary_push(sizes, size);
|
|
|
|
}
|
|
|
|
for (i = 0; i < RARRAY_LEN(sizes); i++) {
|
|
|
|
VALUE size = RARRAY_AREF(sizes, i);
|
2022-07-29 07:56:54 +03:00
|
|
|
|
|
|
|
if (NIL_P(size) || (RB_TYPE_P(size, T_FLOAT) && isinf(NUM2DBL(size)))) {
|
|
|
|
return size;
|
|
|
|
}
|
|
|
|
if (!RB_INTEGER_TYPE_P(size)) {
|
|
|
|
return Qnil;
|
|
|
|
}
|
|
|
|
|
|
|
|
total = rb_funcall(total, '*', 1, size);
|
|
|
|
}
|
|
|
|
|
|
|
|
return total;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* call-seq:
|
|
|
|
* obj.size -> int, Float::INFINITY or nil
|
|
|
|
*
|
|
|
|
* Returns the total size of the enumerator product calculated by
|
|
|
|
* multiplying the sizes of enumerables in the product. If any of the
|
|
|
|
* enumerables reports its size as nil or Float::INFINITY, that value
|
|
|
|
* is returned as the size.
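*
* An illustrative sketch (added example; relies only on the rule stated above):
*
*    Enumerator.product(1..3, [4, 5]).size              #=> 6
*    Enumerator.product(1..3, 1..Float::INFINITY).size  #=> Infinity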
|
|
|
|
*/
|
|
|
|
static VALUE
|
|
|
|
enum_product_size(VALUE obj)
|
|
|
|
{
|
|
|
|
return enum_product_total_size(enum_product_ptr(obj)->enums);
|
|
|
|
}
|
|
|
|
|
|
|
|
static VALUE
|
|
|
|
enum_product_enum_size(VALUE obj, VALUE args, VALUE eobj)
|
|
|
|
{
|
|
|
|
return enum_product_size(obj);
|
|
|
|
}
|
|
|
|
|
|
|
|
struct product_state {
|
|
|
|
VALUE obj;
|
|
|
|
VALUE block;
|
|
|
|
int argc;
|
|
|
|
VALUE *argv;
|
|
|
|
int index;
|
|
|
|
};
|
|
|
|
|
|
|
|
static VALUE product_each(VALUE, struct product_state *);
|
|
|
|
|
|
|
|
static VALUE
|
|
|
|
product_each_i(RB_BLOCK_CALL_FUNC_ARGLIST(value, state))
|
|
|
|
{
|
|
|
|
struct product_state *pstate = (struct product_state *)state;
|
|
|
|
pstate->argv[pstate->index++] = value;
|
|
|
|
|
|
|
|
VALUE val = product_each(pstate->obj, pstate);
|
|
|
|
pstate->index--;
|
|
|
|
return val;
|
|
|
|
}
|
|
|
|
|
|
|
|
static VALUE
|
|
|
|
product_each(VALUE obj, struct product_state *pstate)
|
|
|
|
{
|
|
|
|
struct enum_product *ptr = enum_product_ptr(obj);
|
|
|
|
VALUE enums = ptr->enums;
|
|
|
|
|
|
|
|
if (pstate->index < pstate->argc) {
|
|
|
|
VALUE eobj = RARRAY_AREF(enums, pstate->index);
|
|
|
|
|
|
|
|
rb_block_call(eobj, id_each_entry, 0, NULL, product_each_i, (VALUE)pstate);
|
2022-08-06 04:13:20 +03:00
|
|
|
}
|
|
|
|
else {
|
2022-12-21 12:19:19 +03:00
|
|
|
rb_funcall(pstate->block, id_call, 1, rb_ary_new_from_values(pstate->argc, pstate->argv));
|
2022-07-29 07:56:54 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
return obj;
|
|
|
|
}
|
|
|
|
|
|
|
|
static VALUE
|
|
|
|
enum_product_run(VALUE obj, VALUE block)
|
|
|
|
{
|
|
|
|
struct enum_product *ptr = enum_product_ptr(obj);
|
|
|
|
int argc = RARRAY_LENINT(ptr->enums);
|
|
|
|
struct product_state state = {
|
|
|
|
.obj = obj,
|
|
|
|
.block = block,
|
|
|
|
.index = 0,
|
|
|
|
.argc = argc,
|
|
|
|
.argv = ALLOCA_N(VALUE, argc),
|
|
|
|
};
|
|
|
|
|
|
|
|
return product_each(obj, &state);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* call-seq:
|
|
|
|
* obj.each { |...| ... } -> obj
|
|
|
|
* obj.each -> enumerator
|
|
|
|
*
|
|
|
|
* Iterates over the elements of the first enumerable by calling the
|
|
|
|
* "each_entry" method on it with the given arguments, then proceeds
|
|
|
|
* to the following enumerables in sequence until all of the
|
|
|
|
* enumerables are exhausted.
|
|
|
|
*
|
|
|
|
* If no block is given, returns an enumerator. Otherwise, returns self.
|
|
|
|
*/
|
|
|
|
static VALUE
|
|
|
|
enum_product_each(VALUE obj)
|
|
|
|
{
|
|
|
|
RETURN_SIZED_ENUMERATOR(obj, 0, 0, enum_product_enum_size);
|
|
|
|
|
|
|
|
return enum_product_run(obj, rb_block_proc());
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* call-seq:
|
|
|
|
* obj.rewind -> obj
|
|
|
|
*
|
|
|
|
* Rewinds the product enumerator by calling the "rewind" method on
|
|
|
|
* each enumerable in reverse order. Each call is performed only if
|
|
|
|
* the enumerable responds to the method.
|
|
|
|
*/
|
|
|
|
static VALUE
|
|
|
|
enum_product_rewind(VALUE obj)
|
|
|
|
{
|
|
|
|
struct enum_product *ptr = enum_product_ptr(obj);
|
|
|
|
VALUE enums = ptr->enums;
|
|
|
|
long i;
|
|
|
|
|
|
|
|
for (i = 0; i < RARRAY_LEN(enums); i++) {
|
|
|
|
rb_check_funcall(RARRAY_AREF(enums, i), id_rewind, 0, 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
return obj;
|
|
|
|
}
|
|
|
|
|
|
|
|
static VALUE
|
|
|
|
inspect_enum_product(VALUE obj, VALUE dummy, int recur)
|
|
|
|
{
|
|
|
|
VALUE klass = rb_obj_class(obj);
|
|
|
|
struct enum_product *ptr;
|
|
|
|
|
|
|
|
TypedData_Get_Struct(obj, struct enum_product, &enum_product_data_type, ptr);
|
|
|
|
|
2022-11-15 07:24:08 +03:00
|
|
|
if (!ptr || UNDEF_P(ptr->enums)) {
|
2022-07-29 07:56:54 +03:00
|
|
|
return rb_sprintf("#<%"PRIsVALUE": uninitialized>", rb_class_path(klass));
|
|
|
|
}
|
|
|
|
|
|
|
|
if (recur) {
|
|
|
|
return rb_sprintf("#<%"PRIsVALUE": ...>", rb_class_path(klass));
|
|
|
|
}
|
|
|
|
|
|
|
|
return rb_sprintf("#<%"PRIsVALUE": %+"PRIsVALUE">", rb_class_path(klass), ptr->enums);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* call-seq:
|
|
|
|
* obj.inspect -> string
|
|
|
|
*
|
|
|
|
* Returns a printable version of the product enumerator.
|
|
|
|
*/
|
|
|
|
static VALUE
|
|
|
|
enum_product_inspect(VALUE obj)
|
|
|
|
{
|
|
|
|
return rb_exec_recursive(inspect_enum_product, obj, 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* call-seq:
|
|
|
|
* Enumerator.product(*enums) -> enumerator
|
2022-12-21 13:13:15 +03:00
|
|
|
* Enumerator.product(*enums) { |elts| ... } -> enumerator
|
2022-07-29 07:56:54 +03:00
|
|
|
*
|
|
|
|
* Generates a new enumerator object that generates a Cartesian
|
|
|
|
* product of given enumerable objects. This is equivalent to
|
|
|
|
* Enumerator::Product.new.
|
|
|
|
*
|
|
|
|
* e = Enumerator.product(1..3, [4, 5])
|
|
|
|
* e.to_a #=> [[1, 4], [1, 5], [2, 4], [2, 5], [3, 4], [3, 5]]
|
|
|
|
* e.size #=> 6
|
2022-12-21 13:13:15 +03:00
|
|
|
*
|
|
|
|
* When a block is given, calls the block with each N-element array
|
|
|
|
* generated and returns +nil+.
|
2022-07-29 07:56:54 +03:00
|
|
|
*/
|
|
|
|
static VALUE
|
2022-12-16 07:32:13 +03:00
|
|
|
enumerator_s_product(int argc, VALUE *argv, VALUE klass)
|
2022-07-29 07:56:54 +03:00
|
|
|
{
|
2022-12-16 07:32:13 +03:00
|
|
|
VALUE enums = Qnil, options = Qnil, block = Qnil;
|
2022-07-29 07:56:54 +03:00
|
|
|
|
2022-12-16 07:32:13 +03:00
|
|
|
rb_scan_args(argc, argv, "*:&", &enums, &options, &block);
|
|
|
|
|
|
|
|
if (!NIL_P(options) && !RHASH_EMPTY_P(options)) {
|
|
|
|
rb_exc_raise(rb_keyword_error_new("unknown", rb_hash_keys(options)));
|
2022-07-29 07:56:54 +03:00
|
|
|
}
|
2022-12-16 07:32:13 +03:00
|
|
|
|
|
|
|
VALUE obj = enum_product_initialize(argc, argv, enum_product_allocate(rb_cEnumProduct));
|
|
|
|
|
2022-12-21 13:13:15 +03:00
|
|
|
if (!NIL_P(block)) {
|
|
|
|
enum_product_run(obj, block);
|
|
|
|
return Qnil;
|
|
|
|
}
|
2022-12-16 07:32:13 +03:00
|
|
|
|
2022-12-21 13:13:15 +03:00
|
|
|
return obj;
|
2022-07-29 07:56:54 +03:00
|
|
|
}
|
|
|
|
|
2018-09-12 11:36:48 +03:00
|
|
|
/*
|
|
|
|
* Document-class: Enumerator::ArithmeticSequence
|
|
|
|
*
|
|
|
|
* Enumerator::ArithmeticSequence is a subclass of Enumerator,
|
|
|
|
* that is a representation of sequences of numbers with common difference.
|
2018-10-13 00:55:49 +03:00
|
|
|
* Instances of this class can be generated by the Range#step and Numeric#step
|
2018-09-12 11:36:48 +03:00
|
|
|
* methods.
|
2020-12-21 03:32:30 +03:00
|
|
|
*
|
|
|
|
* The class can be used for slicing Array (see Array#slice) or custom
|
|
|
|
* collections.
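*
* An illustrative sketch (added example; assumes the standard Range#step
* and Array#slice behavior):
*
*    aseq = (1..10).step(2)
*    aseq.class  #=> Enumerator::ArithmeticSequence
*    aseq.to_a   #=> [1, 3, 5, 7, 9]
*    [0, 1, 2, 3, 4, 5][(1..).step(2)]  #=> [1, 3, 5]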
|
2018-09-12 11:36:48 +03:00
|
|
|
*/
|
|
|
|
|
2018-08-06 12:08:28 +03:00
|
|
|
VALUE
|
|
|
|
rb_arith_seq_new(VALUE obj, VALUE meth, int argc, VALUE const *argv,
|
|
|
|
rb_enumerator_size_func *size_fn,
|
|
|
|
VALUE beg, VALUE end, VALUE step, int excl)
|
|
|
|
{
|
|
|
|
VALUE aseq = enumerator_init(enumerator_allocate(rb_cArithSeq),
|
2019-10-04 22:51:57 +03:00
|
|
|
obj, meth, argc, argv, size_fn, Qnil, rb_keyword_given_p());
|
2018-08-06 12:08:28 +03:00
|
|
|
rb_ivar_set(aseq, id_begin, beg);
|
|
|
|
rb_ivar_set(aseq, id_end, end);
|
|
|
|
rb_ivar_set(aseq, id_step, step);
|
2021-08-31 14:30:35 +03:00
|
|
|
rb_ivar_set(aseq, id_exclude_end, RBOOL(excl));
|
2018-08-06 12:08:28 +03:00
|
|
|
return aseq;
|
|
|
|
}
|
|
|
|
|
2018-09-12 11:36:48 +03:00
|
|
|
/*
|
2019-04-27 06:21:35 +03:00
|
|
|
* call-seq: aseq.begin -> num or nil
|
2018-09-12 11:36:48 +03:00
|
|
|
*
|
|
|
|
* Returns the number that defines the first element of this arithmetic
|
|
|
|
* sequence.
|
|
|
|
*/
|
2018-08-06 12:08:28 +03:00
|
|
|
static inline VALUE
|
|
|
|
arith_seq_begin(VALUE self)
|
|
|
|
{
|
|
|
|
return rb_ivar_get(self, id_begin);
|
|
|
|
}
|
|
|
|
|
2018-09-12 11:36:48 +03:00
|
|
|
/*
|
|
|
|
* call-seq: aseq.end -> num or nil
|
|
|
|
*
|
|
|
|
* Returns the number that defines the end of this arithmetic sequence.
|
|
|
|
*/
|
2018-08-06 12:08:28 +03:00
|
|
|
static inline VALUE
|
|
|
|
arith_seq_end(VALUE self)
|
|
|
|
{
|
|
|
|
return rb_ivar_get(self, id_end);
|
|
|
|
}
|
|
|
|
|
2018-09-12 11:36:48 +03:00
|
|
|
/*
|
|
|
|
* call-seq: aseq.step -> num
|
|
|
|
*
|
|
|
|
* Returns the number that defines the common difference between
|
|
|
|
* two adjacent elements in this arithmetic sequence.
|
|
|
|
*/
|
2018-08-06 12:08:28 +03:00
|
|
|
static inline VALUE
|
|
|
|
arith_seq_step(VALUE self)
|
|
|
|
{
|
|
|
|
return rb_ivar_get(self, id_step);
|
|
|
|
}
|
|
|
|
|
2018-09-12 11:36:48 +03:00
|
|
|
/*
|
|
|
|
* call-seq: aseq.exclude_end? -> true or false
|
|
|
|
*
|
|
|
|
* Returns <code>true</code> if this arithmetic sequence excludes its end value.
|
|
|
|
*/
|
2018-08-06 12:08:28 +03:00
|
|
|
static inline VALUE
|
|
|
|
arith_seq_exclude_end(VALUE self)
|
|
|
|
{
|
|
|
|
return rb_ivar_get(self, id_exclude_end);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline int
|
|
|
|
arith_seq_exclude_end_p(VALUE self)
|
|
|
|
{
|
|
|
|
return RTEST(arith_seq_exclude_end(self));
|
|
|
|
}
|
|
|
|
|
2018-12-12 09:39:58 +03:00
|
|
|
int
|
2018-12-12 10:16:07 +03:00
|
|
|
rb_arithmetic_sequence_extract(VALUE obj, rb_arithmetic_sequence_components_t *component)
|
2018-12-12 09:39:58 +03:00
|
|
|
{
|
|
|
|
if (rb_obj_is_kind_of(obj, rb_cArithSeq)) {
|
2018-12-12 10:16:07 +03:00
|
|
|
component->begin = arith_seq_begin(obj);
|
|
|
|
component->end = arith_seq_end(obj);
|
|
|
|
component->step = arith_seq_step(obj);
|
|
|
|
component->exclude_end = arith_seq_exclude_end_p(obj);
|
2018-12-12 09:39:58 +03:00
|
|
|
return 1;
|
|
|
|
}
|
2020-10-20 20:40:18 +03:00
|
|
|
else if (rb_range_values(obj, &component->begin, &component->end, &component->exclude_end)) {
|
2018-12-12 10:16:07 +03:00
|
|
|
component->step = INT2FIX(1);
|
2018-12-12 09:39:58 +03:00
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-10-20 20:40:18 +03:00
|
|
|
VALUE
|
|
|
|
rb_arithmetic_sequence_beg_len_step(VALUE obj, long *begp, long *lenp, long *stepp, long len, int err)
|
|
|
|
{
|
2021-09-27 08:47:52 +03:00
|
|
|
RBIMPL_NONNULL_ARG(begp);
|
|
|
|
RBIMPL_NONNULL_ARG(lenp);
|
|
|
|
RBIMPL_NONNULL_ARG(stepp);
|
2020-10-20 20:40:18 +03:00
|
|
|
|
|
|
|
rb_arithmetic_sequence_components_t aseq;
|
|
|
|
if (!rb_arithmetic_sequence_extract(obj, &aseq)) {
|
|
|
|
return Qfalse;
|
|
|
|
}
|
|
|
|
|
|
|
|
long step = NIL_P(aseq.step) ? 1 : NUM2LONG(aseq.step);
|
|
|
|
*stepp = step;
|
|
|
|
|
|
|
|
if (step < 0) {
|
2022-08-11 13:16:49 +03:00
|
|
|
if (aseq.exclude_end && !NIL_P(aseq.end)) {
|
|
|
|
/* Handle exclusion before range reversal */
|
|
|
|
aseq.end = LONG2NUM(NUM2LONG(aseq.end) + 1);
|
|
|
|
|
|
|
|
/* Don't exclude the previous beginning */
|
|
|
|
aseq.exclude_end = 0;
|
|
|
|
}
|
2020-10-20 20:40:18 +03:00
|
|
|
VALUE tmp = aseq.begin;
|
|
|
|
aseq.begin = aseq.end;
|
|
|
|
aseq.end = tmp;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (err == 0 && (step < -1 || step > 1)) {
|
|
|
|
if (rb_range_component_beg_len(aseq.begin, aseq.end, aseq.exclude_end, begp, lenp, len, 1) == Qtrue) {
|
|
|
|
if (*begp > len)
|
|
|
|
goto out_of_range;
|
|
|
|
if (*lenp > len)
|
|
|
|
goto out_of_range;
|
|
|
|
return Qtrue;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
else {
|
|
|
|
return rb_range_component_beg_len(aseq.begin, aseq.end, aseq.exclude_end, begp, lenp, len, err);
|
|
|
|
}
|
|
|
|
|
|
|
|
out_of_range:
|
|
|
|
rb_raise(rb_eRangeError, "%+"PRIsVALUE" out of range", obj);
|
|
|
|
return Qnil;
|
|
|
|
}
|
|
|
|
|
2018-09-12 11:36:48 +03:00
|
|
|
/*
|
|
|
|
* call-seq:
|
|
|
|
* aseq.first -> num or nil
|
|
|
|
* aseq.first(n) -> an_array
|
|
|
|
*
|
|
|
|
* Returns the first number in this arithmetic sequence,
|
|
|
|
* or an array of the first +n+ elements.
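 *
 * For example:
 *
 *   (1..10).step(2).first    # => 1
 *   (1..10).step(2).first(3) # => [1, 3, 5]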
|
|
|
|
*/
|
2018-08-06 12:08:28 +03:00
|
|
|
static VALUE
|
|
|
|
arith_seq_first(int argc, VALUE *argv, VALUE self)
|
|
|
|
{
|
2019-01-30 09:05:57 +03:00
|
|
|
VALUE b, e, s, ary;
|
|
|
|
long n;
|
|
|
|
int x;
|
|
|
|
|
|
|
|
rb_check_arity(argc, 0, 1);
|
2018-08-06 12:08:28 +03:00
|
|
|
|
|
|
|
b = arith_seq_begin(self);
|
|
|
|
e = arith_seq_end(self);
|
|
|
|
s = arith_seq_step(self);
|
2019-01-30 09:05:57 +03:00
|
|
|
if (argc == 0) {
|
2019-04-04 06:34:55 +03:00
|
|
|
if (NIL_P(b)) {
|
|
|
|
return Qnil;
|
|
|
|
}
|
2019-01-30 09:05:57 +03:00
|
|
|
if (!NIL_P(e)) {
|
|
|
|
VALUE zero = INT2FIX(0);
|
|
|
|
int r = rb_cmpint(rb_num_coerce_cmp(s, zero, idCmp), s, zero);
|
|
|
|
if (r > 0 && RTEST(rb_funcall(b, '>', 1, e))) {
|
|
|
|
return Qnil;
|
|
|
|
}
|
|
|
|
if (r < 0 && RTEST(rb_funcall(b, '<', 1, e))) {
|
2018-08-06 12:08:28 +03:00
|
|
|
return Qnil;
|
|
|
|
}
|
|
|
|
}
|
2019-01-30 09:05:57 +03:00
|
|
|
return b;
|
2018-08-06 12:08:28 +03:00
|
|
|
}
|
|
|
|
|
2019-01-30 09:05:57 +03:00
|
|
|
// TODO: the following code should be extracted as arith_seq_take
|
|
|
|
|
|
|
|
n = NUM2LONG(argv[0]);
|
|
|
|
if (n < 0) {
|
2019-01-30 09:06:02 +03:00
|
|
|
rb_raise(rb_eArgError, "attempt to take negative size");
|
2018-08-06 12:08:28 +03:00
|
|
|
}
|
2019-01-30 09:05:57 +03:00
|
|
|
if (n == 0) {
|
|
|
|
return rb_ary_new_capa(0);
|
|
|
|
}
|
|
|
|
|
|
|
|
x = arith_seq_exclude_end_p(self);
|
2018-08-06 12:08:28 +03:00
|
|
|
|
2019-01-30 09:05:57 +03:00
|
|
|
if (FIXNUM_P(b) && NIL_P(e) && FIXNUM_P(s)) {
|
|
|
|
long i = FIX2LONG(b), unit = FIX2LONG(s);
|
|
|
|
ary = rb_ary_new_capa(n);
|
|
|
|
while (n > 0 && FIXABLE(i)) {
|
|
|
|
rb_ary_push(ary, LONG2FIX(i));
|
|
|
|
            i += unit; // FIXABLE + FIXABLE never overflows
|
|
|
|
--n;
|
|
|
|
}
|
|
|
|
if (n > 0) {
|
|
|
|
b = LONG2NUM(i);
|
|
|
|
while (n > 0) {
|
|
|
|
rb_ary_push(ary, b);
|
|
|
|
b = rb_big_plus(b, s);
|
|
|
|
--n;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return ary;
|
|
|
|
}
|
|
|
|
else if (FIXNUM_P(b) && FIXNUM_P(e) && FIXNUM_P(s)) {
|
|
|
|
long i = FIX2LONG(b);
|
|
|
|
long end = FIX2LONG(e);
|
|
|
|
long unit = FIX2LONG(s);
|
|
|
|
long len;
|
|
|
|
|
|
|
|
if (unit >= 0) {
|
|
|
|
if (!x) end += 1;
|
|
|
|
|
|
|
|
len = end - i;
|
|
|
|
if (len < 0) len = 0;
|
|
|
|
ary = rb_ary_new_capa((n < len) ? n : len);
|
|
|
|
while (n > 0 && i < end) {
|
|
|
|
rb_ary_push(ary, LONG2FIX(i));
|
|
|
|
if (i + unit < i) break;
|
|
|
|
i += unit;
|
|
|
|
--n;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
else {
|
|
|
|
if (!x) end -= 1;
|
|
|
|
|
|
|
|
len = i - end;
|
|
|
|
if (len < 0) len = 0;
|
|
|
|
ary = rb_ary_new_capa((n < len) ? n : len);
|
|
|
|
while (n > 0 && i > end) {
|
|
|
|
rb_ary_push(ary, LONG2FIX(i));
|
|
|
|
if (i + unit > i) break;
|
|
|
|
i += unit;
|
|
|
|
--n;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return ary;
|
|
|
|
}
|
|
|
|
else if (RB_FLOAT_TYPE_P(b) || RB_FLOAT_TYPE_P(e) || RB_FLOAT_TYPE_P(s)) {
|
|
|
|
/* generate values like ruby_float_step */
|
|
|
|
|
|
|
|
double unit = NUM2DBL(s);
|
|
|
|
double beg = NUM2DBL(b);
|
|
|
|
double end = NIL_P(e) ? (unit < 0 ? -1 : 1)*HUGE_VAL : NUM2DBL(e);
|
|
|
|
double len = ruby_float_step_size(beg, end, unit, x);
|
|
|
|
long i;
|
|
|
|
|
|
|
|
if (n > len)
|
|
|
|
n = (long)len;
|
|
|
|
|
|
|
|
if (isinf(unit)) {
|
|
|
|
if (len > 0) {
|
|
|
|
ary = rb_ary_new_capa(1);
|
|
|
|
rb_ary_push(ary, DBL2NUM(beg));
|
|
|
|
}
|
|
|
|
else {
|
|
|
|
ary = rb_ary_new_capa(0);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
else if (unit == 0) {
|
|
|
|
VALUE val = DBL2NUM(beg);
|
|
|
|
ary = rb_ary_new_capa(n);
|
|
|
|
for (i = 0; i < len; ++i) {
|
|
|
|
rb_ary_push(ary, val);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
else {
|
|
|
|
ary = rb_ary_new_capa(n);
|
|
|
|
for (i = 0; i < n; ++i) {
|
|
|
|
double d = i*unit+beg;
|
|
|
|
if (unit >= 0 ? end < d : d < end) d = end;
|
|
|
|
rb_ary_push(ary, DBL2NUM(d));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return ary;
|
|
|
|
}
|
2018-08-06 12:08:28 +03:00
|
|
|
|
|
|
|
return rb_call_super(argc, argv);
|
|
|
|
}
|
|
|
|
|
2020-12-09 12:48:59 +03:00
|
|
|
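/*
 * Arithmetic helpers for the sequence calculations below: they take the
 * fast path for Integer, Float, and Rational operands and fall back to a
 * plain method call for any other receiver.  num_idiv performs floor
 * division.
 */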
static inline VALUE
|
|
|
|
num_plus(VALUE a, VALUE b)
|
|
|
|
{
|
|
|
|
if (RB_INTEGER_TYPE_P(a)) {
|
|
|
|
return rb_int_plus(a, b);
|
|
|
|
}
|
|
|
|
else if (RB_FLOAT_TYPE_P(a)) {
|
|
|
|
return rb_float_plus(a, b);
|
|
|
|
}
|
|
|
|
else if (RB_TYPE_P(a, T_RATIONAL)) {
|
|
|
|
return rb_rational_plus(a, b);
|
|
|
|
}
|
|
|
|
else {
|
|
|
|
return rb_funcallv(a, '+', 1, &b);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline VALUE
|
|
|
|
num_minus(VALUE a, VALUE b)
|
|
|
|
{
|
|
|
|
if (RB_INTEGER_TYPE_P(a)) {
|
|
|
|
return rb_int_minus(a, b);
|
|
|
|
}
|
|
|
|
else if (RB_FLOAT_TYPE_P(a)) {
|
|
|
|
return rb_float_minus(a, b);
|
|
|
|
}
|
|
|
|
else if (RB_TYPE_P(a, T_RATIONAL)) {
|
|
|
|
return rb_rational_minus(a, b);
|
|
|
|
}
|
|
|
|
else {
|
|
|
|
return rb_funcallv(a, '-', 1, &b);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline VALUE
|
|
|
|
num_mul(VALUE a, VALUE b)
|
|
|
|
{
|
|
|
|
if (RB_INTEGER_TYPE_P(a)) {
|
|
|
|
return rb_int_mul(a, b);
|
|
|
|
}
|
|
|
|
else if (RB_FLOAT_TYPE_P(a)) {
|
|
|
|
return rb_float_mul(a, b);
|
|
|
|
}
|
|
|
|
else if (RB_TYPE_P(a, T_RATIONAL)) {
|
|
|
|
return rb_rational_mul(a, b);
|
|
|
|
}
|
|
|
|
else {
|
|
|
|
return rb_funcallv(a, '*', 1, &b);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline VALUE
|
|
|
|
num_idiv(VALUE a, VALUE b)
|
|
|
|
{
|
|
|
|
VALUE q;
|
|
|
|
if (RB_INTEGER_TYPE_P(a)) {
|
|
|
|
q = rb_int_idiv(a, b);
|
|
|
|
}
|
|
|
|
else if (RB_FLOAT_TYPE_P(a)) {
|
|
|
|
q = rb_float_div(a, b);
|
|
|
|
}
|
|
|
|
else if (RB_TYPE_P(a, T_RATIONAL)) {
|
|
|
|
q = rb_rational_div(a, b);
|
|
|
|
}
|
|
|
|
else {
|
|
|
|
q = rb_funcallv(a, idDiv, 1, &b);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (RB_INTEGER_TYPE_P(q)) {
|
|
|
|
return q;
|
|
|
|
}
|
|
|
|
else if (RB_FLOAT_TYPE_P(q)) {
|
|
|
|
return rb_float_floor(q, 0);
|
|
|
|
}
|
|
|
|
else if (RB_TYPE_P(q, T_RATIONAL)) {
|
|
|
|
return rb_rational_floor(q, 0);
|
|
|
|
}
|
|
|
|
else {
|
|
|
|
return rb_funcall(q, rb_intern("floor"), 0);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-09-12 11:36:48 +03:00
|
|
|
/*
|
|
|
|
* call-seq:
|
|
|
|
* aseq.last -> num or nil
|
|
|
|
* aseq.last(n) -> an_array
|
|
|
|
*
|
|
|
|
* Returns the last number in this arithmetic sequence,
|
|
|
|
* or an array of the last +n+ elements.
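 *
 * For example:
 *
 *   (1..10).step(2).last    # => 9
 *   (1..10).step(2).last(3) # => [5, 7, 9]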
|
|
|
|
*/
|
2018-08-06 12:08:28 +03:00
|
|
|
static VALUE
|
|
|
|
arith_seq_last(int argc, VALUE *argv, VALUE self)
|
|
|
|
{
|
|
|
|
VALUE b, e, s, len_1, len, last, nv, ary;
|
|
|
|
int last_is_adjusted;
|
|
|
|
long n;
|
|
|
|
|
|
|
|
e = arith_seq_end(self);
|
|
|
|
if (NIL_P(e)) {
|
|
|
|
rb_raise(rb_eRangeError,
|
|
|
|
"cannot get the last element of endless arithmetic sequence");
|
|
|
|
}
|
|
|
|
|
|
|
|
b = arith_seq_begin(self);
|
|
|
|
s = arith_seq_step(self);
|
|
|
|
|
2020-12-09 12:48:59 +03:00
|
|
|
len_1 = num_idiv(num_minus(e, b), s);
|
2018-08-06 12:08:28 +03:00
|
|
|
if (rb_num_negative_int_p(len_1)) {
|
|
|
|
if (argc == 0) {
|
|
|
|
return Qnil;
|
|
|
|
}
|
|
|
|
return rb_ary_new_capa(0);
|
|
|
|
}
|
|
|
|
|
2020-12-09 12:48:59 +03:00
|
|
|
last = num_plus(b, num_mul(s, len_1));
|
2018-08-06 12:08:28 +03:00
|
|
|
if ((last_is_adjusted = arith_seq_exclude_end_p(self) && rb_equal(last, e))) {
|
2020-12-09 12:48:59 +03:00
|
|
|
last = num_minus(last, s);
|
2018-08-06 12:08:28 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
if (argc == 0) {
|
|
|
|
return last;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (last_is_adjusted) {
|
|
|
|
len = len_1;
|
|
|
|
}
|
|
|
|
else {
|
|
|
|
len = rb_int_plus(len_1, INT2FIX(1));
|
|
|
|
}
|
|
|
|
|
|
|
|
rb_scan_args(argc, argv, "1", &nv);
|
2018-08-10 07:49:44 +03:00
|
|
|
if (!RB_INTEGER_TYPE_P(nv)) {
|
|
|
|
nv = rb_to_int(nv);
|
|
|
|
}
|
2018-08-06 12:08:28 +03:00
|
|
|
if (RTEST(rb_int_gt(nv, len))) {
|
|
|
|
nv = len;
|
|
|
|
}
|
|
|
|
n = NUM2LONG(nv);
|
|
|
|
if (n < 0) {
|
|
|
|
rb_raise(rb_eArgError, "negative array size");
|
|
|
|
}
|
|
|
|
|
|
|
|
ary = rb_ary_new_capa(n);
|
|
|
|
b = rb_int_minus(last, rb_int_mul(s, nv));
|
|
|
|
while (n) {
|
|
|
|
b = rb_int_plus(b, s);
|
|
|
|
rb_ary_push(ary, b);
|
|
|
|
--n;
|
|
|
|
}
|
|
|
|
|
|
|
|
return ary;
|
|
|
|
}
|
|
|
|
|
2018-09-12 11:36:48 +03:00
|
|
|
/*
|
|
|
|
* call-seq:
|
|
|
|
* aseq.inspect -> string
|
|
|
|
*
|
|
|
|
* Convert this arithmetic sequence to a printable form.
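 *
 * For example (representative output):
 *
 *   (1..10).step(2).inspect # => "((1..10).step(2))"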
|
|
|
|
*/
|
2018-08-06 12:08:28 +03:00
|
|
|
static VALUE
|
|
|
|
arith_seq_inspect(VALUE self)
|
|
|
|
{
|
|
|
|
struct enumerator *e;
|
|
|
|
VALUE eobj, str, eargs;
|
|
|
|
int range_p;
|
|
|
|
|
|
|
|
TypedData_Get_Struct(self, struct enumerator, &enumerator_data_type, e);
|
|
|
|
|
|
|
|
eobj = rb_attr_get(self, id_receiver);
|
|
|
|
if (NIL_P(eobj)) {
|
|
|
|
eobj = e->obj;
|
|
|
|
}
|
|
|
|
|
|
|
|
range_p = RTEST(rb_obj_is_kind_of(eobj, rb_cRange));
|
|
|
|
str = rb_sprintf("(%s%"PRIsVALUE"%s.", range_p ? "(" : "", eobj, range_p ? ")" : "");
|
|
|
|
|
|
|
|
rb_str_buf_append(str, rb_id2str(e->meth));
|
|
|
|
|
|
|
|
eargs = rb_attr_get(eobj, id_arguments);
|
|
|
|
if (NIL_P(eargs)) {
|
|
|
|
eargs = e->args;
|
|
|
|
}
|
|
|
|
if (eargs != Qfalse) {
|
|
|
|
long argc = RARRAY_LEN(eargs);
|
|
|
|
const VALUE *argv = RARRAY_CONST_PTR(eargs); /* WB: no new reference */
|
|
|
|
|
|
|
|
if (argc > 0) {
|
|
|
|
VALUE kwds = Qnil;
|
|
|
|
|
|
|
|
rb_str_buf_cat2(str, "(");
|
|
|
|
|
|
|
|
if (RB_TYPE_P(argv[argc-1], T_HASH)) {
|
|
|
|
int all_key = TRUE;
|
|
|
|
rb_hash_foreach(argv[argc-1], key_symbol_p, (VALUE)&all_key);
|
|
|
|
if (all_key) kwds = argv[--argc];
|
|
|
|
}
|
|
|
|
|
|
|
|
while (argc--) {
|
|
|
|
VALUE arg = *argv++;
|
|
|
|
|
|
|
|
rb_str_append(str, rb_inspect(arg));
|
|
|
|
rb_str_buf_cat2(str, ", ");
|
|
|
|
}
|
|
|
|
if (!NIL_P(kwds)) {
|
|
|
|
rb_hash_foreach(kwds, kwd_append, str);
|
|
|
|
}
|
|
|
|
rb_str_set_len(str, RSTRING_LEN(str)-2); /* drop the last ", " */
|
|
|
|
rb_str_buf_cat2(str, ")");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
rb_str_buf_cat2(str, ")");
|
|
|
|
|
|
|
|
return str;
|
|
|
|
}
|
|
|
|
|
2018-09-12 11:36:48 +03:00
|
|
|
/*
|
|
|
|
* call-seq:
|
|
|
|
* aseq == obj -> true or false
|
|
|
|
*
|
|
|
|
* Returns <code>true</code> only if +obj+ is an Enumerator::ArithmeticSequence,
|
|
|
|
 * and has equivalent begin, end, step, and exclude_end? settings.
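 *
 * For example:
 *
 *   (1..10).step(2) == ((1..10) % 2)    # => true
 *   (1..10).step(2) == (1...10).step(2) # => false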
|
|
|
|
*/
|
2018-08-06 12:08:28 +03:00
|
|
|
static VALUE
|
|
|
|
arith_seq_eq(VALUE self, VALUE other)
|
|
|
|
{
|
|
|
|
if (!RTEST(rb_obj_is_kind_of(other, rb_cArithSeq))) {
|
|
|
|
return Qfalse;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!rb_equal(arith_seq_begin(self), arith_seq_begin(other))) {
|
|
|
|
return Qfalse;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!rb_equal(arith_seq_end(self), arith_seq_end(other))) {
|
|
|
|
return Qfalse;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!rb_equal(arith_seq_step(self), arith_seq_step(other))) {
|
|
|
|
return Qfalse;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (arith_seq_exclude_end_p(self) != arith_seq_exclude_end_p(other)) {
|
|
|
|
return Qfalse;
|
|
|
|
}
|
|
|
|
|
|
|
|
return Qtrue;
|
|
|
|
}
|
|
|
|
|
2018-09-12 11:36:48 +03:00
|
|
|
/*
|
|
|
|
* call-seq:
|
|
|
|
* aseq.hash -> integer
|
|
|
|
*
|
|
|
|
* Compute a hash-value for this arithmetic sequence.
|
|
|
|
 * Two arithmetic sequences with the same begin, end, step, and exclude_end?
|
|
|
|
* values will generate the same hash-value.
|
|
|
|
*
|
|
|
|
* See also Object#hash.
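 *
 * For example:
 *
 *   ((1..10) % 2).hash == (1..10).step(2).hash # => true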
|
|
|
|
*/
|
2018-08-06 12:08:28 +03:00
|
|
|
static VALUE
|
|
|
|
arith_seq_hash(VALUE self)
|
|
|
|
{
|
|
|
|
st_index_t hash;
|
|
|
|
VALUE v;
|
|
|
|
|
|
|
|
hash = rb_hash_start(arith_seq_exclude_end_p(self));
|
|
|
|
v = rb_hash(arith_seq_begin(self));
|
|
|
|
hash = rb_hash_uint(hash, NUM2LONG(v));
|
|
|
|
v = rb_hash(arith_seq_end(self));
|
|
|
|
hash = rb_hash_uint(hash, NUM2LONG(v));
|
|
|
|
v = rb_hash(arith_seq_step(self));
|
|
|
|
hash = rb_hash_uint(hash, NUM2LONG(v));
|
|
|
|
hash = rb_hash_end(hash);
|
|
|
|
|
2019-04-08 06:26:27 +03:00
|
|
|
return ST2FIX(hash);
|
2018-08-06 12:08:28 +03:00
|
|
|
}
|
|
|
|
|
2018-12-21 03:03:39 +03:00
|
|
|
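/* Coercing comparison: true if x >= y, using Numeric coercion like #>=. */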
#define NUM_GE(x, y) RTEST(rb_num_coerce_relop((x), (y), idGE))
|
|
|
|
|
2018-08-06 12:08:28 +03:00
|
|
|
struct arith_seq_gen {
|
|
|
|
VALUE current;
|
|
|
|
VALUE end;
|
|
|
|
VALUE step;
|
|
|
|
int excl;
|
|
|
|
};
|
|
|
|
|
2018-09-12 11:36:48 +03:00
|
|
|
/*
|
|
|
|
* call-seq:
|
|
|
|
* aseq.each {|i| block } -> aseq
|
2018-09-12 19:05:07 +03:00
|
|
|
* aseq.each -> aseq
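 *
 * Iterates over the elements of this arithmetic sequence, yielding each
 * one to the block.  Returns the sequence itself, with or without a block.
 *
 * For example:
 *
 *   (1..10).step(3).each { |i| print i, " " }
 *   # prints: 1 4 7 10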
|
2018-09-12 11:36:48 +03:00
|
|
|
*/
|
2018-08-06 12:08:28 +03:00
|
|
|
static VALUE
|
|
|
|
arith_seq_each(VALUE self)
|
|
|
|
{
|
|
|
|
VALUE c, e, s, len_1, last;
|
|
|
|
int x;
|
|
|
|
|
|
|
|
if (!rb_block_given_p()) return self;
|
|
|
|
|
|
|
|
c = arith_seq_begin(self);
|
|
|
|
e = arith_seq_end(self);
|
|
|
|
s = arith_seq_step(self);
|
|
|
|
x = arith_seq_exclude_end_p(self);
|
|
|
|
|
2018-09-12 10:35:42 +03:00
|
|
|
if (!RB_TYPE_P(s, T_COMPLEX) && ruby_float_step(c, e, s, x, TRUE)) {
|
2018-08-06 12:08:28 +03:00
|
|
|
return self;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (NIL_P(e)) {
|
|
|
|
while (1) {
|
|
|
|
rb_yield(c);
|
|
|
|
c = rb_int_plus(c, s);
|
|
|
|
}
|
|
|
|
|
|
|
|
return self;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (rb_equal(s, INT2FIX(0))) {
|
|
|
|
while (1) {
|
|
|
|
rb_yield(c);
|
|
|
|
}
|
|
|
|
|
|
|
|
return self;
|
|
|
|
}
|
|
|
|
|
2020-12-09 12:48:59 +03:00
|
|
|
len_1 = num_idiv(num_minus(e, c), s);
|
|
|
|
last = num_plus(c, num_mul(s, len_1));
|
2018-08-06 12:08:28 +03:00
|
|
|
if (x && rb_equal(last, e)) {
|
2020-12-09 12:48:59 +03:00
|
|
|
last = num_minus(last, s);
|
2018-08-06 12:08:28 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
if (rb_num_negative_int_p(s)) {
|
2018-12-21 03:03:39 +03:00
|
|
|
while (NUM_GE(c, last)) {
|
2018-08-06 12:08:28 +03:00
|
|
|
rb_yield(c);
|
2020-12-09 12:48:59 +03:00
|
|
|
c = num_plus(c, s);
|
2018-08-06 12:08:28 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
else {
|
2018-12-21 03:03:39 +03:00
|
|
|
while (NUM_GE(last, c)) {
|
2018-08-06 12:08:28 +03:00
|
|
|
rb_yield(c);
|
2020-12-09 12:48:59 +03:00
|
|
|
c = num_plus(c, s);
|
2018-08-06 12:08:28 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return self;
|
|
|
|
}
|
|
|
|
|
2018-09-12 11:36:48 +03:00
|
|
|
/*
|
|
|
|
* call-seq:
|
|
|
|
 *   aseq.size -> num
|
|
|
|
*
|
|
|
|
* Returns the number of elements in this arithmetic sequence if it is a finite
|
|
|
|
 * sequence. Otherwise, returns <code>Float::INFINITY</code>.
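 *
 * For example:
 *
 *   (1..10).step(2).size # => 5
 *   (1..).step(2).size   # => Infinity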
|
|
|
|
*/
|
2018-08-06 12:08:28 +03:00
|
|
|
static VALUE
|
|
|
|
arith_seq_size(VALUE self)
|
|
|
|
{
|
|
|
|
VALUE b, e, s, len_1, len, last;
|
|
|
|
int x;
|
|
|
|
|
|
|
|
b = arith_seq_begin(self);
|
|
|
|
e = arith_seq_end(self);
|
|
|
|
s = arith_seq_step(self);
|
|
|
|
x = arith_seq_exclude_end_p(self);
|
|
|
|
|
|
|
|
if (RB_FLOAT_TYPE_P(b) || RB_FLOAT_TYPE_P(e) || RB_FLOAT_TYPE_P(s)) {
|
|
|
|
double ee, n;
|
|
|
|
|
|
|
|
if (NIL_P(e)) {
|
|
|
|
if (rb_num_negative_int_p(s)) {
|
|
|
|
ee = -HUGE_VAL;
|
|
|
|
}
|
|
|
|
else {
|
|
|
|
ee = HUGE_VAL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
else {
|
|
|
|
ee = NUM2DBL(e);
|
|
|
|
}
|
|
|
|
|
2020-02-09 09:43:15 +03:00
|
|
|
n = ruby_float_step_size(NUM2DBL(b), ee, NUM2DBL(s), x);
|
2018-08-06 12:08:28 +03:00
|
|
|
if (isinf(n)) return DBL2NUM(n);
|
2020-04-08 12:03:46 +03:00
|
|
|
if (POSFIXABLE(n)) return LONG2FIX((long)n);
|
2018-08-06 12:08:28 +03:00
|
|
|
return rb_dbl2big(n);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (NIL_P(e)) {
|
|
|
|
return DBL2NUM(HUGE_VAL);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!rb_obj_is_kind_of(s, rb_cNumeric)) {
|
|
|
|
s = rb_to_int(s);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (rb_equal(s, INT2FIX(0))) {
|
|
|
|
return DBL2NUM(HUGE_VAL);
|
|
|
|
}
|
|
|
|
|
|
|
|
len_1 = rb_int_idiv(rb_int_minus(e, b), s);
|
|
|
|
if (rb_num_negative_int_p(len_1)) {
|
|
|
|
return INT2FIX(0);
|
|
|
|
}
|
|
|
|
|
|
|
|
last = rb_int_plus(b, rb_int_mul(s, len_1));
|
|
|
|
if (x && rb_equal(last, e)) {
|
|
|
|
len = len_1;
|
|
|
|
}
|
|
|
|
else {
|
|
|
|
len = rb_int_plus(len_1, INT2FIX(1));
|
|
|
|
}
|
|
|
|
|
|
|
|
return len;
|
|
|
|
}
|
|
|
|
|
2020-10-21 07:03:07 +03:00
|
|
|
#define sym(name) ID2SYM(rb_intern_const(name))
|
2003-10-13 21:09:23 +04:00
|
|
|
void
|
2012-03-14 05:35:12 +04:00
|
|
|
InitVM_Enumerator(void)
|
2003-10-13 21:09:23 +04:00
|
|
|
{
|
2020-10-21 07:03:07 +03:00
|
|
|
ID id_private = rb_intern_const("private");
|
Fix Enumerator::Lazy#{to_enum,enum_for} where method is defined in Lazy
Previously, passing to_enum/enum_for a method that was defined in
Lazy itself returned wrong results:
[1,2,3].to_enum(:map).to_a
# => [1, 2, 3]
[1,2,3].lazy.to_enum(:map).to_a
# => []
I'm not sure why methods that are designed to be lazy do not work
with to_enum/enum_for. However, one possible way to work around
this bug is to have to_enum/enum_for use the implementation found
in Enumerable/Enumerator, which is what this commit does.
While this commit works around the problem, it is a band-aid, not a
real fix. It doesn't handle aliases of Enumerable::Lazy methods,
for instance. A better fix would be appreciated.
2019-09-02 23:22:26 +03:00
|
|
|
|
2005-07-16 18:43:34 +04:00
|
|
|
rb_define_method(rb_mKernel, "to_enum", obj_to_enum, -1);
|
|
|
|
rb_define_method(rb_mKernel, "enum_for", obj_to_enum, -1);
|
2003-10-13 21:09:23 +04:00
|
|
|
|
2008-08-13 10:25:53 +04:00
|
|
|
rb_cEnumerator = rb_define_class("Enumerator", rb_cObject);
|
2003-10-13 21:09:23 +04:00
|
|
|
rb_include_module(rb_cEnumerator, rb_mEnumerable);
|
|
|
|
|
2005-07-11 18:50:42 +04:00
|
|
|
rb_define_alloc_func(rb_cEnumerator, enumerator_allocate);
|
2003-10-13 21:09:23 +04:00
|
|
|
rb_define_method(rb_cEnumerator, "initialize", enumerator_initialize, -1);
|
2007-01-26 01:52:38 +03:00
|
|
|
rb_define_method(rb_cEnumerator, "initialize_copy", enumerator_init_copy, 1);
|
2012-03-08 19:26:01 +04:00
|
|
|
rb_define_method(rb_cEnumerator, "each", enumerator_each, -1);
|
2009-02-08 17:42:01 +03:00
|
|
|
rb_define_method(rb_cEnumerator, "each_with_index", enumerator_each_with_index, 0);
|
2008-08-26 09:45:18 +04:00
|
|
|
rb_define_method(rb_cEnumerator, "each_with_object", enumerator_with_object, 1);
|
2009-02-08 17:42:01 +03:00
|
|
|
rb_define_method(rb_cEnumerator, "with_index", enumerator_with_index, -1);
|
2008-06-16 04:49:25 +04:00
|
|
|
rb_define_method(rb_cEnumerator, "with_object", enumerator_with_object, 1);
|
2009-08-19 20:36:00 +04:00
|
|
|
rb_define_method(rb_cEnumerator, "next_values", enumerator_next_values, 0);
|
2009-08-21 19:51:35 +04:00
|
|
|
rb_define_method(rb_cEnumerator, "peek_values", enumerator_peek_values_m, 0);
|
2007-08-06 20:41:17 +04:00
|
|
|
rb_define_method(rb_cEnumerator, "next", enumerator_next, 0);
|
2009-08-18 16:02:53 +04:00
|
|
|
rb_define_method(rb_cEnumerator, "peek", enumerator_peek, 0);
|
2009-08-19 20:36:00 +04:00
|
|
|
rb_define_method(rb_cEnumerator, "feed", enumerator_feed, 1);
|
2007-08-06 20:41:17 +04:00
|
|
|
rb_define_method(rb_cEnumerator, "rewind", enumerator_rewind, 0);
|
2008-12-04 05:44:38 +03:00
|
|
|
rb_define_method(rb_cEnumerator, "inspect", enumerator_inspect, 0);
|
2012-11-06 21:10:06 +04:00
|
|
|
rb_define_method(rb_cEnumerator, "size", enumerator_size, 0);
|
2018-11-24 11:38:35 +03:00
|
|
|
rb_define_method(rb_cEnumerator, "+", enumerator_plus, 1);
|
|
|
|
rb_define_method(rb_mEnumerable, "chain", enum_chain, -1);
|
2003-10-13 21:09:23 +04:00
|
|
|
|
2012-03-17 02:05:31 +04:00
|
|
|
/* Lazy */
|
2012-03-15 12:47:09 +04:00
|
|
|
rb_cLazy = rb_define_class_under(rb_cEnumerator, "Lazy", rb_cEnumerator);
|
2012-03-08 19:35:05 +04:00
|
|
|
rb_define_method(rb_mEnumerable, "lazy", enumerable_lazy, 0);
|
2019-09-02 23:22:26 +03:00
|
|
|
|
|
|
|
rb_define_alias(rb_cLazy, "_enumerable_map", "map");
|
|
|
|
rb_define_alias(rb_cLazy, "_enumerable_collect", "collect");
|
|
|
|
rb_define_alias(rb_cLazy, "_enumerable_flat_map", "flat_map");
|
|
|
|
rb_define_alias(rb_cLazy, "_enumerable_collect_concat", "collect_concat");
|
|
|
|
rb_define_alias(rb_cLazy, "_enumerable_select", "select");
|
|
|
|
rb_define_alias(rb_cLazy, "_enumerable_find_all", "find_all");
|
|
|
|
rb_define_alias(rb_cLazy, "_enumerable_filter", "filter");
|
|
|
|
rb_define_alias(rb_cLazy, "_enumerable_filter_map", "filter_map");
|
|
|
|
rb_define_alias(rb_cLazy, "_enumerable_reject", "reject");
|
|
|
|
rb_define_alias(rb_cLazy, "_enumerable_grep", "grep");
|
|
|
|
rb_define_alias(rb_cLazy, "_enumerable_grep_v", "grep_v");
|
|
|
|
rb_define_alias(rb_cLazy, "_enumerable_zip", "zip");
|
|
|
|
rb_define_alias(rb_cLazy, "_enumerable_take", "take");
|
|
|
|
rb_define_alias(rb_cLazy, "_enumerable_take_while", "take_while");
|
|
|
|
rb_define_alias(rb_cLazy, "_enumerable_drop", "drop");
|
|
|
|
rb_define_alias(rb_cLazy, "_enumerable_drop_while", "drop_while");
|
|
|
|
rb_define_alias(rb_cLazy, "_enumerable_uniq", "uniq");
|
|
|
|
rb_define_private_method(rb_cLazy, "_enumerable_with_index", enumerator_with_index, -1);
|
|
|
|
|
2020-10-21 07:03:07 +03:00
|
|
|
rb_funcall(rb_cLazy, id_private, 1, sym("_enumerable_map"));
|
|
|
|
rb_funcall(rb_cLazy, id_private, 1, sym("_enumerable_collect"));
|
|
|
|
rb_funcall(rb_cLazy, id_private, 1, sym("_enumerable_flat_map"));
|
|
|
|
rb_funcall(rb_cLazy, id_private, 1, sym("_enumerable_collect_concat"));
|
|
|
|
rb_funcall(rb_cLazy, id_private, 1, sym("_enumerable_select"));
|
|
|
|
rb_funcall(rb_cLazy, id_private, 1, sym("_enumerable_find_all"));
|
|
|
|
rb_funcall(rb_cLazy, id_private, 1, sym("_enumerable_filter"));
|
|
|
|
rb_funcall(rb_cLazy, id_private, 1, sym("_enumerable_filter_map"));
|
|
|
|
rb_funcall(rb_cLazy, id_private, 1, sym("_enumerable_reject"));
|
|
|
|
rb_funcall(rb_cLazy, id_private, 1, sym("_enumerable_grep"));
|
|
|
|
rb_funcall(rb_cLazy, id_private, 1, sym("_enumerable_grep_v"));
|
|
|
|
rb_funcall(rb_cLazy, id_private, 1, sym("_enumerable_zip"));
|
|
|
|
rb_funcall(rb_cLazy, id_private, 1, sym("_enumerable_take"));
|
|
|
|
rb_funcall(rb_cLazy, id_private, 1, sym("_enumerable_take_while"));
|
|
|
|
rb_funcall(rb_cLazy, id_private, 1, sym("_enumerable_drop"));
|
|
|
|
rb_funcall(rb_cLazy, id_private, 1, sym("_enumerable_drop_while"));
|
|
|
|
rb_funcall(rb_cLazy, id_private, 1, sym("_enumerable_uniq"));
|
2019-09-02 23:22:26 +03:00
|
|
|
|
2012-03-15 06:00:30 +04:00
|
|
|
rb_define_method(rb_cLazy, "initialize", lazy_initialize, -1);
|
2013-02-05 07:49:41 +04:00
|
|
|
rb_define_method(rb_cLazy, "to_enum", lazy_to_enum, -1);
|
|
|
|
rb_define_method(rb_cLazy, "enum_for", lazy_to_enum, -1);
|
2019-06-05 14:39:21 +03:00
|
|
|
rb_define_method(rb_cLazy, "eager", lazy_eager, 0);
|
2012-03-08 19:30:28 +04:00
|
|
|
rb_define_method(rb_cLazy, "map", lazy_map, 0);
|
2012-03-24 19:17:31 +04:00
|
|
|
rb_define_method(rb_cLazy, "collect", lazy_map, 0);
|
2012-03-09 09:34:41 +04:00
|
|
|
rb_define_method(rb_cLazy, "flat_map", lazy_flat_map, 0);
|
2012-03-24 19:17:31 +04:00
|
|
|
rb_define_method(rb_cLazy, "collect_concat", lazy_flat_map, 0);
|
2012-03-08 19:30:28 +04:00
|
|
|
rb_define_method(rb_cLazy, "select", lazy_select, 0);
|
2012-03-24 19:17:31 +04:00
|
|
|
rb_define_method(rb_cLazy, "find_all", lazy_select, 0);
|
2018-02-25 16:52:07 +03:00
|
|
|
rb_define_method(rb_cLazy, "filter", lazy_select, 0);
|
2019-06-21 10:28:39 +03:00
|
|
|
rb_define_method(rb_cLazy, "filter_map", lazy_filter_map, 0);
|
2012-03-08 19:30:28 +04:00
|
|
|
rb_define_method(rb_cLazy, "reject", lazy_reject, 0);
|
|
|
|
rb_define_method(rb_cLazy, "grep", lazy_grep, 1);
|
2015-12-08 10:23:43 +03:00
|
|
|
rb_define_method(rb_cLazy, "grep_v", lazy_grep_v, 1);
|
2012-03-14 03:08:15 +04:00
|
|
|
rb_define_method(rb_cLazy, "zip", lazy_zip, -1);
|
2012-03-14 14:29:25 +04:00
|
|
|
rb_define_method(rb_cLazy, "take", lazy_take, 1);
|
2012-03-14 17:04:18 +04:00
|
|
|
rb_define_method(rb_cLazy, "take_while", lazy_take_while, 0);
|
|
|
|
rb_define_method(rb_cLazy, "drop", lazy_drop, 1);
|
|
|
|
rb_define_method(rb_cLazy, "drop_while", lazy_drop_while, 0);
|
2012-03-14 03:08:15 +04:00
|
|
|
rb_define_method(rb_cLazy, "lazy", lazy_lazy, 0);
|
2013-02-05 07:49:59 +04:00
|
|
|
rb_define_method(rb_cLazy, "chunk", lazy_super, -1);
|
|
|
|
rb_define_method(rb_cLazy, "slice_before", lazy_super, -1);
|
2014-05-18 04:06:05 +04:00
|
|
|
rb_define_method(rb_cLazy, "slice_after", lazy_super, -1);
|
2014-09-20 10:52:29 +04:00
|
|
|
rb_define_method(rb_cLazy, "slice_when", lazy_super, -1);
|
2016-11-05 18:46:48 +03:00
|
|
|
rb_define_method(rb_cLazy, "chunk_while", lazy_super, -1);
|
2016-07-20 11:44:08 +03:00
|
|
|
rb_define_method(rb_cLazy, "uniq", lazy_uniq, 0);
|
2020-12-05 14:39:20 +03:00
|
|
|
rb_define_method(rb_cLazy, "compact", lazy_compact, 0);
|
2019-08-08 23:09:17 +03:00
|
|
|
rb_define_method(rb_cLazy, "with_index", lazy_with_index, -1);
|
2012-03-08 19:30:28 +04:00
|
|
|
|
2019-10-22 01:31:46 +03:00
|
|
|
lazy_use_super_method = rb_hash_new_with_size(18);
|
2020-10-21 07:03:07 +03:00
|
|
|
rb_hash_aset(lazy_use_super_method, sym("map"), sym("_enumerable_map"));
|
|
|
|
rb_hash_aset(lazy_use_super_method, sym("collect"), sym("_enumerable_collect"));
|
|
|
|
rb_hash_aset(lazy_use_super_method, sym("flat_map"), sym("_enumerable_flat_map"));
|
|
|
|
rb_hash_aset(lazy_use_super_method, sym("collect_concat"), sym("_enumerable_collect_concat"));
|
|
|
|
rb_hash_aset(lazy_use_super_method, sym("select"), sym("_enumerable_select"));
|
|
|
|
rb_hash_aset(lazy_use_super_method, sym("find_all"), sym("_enumerable_find_all"));
|
|
|
|
rb_hash_aset(lazy_use_super_method, sym("filter"), sym("_enumerable_filter"));
|
|
|
|
rb_hash_aset(lazy_use_super_method, sym("filter_map"), sym("_enumerable_filter_map"));
|
|
|
|
rb_hash_aset(lazy_use_super_method, sym("reject"), sym("_enumerable_reject"));
|
|
|
|
rb_hash_aset(lazy_use_super_method, sym("grep"), sym("_enumerable_grep"));
|
|
|
|
rb_hash_aset(lazy_use_super_method, sym("grep_v"), sym("_enumerable_grep_v"));
|
|
|
|
rb_hash_aset(lazy_use_super_method, sym("zip"), sym("_enumerable_zip"));
|
|
|
|
rb_hash_aset(lazy_use_super_method, sym("take"), sym("_enumerable_take"));
|
|
|
|
rb_hash_aset(lazy_use_super_method, sym("take_while"), sym("_enumerable_take_while"));
|
|
|
|
rb_hash_aset(lazy_use_super_method, sym("drop"), sym("_enumerable_drop"));
|
|
|
|
rb_hash_aset(lazy_use_super_method, sym("drop_while"), sym("_enumerable_drop_while"));
|
|
|
|
rb_hash_aset(lazy_use_super_method, sym("uniq"), sym("_enumerable_uniq"));
|
|
|
|
rb_hash_aset(lazy_use_super_method, sym("with_index"), sym("_enumerable_with_index"));
|
2019-09-02 23:22:26 +03:00
|
|
|
rb_obj_freeze(lazy_use_super_method);
|
2024-03-03 12:46:46 +03:00
|
|
|
rb_vm_register_global_object(lazy_use_super_method);
|
2019-09-02 23:22:26 +03:00
|
|
|
|
2018-09-05 23:40:49 +03:00
|
|
|
#if 0 /* for RDoc */
|
|
|
|
rb_define_method(rb_cLazy, "to_a", lazy_to_a, 0);
|
2019-03-21 02:32:11 +03:00
|
|
|
rb_define_method(rb_cLazy, "chunk", lazy_chunk, 0);
|
|
|
|
rb_define_method(rb_cLazy, "chunk_while", lazy_chunk_while, 0);
|
|
|
|
rb_define_method(rb_cLazy, "slice_after", lazy_slice_after, 0);
|
|
|
|
rb_define_method(rb_cLazy, "slice_before", lazy_slice_before, 0);
|
|
|
|
rb_define_method(rb_cLazy, "slice_when", lazy_slice_when, 0);
|
2018-09-05 23:40:49 +03:00
|
|
|
#endif
|
2012-03-14 17:04:18 +04:00
|
|
|
rb_define_alias(rb_cLazy, "force", "to_a");
|
2012-03-08 19:30:28 +04:00
|
|
|
|
2008-08-26 09:42:12 +04:00
|
|
|
rb_eStopIteration = rb_define_class("StopIteration", rb_eIndexError);
|
2009-08-19 20:36:00 +04:00
|
|
|
rb_define_method(rb_eStopIteration, "result", stop_result, 0);
|
2008-08-26 09:42:12 +04:00
|
|
|
|
|
|
|
/* Generator */
|
|
|
|
rb_cGenerator = rb_define_class_under(rb_cEnumerator, "Generator", rb_cObject);
|
|
|
|
rb_include_module(rb_cGenerator, rb_mEnumerable);
|
|
|
|
rb_define_alloc_func(rb_cGenerator, generator_allocate);
|
|
|
|
rb_define_method(rb_cGenerator, "initialize", generator_initialize, -1);
|
|
|
|
rb_define_method(rb_cGenerator, "initialize_copy", generator_init_copy, 1);
|
2012-03-08 19:26:01 +04:00
|
|
|
rb_define_method(rb_cGenerator, "each", generator_each, -1);
|
2008-08-26 09:42:12 +04:00
|
|
|
|
|
|
|
/* Yielder */
|
|
|
|
rb_cYielder = rb_define_class_under(rb_cEnumerator, "Yielder", rb_cObject);
|
|
|
|
rb_define_alloc_func(rb_cYielder, yielder_allocate);
|
|
|
|
rb_define_method(rb_cYielder, "initialize", yielder_initialize, 0);
|
|
|
|
rb_define_method(rb_cYielder, "yield", yielder_yield, -2);
|
2018-09-18 11:49:40 +03:00
|
|
|
rb_define_method(rb_cYielder, "<<", yielder_yield_push, 1);
|
2019-03-11 12:49:14 +03:00
|
|
|
rb_define_method(rb_cYielder, "to_proc", yielder_to_proc, 0);
|
2007-08-08 11:07:03 +04:00
|
|
|
|
2019-08-29 14:05:10 +03:00
|
|
|
/* Producer */
|
|
|
|
rb_cEnumProducer = rb_define_class_under(rb_cEnumerator, "Producer", rb_cObject);
|
|
|
|
rb_define_alloc_func(rb_cEnumProducer, producer_allocate);
|
|
|
|
rb_define_method(rb_cEnumProducer, "each", producer_each, 0);
|
|
|
|
rb_define_singleton_method(rb_cEnumerator, "produce", enumerator_s_produce, -1);
|
|
|
|
|
2018-11-24 11:38:35 +03:00
|
|
|
/* Chain */
|
|
|
|
rb_cEnumChain = rb_define_class_under(rb_cEnumerator, "Chain", rb_cEnumerator);
|
|
|
|
rb_define_alloc_func(rb_cEnumChain, enum_chain_allocate);
|
|
|
|
rb_define_method(rb_cEnumChain, "initialize", enum_chain_initialize, -2);
|
|
|
|
rb_define_method(rb_cEnumChain, "initialize_copy", enum_chain_init_copy, 1);
|
|
|
|
rb_define_method(rb_cEnumChain, "each", enum_chain_each, -1);
|
|
|
|
rb_define_method(rb_cEnumChain, "size", enum_chain_size, 0);
|
|
|
|
rb_define_method(rb_cEnumChain, "rewind", enum_chain_rewind, 0);
|
|
|
|
rb_define_method(rb_cEnumChain, "inspect", enum_chain_inspect, 0);
|
2021-03-05 23:47:00 +03:00
|
|
|
rb_undef_method(rb_cEnumChain, "feed");
|
|
|
|
rb_undef_method(rb_cEnumChain, "next");
|
|
|
|
rb_undef_method(rb_cEnumChain, "next_values");
|
|
|
|
rb_undef_method(rb_cEnumChain, "peek");
|
|
|
|
rb_undef_method(rb_cEnumChain, "peek_values");
|
Implement Enumerator#+ and Enumerable#chain [Feature #15144]
They return an Enumerator::Chain object which is a subclass of
Enumerator, which represents a chain of enumerables that works as a
single enumerator.
```ruby
e = (1..3).chain([4, 5])
e.to_a #=> [1, 2, 3, 4, 5]
e = (1..3).each + [4, 5]
e.to_a #=> [1, 2, 3, 4, 5]
```
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@65949 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2018-11-24 11:38:35 +03:00
|
|
|
|
2022-07-29 07:56:54 +03:00
|
|
|
/* Product */
|
|
|
|
rb_cEnumProduct = rb_define_class_under(rb_cEnumerator, "Product", rb_cEnumerator);
|
|
|
|
rb_define_alloc_func(rb_cEnumProduct, enum_product_allocate);
|
2022-12-16 07:32:13 +03:00
|
|
|
rb_define_method(rb_cEnumProduct, "initialize", enum_product_initialize, -1);
|
2022-07-29 07:56:54 +03:00
|
|
|
rb_define_method(rb_cEnumProduct, "initialize_copy", enum_product_init_copy, 1);
|
|
|
|
rb_define_method(rb_cEnumProduct, "each", enum_product_each, 0);
|
|
|
|
rb_define_method(rb_cEnumProduct, "size", enum_product_size, 0);
|
|
|
|
rb_define_method(rb_cEnumProduct, "rewind", enum_product_rewind, 0);
|
|
|
|
rb_define_method(rb_cEnumProduct, "inspect", enum_product_inspect, 0);
|
|
|
|
rb_undef_method(rb_cEnumProduct, "feed");
|
|
|
|
rb_undef_method(rb_cEnumProduct, "next");
|
|
|
|
rb_undef_method(rb_cEnumProduct, "next_values");
|
|
|
|
rb_undef_method(rb_cEnumProduct, "peek");
|
|
|
|
rb_undef_method(rb_cEnumProduct, "peek_values");
|
2022-12-16 07:32:13 +03:00
|
|
|
rb_define_singleton_method(rb_cEnumerator, "product", enumerator_s_product, -1);
|
2022-07-29 07:56:54 +03:00
|
|
|
|
2018-08-06 12:08:28 +03:00
|
|
|
/* ArithmeticSequence */
|
|
|
|
rb_cArithSeq = rb_define_class_under(rb_cEnumerator, "ArithmeticSequence", rb_cEnumerator);
|
2018-08-09 16:18:08 +03:00
|
|
|
rb_undef_alloc_func(rb_cArithSeq);
|
|
|
|
rb_undef_method(CLASS_OF(rb_cArithSeq), "new");
|
2018-08-06 12:08:28 +03:00
|
|
|
rb_define_method(rb_cArithSeq, "begin", arith_seq_begin, 0);
|
|
|
|
rb_define_method(rb_cArithSeq, "end", arith_seq_end, 0);
|
|
|
|
rb_define_method(rb_cArithSeq, "exclude_end?", arith_seq_exclude_end, 0);
|
|
|
|
rb_define_method(rb_cArithSeq, "step", arith_seq_step, 0);
|
|
|
|
rb_define_method(rb_cArithSeq, "first", arith_seq_first, -1);
|
|
|
|
rb_define_method(rb_cArithSeq, "last", arith_seq_last, -1);
|
|
|
|
rb_define_method(rb_cArithSeq, "inspect", arith_seq_inspect, 0);
|
|
|
|
rb_define_method(rb_cArithSeq, "==", arith_seq_eq, 1);
|
|
|
|
rb_define_method(rb_cArithSeq, "===", arith_seq_eq, 1);
|
|
|
|
rb_define_method(rb_cArithSeq, "eql?", arith_seq_eq, 1);
|
|
|
|
rb_define_method(rb_cArithSeq, "hash", arith_seq_hash, 0);
|
|
|
|
rb_define_method(rb_cArithSeq, "each", arith_seq_each, 0);
|
|
|
|
rb_define_method(rb_cArithSeq, "size", arith_seq_size, 0);
|
|
|
|
|
2012-03-14 05:35:12 +04:00
|
|
|
rb_provide("enumerator.so"); /* for backward compatibility */
|
|
|
|
}
|
2020-10-21 07:03:07 +03:00
|
|
|
#undef sym
|
2012-03-14 05:35:12 +04:00
|
|
|
|
|
|
|
void
|
|
|
|
Init_Enumerator(void)
|
|
|
|
{
|
2020-10-21 07:03:07 +03:00
|
|
|
id_rewind = rb_intern_const("rewind");
|
|
|
|
id_new = rb_intern_const("new");
|
|
|
|
id_next = rb_intern_const("next");
|
|
|
|
id_result = rb_intern_const("result");
|
|
|
|
id_receiver = rb_intern_const("receiver");
|
|
|
|
id_arguments = rb_intern_const("arguments");
|
|
|
|
id_memo = rb_intern_const("memo");
|
|
|
|
id_method = rb_intern_const("method");
|
|
|
|
id_force = rb_intern_const("force");
|
|
|
|
id_to_enum = rb_intern_const("to_enum");
|
2022-07-29 07:56:54 +03:00
|
|
|
id_each_entry = rb_intern_const("each_entry");
|
2020-10-21 07:03:07 +03:00
|
|
|
id_begin = rb_intern_const("begin");
|
|
|
|
id_end = rb_intern_const("end");
|
|
|
|
id_step = rb_intern_const("step");
|
|
|
|
id_exclude_end = rb_intern_const("exclude_end");
|
2019-05-15 06:41:31 +03:00
|
|
|
sym_each = ID2SYM(id_each);
|
2020-10-21 07:03:07 +03:00
|
|
|
sym_cycle = ID2SYM(rb_intern_const("cycle"));
|
|
|
|
sym_yield = ID2SYM(rb_intern_const("yield"));
|
2005-07-14 19:15:22 +04:00
|
|
|
|
2012-03-14 05:35:12 +04:00
|
|
|
InitVM(Enumerator);
|
2003-10-13 21:09:23 +04:00
|
|
|
}
|