.\" Automatically generated by Pod::Man 4.14 (Pod::Simple 3.43) .\" .\" Standard preamble: .\" ======================================================================== .de Sp \" Vertical space (when we can't use .PP) .if t .sp .5v .if n .sp .. .de Vb \" Begin verbatim text .ft CW .nf .ne \\$1 .. .de Ve \" End verbatim text .ft R .fi .. .\" Set up some character translations and predefined strings. \*(-- will .\" give an unbreakable dash, \*(PI will give pi, \*(L" will give a left .\" double quote, and \*(R" will give a right double quote. \*(C+ will .\" give a nicer C++. Capital omega is used to do unbreakable dashes and .\" therefore won't be available. \*(C` and \*(C' expand to `' in nroff, .\" nothing in troff, for use with C<>. .tr \(*W- .ds C+ C\v'-.1v'\h'-1p'\s-2+\h'-1p'+\s0\v'.1v'\h'-1p' .ie n \{\ . ds -- \(*W- . ds PI pi . if (\n(.H=4u)&(1m=24u) .ds -- \(*W\h'-12u'\(*W\h'-12u'-\" diablo 10 pitch . if (\n(.H=4u)&(1m=20u) .ds -- \(*W\h'-12u'\(*W\h'-8u'-\" diablo 12 pitch . ds L" "" . ds R" "" . ds C` "" . ds C' "" 'br\} .el\{\ . ds -- \|\(em\| . ds PI \(*p . ds L" `` . ds R" '' . ds C` . ds C' 'br\} .\" .\" Escape single quotes in literal strings from groff's Unicode transform. .ie \n(.g .ds Aq \(aq .el .ds Aq ' .\" .\" If the F register is >0, we'll generate index entries on stderr for .\" titles (.TH), headers (.SH), subsections (.SS), items (.Ip), and index .\" entries marked with X<> in POD. Of course, you'll have to process the .\" output yourself in some meaningful fashion. .\" .\" Avoid warning from groff about undefined register 'F'. .de IX .. .nr rF 0 .if \n(.g .if rF .nr rF 1 .if (\n(rF:(\n(.g==0)) \{\ . if \nF \{\ . de IX . tm Index:\\$1\t\\n%\t"\\$2" .. . if !\nF==2 \{\ . nr % 0 . nr F 2 . \} . \} .\} .rr rF .\" ======================================================================== .\" .IX Title "MCE::Core 3pm" .TH MCE::Core 3pm "2023-09-29" "perl v5.36.0" "User Contributed Perl Documentation" .\" For nroff, turn off justification. Always turn off hyphenation; it makes .\" way too many mistakes in technical documents. .if n .ad l .nh .SH "NAME" MCE::Core \- Documentation describing the core MCE API .SH "VERSION" .IX Header "VERSION" This document describes MCE::Core version 1.889 .SH "SYNOPSIS" .IX Header "SYNOPSIS" This is a simplistic use case of \s-1MCE\s0 running with 5 workers. .PP .Vb 1 \& # Construction using the Core API \& \& use MCE; \& \& my $mce = MCE\->new( \& max_workers => 5, \& user_func => sub { \& my ($mce) = @_; \& $mce\->say("Hello from " . $mce\->wid); \& } \& ); \& \& $mce\->run; \& \& # Construction using a MCE model \& \& use MCE::Flow max_workers => 5; \& \& mce_flow sub { \& my ($mce) = @_; \& MCE\->say("Hello from " . MCE\->wid); \& }; \& \& \-\- Output \& \& Hello from 2 \& Hello from 4 \& Hello from 5 \& Hello from 1 \& Hello from 3 .Ve .SS "\s-1MCE\-\s0>new ( [ options ] )" .IX Subsection "MCE->new ( [ options ] )" Below, a new instance is configured with all available options. .PP .Vb 1 \& use MCE; \& \& my $mce = MCE\->new( \& \& max_workers => 8, # Default 1 \& \& # Number of workers to spawn. \& \& # MCE sets an upper\-limit of 8 for \*(Aqauto\*(Aq. MCE 1.521+. \& # max_workers => \*(Aqauto\*(Aq, # # of lcores, 8 maximum \& # max_workers => \*(Aqauto\-1\*(Aq, # 7 on HW with 16 lcores \& # max_workers => \*(Aqauto\-1\*(Aq, # 3 on HW with 4 lcores \& \& # Specify a percentage. MCE 1.875+. 
\& # max_workers => \*(Aq25%\*(Aq, # 4 on HW with 16 lcores \& # max_workers => \*(Aq50%\*(Aq, # 8 on HW with 16 lcores \& \& # Run on all logical cores. \& # max_workers => MCE::Util::get_ncpu(), \& \& chunk_size => 2000, # Default 1 \& \& # Can also take a suffix; k (kibiBytes) or m (mebiBytes). \& # The default is 1 when using the Core API and \*(Aqauto\*(Aq for \& # MCE Models. For arrays or queues, chunk_size means the \& # number of records per chunk. For iterators, MCE will not \& # use chunk_size, though the iterator may use it to determine \& # how much to return per iteration. For files, smaller than or \& # equal to 8192 is the number of records. Greater than 8192 \& # is the number of bytes. MCE reads until the end of record \& # before calling user_func. \& \& # chunk_size => 1, # Consists of 1 record \& # chunk_size => 1000, # Consists of 1000 records \& # chunk_size => \*(Aq16k\*(Aq, # Approximate 16 kibiBytes (KiB) \& # chunk_size => \*(Aq20m\*(Aq, # Approximate 20 mebiBytes (MiB) \& \& tmp_dir => $tmp_dir, # Default $MCE::Signal::tmp_dir \& \& # Default is $MCE::Signal::tmp_dir which points to \& # $ENV{TEMP} if defined. Otherwise, tmp_dir points \& # to a location under /tmp. \& \& freeze => \e&encode_sereal, # Default \e&Storable::freeze \& thaw => \e&decode_sereal, # Default \e&Storable::thaw \& \& # Release 1.412 allows freeze and thaw to be overridden. \& # Simply include a serialization module prior to loading \& # MCE. Configure freeze/thaw options. \& \& # use Sereal qw( encode_sereal decode_sereal ); \& # use CBOR::XS qw( encode_cbor decode_cbor ); \& # use JSON::XS qw( encode_json decode_json ); \& # \& # use MCE; \& \& gather => \e@a, # Default undef \& \& # Release 1.5 allows for gathering of data to an array or \& # hash reference, a MCE::Queue/Thread::Queue object, or code \& # reference. One invokes gathering by calling the gather \& # method as often as needed. \& \& # gather => \e@array, \& # gather => \e%hash, \& # gather => $queue, \& # gather => \e&order, \& \& init_relay => 0, # Default undef \& \& # For specifying the initial relay value. Allowed values \& # are array_ref, hash_ref, or scalar. The MCE::Relay module \& # is loaded automatically when specified. \& \& # init_relay => \e@array, \& # init_relay => \e%hash, \& # init_relay => scalar, \& \& input_data => $input_file, # Default undef \& RS => "\en>", # Default undef \& \& # input_data => \*(Aq/path/to/file\*(Aq # Process file \& # input_data => \e@array # Process array \& # input_data => \e*FILE_HNDL # Process file handle \& # input_data => $io # Process IO::All { File, Pipe, STDIO } \& # input_data => \e$scalar # Treated like a file \& # input_data => \e&iterator # User specified iterator \& \& # The RS option (for input record separator) applies to files \& # and file handles. \& \& # MCE applies additional logic when RS begins with a newline \& # character; e.g. RS => "\en>". It trims away characters after \& # the newline and prepends them to the next record. \& # \& # Typically, the left side is what happens for $/ = "\en>". \& # The right side is what user_func receives. \& # \& # All records begin with > and end with \en \& # Record 1: >seq1 ... \en> (to) >seq1 ... \en \& # Record 2: seq2 ... \en> >seq2 ... \en \& # Record 3: seq3 ... \en> >seq3 ... \en \& # Last Rec: seqN ... \en >seqN ... \en \& \& loop_timeout => 20, # Default 0 \& \& # Added in 1.7, enables the manager process to timeout of a read \& # operation on channel 0 (UNIX platforms only). 
\& # The manager process decrements the total workers running for any
\& # worker which has died in an uncontrollable manner. Specify this
\& # option if on occasion a worker dies unexpectedly (i.e. from an
\& # XS module).
\&
\& # Option works with init_relay on UNIX platforms since MCE 1.844.
\& # A number smaller than 5 is silently increased to 5.
\&
\& max_retries => 2, # Default 0
\&
\& # This option, added in 1.7, causes MCE to retry a failed
\& # chunk from a worker dying while processing input data or
\& # sequence of numbers.
\&
\& parallel_io => 1, # Default 0
\& posix_exit => 1, # Default 0
\& use_slurpio => 1, # Default 0
\&
\& # The parallel_io option enables parallel reads during large
\& # slurpio, useful when reading from fast storage. Do not enable
\& # parallel_io when running MCE on many nodes with input coming
\& # from shared storage.
\&
\& # Set posix_exit to avoid all END and destructor processing.
\& # Constructing MCE inside a thread implies 1 or if present CGI,
\& # FCGI, Coro, Curses, Gearman::Util, Gearman::XS, LWP::UserAgent,
\& # Mojo::IOLoop, STFL, Tk, Wx, or Win32::GUI.
\&
\& # Enable slurpio to pass the raw chunk (scalar ref) to the user
\& # function when reading input files.
\&
\& use_threads => 1, # Auto 0 or 1
\&
\& # By default MCE spawns child processes on UNIX platforms and
\& # threads on Windows (i.e. $^O eq \*(AqMSWin32\*(Aq).
\&
\& # MCE supports threads via two threading libraries if threads
\& # is preferred over child processes. The use of threads requires
\& # a thread library prior to loading MCE, causing the use_threads
\& # option to default to 1. Specify 0 for child processes.
\& #
\& # use threads; use forks;
\& # use threads::shared; use forks::shared;
\& # use MCE (or) use MCE; (or) use MCE;
\&
\& spawn_delay => 0.045, # Default undef
\& submit_delay => 0.015, # Default undef
\& job_delay => 0.060, # Default undef
\&
\& # Time to wait in fractional seconds after spawning a worker,
\& # after submitting parameters to worker (MCE\->run, MCE\->process),
\& # and worker running (one time staggered delay).
\&
\& # Specify job_delay to stagger workers connecting to a database.
\&
\& on_post_exit => \e&on_post_exit, # Default undef
\& on_post_run => \e&on_post_run, # Default undef
\&
\& # Execute the code block after a worker exits or dies.
\& # (i.e. MCE\->exit, exit, die)
\&
\& # Execute the code block after running.
\& # (i.e. MCE\->process, MCE\->run)
\&
\& progress => sub { ... }, # Default undef
\&
\& # A code block for receiving info on the progress made.
\& # See section labeled "MCE PROGRESS DEMONSTRATIONS" at the
\& # end of this document.
\&
\& user_args => { env => \*(Aqtest\*(Aq }, # Default undef
\&
\& # MCE release 1.4 added a new parameter to allow one to
\& # specify arbitrary arguments such as a string, an ARRAY
\& # or HASH reference. Workers can access this directly.
\& # (i.e. my $args = $mce\->{user_args} or MCE\->user_args)
\&
\& user_begin => \e&user_begin, # Default undef
\& user_func => \e&user_func, # Default undef
\& user_end => \e&user_end, # Default undef
\&
\& # Think of user_begin, user_func, and user_end as in
\& # the awk scripting language:
\& # awk \*(AqBEGIN { begin } { func } { func } ... END { end }\*(Aq
\&
\& # MCE workers call user_begin once at the start of a job,
\& # then user_func repeatedly until no chunks remain.
\& # Afterwards, user_end is called.
\& \& user_error => \e&user_error, # Default undef \& user_output => \e&user_output, # Default undef \& \& # MCE will forward data to user_error/user_output, \& # when defined, for the following methods. \& \& # MCE\->sendto(\e*STDERR, "sent to user_error\en"); \& # MCE\->printf(\e*STDERR, "%s\en", "sent to user_error"); \& # MCE\->print(\e*STDERR, "sent to user_error\en"); \& # MCE\->say(\e*STDERR, "sent to user_error"); \& \& # MCE\->sendto(\e*STDOUT, "sent to user_output\en"); \& # MCE\->printf("%s\en", "sent to user_output"); \& # MCE\->print("sent to user_output\en"); \& # MCE\->say("sent to user_output"); \& \& stderr_file => \*(Aqerr_file\*(Aq, # Default STDERR \& stdout_file => \*(Aqout_file\*(Aq, # Default STDOUT \& \& # Or to file; user_error and user_output take precedence. \& \& flush_file => 0, # Default 1 \& flush_stderr => 0, # Default 1 \& flush_stdout => 0, # Default 1 \& \& # Flush sendto file, standard error, or standard output. \& \& interval => { \& delay => 0.007 [, max_nodes => 4, node_id => 1 ] \& }, \& \& # For use with the yield method introduced in MCE 1.5. \& # Both max_nodes & node_id are optional and default to 1. \& # Delay is the amount of time between intervals. \& \& # interval => 0.007 # Shorter; MCE 1.506+ \& \& sequence => { # Default undef \& begin => \-1, end => 1 [, step => 0.1 [, format => "%4.1f" ] ] \& }, \& \& bounds_only => 1, # Default undef \& \& # For looping through a sequence of numbers in parallel. \& # STEP, if omitted, defaults to 1 if BEGIN is smaller than \& # END or \-1 if BEGIN is greater than END. The FORMAT string \& # is passed to sprintf behind the scene (% may be omitted). \& # e.g. $seq_n_formatted = sprintf("%4.1f", $seq_n); \& \& # Do not specify both options; input_data and sequence. \& # Release 1.4 allows one to specify an array reference. \& # e.g. sequence => [ \-1, 1, 0.1, "%4.1f" ] \& \& # The bounds_only => 1 option will compute the \*(Aqbegin\*(Aq and \& # \*(Aqend\*(Aq items only for the chunk and not the items in between \& # (hence boundaries only). This option has no effect when \& # sequence is not specified or chunk_size equals 1. \& \& # my $begin = $chunk_ref\->[0]; my $end = $chunk_ref\->[1]; \& \& task_end => \e&task_end, # Default undef \& \& # This is called by the manager process after the task \& # has completed processing. MCE 1.5 allows this option \& # to be specified at the top level. \& \& task_name => \*(Aqstring\*(Aq, # Default \*(AqMCE\*(Aq \& \& # Added in MCE 1.5 and mainly beneficial for user_tasks. \& # One may specify a unique name per each sub\-task. \& # The string is passed as the 3rd arg to task_end. \& \& user_tasks => [ # Default undef \& { ... }, # Options for task 0 \& { ... }, # Options for task 1 \& { ... }, # Options for task 2 \& ], \& \& # Takes a list of hash references, each allowing up to 17 \& # options. All other MCE options are ignored. The init_relay, \& # input_data, RS, and use_slurpio options are applicable to \& # the first task only. \& \& # max_workers, chunk_size, input_data, interval, sequence, \& # bounds_only, user_args, user_begin, user_end, user_func, \& # gather, task_end, task_name, use_slurpio, use_threads, \& # init_relay, RS \& \& # Options not specified here will default to same option \& # specified at the top level. \& ); .Ve .SS "\s-1EXPORT_CONST, CONST\s0" .IX Subsection "EXPORT_CONST, CONST" There are 3 constants which are exportable. Using the constants in lieu of 0,1,2 makes it more legible when accessing the user_func arguments directly. 
.PP \fI\s-1SELF CHUNK CID\s0 \- \s-1MCE CONSTANTS\s0\fR .IX Subsection "SELF CHUNK CID - MCE CONSTANTS" .PP Exports \s-1SELF\s0 => 0, \s-1CHUNK\s0 => 1, and \s-1CID\s0 => 2. .PP .Vb 2 \& use MCE export_const => 1; \& use MCE const => 1; # Shorter; MCE 1.415+ \& \& user_func => sub { \& # my ($mce, $chunk_ref, $chunk_id) = @_; \& print "Hello from ", $_[SELF]\->wid, "\en"; \& } .Ve .PP \&\s-1MCE 1.5\s0 allows all public method to be called directly. .PP .Vb 1 \& use MCE; \& \& user_func => sub { \& # my ($mce, $chunk_ref, $chunk_id) = @_; \& print "Hello from ", MCE\->wid, "\en"; \& } .Ve .SS "\s-1OVERRIDING DEFAULTS\s0" .IX Subsection "OVERRIDING DEFAULTS" The following list options which may be overridden when loading the module. .PP .Vb 3 \& use Sereal qw( encode_sereal decode_sereal ); \& use CBOR::XS qw( encode_cbor decode_cbor ); \& use JSON::XS qw( encode_json decode_json ); \& \& use MCE \& max_workers => 4, # Default 1 \& chunk_size => 100, # Default 1 \& tmp_dir => "/path/to/app/tmp", # $MCE::Signal::tmp_dir \& freeze => \e&encode_sereal, # \e&Storable::freeze \& thaw => \e&decode_sereal, # \e&Storable::thaw \& init_relay => 0, # Default undef; MCE 1.882+ \& use_threads => 0, # Default undef; MCE 1.882+ \& ; \& \& my $mce = MCE\->new( ... ); .Ve .PP From \s-1MCE 1.8\s0 onwards, Sereal 3.015+ is loaded automatically if available. Specify \f(CW\*(C`Sereal => 0\*(C'\fR to use Storable instead. .PP .Vb 1 \& use MCE Sereal => 0; .Ve .SS "\s-1RUNNING\s0" .IX Subsection "RUNNING" Run calls spawn, submits the job; workers call user_begin, user_func, and user_end. Run shuts down workers afterwards. Call spawn whenever the need arises for large data structures prior to running. .PP .Vb 1 \& $mce\->spawn; # Call early if desired \& \& $mce\->run; # Call run or process below \& \& # Acquire data arrays and/or input_files. Workers persist after \& # processing. \& \& $mce\->process(\e@input_data_1); # Process array \& $mce\->process(\e@input_data_2); \& $mce\->process(\e@input_data_n); \& \& $mce\->process(\e%input_hash_1); # Process hash, current API \& $mce\->process(\e%input_hash_2); # available since 1.828 \& $mce\->process(\e%input_hash_n); \& \& $mce\->process(\*(Aqinput_file_1\*(Aq); # Process file \& $mce\->process(\*(Aqinput_file_2\*(Aq); \& $mce\->process(\*(Aqinput_file_n\*(Aq); \& \& $mce\->shutdown; # Shutdown workers .Ve .SS "\s-1SYNTAX\s0 for \s-1ON_POST_EXIT\s0" .IX Subsection "SYNTAX for ON_POST_EXIT" Often times, one may want to capture the exit status. The on_post_exit option, if defined, is executed immediately by the manager process after a worker exits via exit (children only), \s-1MCE\-\s0>exit (children and threads), or die. .PP The format of \f(CW$e\fR\->{pid} is \s-1PID_123\s0 for children and \s-1THR_123\s0 for threads. .PP .Vb 1 \& my $restart_flag = 1; \& \& sub on_post_exit { \& my ($mce, $e) = @_; \& \& # Display all possible hash elements. \& print "$e\->{wid}: $e\->{pid}: $e\->{status}: $e\->{msg}: $e\->{id}\en"; \& \& # Restart this worker if desired. 
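\& # The $restart_flag guard ensures worker 2 is restarted only once.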
\& if ($restart_flag && $e\->{wid} == 2) { \& $mce\->restart_worker; \& $restart_flag = 0; \& } \& } \& \& sub user_func { \& my ($mce) = @_; \& MCE\->exit(0, \*(Aqmsg_foo\*(Aq, 1000 + MCE\->wid); # Args, not necessary \& } \& \& my $mce = MCE\->new( \& on_post_exit => \e&on_post_exit, \& user_func => \e&user_func, \& max_workers => 3 \& ); \& \& $mce\->run; \& \& \-\- Output (child processes) \& \& 2: PID_33223: 0: msg_foo: 1002 \& 1: PID_33222: 0: msg_foo: 1001 \& 3: PID_33224: 0: msg_foo: 1003 \& 2: PID_33225: 0: msg_foo: 1002 \& \& \-\- Output (running with threads) \& \& 3: TID_3: 0: msg_foo: 1003 \& 2: TID_2: 0: msg_foo: 1002 \& 1: TID_1: 0: msg_foo: 1001 \& 2: TID_4: 0: msg_foo: 1002 .Ve .SS "\s-1SYNTAX\s0 for \s-1ON_POST_RUN\s0" .IX Subsection "SYNTAX for ON_POST_RUN" The on_post_run option, if defined, is executed immediately by the manager process after running \s-1MCE\-\s0>process or \s-1MCE\-\s0>run. This option receives an array reference of hashes. .PP The difference between on_post_exit and on_post_run is that the former is called immediately whereas the latter is called after all workers have completed running. .PP .Vb 7 \& sub on_post_run { \& my ($mce, $status_ref) = @_; \& foreach my $e ( @{ $status_ref } ) { \& # Display all possible hash elements. \& print "$e\->{wid}: $e\->{pid}: $e\->{status}: $e\->{msg}: $e\->{id}\en"; \& } \& } \& \& sub user_func { \& my ($mce) = @_; \& MCE\->exit(0, \*(Aqmsg_foo\*(Aq, 1000 + MCE\->wid); # Args, not necessary \& } \& \& my $mce = MCE\->new( \& on_post_run => \e&on_post_run, \& user_func => \e&user_func, \& max_workers => 3 \& ); \& \& $mce\->run; \& \& \-\- Output (child processes) \& \& 3: PID_33174: 0: msg_foo: 1003 \& 1: PID_33172: 0: msg_foo: 1001 \& 2: PID_33173: 0: msg_foo: 1002 \& \& \-\- Output (running with threads) \& \& 2: TID_2: 0: msg_foo: 1002 \& 3: TID_3: 0: msg_foo: 1003 \& 1: TID_1: 0: msg_foo: 1001 .Ve .SS "\s-1SYNTAX\s0 for \s-1INPUT_DATA\s0" .IX Subsection "SYNTAX for INPUT_DATA" \&\s-1MCE\s0 supports many ways to specify input_data. Support for iterators was added in \s-1MCE 1.505.\s0 The \s-1RS\s0 option allows one to specify the record separator when processing files. .PP \&\s-1MCE\s0 is a chunking engine. Therefore, chunk_size is applicable to input_data. Specifying 1 for use_slurpio causes user_func to receive a scalar reference containing the raw data (applicable to files only) instead of an array reference. .PP \&\f(CW\*(C`IO::All\*(C'\fR { File, Pipe, \s-1STDIO\s0 } is supported since \s-1MCE 1.845.\s0 .PP .Vb 10 \& input_data => \*(Aq/path/to/file\*(Aq, # process file \& input_data => \e@array, # process array \& input_data => \e%hash, # process hash, API since 1.828 \& input_data => \e*FILE_HNDL, # process file handle \& input_data => $fh, # open $fh, "<", "file" \& input_data => $fh, # IO::File "file", "r" \& input_data => $fh, # IO::Uncompress::Gunzip "file.gz" \& input_data => $io, # IO::All { File, Pipe, STDIO } \& input_data => \e$scalar, # treated like a file \& input_data => \e&iterator, # user specified iterator \& \& chunk_size => 1, # >1 means looping inside user_func \& use_slurpio => 1, # $chunk_ref is a scalar ref \& RS => "\en>", # input record separator .Ve .PP The chunk_size value determines the chunking mode to use when processing files. Otherwise, chunk_size is the number of elements for arrays. For files, a chunk size value of <= 8192 is how many records to read. Greater than 8192 is how many bytes to read. \s-1MCE\s0 appends (the rest) up to the next record separator. 
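.PP
For instance, the minimal sketch below (the input path is hypothetical)
counts lines in parallel by combining a byte-sized chunk_size with
use_slurpio. Each worker receives a scalar reference holding one raw
chunk that ends on a record boundary, and the per-chunk counts are
combined on the manager side via a gather code reference.
.PP
.Vb 1
\& use MCE;
\&
\& my $total = 0;
\&
\& my $mce = MCE\->new(
\&     max_workers => 4,
\&     chunk_size  => \*(Aq4m\*(Aq,    # greater than 8192, therefore bytes
\&     use_slurpio => 1,         # raw chunk passed as a scalar ref
\&     input_data  => \*(Aq/path/to/big_file.log\*(Aq,
\&     gather      => sub { $total += $_[0] },
\&
\&     user_func => sub {
\&         my ($mce, $chunk_ref, $chunk_id) = @_;
\&         my $lines = () = $$chunk_ref =~ /\en/g;
\&         MCE\->gather($lines);
\&     },
\& );
\&
\& $mce\->run;
\&
\& print "total lines: $total\en";
.Ve
.PP
Typical chunk_size settings: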
.PP .Vb 2 \& chunk_size => 8192, # Consists of 8192 records \& chunk_size => 8193, # Approximate 8193 bytes for files \& \& chunk_size => 1, # Consists of 1 record or element \& chunk_size => 1000, # Consists of 1000 records \& chunk_size => \*(Aq16k\*(Aq, # Approximate 16 kibiBytes (KiB) \& chunk_size => \*(Aq20m\*(Aq, # Approximate 20 mebiBytes (MiB) .Ve .PP The construction for user_func when chunk_size > 1 and assuming use_slurpio equals 0. .PP .Vb 2 \& user_func => sub { \& my ($mce, $chunk_ref, $chunk_id) = @_; \& \& # $_ is $chunk_ref\->[0] when chunk_size equals 1 \& # $_ is $chunk_ref otherwise; $_ can be used below \& \& for my $record ( @{ $chunk_ref } ) { \& print "$chunk_id: $record\en"; \& } \& } \& \& # input_data => \e%hash \& # current API available since 1.828 \& \& user_func => sub { \& my ($mce, $chunk_ref, $chunk_id) = @_; \& \& # $_ points to $chunk_ref regardless of chunk_size \& \& for my $key ( keys %{ $chunk_ref } ) { \& print "$key: ", $chunk_ref\->{$key}, "\en"; \& } \& } .Ve .PP Specifying a value for input_data is straight forward for arrays and files. The next several examples specify an iterator reference for input_data. .PP .Vb 1 \& use MCE; \& \& # A factory function which creates a closure (the iterator itself) \& # for generating a sequence of numbers. The external variables \& # ($n, $max, $step) are used for keeping state across successive \& # calls to the closure. The iterator simply returns when $n > max. \& \& sub input_iterator { \& my ($n, $max, $step) = @_; \& \& return sub { \& return if $n > $max; \& \& my $current = $n; \& $n += $step; \& \& return $current; \& }; \& } \& \& # Run user_func in parallel. Input data can be specified during \& # the construction or as an argument to the process method. \& \& my $mce = MCE\->new( \& \& # input_data => input_iterator(10, 30, 2), \& chunk_size => 1, max_workers => 4, \& \& user_func => sub { \& my ($mce, $chunk_ref, $chunk_id) = @_; \& MCE\->print("$_: ", $_ * 2, "\en"); \& } \& \& )\->spawn; \& \& $mce\->process( input_iterator(10, 30, 2) ); \& \& \-\- Output Note that output order is not guaranteed \& Take a look at iterator.pl for ordered output \& \& 10: 20 \& 12: 24 \& 16: 32 \& 20: 40 \& 14: 28 \& 22: 44 \& 18: 36 \& 24: 48 \& 26: 52 \& 28: 56 \& 30: 60 .Ve .PP The following example queries the \s-1DB\s0 for the next 1000 rows. Notice the use of fetchall_arrayref. The iterator function itself receives one argument which is chunk_size (added in \s-1MCE 1.510\s0) to determine how much to return per iteration. The default is 1 for the Core \s-1API\s0 and \s-1MCE\s0 Models. .PP .Vb 2 \& use DBI; \& use MCE; \& \& sub db_iter { \& \& my $dsn = "DBI:Oracle:host=db_server;port=db_port;sid=db_name"; \& \& my $dbh = DBI\->connect($dsn, \*(Aqdb_user\*(Aq, \*(Aqdb_passwd\*(Aq) || \& die "Could not connect to database: $DBI::errstr"; \& \& my $sth = $dbh\->prepare(\*(Aqselect color, desc from table\*(Aq); \& \& $sth\->execute; \& \& return sub { \& my ($chunk_size) = @_; \& \& if (my $aref = $sth\->fetchall_arrayref(undef, $chunk_size)) { \& return @{ $aref }; \& } \& \& return; \& }; \& } \& \& # Let\*(Aqs enumerate column indexes for easy column retrieval. \& my ($i_color, $i_desc) = (0 .. 1); \& \& my $mce = MCE\->new( \& max_workers => 3, chunk_size => 1000, \& input_data => db_iter(), \& \& user_func => sub { \& my ($mce, $chunk_ref, $chunk_id) = @_; \& my $ret = \*(Aq\*(Aq; \& \& foreach my $row (@{ $chunk_ref }) { \& $ret .= $row\->[$i_color] .": ". 
$row\->[$i_desc] ."\en"; \& } \& \& MCE\->print($ret); \& } \& ); \& \& $mce\->run; .Ve .PP There are many modules on \s-1CPAN\s0 which return an iterator reference. Showing one such example below. The demonstration ensures \s-1MCE\s0 workers are spawned before obtaining the iterator. Note the worker_id value (left column) in the output. .PP .Vb 2 \& use Path::Iterator::Rule; \& use MCE; \& \& my $start_dir = shift \& or die "Please specify a starting directory"; \& \& \-d $start_dir \& or die "Cannot open ($start_dir): No such file or directory"; \& \& my $mce = MCE\->new( \& max_workers => \*(Aqauto\*(Aq, \& user_func => sub { MCE\->say( MCE\->wid . ": $_" ) } \& )\->spawn; \& \& my $rule = Path::Iterator::Rule\->new\->file\->name( qr/[.](pm)$/ ); \& \& my $iterator = $rule\->iter( \& $start_dir, { follow_symlinks => 0, depthfirst => 1 } \& ); \& \& $mce\->process( $iterator ); \& \& \-\- Output \& \& 8: lib/MCE/Core/Input/Generator.pm \& 5: lib/MCE/Core/Input/Handle.pm \& 6: lib/MCE/Core/Input/Iterator.pm \& 2: lib/MCE/Core/Input/Request.pm \& 3: lib/MCE/Core/Manager.pm \& 4: lib/MCE/Core/Input/Sequence.pm \& 7: lib/MCE/Core/Validation.pm \& 1: lib/MCE/Core/Worker.pm \& 8: lib/MCE/Flow.pm \& 5: lib/MCE/Grep.pm \& 6: lib/MCE/Loop.pm \& 2: lib/MCE/Map.pm \& 3: lib/MCE/Queue.pm \& 4: lib/MCE/Signal.pm \& 7: lib/MCE/Stream.pm \& 1: lib/MCE/Subs.pm \& 8: lib/MCE/Util.pm \& 5: lib/MCE.pm .Ve .PP Although \s-1MCE\s0 supports arrays, extra measures are needed to use a \*(L"lazy\*(R" array as input data. The reason for this is that \s-1MCE\s0 needs the size of the array before processing which may be unknown for lazy arrays. Therefore, closures provides an excellent mechanism for this. .PP The code block belonging to the lazy array must return undef after exhausting its input data. Otherwise, the process will never end. .PP .Vb 2 \& use Tie::Array::Lazy; \& use MCE; \& \& tie my @a, \*(AqTie::Array::Lazy\*(Aq, [], sub { \& my $i = $_[0]\->index; \& \& return ($i < 10) ? $i : undef; \& }; \& \& sub make_iterator { \& my $i = 0; my $a_ref = shift; \& \& return sub { \& return $a_ref\->[$i++]; \& }; \& } \& \& my $mce = MCE\->new( \& max_workers => 4, input_data => make_iterator(\e@a), \& \& user_func => sub { \& my ($mce, $chunk_ref, $chunk_id) = @_; \& MCE\->say($_); \& } \& \& )\->run; \& \& \-\- Output \& \& 0 \& 1 \& 2 \& 3 \& 4 \& 6 \& 7 \& 8 \& 5 \& 9 .Ve .PP The following demonstrates how to retrieve a chunk from the lazy array per each successive call. Here, undef is sent by the iterator block when \f(CW$i\fR is greater than \f(CW$max\fR. Iterators may optionally use chunk_size to determine how much to return per iteration. .PP .Vb 2 \& use Tie::Array::Lazy; \& use MCE; \& \& tie my @a, \*(AqTie::Array::Lazy\*(Aq, [], sub { \& $_[0]\->index; \& }; \& \& sub make_iterator { \& my $j = 0; my ($a_ref, $max) = @_; \& \& return sub { \& my ($chunk_size) = @_; \& my $i = $j; $j += $chunk_size; \& \& return if $i > $max; \& return $j <= $max ? @$a_ref[$i .. $j \- 1] : @$a_ref[$i .. $max]; \& }; \& } \& \& my $mce = MCE\->new( \& chunk_size => 15, max_workers => 4, \& input_data => make_iterator(\e@a, 100), \& \& user_func => sub { \& my ($mce, $chunk_ref, $chunk_id) = @_; \& MCE\->say("$chunk_id: " . 
join(\*(Aq \*(Aq, @{ $chunk_ref })); \& } \& \& )\->run; \& \& \-\- Output \& \& 1: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 \& 2: 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 \& 3: 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 \& 4: 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 \& 5: 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 \& 6: 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 \& 7: 90 91 92 93 94 95 96 97 98 99 100 .Ve .SS "\s-1SYNTAX\s0 for \s-1SEQUENCE\s0" .IX Subsection "SYNTAX for SEQUENCE" The 1.3 release and above allows workers to loop through a sequence of numbers computed mathematically without the overhead of an array. The sequence can be specified separately per each user_task entry unlike input_data which is applicable to the first task only. .PP See the seq_demo.pl example, included with this distribution, on applying sequences with the user_tasks option. .PP Sequence can be defined using an array or a hash reference. .PP .Vb 1 \& use MCE; \& \& my $mce = MCE\->new( \& max_workers => 3, \& \& # sequence => [ 10, 19, 0.7, "%4.1f" ], # up to 4 options \& \& sequence => { \& begin => 10, end => 19, step => 0.7, format => "%4.1f" \& }, \& \& user_func => sub { \& my ($mce, $n, $chunk_id) = @_; \& print $n, " from ", MCE\->wid, " id ", $chunk_id, "\en"; \& } \& ); \& \& $mce\->run; \& \& \-\- Output (sorted afterwards, notice wid and chunk_id in output) \& \& 10.0 from 1 id 1 \& 10.7 from 2 id 2 \& 11.4 from 3 id 3 \& 12.1 from 1 id 4 \& 12.8 from 2 id 5 \& 13.5 from 3 id 6 \& 14.2 from 1 id 7 \& 14.9 from 2 id 8 \& 15.6 from 3 id 9 \& 16.3 from 1 id 10 \& 17.0 from 2 id 11 \& 17.7 from 3 id 12 \& 18.4 from 1 id 13 .Ve .PP The 1.5 release includes a new option (bounds_only). This option tells the sequence engine to compute 'begin' and 'end' items only, for the chunk, and not the items in between (hence boundaries only). This option applies to sequence only and has no effect when chunk_size equals 1. .PP The time to run is 0.006s below. This becomes 0.827s without the bounds_only option due to computing all items in between, thus creating a very large array. Basically, specify bounds_only => 1 when boundaries is all you need for looping inside the block; e.g. Monte Carlo simulations. .PP Time was measured using 1 worker to emphasize the difference. .PP .Vb 1 \& use MCE; \& \& my $mce = MCE\->new( \& max_workers => 1, chunk_size => 1_250_000, \& \& sequence => { begin => 1, end => 10_000_000 }, \& bounds_only => 1, \& \& # For sequence, the input scalar $_ points to $chunk_ref \& # when chunk_size > 1, otherwise $chunk_ref\->[0]. \& # \& # user_func => sub { \& # my $begin = $_\->[0]; my $end = $_\->[\-1]; \& # \& # for ($begin .. $end) { \& # ... \& # } \& # }, \& \& user_func => sub { \& my ($mce, $chunk_ref, $chunk_id) = @_; \& # $chunk_ref contains 2 items, not 1_250_000 \& \& my $begin = $chunk_ref\->[ 0]; \& my $end = $chunk_ref\->[\-1]; # or $chunk_ref\->[1] \& \& MCE\->printf("%7d .. %8d\en", $begin, $end); \& } \& ); \& \& $mce\->run; \& \& \-\- Output \& \& 1 .. 1250000 \& 1250001 .. 2500000 \& 2500001 .. 3750000 \& 3750001 .. 5000000 \& 5000001 .. 6250000 \& 6250001 .. 7500000 \& 7500001 .. 8750000 \& 8750001 .. 10000000 .Ve .SS "\s-1SYNTAX\s0 for \s-1MAX_RETRIES\s0" .IX Subsection "SYNTAX for MAX_RETRIES" The max_retries option, added in 1.7, allows \s-1MCE\s0 to retry a failed chunk from a worker dying while processing input data or a sequence of numbers. 
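.PP
Below is a minimal sketch (the failing value is arbitrary) showing
max_retries together with a sequence of numbers. The chunk for the value
3 always dies, is retried once, and is then given up on while the
remaining numbers are still processed.
.PP
.Vb 1
\& use strict;
\& use warnings;
\&
\& use MCE;
\&
\& MCE\->new(
\&     max_workers => 2,
\&     chunk_size  => 1,
\&     max_retries => 1,
\&     sequence    => { begin => 1, end => 6 },
\&
\&     user_func => sub {
\&         # $_ holds the sequence number when chunk_size equals 1
\&         die "cannot handle $_\en" if $_ == 3;
\&         MCE\->say("processed $_");
\&     },
\& )\->run;
.Ve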
.PP When max_retries is set, \s-1MCE\s0 configures the on_post_exit option automatically using the following code before running. Specify on_post_exit explicitly for any further tailoring. The restart_worker line is necessary, obviously. .PP .Vb 2 \& on_post_exit => sub { \& my ( $mce, $e, $retry_cnt ) = @_; \& \& if ( $e\->{id} ) { \& my $cnt = $retry_cnt + 1; \& my $msg = "Error: chunk $e\->{id} failed"; \& \& if ( defined $mce\->{init_relay} ) { \& print {*STDERR} "$msg, retrying chunk attempt # $cnt\en" \& if ( $retry_cnt < $mce\->{max_retries} ); \& } \& else { \& ( $retry_cnt < $mce\->{max_retries} ) \& ? print {*STDERR} "$msg, retrying chunk attempt # $cnt\en" \& : print {*STDERR} "$msg\en"; \& } \& \& $mce\->restart_worker; \& } \& } .Ve .PP We let \s-1MCE\s0 handle on_post_exit automatically below, which is essentially the same code shown above. For max_retries to work, the worker must die, abnormally included, or call \s-1MCE\-\s0>exit. Notice that we pass the chunk_id value for the 3rd argument to \s-1MCE\-\s0>exit (defaults to chunk_id if omitted since \s-1MCE 1.844\s0). .PP .Vb 1 \& # max_retries demonstration \& \& use strict; \& use warnings; \& \& use MCE; \& \& sub user_func { \& my ( $mce, $chunk_ref, $chunk_id ) = @_; \& \& # die "Died : chunk_id = 3\en" if $chunk_id == 3; \& MCE\->exit(1, undef, $chunk_id) if $chunk_id == 3; \& \& print "$chunk_id\en"; \& } \& \& my $mce = MCE\->new( \& max_workers => 1, \& max_retries => 2, \& user_func => \e&user_func, \& )\->spawn; \& \& my $input_data = [ 0..7 ]; \& \& $mce\->process( { chunk_size => 1 }, $input_data ); \& $mce\->shutdown; \& \& \-\- Output \& \& 1 \& 2 \& Error: chunk 3 failed, retrying chunk attempt # 1 \& Error: chunk 3 failed, retrying chunk attempt # 2 \& Error: chunk 3 failed \& 4 \& 5 \& 6 \& 7 \& 8 .Ve .PP Orderly output with max_retries is possible since \s-1MCE 1.844.\s0 Below, chunk 3 succeeds whereas chunk 5 fails due to exceeding the number of retries. Be sure to call MCE::relay inside \f(CW\*(C`user_func\*(C'\fR and near the end of the block. .PP .Vb 1 \& # max_retries demonstration with init_relay \& \& use strict; \& use warnings; \& \& use MCE; \& use MCE::Shared; \& \& tie my $retries1, \*(AqMCE::Shared\*(Aq, 0; \& tie my $retries2, \*(AqMCE::Shared\*(Aq, 0; \& \& MCE\->new( \& max_workers => 4, \& input_data => [ 1..7 ], \& chunk_size => 1, \& \& max_retries => 2, \& init_relay => 0, \& \& user_func => sub { \& if ( MCE\->chunk_id == 3 ) { \& MCE\->exit if ++$retries1 <= 2; \& } \& if ( MCE\->chunk_id == 5 ) { \& MCE\->exit if ++$retries2 <= 3; \& } \& MCE::relay { \& $_ += 1; \& print MCE\->chunk_id, "\en"; \& }; \& } \& )\->run; \& \& print "final: ", MCE::relay_final(), "\en"; \& \& \-\- Output \& \& 1 \& 2 \& Error: chunk 3 failed, retrying chunk attempt # 1 \& Error: chunk 5 failed, retrying chunk attempt # 1 \& Error: chunk 3 failed, retrying chunk attempt # 2 \& Error: chunk 5 failed, retrying chunk attempt # 2 \& 3 \& 4 \& Error: chunk 5 failed \& 6 \& 7 \& final: 6 .Ve .SS "\s-1SYNTAX\s0 for \s-1USER_BEGIN\s0 and \s-1USER_END\s0" .IX Subsection "SYNTAX for USER_BEGIN and USER_END" The user_begin and user_end options, if specified, behave similarly to awk '\s-1BEGIN\s0 { begin } { func } { func } ... \s-1END\s0 { end }'. These are called once per worker during each run. .PP \&\s-1MCE 1.510\s0 passes 2 additional parameters ($task_id and \f(CW$task_name\fR). 
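.PP
A common pattern is opening a per-worker resource in user_begin and
releasing it in user_end. The sketch below assumes DBD::SQLite is
installed and uses a throwaway query as a stand-in for real work; each
worker holds its own handle because user_begin runs once per worker
after spawning. A simpler row-counting example follows.
.PP
.Vb 1
\& use DBI;
\& use MCE;
\&
\& my $mce = MCE\->new(
\&     max_workers => 4,
\&     chunk_size  => 2,
\&     input_data  => [ 1 .. 10 ],
\&
\&     user_begin => sub {
\&         my ($mce, $task_id, $task_name) = @_;
\&         # One handle per worker, opened after the worker is spawned.
\&         $mce\->{dbh} = DBI\->connect(
\&             \*(Aqdbi:SQLite:dbname=:memory:\*(Aq, \*(Aq\*(Aq, \*(Aq\*(Aq,
\&             { RaiseError => 1 }
\&         );
\&     },
\&
\&     user_func => sub {
\&         my ($mce, $chunk_ref, $chunk_id) = @_;
\&         for my $n ( @{ $chunk_ref } ) {
\&             my ($sq) = $mce\->{dbh}\->selectrow_array(
\&                 \*(AqSELECT ? * ?\*(Aq, undef, $n, $n
\&             );
\&             MCE\->say("$n squared is $sq");
\&         }
\&     },
\&
\&     user_end => sub {
\&         my ($mce, $task_id, $task_name) = @_;
\&         $mce\->{dbh}\->disconnect;
\&     },
\& );
\&
\& $mce\->run;
.Ve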
.PP .Vb 4 \& sub user_begin { # Called once at the beginning \& my ($mce, $task_id, $task_name) = @_; \& $mce\->{wk_total_rows} = 0; \& } \& \& sub user_func { # Called while processing \& my $mce = shift; \& $mce\->{wk_total_rows} += 1; \& } \& \& sub user_end { # Called once at the end \& my ($mce, $task_id, $task_name) = @_; \& printf "## %d: Processed %d rows\en", \& MCE\->wid, $mce\->{wk_total_rows}; \& } \& \& my $mce = MCE\->new( \& user_begin => \e&user_begin, \& user_func => \e&user_func, \& user_end => \e&user_end \& ); \& \& $mce\->run; .Ve .SS "\s-1SYNTAX\s0 for \s-1USER_FUNC\s0 with \s-1USE_SLURPIO\s0 => 0" .IX Subsection "SYNTAX for USER_FUNC with USE_SLURPIO => 0" When processing input data, \s-1MCE\s0 can pass an array of rows or a slurped chunk. Below, a reference to an array containing the chunk data is processed. .PP e.g. \f(CW$chunk_ref\fR = [ record1, record2, record3, ... ] .PP .Vb 1 \& sub user_func { \& \& my ($mce, $chunk_ref, $chunk_id) = @_; \& \& foreach my $row ( @{ $chunk_ref } ) { \& $mce\->{wk_total_rows} += 1; \& print $row; \& } \& } \& \& my $mce = MCE\->new( \& chunk_size => 100, \& input_data => "/path/to/file", \& user_func => \e&user_func, \& use_slurpio => 0 \& ); \& \& $mce\->run; .Ve .SS "\s-1SYNTAX\s0 for \s-1USER_FUNC\s0 with \s-1USE_SLURPIO\s0 => 1" .IX Subsection "SYNTAX for USER_FUNC with USE_SLURPIO => 1" Here, a reference to a scalar containing the raw chunk data is processed. .PP .Vb 1 \& sub user_func { \& \& my ($mce, $chunk_ref, $chunk_id) = @_; \& \& my $count = () = $$chunk_ref =~ /abc/; \& } \& \& my $mce = MCE\->new( \& chunk_size => 16000, \& input_data => "/path/to/file", \& user_func => \e&user_func, \& use_slurpio => 1 \& ); \& \& $mce\->run; .Ve .SS "\s-1SYNTAX\s0 for \s-1USER_ERROR\s0 and \s-1USER_OUTPUT\s0" .IX Subsection "SYNTAX for USER_ERROR and USER_OUTPUT" Output from \s-1MCE\-\s0>sendto('\s-1STDERR/STDOUT\s0', ...), \s-1MCE\-\s0>printf, \s-1MCE\-\s0>print, and \&\s-1MCE\-\s0>say can be intercepted by specifying the user_error and user_output options. \s-1MCE\s0 on receiving output will forward to user_error or user_output in a serialized fashion. .PP Handy when wanting to filter, modify, and/or direct the output elsewhere. .PP .Vb 4 \& sub user_error { # Redirect STDERR to STDOUT \& my $error = shift; \& print {*STDOUT} $error; \& } \& \& sub user_output { # Redirect STDOUT to STDERR \& my $output = shift; \& print {*STDERR} $output; \& } \& \& sub user_func { \& my ($mce, $chunk_ref, $chunk_id) = @_; \& my $count = 0; \& \& foreach my $row ( @{ $chunk_ref } ) { \& MCE\->print($row); \& $count += 1; \& } \& \& MCE\->print(\e*STDERR, "$chunk_id: processed $count rows\en"); \& } \& \& my $mce = MCE\->new( \& chunk_size => 1000, \& input_data => "/path/to/file", \& user_error => \e&user_error, \& user_output => \e&user_output, \& user_func => \e&user_func \& ); \& \& $mce\->run; .Ve .SS "\s-1SYNTAX\s0 for \s-1USER_TASKS\s0 and \s-1TASK_END\s0" .IX Subsection "SYNTAX for USER_TASKS and TASK_END" This option takes an array of tasks. Each task allows up to 17 options. The init_relay, input_data, \s-1RS,\s0 and use_slurpio options may be defined inside the first task or at the top level, otherwise ignored under other sub-tasks. .PP .Vb 4 \& max_workers, chunk_size, input_data, interval, sequence, \& bounds_only, user_args, user_begin, user_end, user_func, \& gather, task_end, task_name, use_slurpio, use_threads, \& init_relay, RS .Ve .PP Sequence and chunk_size were added in 1.3. User_args was introduced in 1.4. 
Name and input_data are new options allowed in 1.5. In addition, one can
specify task_end at the top level. Task_end also receives 2 additional
arguments \f(CW$task_id\fR and \f(CW$task_name\fR (shown below).
.PP
Options not specified here will default to the same option specified at
the top level. The task_end option is called by the manager process when
all workers for that sub-task have completed processing.
.PP
Forking and threading can be intermixed among tasks unless running Cygwin.
The run method will continue running until all workers have completed
processing.
.PP
.Vb 2
\& use threads;
\& use threads::shared;
\&
\& use MCE;
\&
\& sub parallel_task1 { sleep 2; }
\& sub parallel_task2 { sleep 1; }
\&
\& my $mce = MCE\->new(
\&
\& task_end => sub {
\& my ($mce, $task_id, $task_name) = @_;
\& print "Task [$task_id \-\- $task_name] completed processing\en";
\& },
\&
\& user_tasks => [{
\& task_name => \*(Aqfoo\*(Aq,
\& max_workers => 2,
\& user_func => \e&parallel_task1,
\& use_threads => 0 # Not using threads
\&
\& },{
\& task_name => \*(Aqbar\*(Aq,
\& max_workers => 4,
\& user_func => \e&parallel_task2,
\& use_threads => 1 # Yes, threads
\&
\& }]
\& );
\&
\& $mce\->run;
\&
\& \-\- Output
\&
\& Task [1 \-\- bar] completed processing
\& Task [0 \-\- foo] completed processing
.Ve
.SH "DEFAULT INPUT SCALAR"
.IX Header "DEFAULT INPUT SCALAR"
Beginning with \s-1MCE 1.5,\s0 the input scalar \f(CW$_\fR is localized prior
to calling user_func for input_data and sequence of numbers. The following
applies.
.IP "use_slurpio => 1" 3
.IX Item "use_slurpio => 1"
.Vb 2
\& $_ is a reference to the buffer e.g. $_ = \e$_buffer;
\& $_ is a reference regardless of whether chunk_size is 1 or greater
\&
\& user_func => sub {
\& # my ($mce, $chunk_ref, $chunk_id) = @_;
\& print ${ $_ }; # $_ is same as $chunk_ref
\& }
.Ve
.IP "chunk_size is greater than 1, use_slurpio => 0" 3
.IX Item "chunk_size is greater than 1, use_slurpio => 0"
.Vb 2
\& $_ is a reference to an array. $_ = \e@_records; $_ = \e@_seq_n;
\& $_ is same as $chunk_ref or $_[CHUNK]
\&
\& user_func => sub {
\& # my ($mce, $chunk_ref, $chunk_id) = @_;
\& for my $row ( @{ $_ } ) {
\& print $row, "\en";
\& }
\& }
\&
\& use MCE const => 1;
\&
\& user_func => sub {
\& # my ($mce, $chunk_ref, $chunk_id) = @_;
\& for my $row ( @{ $_[CHUNK] } ) {
\& print $row, "\en";
\& }
\& }
.Ve
.IP "chunk_size equals 1, use_slurpio => 0" 3
.IX Item "chunk_size equals 1, use_slurpio => 0"
.Vb 1
\& $_ contains the actual value. $_ = $_buffer; $_ = $seq_n;
\&
\& # Note that $_ and $chunk_ref are not the same below.
\& # $chunk_ref is a reference to an array.
\&
\& user_func => sub {
\& # my ($mce, $chunk_ref, $chunk_id) = @_;
\& print $_, "\en"; # Same as $chunk_ref\->[0];
\& }
\&
\& $mce\->foreach("/path/to/file", sub {
\& # my ($mce, $chunk_ref, $chunk_id) = @_;
\& print $_; # Same as $chunk_ref\->[0];
\& });
\&
\& # However, that is not the case for the forseq method.
\& # Both $_ and $n_seq are the same when chunk_size => 1.
\&
\& $mce\->forseq([ 1, 9 ], sub {
\& # my ($mce, $n_seq, $chunk_id) = @_;
\& print $_, "\en"; # Same as $n_seq
\& });
.Ve
.Sp
Sequence can also be specified using an array reference. The call below is
the same as the example that follows.
.Sp
.Vb 1
\& $mce\->forseq( { begin => 10, end => 40, step => 2 }, ... );
.Ve
.Sp
The code block receives an array containing the next 5 sequences. Chunk 1
(chunk_id 1) contains 10,12,14,16,18. \f(CW$n_seq\fR is a reference to an
array, same as \f(CW$_\fR, due to chunk_size being greater than 1.
.Sp .Vb 8 \& $mce\->forseq( [ 10, 40000, 2 ], { chunk_size => 5 }, sub { \& # my ($mce, $n_seq, $chunk_id) = @_; \& my @result; \& for my $n ( @{ $_ } ) { \& ... do work, append to result for 5 \& } \& ... do something with result afterwards \& }); .Ve .SH "METHODS for the MANAGER PROCESS and WORKERS" .IX Header "METHODS for the MANAGER PROCESS and WORKERS" The methods listed below are callable by the main process and workers. .SS "\s-1MCE\-\s0>abort ( void )" .IX Subsection "MCE->abort ( void )" .ie n .SS "$mce\->abort ( void )" .el .SS "\f(CW$mce\fP\->abort ( void )" .IX Subsection "$mce->abort ( void )" The 'abort' method is applicable when processing input_data only. This causes all workers to abort after processing the current chunk. .PP Workers write the next offset position to the queue socket for the next available worker. In essence, the 'abort' method writes the last offset position. Workers, on requesting the next offset position, will think the end of input_data has been reached and leave the chunking loop. .PP .Vb 2 \& MCE\->abort; \& $mce\->abort; .Ve .SS "\s-1MCE\-\s0>chunk_id ( void )" .IX Subsection "MCE->chunk_id ( void )" .ie n .SS "$mce\->chunk_id ( void )" .el .SS "\f(CW$mce\fP\->chunk_id ( void )" .IX Subsection "$mce->chunk_id ( void )" Returns the chunk_id for the current chunk. The value starts at 1. Chunking applies to input_data or sequence. The value is 0 for the manager process. .PP .Vb 2 \& my $chunk_id = MCE\->chunk_id; \& my $chunk_id = $mce\->chunk_id; .Ve .SS "\s-1MCE\-\s0>chunk_size ( void )" .IX Subsection "MCE->chunk_size ( void )" .ie n .SS "$mce\->chunk_size ( void )" .el .SS "\f(CW$mce\fP\->chunk_size ( void )" .IX Subsection "$mce->chunk_size ( void )" Getter method for chunk_size used by \s-1MCE.\s0 .PP .Vb 2 \& my $chunk_size = MCE\->chunk_size; \& my $chunk_size = $mce\->chunk_size; .Ve .ie n .SS "\s-1MCE\-\s0>do ( 'callback_func' [, $arg1, ... ] )" .el .SS "\s-1MCE\-\s0>do ( 'callback_func' [, \f(CW$arg1\fP, ... ] )" .IX Subsection "MCE->do ( 'callback_func' [, $arg1, ... ] )" .ie n .SS "$mce\->do ( 'callback_func' [, $arg1, ... ] )" .el .SS "\f(CW$mce\fP\->do ( 'callback_func' [, \f(CW$arg1\fP, ... ] )" .IX Subsection "$mce->do ( 'callback_func' [, $arg1, ... ] )" \&\s-1MCE\s0 serializes data transfers from a worker process via helper functions do & sendto to the manager process. The callback function can optionally return a reply. Support for calling by the manager process was enabled in \s-1MCE 1.839.\s0 .PP .Vb 1 \& [ $reply = ] MCE\->do(\*(Aqcallback\*(Aq [, $arg1, ... ]); .Ve .PP Passing args to a callback function using references & scalar. .PP .Vb 4 \& sub callback { \& my ($array_ref, $hash_ref, $scalar_ref, $scalar) = @_; \& ... \& } \& \& MCE\->do(\*(Aqmain::callback\*(Aq, \e@a, \e%h, \e$s, \*(Aqfoo\*(Aq); \& MCE\->do(\*(Aqcallback\*(Aq, \e@a, \e%h, \e$s, \*(Aqfoo\*(Aq); .Ve .PP \&\s-1MCE\s0 knows if wanting a void, list, hash, or a scalar return value. .PP .Vb 1 \& MCE\->do(\*(Aqcallback\*(Aq [, $arg1, ... ]); \& \& my @array = MCE\->do(\*(Aqcallback\*(Aq [, $arg1, ... ]); \& my %hash = MCE\->do(\*(Aqcallback\*(Aq [, $arg1, ... ]); \& my $scalar = MCE\->do(\*(Aqcallback\*(Aq [, $arg1, ... 
]); .Ve .ie n .SS "\s-1MCE\-\s0>freeze ( $object_ref )" .el .SS "\s-1MCE\-\s0>freeze ( \f(CW$object_ref\fP )" .IX Subsection "MCE->freeze ( $object_ref )" .ie n .SS "$mce\->freeze ( $object_ref )" .el .SS "\f(CW$mce\fP\->freeze ( \f(CW$object_ref\fP )" .IX Subsection "$mce->freeze ( $object_ref )" Calls the internal freeze method to serialize an object. The default serialization routines are handled by Sereal if available or Storable. .PP .Vb 2 \& my $frozen = MCE\->freeze([ 0, 2, 4 ]); \& my $frozen = $mce\->freeze([ 0, 2, 4 ]); .Ve .SS "\s-1MCE\-\s0>max_retries ( void )" .IX Subsection "MCE->max_retries ( void )" .ie n .SS "$mce\->max_retries ( void )" .el .SS "\f(CW$mce\fP\->max_retries ( void )" .IX Subsection "$mce->max_retries ( void )" Getter method for max_retries used by \s-1MCE.\s0 .PP .Vb 2 \& my $max_retries = MCE\->max_retries; \& my $max_retries = $mce\->max_retries; .Ve .SS "\s-1MCE\-\s0>max_workers ( void )" .IX Subsection "MCE->max_workers ( void )" .ie n .SS "$mce\->max_workers ( void )" .el .SS "\f(CW$mce\fP\->max_workers ( void )" .IX Subsection "$mce->max_workers ( void )" Getter method for max_workers used by \s-1MCE.\s0 .PP .Vb 2 \& my $max_workers = MCE\->max_workers; \& my $max_workers = $mce\->max_workers; .Ve .SS "\s-1MCE\-\s0>pid ( void )" .IX Subsection "MCE->pid ( void )" .ie n .SS "$mce\->pid ( void )" .el .SS "\f(CW$mce\fP\->pid ( void )" .IX Subsection "$mce->pid ( void )" Returns the Process \s-1ID.\s0 Threads have thread \s-1ID\s0 attached to the value. .PP .Vb 2 \& my $pid = MCE\->pid; # 16180 (pid) ; 16180.2 (pid.tid) \& my $pid = $mce\->pid; .Ve .ie n .SS "\s-1MCE\-\s0>printf ( $format, $list [, ... ] )" .el .SS "\s-1MCE\-\s0>printf ( \f(CW$format\fP, \f(CW$list\fP [, ... ] )" .IX Subsection "MCE->printf ( $format, $list [, ... ] )" .ie n .SS "\s-1MCE\-\s0>print ( $list [, ... ] )" .el .SS "\s-1MCE\-\s0>print ( \f(CW$list\fP [, ... ] )" .IX Subsection "MCE->print ( $list [, ... ] )" .ie n .SS "\s-1MCE\-\s0>say ( $list [, ... ] )" .el .SS "\s-1MCE\-\s0>say ( \f(CW$list\fP [, ... ] )" .IX Subsection "MCE->say ( $list [, ... ] )" .ie n .SS "$mce\->printf ( $format, $list [, ... ] )" .el .SS "\f(CW$mce\fP\->printf ( \f(CW$format\fP, \f(CW$list\fP [, ... ] )" .IX Subsection "$mce->printf ( $format, $list [, ... ] )" .ie n .SS "$mce\->print ( $list [, ... ] )" .el .SS "\f(CW$mce\fP\->print ( \f(CW$list\fP [, ... ] )" .IX Subsection "$mce->print ( $list [, ... ] )" .ie n .SS "$mce\->say ( $list [, ... ] )" .el .SS "\f(CW$mce\fP\->say ( \f(CW$list\fP [, ... ] )" .IX Subsection "$mce->say ( $list [, ... ] )" Use the printf, print, and say methods when wanting to serialize output among workers and the manager process. These are sugar syntax for the sendto method. These behave similar to the native subroutines in Perl with the exception that barewords must be passed as a reference and require the comma after it including file handles. .PP Say is like print, but implicitly appends a newline. .PP .Vb 3 \& MCE\->printf(\e*STDOUT, "%s: %d\en", $name, $age); \& MCE\->printf($fh, "%s: %d\en", $name, $age); \& MCE\->printf("%s: %d\en", $name, $age); \& \& MCE\->print(\e*STDERR, "$error_msg\en"); \& MCE\->print($fh, $log_msg."\en"); \& MCE\->print("$output_msg\en"); \& \& MCE\->say(\e*STDERR, $error_msg); \& MCE\->say($fh, $log_msg); \& MCE\->say($output_msg); .Ve .PP Caveat: Use the following syntax when passing a reference not a glob or file handle. Otherwise, \s-1MCE\s0 will error indicating the first argument is not a glob reference. 
.PP .Vb 2 \& MCE\->print(\e*STDOUT, \e@array, "\en"); \& MCE\->print("", \e@array, "\en"); # ok .Ve .PP Sending to \f(CW\*(C`IO::All\*(C'\fR { File, Pipe, \s-1STDIO\s0 } is supported since \s-1MCE 1.845.\s0 .PP .Vb 1 \& use IO::All; \& \& my $out = io\->stdout; \& my $err = io\->stderr; \& \& MCE\->printf($out, "%s\en", "sent to stdout"); \& MCE\->printf($err, "%s\en", "sent to stderr"); \& \& MCE\->print($out, "sent to stdout\en"); \& MCE\->print($err, "sent to stderr\en"); \& \& MCE\->say($out, "sent to stdout"); \& MCE\->say($err, "sent to stderr"); .Ve .SS "\s-1MCE\-\s0>sess_dir ( void )" .IX Subsection "MCE->sess_dir ( void )" .ie n .SS "$mce\->sess_dir ( void )" .el .SS "\f(CW$mce\fP\->sess_dir ( void )" .IX Subsection "$mce->sess_dir ( void )" Returns the session directory used by the \s-1MCE\s0 instance. This is defined during spawning and removed during shutdown. .PP .Vb 2 \& my $sess_dir = MCE\->sess_dir; \& my $sess_dir = $mce\->sess_dir; .Ve .SS "\s-1MCE\-\s0>task_id ( void )" .IX Subsection "MCE->task_id ( void )" .ie n .SS "$mce\->task_id ( void )" .el .SS "\f(CW$mce\fP\->task_id ( void )" .IX Subsection "$mce->task_id ( void )" Returns the task \s-1ID.\s0 This applies to the user_tasks option (starts at 0). .PP .Vb 2 \& my $task_id = MCE\->task_id; \& my $task_id = $mce\->task_id; .Ve .SS "\s-1MCE\-\s0>task_name ( void )" .IX Subsection "MCE->task_name ( void )" .ie n .SS "$mce\->task_name ( void )" .el .SS "\f(CW$mce\fP\->task_name ( void )" .IX Subsection "$mce->task_name ( void )" Returns the task_name value specified via the task_name option when configuring \s-1MCE.\s0 .PP .Vb 2 \& my $task_name = MCE\->task_name; \& my $task_name = $mce\->task_name; .Ve .SS "\s-1MCE\-\s0>task_wid ( void )" .IX Subsection "MCE->task_wid ( void )" .ie n .SS "$mce\->task_wid ( void )" .el .SS "\f(CW$mce\fP\->task_wid ( void )" .IX Subsection "$mce->task_wid ( void )" Returns the task worker \s-1ID\s0 (applies to user_tasks). The value starts at 1 per each task configured within user_tasks. The value is 0 for the manager process. .PP .Vb 2 \& my $task_wid = MCE\->task_wid; \& my $task_wid = $mce\->task_wid; .Ve .ie n .SS "\s-1MCE\-\s0>thaw ( $frozen )" .el .SS "\s-1MCE\-\s0>thaw ( \f(CW$frozen\fP )" .IX Subsection "MCE->thaw ( $frozen )" .ie n .SS "$mce\->thaw ( $frozen )" .el .SS "\f(CW$mce\fP\->thaw ( \f(CW$frozen\fP )" .IX Subsection "$mce->thaw ( $frozen )" Calls the internal thaw method to un-serialize the frozen object. .PP .Vb 2 \& my $object_ref = MCE\->thaw($frozen); \& my $object_ref = $mce\->thaw($frozen); .Ve .SS "\s-1MCE\-\s0>tmp_dir ( void )" .IX Subsection "MCE->tmp_dir ( void )" .ie n .SS "$mce\->tmp_dir ( void )" .el .SS "\f(CW$mce\fP\->tmp_dir ( void )" .IX Subsection "$mce->tmp_dir ( void )" Returns the temporary directory used by \s-1MCE.\s0 .PP .Vb 2 \& my $tmp_dir = MCE\->tmp_dir; \& my $tmp_dir = $mce\->tmp_dir; .Ve .SS "\s-1MCE\-\s0>user_args ( void )" .IX Subsection "MCE->user_args ( void )" .ie n .SS "$mce\->user_args ( void )" .el .SS "\f(CW$mce\fP\->user_args ( void )" .IX Subsection "$mce->user_args ( void )" Returns the arguments specified via the user_args option. .PP .Vb 2 \& my ($arg1, $arg2, $arg3) = MCE\->user_args; \& my ($arg1, $arg2, $arg3) = $mce\->user_args; .Ve .SS "\s-1MCE\-\s0>wid ( void )" .IX Subsection "MCE->wid ( void )" .ie n .SS "$mce\->wid ( void )" .el .SS "\f(CW$mce\fP\->wid ( void )" .IX Subsection "$mce->wid ( void )" Returns the \s-1MCE\s0 worker \s-1ID.\s0 Starts at 1 per each \s-1MCE\s0 instance. 
The value is 0 for the manager process. .PP .Vb 2 \& my $wid = MCE\->wid; \& my $wid = $mce\->wid; .Ve .SH "METHODS for the MANAGER PROCESS only" .IX Header "METHODS for the MANAGER PROCESS only" Methods listed below are callable by the main process only. .ie n .SS "\s-1MCE\-\s0>forchunk ( $input_data [, { options } ], sub { ... } )" .el .SS "\s-1MCE\-\s0>forchunk ( \f(CW$input_data\fP [, { options } ], sub { ... } )" .IX Subsection "MCE->forchunk ( $input_data [, { options } ], sub { ... } )" .ie n .SS "\s-1MCE\-\s0>foreach ( $input_data [, { options } ], sub { ... } )" .el .SS "\s-1MCE\-\s0>foreach ( \f(CW$input_data\fP [, { options } ], sub { ... } )" .IX Subsection "MCE->foreach ( $input_data [, { options } ], sub { ... } )" .ie n .SS "\s-1MCE\-\s0>forseq ( $sequence_spec [, { options } ], sub { ... } )" .el .SS "\s-1MCE\-\s0>forseq ( \f(CW$sequence_spec\fP [, { options } ], sub { ... } )" .IX Subsection "MCE->forseq ( $sequence_spec [, { options } ], sub { ... } )" .ie n .SS "$mce\->forchunk ( $input_data [, { options } ], sub { ... } )" .el .SS "\f(CW$mce\fP\->forchunk ( \f(CW$input_data\fP [, { options } ], sub { ... } )" .IX Subsection "$mce->forchunk ( $input_data [, { options } ], sub { ... } )" .ie n .SS "$mce\->foreach ( $input_data [, { options } ], sub { ... } )" .el .SS "\f(CW$mce\fP\->foreach ( \f(CW$input_data\fP [, { options } ], sub { ... } )" .IX Subsection "$mce->foreach ( $input_data [, { options } ], sub { ... } )" .ie n .SS "$mce\->forseq ( $sequence_spec [, { options } ], sub { ... } )" .el .SS "\f(CW$mce\fP\->forseq ( \f(CW$sequence_spec\fP [, { options } ], sub { ... } )" .IX Subsection "$mce->forseq ( $sequence_spec [, { options } ], sub { ... } )" Forchunk, foreach, and forseq are sugar methods and described in MCE::Candy. Stubs exist in \s-1MCE\s0 which load MCE::Candy automatically. .ie n .SS "\s-1MCE\-\s0>process ( $input_data [, { options } ] )" .el .SS "\s-1MCE\-\s0>process ( \f(CW$input_data\fP [, { options } ] )" .IX Subsection "MCE->process ( $input_data [, { options } ] )" .ie n .SS "$mce\->process ( $input_data [, { options } ] )" .el .SS "\f(CW$mce\fP\->process ( \f(CW$input_data\fP [, { options } ] )" .IX Subsection "$mce->process ( $input_data [, { options } ] )" The process method will spawn workers automatically if not already spawned. It will set input_data => \f(CW$input_data\fR. It calls \fBrun\fR\|(0) to not auto-shutdown workers. Specifying options is optional. .PP Allowable options { key => value, ... } are: .PP .Vb 4 \& chunk_size input_data job_delay spawn_delay submit_delay \& flush_file flush_stderr flush_stdout stderr_file stdout_file \& on_post_exit on_post_run sequence user_args user_begin user_end \& user_func user_error user_output use_slurpio RS .Ve .PP Options remain persistent going forward unless changed. Setting user_begin, user_end, or user_func will cause already spawned workers to shut down and re-spawn automatically. Therefore, define these during instantiation. .PP The below will cause workers to re-spawn after running. .PP .Vb 1 \& my $mce = MCE\->new( max_workers => \*(Aqauto\*(Aq ); \& \& $mce\->process( { \& user_begin => sub { # connect to DB }, \& user_func => sub { # process each row }, \& user_end => sub { # close handle to DB }, \& }, \e@input_data ); \& \& $mce\->process( { \& user_begin => sub { # connect to DB }, \& user_func => sub { # process each file }, \& user_end => sub { # close handle to DB }, \& }, "/list/of/files" ); .Ve .PP Do the following if wanting workers to persist between jobs. 
.PP
.Vb 1
\& use MCE max_workers => \*(Aqauto\*(Aq;
\&
\& my $mce = MCE\->new(
\& user_begin => sub { # connect to DB },
\& user_func => sub { # process each chunk or row or host },
\& user_end => sub { # close handle to DB },
\& );
\&
\& $mce\->spawn; # Spawn early if desired
\&
\& $mce\->process("/one/very_big_file/_mce_/will_chunk_in_parallel");
\& $mce\->process(\e@array_of_files_to_grep);
\& $mce\->process("/path/to/host/list");
\&
\& $mce\->process($array_ref);
\& $mce\->process($array_ref, { stdout_file => $output_file });
\&
\& # This was not allowed before. Fixed in 1.415.
\& $mce\->process({ sequence => { begin => 10, end => 90, step => 2 } });
\& $mce\->process({ sequence => [ 10, 90, 2 ] });
\&
\& $mce\->shutdown;
.Ve
.SS "\s-1MCE\-\s0>relay_final ( void )"
.IX Subsection "MCE->relay_final ( void )"
.ie n .SS "$mce\->relay_final ( void )"
.el .SS "\f(CW$mce\fP\->relay_final ( void )"
.IX Subsection "$mce->relay_final ( void )"
The relay methods are described in MCE::Relay. Relay capabilities are
enabled by specifying the \f(CW\*(C`init_relay\*(C'\fR \s-1MCE\s0 option.
.SS "\s-1MCE\-\s0>restart_worker ( void )"
.IX Subsection "MCE->restart_worker ( void )"
.ie n .SS "$mce\->restart_worker ( void )"
.el .SS "\f(CW$mce\fP\->restart_worker ( void )"
.IX Subsection "$mce->restart_worker ( void )"
One can restart a worker that has died or exited. The job below never ends
due to restarting each time. It is recommended to call \s-1MCE\-\s0>exit or
\f(CW$mce\fR\->exit instead of the native exit function for better handling,
especially under the Windows environment.
.PP
The \f(CW$e\fR\->{wid} argument is no longer necessary starting with the
1.5 release.
.PP
Press [ctrl\-c] to terminate the script.
.PP
.Vb 1
\& my $mce = MCE\->new(
\&
\& on_post_exit => sub {
\& my ($mce, $e) = @_;
\& print "$e\->{wid}: $e\->{pid}: status $e\->{status}: $e\->{msg}";
\& # $mce\->restart_worker($e\->{wid}); # MCE\-1.415 and below
\& $mce\->restart_worker; # MCE\-1.500 and above
\& },
\&
\& user_begin => sub {
\& my ($mce, $task_id, $task_name) = @_;
\& # Not interested in die messages going to STDERR,
\& # because the die handler calls MCE\->exit(255, $_[0]).
\& close STDERR;
\& },
\&
\& user_tasks => [{
\& max_workers => 5,
\& user_func => sub {
\& my ($mce) = @_; sleep MCE\->wid;
\& MCE\->exit(3, "exited from " . MCE\->wid . "\en");
\& }
\& },{
\& max_workers => 4,
\& user_func => sub {
\& my ($mce) = @_; sleep MCE\->wid;
\& die("died from " . MCE\->wid . "\en");
\& }
\& }]
\& );
\&
\& $mce\->run;
\&
\& \-\- Output
\&
\& 1: PID_85388: status 3: exited from 1
\& 2: PID_85389: status 3: exited from 2
\& 1: PID_85397: status 3: exited from 1
\& 3: PID_85390: status 3: exited from 3
\& 1: PID_85399: status 3: exited from 1
\& 4: PID_85391: status 3: exited from 4
\& 2: PID_85398: status 3: exited from 2
\& 1: PID_85401: status 3: exited from 1
\& 5: PID_85392: status 3: exited from 5
\& 1: PID_85404: status 3: exited from 1
\& 6: PID_85393: status 255: died from 6
\& 3: PID_85400: status 3: exited from 3
\& 2: PID_85403: status 3: exited from 2
\& 1: PID_85406: status 3: exited from 1
\& 7: PID_85394: status 255: died from 7
\& 1: PID_85410: status 3: exited from 1
\& 8: PID_85395: status 255: died from 8
\& 4: PID_85402: status 3: exited from 4
\& 2: PID_85409: status 3: exited from 2
\& 1: PID_85412: status 3: exited from 1
\& 9: PID_85396: status 255: died from 9
\& 3: PID_85408: status 3: exited from 3
\& 1: PID_85416: status 3: exited from 1
\&
\& ...
.Ve .ie n .SS "\s-1MCE\-\s0>run ( [ $auto_shutdown [, { options } ] ] )" .el .SS "\s-1MCE\-\s0>run ( [ \f(CW$auto_shutdown\fP [, { options } ] ] )" .IX Subsection "MCE->run ( [ $auto_shutdown [, { options } ] ] )" .ie n .SS "$mce\->run ( [ $auto_shutdown [, { options } ] ] )" .el .SS "\f(CW$mce\fP\->run ( [ \f(CW$auto_shutdown\fP [, { options } ] ] )" .IX Subsection "$mce->run ( [ $auto_shutdown [, { options } ] ] )" The run method, by default, spawns workers, processes once, and shuts down afterwards. Specify 0 for \f(CW$auto_shutdown\fR when wanting workers to persist after running (default 1). .PP Specifying options is optional. Valid options are the same as for the process method. .PP .Vb 1 \& my $mce = MCE\->new( ... ); \& \& # Disables auto\-shutdown \& $mce\->run(0); .Ve .ie n .SS "\s-1MCE\-\s0>send ( $data_ref )" .el .SS "\s-1MCE\-\s0>send ( \f(CW$data_ref\fP )" .IX Subsection "MCE->send ( $data_ref )" .ie n .SS "$mce\->send ( $data_ref )" .el .SS "\f(CW$mce\fP\->send ( \f(CW$data_ref\fP )" .IX Subsection "$mce->send ( $data_ref )" The 'send' method is useful when wanting to spawn workers early to minimize memory consumption and afterwards send data individually to each worker. One cannot send more than the total workers spawned. Workers store the received data as \f(CW$mce\fR\->{user_data}. .PP The data which can be sent is restricted to an \s-1ARRAY, HASH,\s0 or \s-1PDL\s0 reference. Workers begin processing immediately after receiving data. Workers set \&\f(CW$mce\fR\->{user_data} to undef after processing. One cannot specify input_data, sequence, or user_tasks when using the \*(L"send\*(R" method. .PP Passing any options e.g. run(0, { options }) is ignored due to workers running immediately after receiving user data. There is no guarantee to which worker will receive data first. It depends on which worker is available awaiting data. .PP .Vb 1 \& use MCE; \& \& my $mce = MCE\->new( \& max_workers => 5, \& \& user_func => sub { \& my ($mce) = @_; \& my $data = $mce\->{user_data}; \& my $first_name = $data\->{first_name}; \& print MCE\->wid, ": Hello from $first_name\en"; \& } \& ); \& \& $mce\->spawn; # Optional, send will spawn if necessary. \& \& $mce\->send( { first_name => "Theresa" } ); \& $mce\->send( { first_name => "Francis" } ); \& $mce\->send( { first_name => "Padre" } ); \& $mce\->send( { first_name => "Anthony" } ); \& \& $mce\->run; # Wait for workers to complete processing. \& \& \-\- Output \& \& 2: Hello from Theresa \& 5: Hello from Anthony \& 3: Hello from Francis \& 4: Hello from Padre .Ve .SS "\s-1MCE\-\s0>shutdown ( void )" .IX Subsection "MCE->shutdown ( void )" .ie n .SS "$mce\->shutdown ( void )" .el .SS "\f(CW$mce\fP\->shutdown ( void )" .IX Subsection "$mce->shutdown ( void )" The run method will automatically spawn workers, run once, and shutdown workers automatically. Workers persist after running below. Shutdown may be called as needed or prior to exiting. .PP .Vb 1 \& my $mce = MCE\->new( ... 
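\&      # Construction options (e.g. user_func) go here; defining them
\&      # at construction time lets workers persist across the multiple
\&      # process calls below.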
); \& \& $mce\->spawn; \& \& $mce\->process(\e@input_data_1); # Processing multiple arrays \& $mce\->process(\e@input_data_2); \& $mce\->process(\e@input_data_n); \& \& $mce\->shutdown; \& \& $mce\->process(\*(Aqinput_file_1\*(Aq); # Processing multiple files \& $mce\->process(\*(Aqinput_file_2\*(Aq); \& $mce\->process(\*(Aqinput_file_n\*(Aq); \& \& $mce\->shutdown; .Ve .SS "\s-1MCE\-\s0>spawn ( void )" .IX Subsection "MCE->spawn ( void )" .ie n .SS "$mce\->spawn ( void )" .el .SS "\f(CW$mce\fP\->spawn ( void )" .IX Subsection "$mce->spawn ( void )" Workers are normally spawned automatically. The spawn method allows one to spawn workers early if so desired. .PP .Vb 1 \& my $mce = MCE\->new( ... ); \& \& $mce\->spawn; .Ve .SS "\s-1MCE\-\s0>status ( void )" .IX Subsection "MCE->status ( void )" .ie n .SS "$mce\->status ( void )" .el .SS "\f(CW$mce\fP\->status ( void )" .IX Subsection "$mce->status ( void )" The greatest exit status is saved among workers while running. Look at the on_post_exit or on_post_run options for callback support. .PP .Vb 1 \& my $mce = MCE\->new( ... ); \& \& $mce\->run; \& \& my $exit_status = $mce\->status; .Ve .SH "METHODS for WORKERS only" .IX Header "METHODS for WORKERS only" Methods listed below are callable by workers only. .ie n .SS "\s-1MCE\-\s0>exit ( [ $status [, $message [, $id ] ] ] )" .el .SS "\s-1MCE\-\s0>exit ( [ \f(CW$status\fP [, \f(CW$message\fP [, \f(CW$id\fP ] ] ] )" .IX Subsection "MCE->exit ( [ $status [, $message [, $id ] ] ] )" .ie n .SS "$mce\->exit ( [ $status [, $message [, $id ] ] ] )" .el .SS "\f(CW$mce\fP\->exit ( [ \f(CW$status\fP [, \f(CW$message\fP [, \f(CW$id\fP ] ] ] )" .IX Subsection "$mce->exit ( [ $status [, $message [, $id ] ] ] )" A worker exits from \s-1MCE\s0 entirely. \f(CW$id\fR (optional) can be used for passing the primary key or a string along with the message. Look at the on_post_exit or on_post_run options for callback support. .PP .Vb 4 \& MCE\->exit; # default 0 \& MCE\->exit(1); \& MCE\->exit(2, \*(Aqchunk failed\*(Aq, $chunk_id); \& MCE\->exit(0, \*(Aqmsg_foo\*(Aq, \*(Aqid_1000\*(Aq); .Ve .ie n .SS "\s-1MCE\-\s0>gather ( $arg1, [, $arg2, ... ] )" .el .SS "\s-1MCE\-\s0>gather ( \f(CW$arg1\fP, [, \f(CW$arg2\fP, ... ] )" .IX Subsection "MCE->gather ( $arg1, [, $arg2, ... ] )" .ie n .SS "$mce\->gather ( $arg1, [, $arg2, ... ] )" .el .SS "\f(CW$mce\fP\->gather ( \f(CW$arg1\fP, [, \f(CW$arg2\fP, ... ] )" .IX Subsection "$mce->gather ( $arg1, [, $arg2, ... ] )" A worker can submit data to the location specified via the gather option by calling this method. See MCE::Flow and MCE::Loop for additional use-case. .PP .Vb 1 \& use MCE; \& \& my @hosts = qw( \& hosta hostb hostc hostd hoste \& ); \& \& my $mce = MCE\->new( \& chunk_size => 1, max_workers => 3, \& \& user_func => sub { \& # my ($mce, $chunk_ref, $chunk_id) = @_; \& my ($output, $error, $status); my $host = $_; \& \& # Do something with $host; \& $output = "Worker ". MCE\->wid .": Hello from $host"; \& \& if (MCE\->chunk_id % 3 == 0) { \& # Simulating an error condition \& local $? = 1; $status = $?; \& $error = "Error from $host" \& } \& else { \& $status = 0; \& } \& \& # Ensure unique keys (key, value) when gathering to a \& # hash. 
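\&      # Each (key, value) pair below becomes an entry in the hash
\&      # supplied via { gather => \e%h } to the process call below.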
\& MCE\->gather("$host.out", $output, "$host.sta", $status); \& MCE\->gather("$host.err", $error) if (defined $error); \& } \& ); \& \& my %h; $mce\->process(\e@hosts, { gather => \e%h }); \& \& foreach my $host (@hosts) { \& print $h{"$host.out"}, "\en"; \& print $h{"$host.err"}, "\en" if (exists $h{"$host.err"}); \& print "Exit status: ", $h{"$host.sta"}, "\en\en"; \& } \& \& \-\- Output \& \& Worker 2: Hello from hosta \& Exit status: 0 \& \& Worker 1: Hello from hostb \& Exit status: 0 \& \& Worker 3: Hello from hostc \& Error from hostc \& Exit status: 1 \& \& Worker 2: Hello from hostd \& Exit status: 0 \& \& Worker 1: Hello from hoste \& Exit status: 0 .Ve .SS "\s-1MCE\-\s0>last ( void )" .IX Subsection "MCE->last ( void )" .ie n .SS "$mce\->last ( void )" .el .SS "\f(CW$mce\fP\->last ( void )" .IX Subsection "$mce->last ( void )" Worker leaves the chunking loop or user_func block immediately. Callable from inside foreach, forchunk, forseq, and user_func. .PP .Vb 1 \& use MCE; \& \& my $mce = MCE\->new( \& max_workers => 5 \& ); \& \& my @list = (1 .. 80); \& \& $mce\->forchunk(\e@list, { chunk_size => 2 }, sub { \& \& my ($mce, $chunk_ref, $chunk_id) = @_; \& MCE\->last if ($chunk_id > 4); \& \& my @output = (); \& \& foreach my $rec ( @{ $chunk_ref } ) { \& push @output, $rec, "\en"; \& } \& \& MCE\->print(@output); \& }); \& \& \-\- Output (each chunk above consists of 2 elements) \& \& 3 \& 4 \& 1 \& 2 \& 7 \& 8 \& 5 \& 6 .Ve .SS "\s-1MCE\-\s0>next ( void )" .IX Subsection "MCE->next ( void )" .ie n .SS "$mce\->next ( void )" .el .SS "\f(CW$mce\fP\->next ( void )" .IX Subsection "$mce->next ( void )" Worker starts the next iteration of the chunking loop. Callable from inside foreach, forchunk, forseq, and user_func. .PP .Vb 1 \& use MCE; \& \& my $mce = MCE\->new( \& max_workers => 5 \& ); \& \& my @list = (1 .. 80); \& \& $mce\->forchunk(\e@list, { chunk_size => 4 }, sub { \& \& my ($mce, $chunk_ref, $chunk_id) = @_; \& MCE\->next if ($chunk_id < 20); \& \& my @output = (); \& \& foreach my $rec ( @{ $chunk_ref } ) { \& push @output, $rec, "\en"; \& } \& \& MCE\->print(@output); \& }); \& \& \-\- Output (each chunk above consists of 4 elements) \& \& 77 \& 78 \& 79 \& 80 .Ve .SS "MCE::relay { code }" .IX Subsection "MCE::relay { code }" .SS "\s-1MCE\-\s0>relay ( sub { code } )" .IX Subsection "MCE->relay ( sub { code } )" .SS "\s-1MCE\-\s0>relay_recv ( void )" .IX Subsection "MCE->relay_recv ( void )" .ie n .SS "$mce\->relay ( sub { code } )" .el .SS "\f(CW$mce\fP\->relay ( sub { code } )" .IX Subsection "$mce->relay ( sub { code } )" .ie n .SS "$mce\->relay_recv ( void )" .el .SS "\f(CW$mce\fP\->relay_recv ( void )" .IX Subsection "$mce->relay_recv ( void )" The relay methods are described in MCE::Relay. Relay capabilities are enabled by specifying the \f(CW\*(C`init_relay\*(C'\fR \s-1MCE\s0 option. .ie n .SS "\s-1MCE\-\s0>sendto ( $to, $arg1, ... )" .el .SS "\s-1MCE\-\s0>sendto ( \f(CW$to\fP, \f(CW$arg1\fP, ... )" .IX Subsection "MCE->sendto ( $to, $arg1, ... )" .ie n .SS "$mce\->sendto ( $to, $arg1, ... )" .el .SS "\f(CW$mce\fP\->sendto ( \f(CW$to\fP, \f(CW$arg1\fP, ... )" .IX Subsection "$mce->sendto ( $to, $arg1, ... )" The sendto method is called by workers for serializing data to standard output, standard error, or end of file. The action is done by the manager process. .PP Release 1.00x supported 1 data argument, not more. 
.PP .Vb 3 \& MCE\->sendto(\*(Aqfile\*(Aq, \e@array, \*(Aq/path/to/file\*(Aq); \& MCE\->sendto(\*(Aqfile\*(Aq, \e$scalar, \*(Aq/path/to/file\*(Aq); \& MCE\->sendto(\*(Aqfile\*(Aq, $scalar, \*(Aq/path/to/file\*(Aq); \& \& MCE\->sendto(\*(AqSTDERR\*(Aq, \e@array); \& MCE\->sendto(\*(AqSTDERR\*(Aq, \e$scalar); \& MCE\->sendto(\*(AqSTDERR\*(Aq, $scalar); \& \& MCE\->sendto(\*(AqSTDOUT\*(Aq, \e@array); \& MCE\->sendto(\*(AqSTDOUT\*(Aq, \e$scalar); \& MCE\->sendto(\*(AqSTDOUT\*(Aq, $scalar); .Ve .PP Release 1.100 added the ability to pass multiple arguments. Notice the syntax change for sending to a file. Passing a reference to an array is no longer necessary. .PP .Vb 3 \& MCE\->sendto(\*(Aqfile:/path/to/file\*(Aq, $arg1 [, $arg2, ... ]); \& MCE\->sendto(\*(AqSTDERR\*(Aq, $arg1 [, $arg2, ... ]); \& MCE\->sendto(\*(AqSTDOUT\*(Aq, $arg1 [, $arg2, ... ]); \& \& MCE\->sendto(\*(AqSTDOUT\*(Aq, @a, "\en", %h, "\en", $s, "\en"); .Ve .PP To retain 1.00x compatibility, sendto outputs the content when a single data reference is specified. Otherwise, the reference for \e@array or \e$scalar is shown in 1.500, not the content. .PP .Vb 3 \& MCE\->sendto(\*(AqSTDERR\*(Aq, \e@array); # 1.00x behavior, content \& MCE\->sendto(\*(AqSTDOUT\*(Aq, \e$scalar); \& MCE\->sendto(\*(Aqfile:/path/to/file\*(Aq, \e@array); \& \& # Output matches the print statement \& \& MCE\->sendto(\e*STDERR, \e@array); # 1.500 behavior, reference \& MCE\->sendto(\e*STDOUT, \e$scalar); \& MCE\->sendto($fh, \e@array); \& \& MCE\->sendto(\*(AqSTDOUT\*(Aq, \e@array, "\en", \e$scalar, "\en"); \& print {*STDOUT} \e@array, "\en", \e$scalar, "\en"; .Ve .PP \&\s-1MCE 1.500\s0 added support for sending to a glob reference, file descriptor, and file handle. .PP .Vb 3 \& MCE\->sendto(\e*STDERR, "foo\en", \e@array, \e$scalar, "\en"); \& MCE\->sendto(\*(Aqfd:2\*(Aq, "foo\en", \e@array, \e$scalar, "\en"); \& MCE\->sendto($fh, "foo\en", \e@array, \e$scalar, "\en"); .Ve .SS "\s-1MCE\-\s0>sync ( void )" .IX Subsection "MCE->sync ( void )" .ie n .SS "$mce\->sync ( void )" .el .SS "\f(CW$mce\fP\->sync ( void )" .IX Subsection "$mce->sync ( void )" A barrier sync operation means any worker must stop at this point until all workers reach this barrier. Barrier syncing is useful for many computer algorithms. .PP Barrier synchronization is supported for task 0 only or omitting user_tasks. All workers assigned task_id 0 must call sync whenever barrier syncing. .PP .Vb 1 \& use MCE; \& \& sub user_func { \& \& my ($mce) = @_; \& my $wid = MCE\->wid; \& \& MCE\->sendto("STDOUT", "a: $wid\en"); # MCE 1.0+ \& MCE\->sync; \& \& MCE\->sendto(\e*STDOUT, "b: $wid\en"); # MCE 1.5+ \& MCE\->sync; \& \& MCE\->print("c: $wid\en"); # MCE 1.5+ \& MCE\->sync; \& \& return; \& } \& \& my $mce = MCE\->new( \& max_workers => 4, user_func => \e&user_func \& )\->run; \& \& \-\- Output (without barrier synchronization) \& \& a: 1 \& a: 2 \& b: 1 \& b: 2 \& c: 1 \& c: 2 \& a: 3 \& b: 3 \& c: 3 \& a: 4 \& b: 4 \& c: 4 \& \& \-\- Output (with barrier synchronization) \& \& a: 1 \& a: 2 \& a: 4 \& a: 3 \& b: 2 \& b: 1 \& b: 3 \& b: 4 \& c: 1 \& c: 4 \& c: 2 \& c: 3 .Ve .PP Consider the following example. The \s-1MCE\-\s0>sync operation is done inside a loop along with \s-1MCE\-\s0>do. A stall may occur for workers calling sync the 2nd or 3rd time while other workers are sending results via \s-1MCE\-\s0>do or \s-1MCE\-\s0>sendto. .PP It requires another semaphore lock in \s-1MCE\s0 to solve this which was not done in order to keep resources low. 
Therefore, please keep this in mind when mixing
\&\s-1MCE\-\s0>sync with \s-1MCE\-\s0>do or output serialization methods
inside a loop.
.PP
.Vb 1
\&   sub user_func {
\&
\&      my ($mce) = @_;
\&      my @result;
\&
\&      for (1 .. 3) {
\&         # ... compute algorithm ...
\&
\&         MCE\->sync;
\&
\&         # ... compute algorithm ...
\&
\&         MCE\->sync;
\&
\&         MCE\->do(\*(Aqaggregate_result\*(Aq, \e@result);  # or MCE\->sendto
\&
\&         MCE\->sync;   # The sync operation is also needed here to
\&                      # prevent MCE from stalling.
\&      }
\&   }
.Ve
.SS "\s-1MCE\-\s0>yield ( void )"
.IX Subsection "MCE->yield ( void )"
.ie n .SS "$mce\->yield ( void )"
.el .SS "\f(CW$mce\fP\->yield ( void )"
.IX Subsection "$mce->yield ( void )"
There may be an occasion when the \s-1MCE\s0\-driven app is too fast. The
interval option combined with the yield method, both introduced with
\&\s-1MCE 1.5,\s0 allows one to throttle the app. It adds a \*(L"grace\*(R"
factor to the design.
.PP
A use case is an app configured with 100 workers running on a box with 24
logical cores. Data is polled from a database containing over 2.5 million
rows. Workers chunk away at 300 rows per chunk, performing \s-1SNMP\s0 gets
(300 sockets per worker) and polling 25 metrics from each device. With this
scenario, the load on the box may rise beyond 90. In addition, IP_Tables may
reach its contention point, causing the entire application to fail.
.PP
The scenario above is solved by simply having workers yield among themselves
in a synchronized fashion. A delay of 0.007 seconds between intervals is all
that's needed. The load on the box will hover between 23 and 27 for the
duration of the run. Polling completes in under 17 minutes. This is quite
fast considering the app polls 62.5 million metrics combined. The math
equates to 3,676,470 metrics per minute, or 61,275 per second, from a
single box.
.PP
.Vb 1
\&   # Both max_nodes and node_id are optional (default 1).
\&
\&   interval => {
\&      delay => 0.007, max_nodes => $max_nodes, node_id => $node_id
\&   }
.Ve
.PP
A 4 node setup can poll 10 million devices without the additional overhead
of a distribution agent. The only difference between the 4 nodes is the
node_id and the where clause used to query the database. The \s-1MAC\s0
addresses are random, such that the data divides equally across any power
of 2. The distribution key lies in the \s-1MAC\s0 address itself. In fact,
the 2nd character from the right is sufficient for an equal distribution.
.PP
.Vb 4
\&   Query NodeID 1: ... AND substr(MAC, \-2, 1) IN (\*(Aq0\*(Aq, \*(Aq1\*(Aq, \*(Aq2\*(Aq, \*(Aq3\*(Aq)
\&   Query NodeID 2: ... AND substr(MAC, \-2, 1) IN (\*(Aq4\*(Aq, \*(Aq5\*(Aq, \*(Aq6\*(Aq, \*(Aq7\*(Aq)
\&   Query NodeID 3: ... AND substr(MAC, \-2, 1) IN (\*(Aq8\*(Aq, \*(Aq9\*(Aq, \*(AqA\*(Aq, \*(AqB\*(Aq)
\&   Query NodeID 4: ... AND substr(MAC, \-2, 1) IN (\*(AqC\*(Aq, \*(AqD\*(Aq, \*(AqE\*(Aq, \*(AqF\*(Aq)
.Ve
.PP
Below, user_tasks is configured to simulate 4 nodes. This demonstration uses
2 workers to minimize the output size. Input is from the sequence option.
.PP .Vb 2 \& use Time::HiRes qw(time); \& use MCE; \& \& my $d = shift || 0.1; \& \& local $| = 1; \& \& sub create_task { \& \& my ($node_id) = @_; \& \& my $seq_size = 6; \& my $seq_start = ($node_id \- 1) * $seq_size + 1; \& my $seq_end = $seq_start + $seq_size \- 1; \& \& return { \& max_workers => 2, sequence => [ $seq_start, $seq_end ], \& interval => { delay => $d, max_nodes => 4, node_id => $node_id } \& }; \& } \& \& sub user_begin { \& \& my ($mce, $task_id, $task_name) = @_; \& \& # The yield method causes this worker to wait for its next time \& # interval slot before running. Yield has no effect without the \& # \*(Aqinterval\*(Aq option. \& \& # Yielding is beneficial inside a user_begin block. A use case \& # is staggering database connections among workers in order \& # to not impact the DB server. \& \& MCE\->yield; \& \& MCE\->printf( \& "Node %2d: %0.5f \-\- Worker %2d: %12s \-\- Started\en", \& MCE\->task_id + 1, time, MCE\->task_wid, \*(Aq\*(Aq \& ); \& \& return; \& } \& \& { \& my $prev_time = time; \& \& sub user_func { \& \& my ($mce, $seq_n, $chunk_id) = @_; \& \& # Yield simply waits for the next time interval. \& MCE\->yield; \& \& # Calculate how long this worker has waited. \& my $curr_time = time; \& my $time_waited = $curr_time \- $prev_time; \& \& $prev_time = $curr_time; \& \& MCE\->printf( \& "Node %2d: %0.5f \-\- Worker %2d: %12.5f \-\- Seq_N %3d\en", \& MCE\->task_id + 1, time, MCE\->task_wid, $time_waited, $seq_n \& ); \& \& return; \& } \& } \& \& # Simulate a 4 node environment passing node_id to create_task. \& \& print "Node_ID Current_Time Worker_ID Time_Waited Comment\en"; \& \& MCE\->new( \& user_begin => \e&user_begin, \& user_func => \e&user_func, \& \& user_tasks => [ \& create_task(1), \& create_task(2), \& create_task(3), \& create_task(4) \& ] \& \& )\->run; \& \& \-\- Output (notice Current_Time below, stays 0.10 apart) \& \& Node_ID Current_Time Worker_ID Time_Waited Comment \& Node 1: 1374807976.74634 \-\- Worker 1: \-\- Started \& Node 2: 1374807976.84634 \-\- Worker 1: \-\- Started \& Node 3: 1374807976.94638 \-\- Worker 1: \-\- Started \& Node 4: 1374807977.04639 \-\- Worker 1: \-\- Started \& Node 1: 1374807977.14634 \-\- Worker 2: \-\- Started \& Node 2: 1374807977.24640 \-\- Worker 2: \-\- Started \& Node 3: 1374807977.34649 \-\- Worker 2: \-\- Started \& Node 4: 1374807977.44657 \-\- Worker 2: \-\- Started \& Node 1: 1374807977.54636 \-\- Worker 1: 0.90037 \-\- Seq_N 1 \& Node 2: 1374807977.64638 \-\- Worker 1: 1.00040 \-\- Seq_N 7 \& Node 3: 1374807977.74642 \-\- Worker 1: 1.10043 \-\- Seq_N 13 \& Node 4: 1374807977.84643 \-\- Worker 1: 1.20045 \-\- Seq_N 19 \& Node 1: 1374807977.94636 \-\- Worker 2: 1.30037 \-\- Seq_N 2 \& Node 2: 1374807978.04638 \-\- Worker 2: 1.40040 \-\- Seq_N 8 \& Node 3: 1374807978.14641 \-\- Worker 2: 1.50042 \-\- Seq_N 14 \& Node 4: 1374807978.24644 \-\- Worker 2: 1.60045 \-\- Seq_N 20 \& Node 1: 1374807978.34628 \-\- Worker 1: 0.79996 \-\- Seq_N 3 \& Node 2: 1374807978.44631 \-\- Worker 1: 0.79996 \-\- Seq_N 9 \& Node 3: 1374807978.54634 \-\- Worker 1: 0.79996 \-\- Seq_N 15 \& Node 4: 1374807978.64636 \-\- Worker 1: 0.79997 \-\- Seq_N 21 \& Node 1: 1374807978.74628 \-\- Worker 2: 0.79996 \-\- Seq_N 4 \& Node 2: 1374807978.84632 \-\- Worker 2: 0.79997 \-\- Seq_N 10 \& Node 3: 1374807978.94634 \-\- Worker 2: 0.79996 \-\- Seq_N 16 \& Node 4: 1374807979.04636 \-\- Worker 2: 0.79996 \-\- Seq_N 22 \& Node 1: 1374807979.14628 \-\- Worker 1: 0.80001 \-\- Seq_N 5 \& Node 2: 1374807979.24631 \-\- Worker 1: 
0.80000 \-\- Seq_N 11 \& Node 3: 1374807979.34634 \-\- Worker 1: 0.80001 \-\- Seq_N 17 \& Node 4: 1374807979.44636 \-\- Worker 1: 0.80000 \-\- Seq_N 23 \& Node 1: 1374807979.54628 \-\- Worker 2: 0.80000 \-\- Seq_N 6 \& Node 2: 1374807979.64631 \-\- Worker 2: 0.80000 \-\- Seq_N 12 \& Node 3: 1374807979.74633 \-\- Worker 2: 0.80000 \-\- Seq_N 18 \& Node 4: 1374807979.84636 \-\- Worker 2: 0.80000 \-\- Seq_N 24 .Ve .PP The interval.pl example above is included with \s-1MCE.\s0 .SH "MCE PROGRESS DEMONSTRATIONS" .IX Header "MCE PROGRESS DEMONSTRATIONS" The \f(CW\*(C`progress\*(C'\fR option takes a code block for receiving info on the progress made while processing input data; e.g. \f(CW\*(C`input_data\*(C'\fR or \f(CW\*(C`sequence\*(C'\fR. To make this work, one provides the \f(CW\*(C`progress\*(C'\fR option a closure block like so, passing along the size of the input_data; e.g \f(CW\*(C`scalar @array\*(C'\fR or \f(CW\*(C`\-s /path/to/file\*(C'\fR. .PP Current \s-1API\s0 available since 1.813. .PP A worker, upon completing processing its chunk, notifies the manager-process with the size of the chunk. That could be the number of rows or literally the size of the chunk when processing an input file. The manager-process accumulates the size before calling the code block associated with the \f(CW\*(C`progress\*(C'\fR option. .PP When running many tasks simultaneously, via \f(CW\*(C`user_tasks\*(C'\fR, the call is initiated by workers at level 0 only or rather the first task, not shown here. .PP .Vb 2 \& use Time::HiRes \*(Aqsleep\*(Aq; \& use MCE; \& \& sub make_progress { \& my ($total_size) = @_; \& return sub { \& my ($completed_size) = @_; \& printf "%0.1f%%\en", $completed_size / $total_size * 100; \& }; \& } \& \& my @input = (1..150); \& \& MCE\->new( \& chunk_size => 10, \& max_workers => 4, \& input_data => \e@input, \& progress => make_progress( scalar @input ), \& user_func => sub { sleep 1.5 } \& )\->run(); \& \& \-\- Output \& \& 6.7% \& 13.3% \& 20.0% \& 26.7% \& 33.3% \& 40.0% \& 46.7% \& 53.3% \& 60.0% \& 66.7% \& 73.3% \& 80.0% \& 86.7% \& 93.3% \& 100.0% .Ve .PP Next is the code using MCE::Flow and ProgressBar::Stack to do the same thing, practically. .PP .Vb 3 \& use Time::HiRes \*(Aqsleep\*(Aq; \& use ProgressBar::Stack; \& use MCE::Flow; \& \& sub make_progress { \& my ($total_size) = @_; \& init_progress(); \& return sub { \& my ($completed_size) = @_; \& update_progress sprintf("%0.1f", $completed_size / $total_size * 100); \& }; \& } \& \& my @input = (1..150); \& \& MCE::Flow\->init( \& chunk_size => 10, \& max_workers => 4, \& progress => make_progress( scalar @input ) \& ); \& \& MCE::Flow\->run( sub { sleep 1.5 }, \e@input ); \& MCE::Flow\->finish(); \& \& print "\en"; \& \& \-\- Output \& \& [################ ] 80.0% ETA: 0:01 .Ve .PP For sequence of numbers, using the \f(CW\*(C`sequence\*(C'\fR option, one must account for \&\f(CW\*(C`step_size\*(C'\fR, typically set to \f(CW1\fR automatically. 
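.PP
For example, the item count of an increasing sequence can be computed from
the begin, end, and step values. This is a minimal sketch using illustrative
variable names and assuming a positive step:
.PP
.Vb 2
\&   my ($beg, $end, $step) = (1, 100, 2);
\&   my $total_size = int( ($end \- $beg) / $step ) + 1;   # 50 items
.Ve
.PP
The demonstration below passes int( 100 / 2 + 0.5 ) instead, which yields
the same count of 50 for the sequence [ 1, 100, 2 ].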
.PP .Vb 2 \& use Time::HiRes \*(Aqsleep\*(Aq; \& use MCE; \& \& sub make_progress { \& my ($total_size) = @_; \& return sub { \& my ($completed_size) = @_; \& printf "%0.1f%%\en", $completed_size / $total_size * 100; \& }; \& } \& \& MCE\->new( \& chunk_size => 10, \& max_workers => 4, \& sequence => [ 1, 100, 2 ], \& progress => make_progress( int( 100 / 2 + 0.5 ) ), \& user_func => sub { sleep 1.5 } \& )\->run(); \& \& \-\- Output \& \& 20.0% \& 40.0% \& 60.0% \& 80.0% \& 100.0% .Ve .PP Changing \f(CW\*(C`chunk_size\*(C'\fR to \f(CW1\fR means workers notify the manager process more often, thus increasing granularity. Take a look at the output. .PP .Vb 11 \& 2.0% \& 4.0% \& 6.0% \& 8.0% \& 10.0% \& ... \& 92.0% \& 94.0% \& 96.0% \& 98.0% \& 100.0% .Ve .PP Here is the same thing using MCE::Flow together with ProgressBar::Stack. .PP .Vb 3 \& use Time::HiRes \*(Aqsleep\*(Aq; \& use ProgressBar::Stack; \& use MCE::Flow; \& \& sub make_progress { \& my ($total_size) = @_; \& init_progress(); \& return sub { \& my ($completed_size) = @_; \& update_progress sprintf("%0.1f", $completed_size / $total_size * 100); \& }; \& } \& \& MCE::Flow\->init( \& chunk_size => 1, \& max_workers => 4, \& progress => make_progress( int( 100 / 2 + 0.5 ) ) \& ); \& \& MCE::Flow\->run_seq( sub { sleep 0.5 }, 1, 100, 2 ); \& MCE::Flow\->finish(); \& \& print "\en"; \& \& \-\- Output \& \& [######### ] 48.0% ETA: 0:03 .Ve .PP For files and file handles, workers send the actual size of the data read versus counting rows. .PP .Vb 2 \& use Time::HiRes \*(Aqsleep\*(Aq; \& use MCE; \& \& sub make_progress { \& my ($total_size) = @_; \& return sub { \& my ($completed_size) = @_; \& printf "%0.1f%%\en", $completed_size / $total_size * 100; \& }; \& } \& \& my $input_file = "/path/to/input_file.txt"; \& \& MCE\->new( \& chunk_size => 5, \& max_workers => 4, \& input_data => $input_file, \& progress => make_progress( \-s $input_file ), \& user_func => sub { sleep 0.03 } \& )\->run(); .Ve .PP For consistency, here is the example using MCE::Flow, again with ProgressBar::Stack. .PP .Vb 3 \& use Time::HiRes \*(Aqsleep\*(Aq; \& use ProgressBar::Stack; \& use MCE::Flow; \& \& sub make_progress { \& my ($total_size) = @_; \& init_progress(); \& return sub { \& my ($completed_size) = @_; \& update_progress sprintf("%0.1f", $completed_size / $total_size * 100); \& }; \& } \& \& my $input_file = "/path/to/input_file.txt"; \& \& MCE::Flow\->init( \& chunk_size => 5, \& max_workers => 4, \& progress => make_progress( \-s $input_file ) \& ); \& \& MCE::Flow\->run_file( sub { sleep 0.03 }, $input_file ); \& MCE::Flow\->finish(); .Ve .PP The next demonstration processes three arrays consecutively. For this one, \s-1MCE\s0 workers persist after running. This needs \s-1MCE 1.814\s0 or later to run. Otherwise, the progress output is not shown in \s-1MCE 1.813.\s0 .PP .Vb 3 \& use Time::HiRes \*(Aqsleep\*(Aq; \& use ProgressBar::Stack; \& use MCE; \& \& sub make_progress { \& my ($total_size, $message) = @_; \& init_progress(); \& return sub { \& my ($completed_size) = @_; \& update_progress( \& sprintf("%0.1f", $completed_size / $total_size * 100), \& $message \& ); \& }; \& } \& \& my $mce = MCE\->new( \& chunk_size => 10, \& max_workers => 4, \& user_func => sub { sleep 0.5 } \& )\->spawn(); \& \& my @a1 = ( 1 .. 200 ); \& my @a2 = ( 1 .. 500 ); \& my @a3 = ( 1 .. 
300 );
\&
\&   $mce\->process({ progress => make_progress(scalar(@a1), "array 1") }, \e@a1);
\&
\&   print "\en";
\&
\&   $mce\->process({ progress => make_progress(scalar(@a2), "array 2") }, \e@a2);
\&
\&   print "\en";
\&
\&   $mce\->process({ progress => make_progress(scalar(@a3), "array 3") }, \e@a3);
\&
\&   print "\en";
\&
\&   $mce\->shutdown;
\&
\&   \-\- Output
\&
\&   [####################] 100.0% ETA: 0:00 array 1
\&   [####################] 100.0% ETA: 0:00 array 2
\&   [####################] 100.0% ETA: 0:00 array 3
.Ve
.PP
When the size is not known, such as when reading from \f(CW\*(C`STDIN\*(C'\fR,
the only thing one can do is report the size completed thus far.
.PP
.Vb 1
\&   # 1 kibibyte equals 1024 bytes
\&
\&   progress => sub {
\&      my ($completed_size) = @_;
\&      printf "%0.1f kibibytes\en", $completed_size / 1024;
\&   }
.Ve
.SH "SEE ALSO"
.IX Header "SEE ALSO"
.IP "\(bu" 3
MCE::Examples
.SH "INDEX"
.IX Header "INDEX"
\&\s-1MCE\s0
.SH "AUTHOR"
.IX Header "AUTHOR"
Mario E. Roy,