.\" Automatically generated by Pod::Man 2.28 (Pod::Simple 3.28) .\" .\" Standard preamble: .\" ======================================================================== .de Sp \" Vertical space (when we can't use .PP) .if t .sp .5v .if n .sp .. .de Vb \" Begin verbatim text .ft CW .nf .ne \\$1 .. .de Ve \" End verbatim text .ft R .fi .. .\" Set up some character translations and predefined strings. \*(-- will .\" give an unbreakable dash, \*(PI will give pi, \*(L" will give a left .\" double quote, and \*(R" will give a right double quote. \*(C+ will .\" give a nicer C++. Capital omega is used to do unbreakable dashes and .\" therefore won't be available. \*(C` and \*(C' expand to `' in nroff, .\" nothing in troff, for use with C<>. .tr \(*W- .ds C+ C\v'-.1v'\h'-1p'\s-2+\h'-1p'+\s0\v'.1v'\h'-1p' .ie n \{\ . ds -- \(*W- . ds PI pi . if (\n(.H=4u)&(1m=24u) .ds -- \(*W\h'-12u'\(*W\h'-12u'-\" diablo 10 pitch . if (\n(.H=4u)&(1m=20u) .ds -- \(*W\h'-12u'\(*W\h'-8u'-\" diablo 12 pitch . ds L" "" . ds R" "" . ds C` "" . ds C' "" 'br\} .el\{\ . ds -- \|\(em\| . ds PI \(*p . ds L" `` . ds R" '' . ds C` . ds C' 'br\} .\" .\" Escape single quotes in literal strings from groff's Unicode transform. .ie \n(.g .ds Aq \(aq .el .ds Aq ' .\" .\" If the F register is turned on, we'll generate index entries on stderr for .\" titles (.TH), headers (.SH), subsections (.SS), items (.Ip), and index .\" entries marked with X<> in POD. Of course, you'll have to process the .\" output yourself in some meaningful fashion. .\" .\" Avoid warning from groff about undefined register 'F'. .de IX .. .nr rF 0 .if \n(.g .if rF .nr rF 1 .if (\n(rF:(\n(.g==0)) \{ . if \nF \{ . de IX . tm Index:\\$1\t\\n%\t"\\$2" .. . if !\nF==2 \{ . nr % 0 . nr F 2 . \} . \} .\} .rr rF .\" .\" Accent mark definitions (@(#)ms.acc 1.5 88/02/08 SMI; from UCB 4.2). .\" Fear. Run. Save yourself. No user-serviceable parts. . \" fudge factors for nroff and troff .if n \{\ . ds #H 0 . ds #V .8m . ds #F .3m . ds #[ \f1 . ds #] \fP .\} .if t \{\ . ds #H ((1u-(\\\\n(.fu%2u))*.13m) . ds #V .6m . ds #F 0 . ds #[ \& . ds #] \& .\} . \" simple accents for nroff and troff .if n \{\ . ds ' \& . ds ` \& . ds ^ \& . ds , \& . ds ~ ~ . ds / .\} .if t \{\ . ds ' \\k:\h'-(\\n(.wu*8/10-\*(#H)'\'\h"|\\n:u" . ds ` \\k:\h'-(\\n(.wu*8/10-\*(#H)'\`\h'|\\n:u' . ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'^\h'|\\n:u' . ds , \\k:\h'-(\\n(.wu*8/10)',\h'|\\n:u' . ds ~ \\k:\h'-(\\n(.wu-\*(#H-.1m)'~\h'|\\n:u' . ds / \\k:\h'-(\\n(.wu*8/10-\*(#H)'\z\(sl\h'|\\n:u' .\} . \" troff and (daisy-wheel) nroff accents .ds : \\k:\h'-(\\n(.wu*8/10-\*(#H+.1m+\*(#F)'\v'-\*(#V'\z.\h'.2m+\*(#F'.\h'|\\n:u'\v'\*(#V' .ds 8 \h'\*(#H'\(*b\h'-\*(#H' .ds o \\k:\h'-(\\n(.wu+\w'\(de'u-\*(#H)/2u'\v'-.3n'\*(#[\z\(de\v'.3n'\h'|\\n:u'\*(#] .ds d- \h'\*(#H'\(pd\h'-\w'~'u'\v'-.25m'\f2\(hy\fP\v'.25m'\h'-\*(#H' .ds D- D\\k:\h'-\w'D'u'\v'-.11m'\z\(hy\v'.11m'\h'|\\n:u' .ds th \*(#[\v'.3m'\s+1I\s-1\v'-.3m'\h'-(\w'I'u*2/3)'\s-1o\s+1\*(#] .ds Th \*(#[\s+2I\s-2\h'-\w'I'u*3/5'\v'-.3m'o\v'.3m'\*(#] .ds ae a\h'-(\w'a'u*4/10)'e .ds Ae A\h'-(\w'A'u*4/10)'E . \" corrections for vroff .if v .ds ~ \\k:\h'-(\\n(.wu*9/10-\*(#H)'\s-2\u~\d\s+2\h'|\\n:u' .if v .ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'\v'-.4m'^\v'.4m'\h'|\\n:u' . \" for low resolution devices (crt and lpr) .if \n(.H>23 .if \n(.V>19 \ \{\ . ds : e . ds 8 ss . ds o a . ds d- d\h'-1'\(ga . ds D- D\h'-1'\(hy . ds th \o'bp' . ds Th \o'LP' . ds ae ae . 
ds Ae AE
.\}
.rm #[ #] #H #V #F C
.\" ========================================================================
.\"
.IX Title "MCE::Flow 3pm"
.TH MCE::Flow 3pm "2014-10-24" "perl v5.20.1" "User Contributed Perl Documentation"
.\" For nroff, turn off justification.  Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
.nh
.SH "NAME"
MCE::Flow \- Parallel flow model for building creative applications
.SH "VERSION"
.IX Header "VERSION"
This document describes MCE::Flow version 1.517.
.SH "DESCRIPTION"
.IX Header "DESCRIPTION"
MCE::Flow is great for writing custom applications that make full use of all
available cores. This module was created to help one harness user_tasks within
\s-1MCE.\s0
.PP
It's trivial to parallelize with mce_stream as shown below.
.PP
.Vb 2
\&   ## Native map function
\&   my @a = map { $_ * 4 } map { $_ * 3 } map { $_ * 2 } 1..10000;
\&
\&   ## Same as with MCE::Stream (processing from right to left)
\&   @a = mce_stream
\&        sub { $_ * 4 }, sub { $_ * 3 }, sub { $_ * 2 }, 1..10000;
\&
\&   ## Pass an array reference to have writes occur simultaneously
\&   mce_stream \e@a,
\&        sub { $_ * 4 }, sub { $_ * 3 }, sub { $_ * 2 }, 1..10000;
.Ve
.PP
However, let's have MCE::Flow compute the same in parallel. MCE::Queue will be
used for data flow among the sub-tasks. Also, take a look at MCE::Step for
transparent use of MCE::Queue.
.PP
.Vb 2
\&   use MCE::Flow;
\&   use MCE::Queue;
.Ve
.PP
This calls for preserving output order. Remember to set \f(CW$_order_id\fR to 1
before running.
.PP
.Vb 1
\&   my ($_gather_ref, $_order_id, %_tmp);
\&
\&   sub _preserve_order {
\&
\&      $_tmp{$_[1]} = $_[0];
\&
\&      while (1) {
\&         last unless exists $_tmp{$_order_id};
\&         push @{ $_gather_ref }, @{ $_tmp{$_order_id} };
\&         delete $_tmp{$_order_id++};
\&      }
\&
\&      return;
\&   }
.Ve
.PP
Two queues are needed for data flow between the 3 sub-tasks. Notice task_end
and how the value from \f(CW$task_name\fR is used to determine which task has
ended.
.PP
.Vb 2
\&   my $b = MCE::Queue\->new;
\&   my $c = MCE::Queue\->new;
\&
\&   sub task_end {
\&      my ($mce, $task_id, $task_name) = @_;
\&
\&      if (defined $mce\->{user_tasks}\->[$task_id + 1]) {
\&         my $N_workers = $mce\->{user_tasks}\->[$task_id + 1]\->{max_workers};
\&
\&         if ($task_name eq \*(Aqa\*(Aq) {
\&            $b\->enqueue((undef) x $N_workers);
\&         }
\&         elsif ($task_name eq \*(Aqb\*(Aq) {
\&            $c\->enqueue((undef) x $N_workers);
\&         }
\&      }
\&
\&      return;
\&   }
.Ve
.PP
Next are the 3 sub-tasks. The first one reads input and begins the flow. The
second task dequeues, performs the calculation, and enqueues into the next
queue. Finally, the last task calls the gather method. Gather can be called as
often as needed.
.PP
Although serialization is handled automatically, it is done manually here to
avoid double serialization. This is the fastest approach for passing data
between sub-tasks when the least overhead is desired.
.PP
.Vb 2
\&   ## Task A: double each input item, then enqueue for task B.
\&   sub task_a {
\&      my @ans; my ($mce, $chunk_ref, $chunk_id) = @_;
\&
\&      push @ans, map { $_ * 2 } @{ $chunk_ref };
\&      $b\->enqueue(MCE\->freeze([ \e@ans, $chunk_id ]));
\&   }
\&
\&   ## Task B: dequeue from A, triple each item, then enqueue for task C.
\&   sub task_b {
\&      my ($mce) = @_;
\&
\&      while (1) {
\&         my $chunk = $b\->dequeue; last unless defined $chunk;
\&         my @ans; $chunk = MCE\->thaw($chunk);
\&
\&         push @ans, map { $_ * 3 } @{ $chunk\->[0] };
\&         $c\->enqueue(MCE\->freeze([ \e@ans, $chunk\->[1] ]));
\&      }
\&
\&      return;
\&   }
\&
\&   ## Task C: dequeue from B, quadruple each item, then gather the results.
\&   sub task_c {
\&      my ($mce) = @_;
\&
\&      while (1) {
\&         my $chunk = $c\->dequeue; last unless defined $chunk;
\&         my @ans; $chunk = MCE\->thaw($chunk);
\&
\&         push @ans, map { $_ * 4 } @{ $chunk\->[0] };
\&         MCE\->gather(\e@ans, $chunk\->[1]);
\&      }
\&
\&      return;
\&   }
.Ve
.PP
In summary, MCE::Flow builds out a \s-1MCE\s0 instance behind the scenes and
starts running. Both task_name (shown below) and max_workers can take an
anonymous array for specifying the values individually for each sub-task.
.PP
.Vb 1
\&   my @a; $_gather_ref = \e@a; $_order_id = 1;
\&
\&   mce_flow {
\&      gather => \e&_preserve_order, task_name => [ \*(Aqa\*(Aq, \*(Aqb\*(Aq, \*(Aqc\*(Aq ],
\&      task_end => \e&task_end
\&
\&   }, \e&task_a, \e&task_b, \e&task_c, 1..10000;
\&
\&   print "@a\en";
.Ve
.PP
If speed is not a concern and you want to do away with all the \s-1MCE\-\s0>freeze
and \s-1MCE\-\s0>thaw statements, simply enqueue and dequeue 2 items at a time.
Or better yet, see MCE::Step introduced in \s-1MCE 1.506.\s0
.PP
.Vb 1
\&   $b\->enqueue(\e@ans, $chunk_id);
\&
\&   ...
\&
\&   my ($chunk_ref, $chunk_id) = $b\->dequeue(2);
\&   last unless defined $chunk_ref;
\&
\&   ...
.Ve
.PP
The task_end routine must be updated as well because workers now dequeue 2
items at a time. Therefore, we must double the number of undefs placed into
the queue.
.PP
.Vb 6
\&   if ($task_name eq \*(Aqa\*(Aq) {
\&      $b\->enqueue((undef) x ($N_workers * 2));
\&   }
\&   elsif ($task_name eq \*(Aqb\*(Aq) {
\&      $c\->enqueue((undef) x ($N_workers * 2));
\&   }
.Ve
.SH "SYNOPSIS when CHUNK_SIZE EQUALS 1"
.IX Header "SYNOPSIS when CHUNK_SIZE EQUALS 1"
Although MCE::Loop may be preferred when running a single code block, the text
below also applies to this module, particularly for the first block.
.PP
All models in \s-1MCE\s0 default to 'auto' for chunk_size. The arguments for the
block are the same as writing a user_func block for the core \s-1API.\s0
.PP
Beginning with \s-1MCE 1.5,\s0 the next input item is placed into the input scalar
variable \f(CW$_\fR when chunk_size equals 1. Otherwise, \f(CW$_\fR points to
\f(CW$chunk_ref\fR containing many items. Basically, line 2 below may be omitted
from your code when using \f(CW$_\fR. One can call \s-1MCE\-\s0>chunk_id to obtain
the current chunk id.
.PP
.Vb 9
\&   line 1:  user_func => sub {
\&   line 2:     my ($mce, $chunk_ref, $chunk_id) = @_;
\&   line 3:
\&   line 4:     $_ points to $chunk_ref\->[0]
\&   line 5:        in MCE 1.5 when chunk_size == 1
\&   line 6:
\&   line 7:     $_ points to $chunk_ref
\&   line 8:        in MCE 1.5 when chunk_size > 1
\&   line 9:  }
.Ve
.PP
Follow this synopsis when chunk_size equals one. Looping is not required from
inside the first block. Hence, the block is called once per item.
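.PP
The synopsis below passes each item to a \f(CWdo_work\fR routine supplied by
your application. A minimal stand-in, purely hypothetical and shown only so
the examples can be run as-is, might be:
.PP
.Vb 5
\&   ## Hypothetical placeholder; substitute your own processing.
\&   sub do_work {
\&      my ($item) = @_;
\&      print "processed: $item\en";
\&   }
.Ve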
.PP
.Vb 2
\&   ## Exports mce_flow, mce_flow_f, and mce_flow_s
\&   use MCE::Flow;
\&
\&   MCE::Flow::init {
\&      chunk_size => 1
\&   };
\&
\&   ## Array or array_ref
\&   mce_flow sub { do_work($_) }, 1..10000;
\&   mce_flow sub { do_work($_) }, [ 1..10000 ];
\&
\&   ## File_path, glob_ref, or scalar_ref
\&   mce_flow_f sub { chomp; do_work($_) }, "/path/to/file";
\&   mce_flow_f sub { chomp; do_work($_) }, $file_handle;
\&   mce_flow_f sub { chomp; do_work($_) }, \e$scalar;
\&
\&   ## Sequence of numbers (begin, end [, step, format])
\&   mce_flow_s sub { do_work($_) }, 1, 10000, 5;
\&   mce_flow_s sub { do_work($_) }, [ 1, 10000, 5 ];
\&
\&   mce_flow_s sub { do_work($_) }, {
\&      begin => 1, end => 10000, step => 5, format => undef
\&   };
.Ve
.SH "SYNOPSIS when CHUNK_SIZE is GREATER THAN 1"
.IX Header "SYNOPSIS when CHUNK_SIZE is GREATER THAN 1"
Follow this synopsis when chunk_size equals 'auto' or is greater than 1. This
means having to loop through the chunk from inside the first block.
.PP
.Vb 1
\&   use MCE::Flow;
\&
\&   MCE::Flow::init {           ## Chunk_size defaults to \*(Aqauto\*(Aq when
\&      chunk_size => \*(Aqauto\*(Aq    ## not specified. Therefore, the init
\&   };                          ## function may be omitted.
\&
\&   ## Syntax is shown for mce_flow for demonstration purposes.
\&   ## Looping inside the block is the same for mce_flow_f and
\&   ## mce_flow_s.
\&
\&   mce_flow sub { do_work($_) for (@{ $_ }) }, 1..10000;
\&
\&   ## Same as above, resembles code using the core API.
\&
\&   mce_flow sub {
\&      my ($mce, $chunk_ref, $chunk_id) = @_;
\&
\&      for (@{ $chunk_ref }) {
\&         do_work($_);
\&      }
\&
\&   }, 1..10000;
.Ve
.PP
Chunking reduces the number of \s-1IPC\s0 calls behind the scenes. Think in terms
of chunks whenever processing a large amount of data. For relatively small
data, choosing 1 for chunk_size is fine.
.SH "OVERRIDING DEFAULTS"
.IX Header "OVERRIDING DEFAULTS"
The following lists 5 options that may be overridden when loading the module.
.PP
.Vb 1
\&   use Sereal qw(encode_sereal decode_sereal);
\&
\&   use MCE::Flow
\&       max_workers => 8,                  ## Default \*(Aqauto\*(Aq
\&       chunk_size  => 500,                ## Default \*(Aqauto\*(Aq
\&       tmp_dir     => "/path/to/app/tmp", ## $MCE::Signal::tmp_dir
\&       freeze      => \e&encode_sereal,    ## \e&Storable::freeze
\&       thaw        => \e&decode_sereal     ## \e&Storable::thaw
\&   ;
.Ve
.PP
There is a simpler way to enable Sereal with \s-1MCE 1.5.\s0 The following will
attempt to use Sereal if available; otherwise, it will fall back to Storable
for serialization.
.PP
.Vb 1
\&   use MCE::Flow Sereal => 1;
\&
\&   MCE::Flow::init {
\&      chunk_size => 1
\&   };
\&
\&   ## Serialization is through Sereal if available.
\&   my %answer = mce_flow sub { MCE\->gather( $_, sqrt $_ ) }, 1..10000;
.Ve
.SH "CUSTOMIZING MCE"
.IX Header "CUSTOMIZING MCE"
.IP "init" 3
.IX Item "init"
The init function accepts a hash of \s-1MCE\s0 options. Unlike with MCE::Stream,
both the gather and bounds_only options may be specified when calling init
(a brief sketch follows; the fuller example after it leaves them out).
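.Sp
The sketch below uses illustrative values; \f(CWmy_callback\fR is a placeholder
for your own gather routine, not part of the module.
.Sp
.Vb 6
\&   ## gather names a callback that receives results from workers;
\&   ## bounds_only => 1 applies when the input is a sequence.
\&   MCE::Flow::init {
\&      chunk_size => \*(Aqauto\*(Aq, bounds_only => 1,
\&      gather => \e&my_callback      ## hypothetical callback
\&   };
.Ve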
.Sp .Vb 1 \& use MCE::Flow; \& \& MCE::Flow::init { \& chunk_size => 1, max_workers => 4, \& \& user_begin => sub { \& print "## ", MCE\->wid, " started\en"; \& }, \& \& user_end => sub { \& print "## ", MCE\->wid, " completed\en"; \& } \& }; \& \& my %a = mce_flow sub { MCE\->gather($_, $_ * $_) }, 1..100; \& \& print "\en", "@a{1..100}", "\en"; \& \& \-\- Output \& \& ## 1 started \& ## 2 started \& ## 3 started \& ## 4 started \& ## 4 completed \& ## 1 completed \& ## 2 completed \& ## 3 completed \& \& 1 4 9 16 25 36 49 64 81 100 121 144 169 196 225 256 289 324 361 \& 400 441 484 529 576 625 676 729 784 841 900 961 1024 1089 1156 \& 1225 1296 1369 1444 1521 1600 1681 1764 1849 1936 2025 2116 2209 \& 2304 2401 2500 2601 2704 2809 2916 3025 3136 3249 3364 3481 3600 \& 3721 3844 3969 4096 4225 4356 4489 4624 4761 4900 5041 5184 5329 \& 5476 5625 5776 5929 6084 6241 6400 6561 6724 6889 7056 7225 7396 \& 7569 7744 7921 8100 8281 8464 8649 8836 9025 9216 9409 9604 9801 \& 10000 .Ve .PP Like with MCE::Flow::init above, \s-1MCE\s0 options may be specified using an anonymous hash for the first argument. Notice how both max_workers and task_name can take an anonymous array for setting values individually for each code block. .PP Unlike MCE::Stream which processes from right-to-left, MCE::Flow begins with the first code block, thus processing from left-to-right. .PP .Vb 1 \& use MCE::Flow; \& \& my @a = mce_flow { \& max_workers => [ 3, 4, 2, ], task_name => [ \*(Aqa\*(Aq, \*(Aqb\*(Aq, \*(Aqc\*(Aq ], \& \& user_end => sub { \& my ($task_id, $task_name) = (MCE\->task_id, MCE\->task_name); \& MCE\->print("$task_id \- $task_name completed\en"); \& }, \& \& task_end => sub { \& my ($mce, $task_id, $task_name) = @_; \& MCE\->print("$task_id \- $task_name ended\en"); \& } \& }, \& sub { sleep 1; }, ## 3 workers, named a \& sub { sleep 2; }, ## 4 workers, named b \& sub { sleep 3; }; ## 2 workers, named c \& \& \-\- Output \& \& 0 \- a completed \& 0 \- a completed \& 0 \- a completed \& 0 \- a ended \& 1 \- b completed \& 1 \- b completed \& 1 \- b completed \& 1 \- b completed \& 1 \- b ended \& 2 \- c completed \& 2 \- c completed \& 2 \- c ended .Ve .SH "API DOCUMENTATION" .IX Header "API DOCUMENTATION" Although input data is optional for MCE::Flow, the following assumes chunk_size equals 1 in order to demonstrate all the possibilities of passing input data. .IP "mce_flow sub { code }, list" 3 .IX Item "mce_flow sub { code }, list" Input data can be defined using a list or passing a reference to an array. .Sp .Vb 2 \& mce_flow sub { $_ }, 1..1000; \& mce_flow sub { $_ }, [ 1..1000 ]; .Ve .IP "mce_flow_f sub { code }, file" 3 .IX Item "mce_flow_f sub { code }, file" The fastest of these is the /path/to/file. Workers communicate the next offset position among themselves without any interaction from the manager process. .Sp .Vb 3 \& mce_flow_f sub { $_ }, "/path/to/file"; \& mce_flow_f sub { $_ }, $file_handle; \& mce_flow_f sub { $_ }, \e$scalar; .Ve .IP "mce_flow_s sub { code }, sequence" 3 .IX Item "mce_flow_s sub { code }, sequence" Sequence can be defined as a list, an array reference, or a hash reference. The functions require both begin and end values to run. Step and format are optional. The format is passed to sprintf (% may be omitted below). 
.Sp
.Vb 1
\&   my ($beg, $end, $step, $fmt) = (10, 20, 0.1, "%4.1f");
\&
\&   mce_flow_s sub { $_ }, $beg, $end, $step, $fmt;
\&   mce_flow_s sub { $_ }, [ $beg, $end, $step, $fmt ];
\&
\&   mce_flow_s sub { $_ }, {
\&      begin => $beg, end => $end, step => $step, format => $fmt
\&   };
.Ve
.IP "mce_flow { input_data => iterator }, sub { code }" 3
.IX Item "mce_flow { input_data => iterator }, sub { code }"
An iterator reference can be specified for input data. Notice the anonymous
hash as the first argument to mce_flow. The only other way is to specify
input_data via MCE::Flow::init. This prevents MCE::Flow from configuring the
iterator reference as another user task, which would not work.
.Sp
Iterators are described under \*(L"\s-1SYNTAX\s0 for \s-1INPUT_DATA\s0\*(R" at MCE::Core.
.Sp
.Vb 3
\&   MCE::Flow::init {
\&      input_data => iterator
\&   };
\&
\&   mce_flow sub { $_ };
.Ve
.PP
With the bounds_only option (boundaries only), the sequence engine computes
only the 'begin' and 'end' items for each chunk, leaving out the items in
between. This option applies to sequences and has no effect when chunk_size
equals 1.
.PP
The run time for \s-1MCE\s0 below is 0.006s. This becomes 0.827s without the
bounds_only option because all the items in between are computed as well,
creating a very large array. Basically, specify bounds_only => 1 when
boundaries are all you need for looping inside the block, e.g. Monte Carlo
simulations. Time was measured using 1 worker to emphasize the difference.
.PP
.Vb 1
\&   use MCE::Flow;
\&
\&   MCE::Flow::init {
\&      max_workers => 1,
\&      # chunk_size => \*(Aqauto\*(Aq,   ## btw, \*(Aqauto\*(Aq will never drop below 2
\&      chunk_size => 1_250_000,
\&      bounds_only => 1
\&   };
\&
\&   ## For sequence, the input scalar $_ points to $chunk_ref
\&   ## when chunk_size > 1, otherwise equals $chunk_ref\->[0].
\&   ##
\&   ## mce_flow_s sub {
\&   ##    my $begin = $_\->[0]; my $end = $_\->[\-1];
\&   ##
\&   ##    for ($begin .. $end) {
\&   ##       ... have fun with MCE ...
\&   ##    }
\&   ##
\&   ## }, 1, 10_000_000;
\&
\&   mce_flow_s sub {
\&      my ($mce, $chunk_ref, $chunk_id) = @_;
\&
\&      ## $chunk_ref contains just 2 items, not 1_250_000
\&
\&      my $begin = $chunk_ref\->[ 0];
\&      my $end   = $chunk_ref\->[\-1];   ## or $chunk_ref\->[1]
\&
\&      MCE\->printf("%7d .. %8d\en", $begin, $end);
\&
\&   }, 1, 10_000_000;
\&
\&   \-\- Output
\&
\&         1 ..  1250000
\&   1250001 ..  2500000
\&   2500001 ..  3750000
\&   3750001 ..  5000000
\&   5000001 ..  6250000
\&   6250001 ..  7500000
\&   7500001 ..  8750000
\&   8750001 .. 10000000
.Ve
.SH "GATHERING DATA"
.IX Header "GATHERING DATA"
Unlike MCE::Map, where gathering and output order are handled for you
automatically, the gather method is used to have results sent back to the
manager process.
.PP
.Vb 1
\&   use MCE::Flow chunk_size => 1;
\&
\&   ## Output order is not guaranteed.
\&   my @a = mce_flow sub { MCE\->gather($_ * 2) }, 1..100;
\&   print "@a\en\en";
\&
\&   ## However, one can store to a hash by gathering 2 items
\&   ## (key, value) per gather call.
\&   my %h1 = mce_flow sub { MCE\->gather($_, $_ * 2) }, 1..100;
\&   print "@h1{1..100}\en\en";
\&
\&   ## This does the same thing due to chunk_id starting at one.
\&   my %h2 = mce_flow sub { MCE\->gather(MCE\->chunk_id, $_ * 2) }, 1..100;
\&   print "@h2{1..100}\en\en";
.Ve
.PP
The gather method can be called multiple times within the block, unlike
return, which would leave the block. Therefore, think of gather as yielding
results immediately to the manager process without actually leaving the block.
.PP .Vb 1 \& use MCE::Flow chunk_size => 1, max_workers => 3; \& \& my @hosts = qw( \& hosta hostb hostc hostd hoste \& ); \& \& my %h3 = mce_flow sub { \& my ($output, $error, $status); my $host = $_; \& \& ## Do something with $host; \& $output = "Worker ". MCE\->wid .": Hello from $host"; \& \& if (MCE\->chunk_id % 3 == 0) { \& ## Simulating an error condition \& local $? = 1; $status = $?; \& $error = "Error from $host" \& } \& else { \& $status = 0; \& } \& \& ## Ensure unique keys (key, value) when gathering to \& ## a hash. \& MCE\->gather("$host.out", $output); \& MCE\->gather("$host.err", $error) if (defined $error); \& MCE\->gather("$host.sta", $status); \& \& }, @hosts; \& \& foreach my $host (@hosts) { \& print $h3{"$host.out"}, "\en"; \& print $h3{"$host.err"}, "\en" if (exists $h3{"$host.err"}); \& print "Exit status: ", $h3{"$host.sta"}, "\en\en"; \& } \& \& \-\- Output \& \& Worker 3: Hello from hosta \& Exit status: 0 \& \& Worker 2: Hello from hostb \& Exit status: 0 \& \& Worker 1: Hello from hostc \& Error from hostc \& Exit status: 1 \& \& Worker 3: Hello from hostd \& Exit status: 0 \& \& Worker 2: Hello from hoste \& Exit status: 0 .Ve .PP The following uses an anonymous array containing 3 elements when gathering data. Serialization is automatic behind the scene. .PP .Vb 1 \& my %h3 = mce_flow sub { \& \& ... \& \& MCE\->gather($host, [$output, $error, $status]); \& \& }, @hosts; \& \& foreach my $host (@hosts) { \& print $h3{$host}\->[0], "\en"; \& print $h3{$host}\->[1], "\en" if (defined $h3{$host}\->[1]); \& print "Exit status: ", $h3{$host}\->[2], "\en\en"; \& } .Ve .PP Perhaps you want more control with gather such as appending to an array while retaining output order. Although MCE::Map comes to mind, some folks want \*(L"full\*(R" control. And here we go... but this time around in chunking style... :) .PP The two options passed to MCE::Flow are optional as they default to 'auto'. The beauty of chunking data is that \s-1IPC\s0 occurs once per chunk versus once per item. Although \s-1IPC\s0 is quite fast, chunking becomes beneficial the larger the data becomes. Hence, the reason for the demonstration below. .PP .Vb 1 \& use MCE::Flow chunk_size => \*(Aqauto\*(Aq, max_workers => \*(Aqauto\*(Aq; \& \& my (%_tmp, $_gather_ref, $_order_id); \& \& sub preserve_order { \& $_tmp{ (shift) } = \e@_; \& \& while (1) { \& last unless exists $_tmp{$_order_id}; \& push @{ $_gather_ref }, @{ $_tmp{$_order_id} }; \& delete $_tmp{$_order_id++}; \& } \& \& return; \& } \& \& ## Workers persist after running. Therefore, not recommended to \& ## use a closure for gather unless calling MCE::Flow::init each \& ## time inside the loop. Use this demonstration when wanting \& ## MCE::Flow to maintain output order. \& \& MCE::Flow::init { gather => \e&preserve_order }; \& \& for (1..2) { \& my @m2; \& \& ## Remember to set $_order_id back to 1 prior to running. \& $_gather_ref = \e@m2; $_order_id = 1; \& \& mce_flow sub { \& my @a; my ($mce, $chunk_ref, $chunk_id) = @_; \& \& ## Compute the entire chunk data at once. \& push @a, map { $_ * 2 } @{ $chunk_ref }; \& \& ## Afterwards, invoke the gather feature, which \& ## will direct the data to the callback function. \& MCE\->gather(MCE\->chunk_id, @a); \& \& }, 1..100000; \& \& print scalar @m2, "\en"; \& } .Ve .PP All 6 models support 'auto' for chunk_size whereas the core \s-1API\s0 doesn't. 
Think of the models as the basis for providing \s-1JIT\s0 (just-in-time)
configuration for \s-1MCE.\s0 They create the instance and tune max_workers plus
chunk_size automatically regardless of the hardware being run on.
.PP
The following does the same thing using the core \s-1API.\s0
.PP
.Vb 1
\&   use MCE;
\&
\&   ...
\&
\&   my $mce = MCE\->new(
\&      max_workers => \*(Aqauto\*(Aq, chunk_size => 8000,
\&      gather => \e&preserve_order,
\&
\&      user_func => sub {
\&         my @a; my ($mce, $chunk_ref, $chunk_id) = @_;
\&
\&         ## Compute the entire chunk data at once.
\&         push @a, map { $_ * 2 } @{ $chunk_ref };
\&
\&         ## Afterwards, invoke the gather feature, which
\&         ## will direct the data to the callback function.
\&         MCE\->gather(MCE\->chunk_id, @a);
\&      }
\&   );
\&
\&   $mce\->process([1..100000]);
\&
\&   ...
.Ve
.SH "MANUAL SHUTDOWN"
.IX Header "MANUAL SHUTDOWN"
.IP "finish" 3
.IX Item "finish"
\&\s-1MCE\s0 workers remain persistent as much as possible after running. Shutdown
occurs when the script exits. One can manually shut down \s-1MCE\s0 by simply
calling finish after running. This resets the \s-1MCE\s0 instance.
.Sp
.Vb 1
\&   use MCE::Flow;
\&
\&   MCE::Flow::init {
\&      chunk_size => 20, max_workers => \*(Aqauto\*(Aq
\&   };
\&
\&   mce_flow sub { ... }, 1..100;
\&
\&   MCE::Flow::finish;
.Ve
.SH "INDEX"
.IX Header "INDEX"
\&\s-1MCE\s0
.SH "AUTHOR"
.IX Header "AUTHOR"
Mario E. Roy,
.SH "LICENSE"
.IX Header "LICENSE"
This program is free software; you can redistribute it and/or modify it under
the terms of either: the \s-1GNU\s0 General Public License as published by the
Free Software Foundation; or the Artistic License.
.PP
See http://dev.perl.org/licenses/ for more information.