.\" Automatically generated by Pod::Man 2.28 (Pod::Simple 3.28) .\" .\" Standard preamble: .\" ======================================================================== .de Sp \" Vertical space (when we can't use .PP) .if t .sp .5v .if n .sp .. .de Vb \" Begin verbatim text .ft CW .nf .ne \\$1 .. .de Ve \" End verbatim text .ft R .fi .. .\" Set up some character translations and predefined strings. \*(-- will .\" give an unbreakable dash, \*(PI will give pi, \*(L" will give a left .\" double quote, and \*(R" will give a right double quote. \*(C+ will .\" give a nicer C++. Capital omega is used to do unbreakable dashes and .\" therefore won't be available. \*(C` and \*(C' expand to `' in nroff, .\" nothing in troff, for use with C<>. .tr \(*W- .ds C+ C\v'-.1v'\h'-1p'\s-2+\h'-1p'+\s0\v'.1v'\h'-1p' .ie n \{\ . ds -- \(*W- . ds PI pi . if (\n(.H=4u)&(1m=24u) .ds -- \(*W\h'-12u'\(*W\h'-12u'-\" diablo 10 pitch . if (\n(.H=4u)&(1m=20u) .ds -- \(*W\h'-12u'\(*W\h'-8u'-\" diablo 12 pitch . ds L" "" . ds R" "" . ds C` "" . ds C' "" 'br\} .el\{\ . ds -- \|\(em\| . ds PI \(*p . ds L" `` . ds R" '' . ds C` . ds C' 'br\} .\" .\" Escape single quotes in literal strings from groff's Unicode transform. .ie \n(.g .ds Aq \(aq .el .ds Aq ' .\" .\" If the F register is turned on, we'll generate index entries on stderr for .\" titles (.TH), headers (.SH), subsections (.SS), items (.Ip), and index .\" entries marked with X<> in POD. Of course, you'll have to process the .\" output yourself in some meaningful fashion. .\" .\" Avoid warning from groff about undefined register 'F'. .de IX .. .nr rF 0 .if \n(.g .if rF .nr rF 1 .if (\n(rF:(\n(.g==0)) \{ . if \nF \{ . de IX . tm Index:\\$1\t\\n%\t"\\$2" .. . if !\nF==2 \{ . nr % 0 . nr F 2 . \} . \} .\} .rr rF .\" .\" Accent mark definitions (@(#)ms.acc 1.5 88/02/08 SMI; from UCB 4.2). .\" Fear. Run. Save yourself. No user-serviceable parts. . \" fudge factors for nroff and troff .if n \{\ . ds #H 0 . ds #V .8m . ds #F .3m . ds #[ \f1 . ds #] \fP .\} .if t \{\ . ds #H ((1u-(\\\\n(.fu%2u))*.13m) . ds #V .6m . ds #F 0 . ds #[ \& . ds #] \& .\} . \" simple accents for nroff and troff .if n \{\ . ds ' \& . ds ` \& . ds ^ \& . ds , \& . ds ~ ~ . ds / .\} .if t \{\ . ds ' \\k:\h'-(\\n(.wu*8/10-\*(#H)'\'\h"|\\n:u" . ds ` \\k:\h'-(\\n(.wu*8/10-\*(#H)'\`\h'|\\n:u' . ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'^\h'|\\n:u' . ds , \\k:\h'-(\\n(.wu*8/10)',\h'|\\n:u' . ds ~ \\k:\h'-(\\n(.wu-\*(#H-.1m)'~\h'|\\n:u' . ds / \\k:\h'-(\\n(.wu*8/10-\*(#H)'\z\(sl\h'|\\n:u' .\} . \" troff and (daisy-wheel) nroff accents .ds : \\k:\h'-(\\n(.wu*8/10-\*(#H+.1m+\*(#F)'\v'-\*(#V'\z.\h'.2m+\*(#F'.\h'|\\n:u'\v'\*(#V' .ds 8 \h'\*(#H'\(*b\h'-\*(#H' .ds o \\k:\h'-(\\n(.wu+\w'\(de'u-\*(#H)/2u'\v'-.3n'\*(#[\z\(de\v'.3n'\h'|\\n:u'\*(#] .ds d- \h'\*(#H'\(pd\h'-\w'~'u'\v'-.25m'\f2\(hy\fP\v'.25m'\h'-\*(#H' .ds D- D\\k:\h'-\w'D'u'\v'-.11m'\z\(hy\v'.11m'\h'|\\n:u' .ds th \*(#[\v'.3m'\s+1I\s-1\v'-.3m'\h'-(\w'I'u*2/3)'\s-1o\s+1\*(#] .ds Th \*(#[\s+2I\s-2\h'-\w'I'u*3/5'\v'-.3m'o\v'.3m'\*(#] .ds ae a\h'-(\w'a'u*4/10)'e .ds Ae A\h'-(\w'A'u*4/10)'E . \" corrections for vroff .if v .ds ~ \\k:\h'-(\\n(.wu*9/10-\*(#H)'\s-2\u~\d\s+2\h'|\\n:u' .if v .ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'\v'-.4m'^\v'.4m'\h'|\\n:u' . \" for low resolution devices (crt and lpr) .if \n(.H>23 .if \n(.V>19 \ \{\ . ds : e . ds 8 ss . ds o a . ds d- d\h'-1'\(ga . ds D- D\h'-1'\(hy . ds th \o'bp' . ds Th \o'LP' . ds ae ae . 
ds Ae AE .\} .rm #[ #] #H #V #F C .\" ======================================================================== .\" .IX Title "VM::EC2::Staging::Manager 3pm" .TH VM::EC2::Staging::Manager 3pm "2014-10-24" "perl v5.20.1" "User Contributed Perl Documentation" .\" For nroff, turn off justification. Always turn off hyphenation; it makes .\" way too many mistakes in technical documents. .if n .ad l .nh .SH "NAME" VM::EC2::Staging::Manager \- Automate VMs and volumes for moving data in and out of cloud. .SH "SYNOPSIS" .IX Header "SYNOPSIS" .Vb 1 \& use VM::EC2::Staging::Manager; \& \& my $ec2 = VM::EC2\->new(\-region=>\*(Aqus\-east\-1\*(Aq); \& my $staging = $ec2\->staging_manager(\-on_exit => \*(Aqstop\*(Aq, # default, stop servers when process exits \& \-verbose => 1, # default, verbose progress messages \& \-scan => 1, # default, scan region for existing staging servers and volumes \& \-image_name => \*(Aqubuntu\-precise\-12.04\*(Aq, # default server image \& \-user_name => \*(Aqubuntu\*(Aq, # default server login name \& ); \& \& # Assuming an EBS image named ami\-12345 is located in the US, copy it into \& # the South American region, returning the AMI ID in South America \& my $new_image = $staging\->copy_image(\*(Aqami\-12345\*(Aq,\*(Aqsa\-east\-1\*(Aq); \& \& # provision a new server, using defaults. Name will be assigned automatically \& my $server = $staging\->provision_server(\-availability_zone => \*(Aqus\-east\-1a\*(Aq); \& \& # retrieve an existing server named "my_server", if one exists. If not, it creates one \& # using the specified options \& my $server = $staging\->get_server(\-name => \*(Aqmy_server\*(Aq, \& \-availability_zone => \*(Aqus\-east\-1a\*(Aq, \& \-instance_type => \*(Aqt1.micro\*(Aq); \& \& # open up an ssh session in an xterm \& $server\->shell; \& \& # run a command over ssh on the server. See VM::EC2::Staging::Server \& $server\->ssh(\*(Aqwhoami\*(Aq); \& \& # run a command over ssh on the server, returning the result as an array of lines or a \& # scalar string, similar to backticks (\`\`) \& my @password_lines = $server\->scmd(\*(Aqcat /etc/passwd\*(Aq); \& \& # run a command on the server and read from it using a filehandle \& my $fh = $server\->scmd_read(\*(Aqls \-R /usr/lib\*(Aq); \& while (<$fh>) { # do something } \& \& # run a command on the server and write to it using a filehandle \& my $fh = $server\->scmd_write(\*(Aqsudo \-s "cat >>/etc/fstab"\*(Aq); \& print $fh "/dev/sdf3 /mnt/demo ext3 0 2\en"; \& close $fh; \& \& # Provision a new volume named "Pictures". Will automatically be mounted to a staging server in \& # the specified zone. Server will be created if needed. \& my $volume = $staging\->provision_volume(\-name => \*(AqPictures\*(Aq, \& \-fstype => \*(Aqext4\*(Aq, \& \-availability_zone => \*(Aqus\-east\-1a\*(Aq, \& \-size => 2) or die $staging\->error_str; \& \& # gets an existing volume named "Pictures" if it exists.
Otherwise provisions a new volume; \& my $volume = $staging\->get_volume(\-name => \*(AqPictures\*(Aq, \& \-fstype => \*(Aqext4\*(Aq, \& \-availability_zone => \*(Aqus\-east\-1a\*(Aq, \& \-size => 2) or die $staging\->error_str; \& \& # copy contents of local directory /opt/test to remote volume $volume using rsync \& # See VM::EC2::Staging::Volume \& $volume\->put(\*(Aq/opt/test/\*(Aq); \& \& # same thing, but first creating a subdirectory on the remote volume \& $volume\->put(\*(Aq/opt/test/\*(Aq => \*(Aq./mirrors/\*(Aq); \& \& # copy contents of remote volume $volume to local directory /tmp/test using rsync \& $volume\->get(\*(Aq/tmp/test\*(Aq); \& \& # same thing, but from a subdirectory of the remote volume \& $volume\->get(\*(Aq./mirrors/\*(Aq => \*(Aq/tmp/test\*(Aq); \& \& # server to server transfer (works both within and between availability regions) \& my $south_america = VM::EC2\->new(\-region=>\*(Aqsa\-east\-1\*(Aq)\->staging_manager; # create a staging manager in Sao Paolo \& my $volume2 = $south_america\->provision_volume(\-name => \*(AqVideos\*(Aq, \& \-availability_zone => \*(Aqsa\-east\-1a\*(Aq, \& \-size => 2); \& $staging\->rsync("$volume/mirrors" => "$volume2/us\-east"); \& \& $staging\->stop_all_servers(); \& $staging\->start_all_servers(); \& $staging\->terminate_all_servers(); \& $staging\->force_terminate_all_servers(); .Ve .SH "DESCRIPTION" .IX Header "DESCRIPTION" VM::EC2::Staging::Manager manages a set of \s-1EC2\s0 volumes and servers in a single \s-1AWS\s0 region. It was primarily designed to simplify the process of provisioning and populating volumes, but it also provides a handy set of ssh commands that allow you to run remote commands programmatically. .PP The manager also allows you to copy EBS-backed AMIs and their attached volumes from one region to another, something that is otherwise difficult to do. .PP The main classes are: .PP .Vb 2 \& VM::EC2::Staging::Manager \-\- A set of volume and server resources in \& a single AWS region. \& \& VM::EC2::Staging::Server \-\- A staging server running somewhere in the \& region. It is a VM::EC2::Instance \& extended to provide remote command and \& copy facilities. \& \& VM::EC2::Staging::Volume \-\- A staging disk volume running somewhere in the \& region. It is a VM::EC2::Volume \& extended to provide remote copy \& facilities. .Ve .PP Staging servers can provision volumes, format them, mount them, copy data between local and remote (virtual) machines, and execute secure shell commands. Staging volumes can mount themselves on servers, run a variety of filesystem-oriented commands, and invoke commands on the servers to copy data around locally and remotely. .PP See VM::EC2::Staging::Server and VM::EC2::Staging::Volume for the full details. .SH "Constructors" .IX Header "Constructors" The following methods allow you to create new VM::EC2::Staging::Manager instances. Be aware that only one manager is allowed per \s-1EC2\s0 region; attempting to create additional managers in the same region will return the same one each time. .ie n .SS "$manager = $ec2\->staging_manager(@args)" .el .SS "\f(CW$manager\fP = \f(CW$ec2\fP\->staging_manager(@args)" .IX Subsection "$manager = $ec2->staging_manager(@args)" This is a simplified way to create a staging manager. 
First create the \&\s-1EC2\s0 object in the desired region, and then call its \fIstaging_manager()\fR method: .PP .Vb 1 \& $manager = VM::EC2\->new(\-region=>\*(Aqus\-west\-2\*(Aq)\->staging_manager() .Ve .PP The \fIstaging_manager()\fR method is only known to \s-1VM::EC2\s0 objects if you first \*(L"use\*(R" VM::EC2::Staging::Manager. .IP "Required Arguments" 4 .IX Item "Required Arguments" None. .IP "Optional Arguments" 4 .IX Item "Optional Arguments" The optional arguments change the way that the manager creates new servers and volumes. .Sp .Vb 4 \& \-on_exit What to do with running servers when the manager goes \& out of scope or the script exits. One of \*(Aqrun\*(Aq, \& \*(Aqstop\*(Aq (default), or \*(Aqterminate\*(Aq. "run" keeps all \& created instances running, so beware! \& \& \-architecture Architecture for newly\-created server \& instances (default "i386"). Can be overridden in calls to get_server() \& and provision_server(). \& \& \-instance_type Type of newly\-created servers (default "m1.small"). Can be overridden \& in calls to get_server() and provision_server(). \& \& \-root_type Root type for newly\-created servers (default depends \& on the \-on_exit behavior; "ebs" for exit behavior of \& "stop" and "instance\-store" for exit behavior of "run" \& or "terminate"). \& \& \-image_name Name or ami ID of the AMI to use for creating the \& instances of new servers. Defaults to \*(Aqubuntu\-precise\-12.04\*(Aq. \& If the image name begins with "ami\-", then it is \& treated as an AMI ID. Otherwise it is treated as \& a name pattern and will be used to search the AMI \& name field using the wildcard search "*$name*". \& Names work better than AMI ids here, because the \& latter change from one region to another. If multiple \& matching image candidates are found, then an alpha \& sort on the name is used to find the image with the \& highest alpha sort value, which happens to work with \& Ubuntu images to find the latest release. \& \& \-availability_zone Availability zone for newly\-created \& servers. Default is undef, in which case a random \& zone is selected. \& \& \-username Username to use for ssh connections. Defaults to \& "ubuntu". Note that this user must be able to use \& sudo on the instance without providing a password, \& or functionality of this module will be limited. \& \& \-verbose Integer level of verbosity. Level 1 prints warning \& messages. Level 2 (the default) adds informational \& messages as well. Level 3 adds verbose debugging \& messages. Level 0 suppresses all messages. \& \& \-quiet (deprecated) If true, turns off all verbose messages. \& \& \-scan Boolean, default true. If true, scans region for \& volumes and servers created by earlier manager \& instances. \& \& \-reuse_key Boolean, default true. If true, creates a single \& ssh keypair for each region and reuses it. Note that \& the private key is kept on the local computer in the \& directory ~/.vm\-ec2\-staging, and so additional \& keypairs may be created if you use this module on \& multiple local machines. If this option is false, \& then a new keypair will be created for every server \& you provision. \& \& \-reuse_volumes Boolean, default true. If this flag is true, then \& calls to provision_volume() will return existing \& volumes if they share the same name as the requested \& volume. If no suitable existing volume exists, then \& the most recent snapshot of this volume is used to \& create it in the specified availability zone.
Only \& if no volume or snapshot exists will a new volume be \& created from scratch. \& \& \-dotdir Path to the directory that contains keyfiles and other \& stable configuration information for this module. \& Defaults to ~/.vm\-ec2\-staging. You may wish to change \& this to, say, a private dropbox directory or an NFS\-mount \& in order to share keyfiles among machines. Be aware of \& the security implications of sharing private key files. \& \& \-server_class By default, staging server objects created by the manager \& are of class type VM::EC2::Staging::Server. If you create \& a custom server subclass, you need to let the manager know \& about it by passing the class name to this argument. \& \& \-volume_class By default, staging volume objects created by the manager \& are of class type VM::EC2::Staging::Volume. If you create \& a custom volume subclass, you need to let the manager know \& about it by passing the class name to this argument. .Ve .ie n .SS "$manager = VM::EC2::Staging::Manager(\-ec2 => $ec2,@args)" .el .SS "\f(CW$manager\fP = VM::EC2::Staging::Manager(\-ec2 => \f(CW$ec2\fP,@args)" .IX Subsection "$manager = VM::EC2::Staging::Manager(-ec2 => $ec2,@args)" This is a more traditional constructor for the staging manager. .IP "Required Arguments" 4 .IX Item "Required Arguments" .Vb 1 \& \-ec2 A VM::EC2 object. .Ve .IP "Optional Arguments" 4 .IX Item "Optional Arguments" All of the arguments listed in the description of \&\s-1VM::EC2\-\s0>\fIstaging_manager()\fR. .SH "Interzone Copying of AMIs and Snapshots" .IX Header "Interzone Copying of AMIs and Snapshots" This library provides convenience methods for copying whole AMIs as well as individual snapshots from one zone to another. It does this by gathering information about the AMI/snapshot in the source zone, creating staging servers in the source and target zones, and then copying the volume data from the source to the target. If an AMI/snapshot does not use a recognized filesystem (e.g. it is part of an \s-1LVM\s0 or \s-1RAID\s0 disk set), then block\-level copying of the entire device is used. Otherwise, \fIrsync()\fR is used to minimize data transfer fees. .PP Note that interzone copying of instance-backed AMIs is \fBnot\fR supported. Only EBS-backed images can be copied in this way. .PP See also the command-line script migrate\-ebs\-image.pl that comes with this package. .ie n .SS "$new_image_id = $manager\->copy_image($source_image,$destination_zone,@register_options)" .el .SS "\f(CW$new_image_id\fP = \f(CW$manager\fP\->copy_image($source_image,$destination_zone,@register_options)" .IX Subsection "$new_image_id = $manager->copy_image($source_image,$destination_zone,@register_options)" This method copies the \s-1AMI\s0 indicated by \f(CW$source_image\fR from the zone that \f(CW$manager\fR belongs to, into the indicated \f(CW$destination_zone\fR, and returns the \s-1AMI ID\s0 of the new image in the destination zone. .PP \&\f(CW$source_image\fR may be an \s-1AMI ID,\s0 or a VM::EC2::Image object. .PP \&\f(CW$destination_zone\fR may be a simple region name, such as \*(L"us\-west\-2\*(R", or a VM::EC2::Region object (as returned by \s-1VM::EC2\-\s0>describe_regions), or a VM::EC2::Staging::Manager object that is associated with the desired region. The latter form gives you control over the nature of the staging instances created in the destination zone.
For example, if you wish to use 'm1.large' high\-I/O instances in both the source and destination regions, you would proceed like this: .PP .Vb 7 \& my $source = VM::EC2\->new(\-region=>\*(Aqus\-east\-1\*(Aq \& )\->staging_manager(\-instance_type=>\*(Aqm1.large\*(Aq, \& \-on_exit =>\*(Aqterminate\*(Aq); \& my $destination = VM::EC2\->new(\-region=>\*(Aqus\-west\-2\*(Aq \& )\->staging_manager(\-instance_type=>\*(Aqm1.large\*(Aq, \& \-on_exit =>\*(Aqterminate\*(Aq); \& my $new_image = $source\->copy_image(\*(Aqami\-123456\*(Aq => $destination); .Ve .PP If present, the named argument list \f(CW@register_options\fR will be passed to \fIregister_image()\fR and used to override options in the destination image. This can be used to set ephemeral device mappings, which cannot currently be detected and transferred automatically by \fIcopy_image()\fR: .PP .Vb 3 \& $new_image = $source\->copy_image(\*(Aqami\-123456\*(Aq => \*(Aqus\-west\-2\*(Aq, \& \-description => \*(AqMy AMI western style\*(Aq, \& \-block_devices => \*(Aq/dev/sde=ephemeral0\*(Aq); .Ve .ie n .SS "$dest_kernel = $manager\->match_kernel($src_kernel,$dest_zone)" .el .SS "\f(CW$dest_kernel\fP = \f(CW$manager\fP\->match_kernel($src_kernel,$dest_zone)" .IX Subsection "$dest_kernel = $manager->match_kernel($src_kernel,$dest_zone)" Find a kernel in \f(CW$dest_zone\fR that matches the \f(CW$src_kernel\fR in the current zone. \f(CW$dest_zone\fR can be a VM::EC2::Staging::Manager object, a region name, or a VM::EC2::Region object. .ie n .SS "$new_snapshot_id = $manager\->copy_snapshot($source_snapshot,$destination_zone)" .el .SS "\f(CW$new_snapshot_id\fP = \f(CW$manager\fP\->copy_snapshot($source_snapshot,$destination_zone)" .IX Subsection "$new_snapshot_id = $manager->copy_snapshot($source_snapshot,$destination_zone)" This method copies the \s-1EBS\s0 snapshot indicated by \f(CW$source_snapshot\fR from the zone that \f(CW$manager\fR belongs to, into the indicated \&\f(CW$destination_zone\fR, and returns the \s-1ID\s0 of the new snapshot in the destination zone. .PP \&\f(CW$source_snapshot\fR may be a string \s-1ID,\s0 or a VM::EC2::Snapshot object. .PP \&\f(CW$destination_zone\fR may be a simple region name, such as \*(L"us\-west\-2\*(R", or a VM::EC2::Region object (as returned by \s-1VM::EC2\-\s0>describe_regions), or a VM::EC2::Staging::Manager object that is associated with the desired region. .PP Note that this call uses the Amazon CopySnapshot \s-1API\s0 call that was introduced in 2012\-12\-01 and no longer involves the creation of staging servers in the source and destination regions. .SH "Instance Methods for Managing Staging Servers" .IX Header "Instance Methods for Managing Staging Servers" These methods allow you to create and interrogate staging servers. They each return one or more VM::EC2::Staging::Server objects. See VM::EC2::Staging::Server for more information about what you can do with these servers once they are running. .ie n .SS "$server = $manager\->provision_server(%options)" .el .SS "\f(CW$server\fP = \f(CW$manager\fP\->provision_server(%options)" .IX Subsection "$server = $manager->provision_server(%options)" Create a new VM::EC2::Staging::Server object according to the passed options, which override the default options provided by the Manager object. .PP .Vb 2 \& \-name Name for this server, which can be used to retrieve \& it later with a call to get_server(). \& \& \-architecture Architecture for the newly\-created server \& instances (e.g. "i386").
If not specified, then defaults \& to the default_architecture() value. If explicitly \& specified as undef, then the architecture of the matching \& image will be used. \& \& \-instance_type Type of the newly\-created server (e.g. "m1.small"). \& \& \-root_type Root type for the server ("ebs" or "instance\-store"). \& \& \-image_name Name or ami ID of the AMI to use for creating the \& instance for the server. If the image name begins with \& "ami\-", then it is treated as an AMI ID. Otherwise it \& is treated as a name pattern and will be used to \& search the AMI name field using the wildcard search \& "*$name*". Names work better than AMI ids here, \& because the latter change from one region to \& another. If multiple matching image candidates are \& found, then an alpha sort on the name is used to find \& the image with the highest alpha sort value, which \& happens to work with Ubuntu images to find the latest \& release. \& \& \-availability_zone Availability zone for the server, or undef to \& choose an availability zone randomly. \& \& \-username Username to use for ssh connections. Defaults to \& "ubuntu". Note that this user must be able to use \& sudo on the instance without providing a password, \& or functionality of this server will be limited. .Ve .PP In addition, you may use any of the options recognized by \&\s-1VM::EC2\-\s0>\fIrun_instances()\fR (e.g. \-block_devices). .ie n .SS "$server = $manager\->get_server(\-name=>$name,%other_options)" .el .SS "\f(CW$server\fP = \f(CW$manager\fP\->get_server(\-name=>$name,%other_options)" .IX Subsection "$server = $manager->get_server(-name=>$name,%other_options)" .ie n .SS "$server = $manager\->get_server($name)" .el .SS "\f(CW$server\fP = \f(CW$manager\fP\->get_server($name)" .IX Subsection "$server = $manager->get_server($name)" Return an existing VM::EC2::Staging::Server object having the indicated symbolic name, or create a new server if one with this name does not already exist. The server's instance characteristics will be configured according to the options passed to the manager at create time (e.g. \-availability_zone, \-instance_type). These options can be overridden by \f(CW%other_options\fR. See \fIprovision_server()\fR for details. .ie n .SS "$server = $manager\->get_server_in_zone(\-zone=>$availability_zone,%other_options)" .el .SS "\f(CW$server\fP = \f(CW$manager\fP\->get_server_in_zone(\-zone=>$availability_zone,%other_options)" .IX Subsection "$server = $manager->get_server_in_zone(-zone=>$availability_zone,%other_options)" .ie n .SS "$server = $manager\->get_server_in_zone($availability_zone)" .el .SS "\f(CW$server\fP = \f(CW$manager\fP\->get_server_in_zone($availability_zone)" .IX Subsection "$server = $manager->get_server_in_zone($availability_zone)" Return an existing VM::EC2::Staging::Server running in the indicated availability zone, or create a new server in that zone if one does not already exist. The server's instance characteristics will be configured according to the options passed to the manager at create time (e.g. \-availability_zone, \-instance_type). These options can be overridden by \f(CW%other_options\fR. See \fIprovision_server()\fR for details. .ie n .SS "$server = $manager\->find_server_by_instance($instance_id)" .el .SS "\f(CW$server\fP = \f(CW$manager\fP\->find_server_by_instance($instance_id)" .IX Subsection "$server = $manager->find_server_by_instance($instance_id)" Given an \s-1EC2\s0 instanceId, return the corresponding VM::EC2::Staging::Server, if any.
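.PP
For example, a script that recorded an instance \s-1ID\s0 on an earlier run could recover the corresponding staging server, and fall back to \fIget_server()\fR if the instance is gone. This is a minimal sketch; the instance \s-1ID\s0 and server name are hypothetical:
.PP
.Vb 6
\& my $server = $manager\->find_server_by_instance(\*(Aqi\-12345678\*(Aq);  # hypothetical instance ID
\& unless ($server) {
\&     # fall back to fetching (or creating) a named staging server
\&     $server = $manager\->get_server(\-name => \*(Aqbuild_host\*(Aq);
\& }
\& $server\->ssh(\*(Aquptime\*(Aq);
.Ve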
.ie n .SS "@servers $manager\->servers" .el .SS "\f(CW@servers\fP \f(CW$manager\fP\->servers" .IX Subsection "@servers $manager->servers" Return all registered VM::EC2::Staging::Servers in the zone managed by the manager. .ie n .SS "$manager\->start_all_servers" .el .SS "\f(CW$manager\fP\->start_all_servers" .IX Subsection "$manager->start_all_servers" Start all VM::EC2::Staging::Servers that are currently in the \*(L"stop\*(R" state. .ie n .SS "$manager\->stop_all_servers" .el .SS "\f(CW$manager\fP\->stop_all_servers" .IX Subsection "$manager->stop_all_servers" Stop all VM::EC2::Staging::Servers that are currently in the \*(L"running\*(R" state. .ie n .SS "$manager\->terminate_all_servers" .el .SS "\f(CW$manager\fP\->terminate_all_servers" .IX Subsection "$manager->terminate_all_servers" Terminate all VM::EC2::Staging::Servers and unregister them. .ie n .SS "$manager\->force_terminate_all_servers" .el .SS "\f(CW$manager\fP\->force_terminate_all_servers" .IX Subsection "$manager->force_terminate_all_servers" Force termination of all VM::EC2::Staging::Servers, even if the internal registration system indicates that some may be in use by other Manager instances. .ie n .SS "$manager\->wait_for_servers(@servers)" .el .SS "\f(CW$manager\fP\->wait_for_servers(@servers)" .IX Subsection "$manager->wait_for_servers(@servers)" Wait until all the servers on the list \f(CW@servers\fR are up and able to accept ssh commands. You may wish to wrap this in an eval{} and timeout in order to avoid waiting indefinitely. .SH "Instance Methods for Managing Staging Volumes" .IX Header "Instance Methods for Managing Staging Volumes" These methods allow you to create and interrogate staging volumes. They each return one or more VM::EC2::Staging::Volume objects. See VM::EC2::Staging::Volume for more information about what you can do with these staging volume objects. .ie n .SS "$volume = $manager\->provision_volume(%options)" .el .SS "\f(CW$volume\fP = \f(CW$manager\fP\->provision_volume(%options)" .IX Subsection "$volume = $manager->provision_volume(%options)" Create and register a new VM::EC2::Staging::Volume and mount it on a staging server in the appropriate availability zone. A new staging server will be created for this purpose if one does not already exist. .PP If you provide a symbolic name for the volume and the manager has previously snapshotted a volume by the same name, then the snapshot will be used to create the volume (this behavior can be suppressed by passing \-reuse=>0). This allows for the following pattern for efficiently updating a snapshotted volume: .PP .Vb 5 \& my $vol = $manager\->provision_volume(\-name=>\*(AqMyPictures\*(Aq, \& \-size=>10); \& $vol\->put(\*(Aq/usr/local/my_pictures/\*(Aq); # will do an rsync from local directory \& $vol\->create_snapshot; # write out to a snapshot \& $vol\->delete; .Ve .PP You may also explicitly specify a volumeId or snapshotId. The former allows you to place an existing volume under management of VM::EC2::Staging::Manager and returns a corresponding staging volume object. The latter creates the staging volume from the indicated snapshot, irregardless of whether the snapshot was created by the staging manager at an earlier time. .PP Newly-created staging volumes are automatically formatted as ext4 filesystems and mounted on the staging server under /mnt/Staging/$name, where \f(CW$name\fR is the staging volume's symbolic name. The filesystem type and the mountpoint can be modified with the \&\-fstype and \-mount arguments, respectively. 
In addition, you may specify an \-fstype of \*(L"raw\*(R", in which case the volume will be attached to a staging server (creating the server first if necessary) but not formatted or mounted. This is useful when creating multi-volume \s-1RAID\s0 or \s-1LVM\s0 setups. .PP Options: .PP .Vb 4 \& \-name Name of the staging volume. A fatal error is issued if a staging \& volume by this name already exists (use get_volume() to \& avoid this). If no name is provided, then a random \& unique one is chosen for you. \& \& \-availability_zone \& Availability zone in which to create this \& volume. If none is specified, then a zone is chosen that \& reuses an existing staging server, if any. \& \& \-size Size of the desired volume, in GB. \& \& \-fstype Filesystem type for the volume, ext4 by default. Supported \& types are ext2, ext3, ext4, xfs, reiserfs, jfs, hfs, \& ntfs, vfat, msdos, and raw. \& \& \-mount Mount point for this volume on the staging server (e.g. /opt/bin). \& Use with care, as there are no checks to prevent you from mounting \& two staging volumes on top of each other or mounting over essential \& operating system paths. \& \& \-label Volume label. Only applies to filesystems that support labels \& (all except hfs, vfat, msdos and raw). \& \& \-volume_id Create the staging volume from an existing EBS volume with \& the specified ID. Most other options are ignored in this \& case. \& \& \-snapshot_id \& Create the staging volume from an existing EBS \& snapshot. If a size is specified that is larger than the \& snapshot, then the volume and its filesystem will be \& automatically extended (this only works for ext volumes \& at the moment). Shrinking of volumes is not currently \& supported. \& \& \-reuse If true, then the most recent snapshot created from a staging \& volume of the same name is used to create the \& volume. This is the default. Pass 0 to disable this \& behavior. .Ve .PP The \fB\-reuse\fR argument is intended to support the following use case in which you wish to rsync a directory on a host system somewhere to an \s-1EBS\s0 snapshot, without maintaining a live server and volume on \s-1EC2:\s0 .PP .Vb 7 \& my $volume = $manager\->provision_volume(\-name=>\*(Aqbackup_1\*(Aq, \& \-reuse => 1, \& \-fstype => \*(Aqext3\*(Aq, \& \-size => 10); \& $volume\->put(\*(Aqfred@gw.harvard.edu:my_music\*(Aq); \& $volume\->create_snapshot(\*(AqMusic Backup \*(Aq.localtime); \& $volume\->delete; .Ve .PP The next time this script is run, the \*(L"backup_1\*(R" volume will be recreated from the most recent snapshot, minimizing copying. A new snapshot is created, and the staging volume is deleted. .ie n .SS "$volume = $manager\->get_volume(\-name=>$name,%other_options)" .el .SS "\f(CW$volume\fP = \f(CW$manager\fP\->get_volume(\-name=>$name,%other_options)" .IX Subsection "$volume = $manager->get_volume(-name=>$name,%other_options)" .ie n .SS "$volume = $manager\->get_volume($name)" .el .SS "\f(CW$volume\fP = \f(CW$manager\fP\->get_volume($name)" .IX Subsection "$volume = $manager->get_volume($name)" Return an existing VM::EC2::Staging::Volume object with the indicated symbolic name, or else create a new volume if one with this name does not already exist. The volume's characteristics will be configured according to the options in \f(CW%other_options\fR. See \fIprovision_volume()\fR for details. If called with no arguments, this method returns a Volume object with default characteristics and a randomly-assigned name.
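.PP
As a minimal sketch of \fIget_volume()\fR (the volume name and options shown here are illustrative only), a script that needs the same named volume on every run might write:
.PP
.Vb 5
\& my $scratch = $manager\->get_volume(\-name => \*(Aqscratch\*(Aq,
\&                                    \-fstype => \*(Aqext4\*(Aq,
\&                                    \-size => 5)
\&     or die $manager\->error_str;
\& $scratch\->put(\*(Aq/var/spool/results/\*(Aq);  # rsync a local directory onto the volume
.Ve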
.ie n .SS "$result = $manager\->rsync($src1,$src2,$src3...,$dest)" .el .SS "\f(CW$result\fP = \f(CW$manager\fP\->rsync($src1,$src2,$src3...,$dest)" .IX Subsection "$result = $manager->rsync($src1,$src2,$src3...,$dest)" This method provides remote synchronization (rsync) file-level copying between one or more source locations and a destination location via an ssh tunnel. Copying among arbitrary combinations of local and remote filesystems is supported, with the caveat that the remote filesystems must be contained on volumes and servers managed by this module (see below for a workaround). .PP You may provide two or more directory paths. The last path will be treated as the copy destination, and the source paths will be treated as copy sources. All copying is performed using the \-avz options, which activates recursive directory copying in which ownership, modification times and permissions are preserved, and compresses the data to reduce network usage. Verbosity is set so that the names of copied files are printed to \s-1STDERR.\s0 If you do not wish this, then use call the manager's \fIquiet()\fR method with a true value. .PP Source paths can be formatted in one of several ways: .PP .Vb 6 \& /absolute/path \& Copy the contents of the directory /absolute/path located on the \& local machine to the destination. This will create a \& subdirectory named "path" on the destination disk. Add a slash \& to the end of the path (i.e. "/absolute/path/") in order to \& avoid creating this subdirectory on the destination disk. \& \& ./relative/path \& Relative paths work the way you expect, and depend on the current \& working directory. The terminating slash rule applies. \& \& $staging_volume \& Pass a VM::EC2::Staging::Volume to copy the contents of the \& volume to the destination disk starting at the root of the \& volume. Note that you do *not* need to have any knowledge of the \& mount point for this volume in order to copy its contents. \& \& $staging_volume:/absolute/path \& $staging_volume:absolute/path \& $staging_volume/absolute/path \& All these syntaxes accomplish the same thing, which is to \& copy a subdirectory of a staging volume to the destination disk. \& The root of the volume is its top level, regardless of where it \& is mounted on the staging server. Because of string \& interpolation magic, you can enclose staging volume object names \& in quotes in order to construct the path, as in \& "$picture_volume:/family/vacations/". As in local paths, a \& terminating slash indicates that the contents of the last \& directory in the path are to be copied without creating the \& enclosing directory on the desetination. Note that you do *not* \& need to have any knowledge of the mount point for this volume in \& order to copy its contents. \& \& $staging_server:/absolute/path \& Pass a staging server object and absolute path to copy the contents \& of this path to the destination disk. Because of string interpolation \& you can include server objects in quotes: "$my_server:/opt" \& \& $staging_server:relative/path \& This form will copy data from paths relative to the remote user\*(Aqs home \& directory on the staging server. Typically not very useful, but supported. .Ve .PP The same syntax is supported for destination paths, except that it makes no difference whether a path has a trailing slash or not. .PP As with the rsync command, if you proceed a path with a single colon (:/my/path), it is a short hand to use the previous server/volume/host in the source list. 
.PP When specifying multiple source directories, all source directories must reside on the same local or remote machine. This is legal: .PP .Vb 3 \& $manager\->rsync("$picture_volume:/family/vacations", \& "$picture_volume:/family/picnics" \& => "$backup_volume:/recent_backups"); .Ve .PP This is not: .PP .Vb 3 \& $manager\->rsync("$picture_volume:/family/vacations", \& "$audio_volume:/beethoven" \& => "$backup_volume:/recent_backups"); .Ve .PP When specifying multiple sources, you may give the volume or server once for the first source and then start additional source paths with a \*(L":\*(R" to indicate the same volume or server is to be used: .PP .Vb 3 \& $manager\->rsync("$picture_volume:/family/vacations", \& ":/family/picnics" \& => "$backup_volume:/recent_backups"); .Ve .PP When copying to/from the local machine, the rsync process will run as the user that the script was launched by. However, on remote servers managed by the staging manager, the rsync process will run as superuser. .PP The \fIrsync()\fR method will also accept regular remote \s-1DNS\s0 names and \s-1IP\s0 addresses, optionally preceded by a username: .PP .Vb 1 \& $manager\->rsync("$picture_volume:/family/vacations" => \*(Aqfred@gw.harvard.edu:/tmp\*(Aq) .Ve .PP When called in this way, the method does what it can to avoid prompting for a password or passphrase on the non-managed host (gw.harvard.edu in the above example). This includes turning off strict host checking and forwarding the user agent information from the local machine. .ie n .SS "$result = $manager\->rsync(\e@options,$src1,$src2,$src3...,$dest)" .el .SS "\f(CW$result\fP = \f(CW$manager\fP\->rsync(\e@options,$src1,$src2,$src3...,$dest)" .IX Subsection "$result = $manager->rsync(@options,$src1,$src2,$src3...,$dest)" This is a variant of the rsync command in which extra options can be passed to rsync by providing an array reference as the first argument. For example: .PP .Vb 3 \& $manager\->rsync([\*(Aq\-\-exclude\*(Aq => \*(Aq*~\*(Aq], \& \*(Aq/usr/local/backups\*(Aq, \& "$my_server:/usr/local"); .Ve .ie n .SS "$manager\->dd($source_vol=>$dest_vol)" .el .SS "\f(CW$manager\fP\->dd($source_vol=>$dest_vol)" .IX Subsection "$manager->dd($source_vol=>$dest_vol)" This method performs block-level copying of the contents of \&\f(CW$source_vol\fR to \f(CW$dest_vol\fR by using dd over an \s-1SSH\s0 tunnel, where both source and destination volumes are VM::EC2::Staging::Volume objects. The volumes must be attached to a server but not mounted. Everything in the volume, including its partition table, is copied, allowing you to make an exact image of a disk. .PP The volumes do \fBnot\fR actually need to reside on this server, but can be attached to any staging server in the zone. .ie n .SS "$volume = $manager\->find_volume_by_volid($volume_id)" .el .SS "\f(CW$volume\fP = \f(CW$manager\fP\->find_volume_by_volid($volume_id)" .IX Subsection "$volume = $manager->find_volume_by_volid($volume_id)" Given an \s-1EC2\s0 volumeId, return the corresponding VM::EC2::Staging::Volume, if any. .ie n .SS "$volume = $manager\->find_volume_by_name($name)" .el .SS "\f(CW$volume\fP = \f(CW$manager\fP\->find_volume_by_name($name)" .IX Subsection "$volume = $manager->find_volume_by_name($name)" Given a staging name (assigned at volume creation time), return the corresponding VM::EC2::Staging::Volume, if any. 
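.PP
As a minimal sketch (the volume names are hypothetical), the lookup methods above combine naturally with \fIdd()\fR to make an exact block\-level copy of a previously registered volume; as noted above, both volumes must be attached but not mounted:
.PP
.Vb 6
\& my $src = $manager\->find_volume_by_name(\*(Aqdatabase\*(Aq)
\&     or die "no such staging volume";
\& my $copy = $manager\->provision_volume(\-name => \*(Aqdatabase_copy\*(Aq,
\&                                       \-fstype => \*(Aqraw\*(Aq,
\&                                       \-size => $src\->size);
\& $manager\->dd($src => $copy);  # block\-level copy over an ssh tunnel
.Ve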
.ie n .SS "@volumes = $manager\->volumes" .el .SS "\f(CW@volumes\fP = \f(CW$manager\fP\->volumes" .IX Subsection "@volumes = $manager->volumes" Return all VM::EC2::Staging::Volumes managed in this zone. .SH "Instance Methods for Accessing Configuration Options" .IX Header "Instance Methods for Accessing Configuration Options" This section documents accessor methods that allow you to examine or change configuration options that were set at create time. Called with an argument, the accessor changes the option and returns the option's previous value. Called without an argument, the accessor returns the option's current value. .ie n .SS "$on_exit = $manager\->on_exit([$new_behavior])" .el .SS "\f(CW$on_exit\fP = \f(CW$manager\fP\->on_exit([$new_behavior])" .IX Subsection "$on_exit = $manager->on_exit([$new_behavior])" Get or set the \*(L"on_exit\*(R" option, which specifies what to do with existing staging servers when the staging manager is destroyed. Valid values are \*(L"terminate\*(R", \*(L"stop\*(R" and \*(L"run\*(R". .ie n .SS "$reuse_key = $manager\->reuse_key([$boolean])" .el .SS "\f(CW$reuse_key\fP = \f(CW$manager\fP\->reuse_key([$boolean])" .IX Subsection "$reuse_key = $manager->reuse_key([$boolean])" Get or set the \*(L"reuse_key\*(R" option, which if true uses the same internally-generated ssh keypair for all running instances. If false, then a new keypair will be created for each staging server. The keypair will be destroyed automatically when the staging server terminates (but only if the staging manager initiates the termination itself). .ie n .SS "$username = $manager\->username([$new_username])" .el .SS "\f(CW$username\fP = \f(CW$manager\fP\->username([$new_username])" .IX Subsection "$username = $manager->username([$new_username])" Get or set the username used to log into staging servers. .ie n .SS "$architecture = $manager\->architecture([$new_architecture])" .el .SS "\f(CW$architecture\fP = \f(CW$manager\fP\->architecture([$new_architecture])" .IX Subsection "$architecture = $manager->architecture([$new_architecture])" Get or set the architecture (i386, x86_64) to use for launching new staging servers. .ie n .SS "$root_type = $manager\->root_type([$new_type])" .el .SS "\f(CW$root_type\fP = \f(CW$manager\fP\->root_type([$new_type])" .IX Subsection "$root_type = $manager->root_type([$new_type])" Get or set the instance root type for new staging servers (\*(L"instance-store\*(R", \*(L"ebs\*(R"). .ie n .SS "$instance_type = $manager\->instance_type([$new_type])" .el .SS "\f(CW$instance_type\fP = \f(CW$manager\fP\->instance_type([$new_type])" .IX Subsection "$instance_type = $manager->instance_type([$new_type])" Get or set the instance type to use for new staging servers (e.g. \*(L"t1.micro\*(R"). I recommend that you use \*(L"m1.small\*(R" (the default) or larger instance types because of the extremely slow I/O of the micro instance. In addition, micro instances running Ubuntu have a known bug that prevents them from unmounting and remounting \s-1EBS\s0 volumes repeatedly on the same block device. This can lead to hangs when the staging manager tries to create volumes. 
.ie n .SS "$reuse_volumes = $manager\->reuse_volumes([$new_boolean])" .el .SS "\f(CW$reuse_volumes\fP = \f(CW$manager\fP\->reuse_volumes([$new_boolean])" .IX Subsection "$reuse_volumes = $manager->reuse_volumes([$new_boolean])" This gets or sets the \*(L"reuse_volumes\*(R" option, which if true causes the \&\fIprovision_volumes()\fR call to create staging volumes from existing \s-1EBS\s0 volumes and snapshots that share the same staging manager symbolic name. See the discussion under \s-1VM::EC2\-\s0>\fIstaging_manager()\fR, and VM::EC2::Staging::Manager\->\fIprovision_volume()\fR. .ie n .SS "$name = $manager\->image_name([$new_name])" .el .SS "\f(CW$name\fP = \f(CW$manager\fP\->image_name([$new_name])" .IX Subsection "$name = $manager->image_name([$new_name])" This gets or sets the \*(L"image_name\*(R" option, which is the \s-1AMI ID\s0 or \s-1AMI\s0 name to use when creating new staging servers. Names beginning with \&\*(L"ami\-\*(R" are treated as \s-1AMI\s0 IDs, and everything else is treated as a pattern match on the \s-1AMI\s0 name. .ie n .SS "$zone = $manager\->availability_zone([$new_zone])" .el .SS "\f(CW$zone\fP = \f(CW$manager\fP\->availability_zone([$new_zone])" .IX Subsection "$zone = $manager->availability_zone([$new_zone])" Get or set the default availability zone to use when creating new servers and volumes. An undef value allows the staging manager to choose the zone in a way that minimizes resources. .ie n .SS "$class_name = $manager\->volume_class([$new_class])" .el .SS "\f(CW$class_name\fP = \f(CW$manager\fP\->volume_class([$new_class])" .IX Subsection "$class_name = $manager->volume_class([$new_class])" Get or set the name of the perl package that implements staging volumes, VM::EC2::Staging::Volume by default. Staging volumes created by the manager will have this class type. .ie n .SS "$class_name = $manager\->server_class([$new_class])" .el .SS "\f(CW$class_name\fP = \f(CW$manager\fP\->server_class([$new_class])" .IX Subsection "$class_name = $manager->server_class([$new_class])" Get or set the name of the perl package that implements staging servers, VM::EC2::Staging::Server by default. Staging servers created by the manager will have this class type. .ie n .SS "$boolean = $manager\->scan([$boolean])" .el .SS "\f(CW$boolean\fP = \f(CW$manager\fP\->scan([$boolean])" .IX Subsection "$boolean = $manager->scan([$boolean])" Get or set the \*(L"scan\*(R" flag, which if true will cause the zone to be scanned quickly for existing managed servers and volumes when the manager is first created. .ie n .SS "$path = $manager\->dot_directory([$new_directory])" .el .SS "\f(CW$path\fP = \f(CW$manager\fP\->dot_directory([$new_directory])" .IX Subsection "$path = $manager->dot_directory([$new_directory])" Get or set the dot directory which holds private key files. .SH "Internal Methods" .IX Header "Internal Methods" This section documents internal methods that are not normally called by end-user scripts but may be useful in subclasses. In addition, there are a number of undocumented internal methods that begin with the \*(L"_\*(R" character. Explore the source code to learn about these. .ie n .SS "$ok = $manager\->environment_ok" .el .SS "\f(CW$ok\fP = \f(CW$manager\fP\->environment_ok" .IX Subsection "$ok = $manager->environment_ok" This performs a check on the environment in which the module is running. For this module to work properly, the ssh, rsync and dd programs must be found in the \s-1PATH.\s0 If all three programs are found, then this method returns true. 
.PP This method can be called as an instance method or class method. .ie n .SS "$name = $manager\->default_verbosity" .el .SS "\f(CW$name\fP = \f(CW$manager\fP\->default_verbosity" .IX Subsection "$name = $manager->default_verbosity" Returns the default verbosity level (2: warning+informational messages). This is overridden using \-verbose at create time. .ie n .SS "$name = $manager\->default_exit_behavior" .el .SS "\f(CW$name\fP = \f(CW$manager\fP\->default_exit_behavior" .IX Subsection "$name = $manager->default_exit_behavior" Return the default exit behavior (\*(L"stop\*(R") when the manager terminates. Intended to be overridden in subclasses. .ie n .SS "$name = $manager\->default_image_name" .el .SS "\f(CW$name\fP = \f(CW$manager\fP\->default_image_name" .IX Subsection "$name = $manager->default_image_name" Return the default image name ('ubuntu\-precise\-12.04') for use in creating new instances. Intended to be overridden in subclasses. .ie n .SS "$name = $manager\->default_user_name" .el .SS "\f(CW$name\fP = \f(CW$manager\fP\->default_user_name" .IX Subsection "$name = $manager->default_user_name" Return the default user name ('ubuntu') for use in creating new instances. Intended to be overridden in subclasses. .ie n .SS "$name = $manager\->default_architecture" .el .SS "\f(CW$name\fP = \f(CW$manager\fP\->default_architecture" .IX Subsection "$name = $manager->default_architecture" Return the default instance architecture ('i386') for use in creating new instances. Intended to be overridden in subclasses. .ie n .SS "$name = $manager\->default_root_type" .el .SS "\f(CW$name\fP = \f(CW$manager\fP\->default_root_type" .IX Subsection "$name = $manager->default_root_type" Return the default instance root type ('instance\-store') for use in creating new instances. Intended to be overridden in subclasses. Note that this value is ignored if the exit behavior is \*(L"stop\*(R", in which case an ebs-backed instance will be used. Also, the t1.micro instance type does not come in an instance-store form, so ebs will be used in this case as well. .ie n .SS "$name = $manager\->default_instance_type" .el .SS "\f(CW$name\fP = \f(CW$manager\fP\->default_instance_type" .IX Subsection "$name = $manager->default_instance_type" Return the default instance type ('m1.small') for use in creating new instances. Intended to be overridden in subclasses. We default to m1.small rather than a micro instance because the I/O in m1.small is far faster than in t1.micro. .ie n .SS "$name = $manager\->default_reuse_keys" .el .SS "\f(CW$name\fP = \f(CW$manager\fP\->default_reuse_keys" .IX Subsection "$name = $manager->default_reuse_keys" Return the default value of the \-reuse_keys argument ('true'). This value allows the manager to create an ssh keypair once, and use the same one for all servers it creates over time. If false, then a new keypair is created for each server and then discarded when the server terminates. .ie n .SS "$name = $manager\->default_reuse_volumes" .el .SS "\f(CW$name\fP = \f(CW$manager\fP\->default_reuse_volumes" .IX Subsection "$name = $manager->default_reuse_volumes" Return the default value of the \-reuse_volumes argument ('true'). This value instructs the manager to use the symbolic name of the volume to return an existing volume whenever a request is made to provision a new one of the same name.
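.PP
Because these defaults are all simple methods, a subclass can change the manager's behavior just by overriding them. A minimal sketch (the package name is hypothetical):
.PP
.Vb 6
\& package My::Staging::Manager;
\& use base \*(AqVM::EC2::Staging::Manager\*(Aq;
\&
\& sub default_instance_type { \*(Aqm1.large\*(Aq }   # larger staging servers
\& sub default_architecture  { \*(Aqx86_64\*(Aq }
\& sub default_exit_behavior { \*(Aqterminate\*(Aq }
.Ve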
.ie n .SS "$path = $manager\->default_dot_directory_path" .el .SS "\f(CW$path\fP = \f(CW$manager\fP\->default_dot_directory_path" .IX Subsection "$path = $manager->default_dot_directory_path" Return the default value of the \-dotdir argument (\*(L"$ENV{\s-1HOME\s0}/.vm\-ec2\-staging\*(R"). This value instructs the manager to use the symbolic name of the volume to return an existing volume whenever a request is made to provision a new one of the same name. .ie n .SS "$class_name = $manager\->default_volume_class" .el .SS "\f(CW$class_name\fP = \f(CW$manager\fP\->default_volume_class" .IX Subsection "$class_name = $manager->default_volume_class" Return the class name for staging volumes created by the manager, VM::EC2::Staging::Volume by default. If you wish a subclass of VM::EC2::Staging::Manager to create a different type of volume, override this method. .ie n .SS "$class_name = $manager\->default_server_class" .el .SS "\f(CW$class_name\fP = \f(CW$manager\fP\->default_server_class" .IX Subsection "$class_name = $manager->default_server_class" Return the class name for staging servers created by the manager, VM::EC2::Staging::Server by default. If you wish a subclass of VM::EC2::Staging::Manager to create a different type of volume, override this method. .ie n .SS "$server = $manager\->register_server($server)" .el .SS "\f(CW$server\fP = \f(CW$manager\fP\->register_server($server)" .IX Subsection "$server = $manager->register_server($server)" Register a VM::EC2::Staging::Server object. Usually called internally. .ie n .SS "$manager\->unregister_server($server)" .el .SS "\f(CW$manager\fP\->unregister_server($server)" .IX Subsection "$manager->unregister_server($server)" Forget about the existence of VM::EC2::Staging::Server. Usually called internally. .ie n .SS "$manager\->register_volume($volume)" .el .SS "\f(CW$manager\fP\->register_volume($volume)" .IX Subsection "$manager->register_volume($volume)" Register a VM::EC2::Staging::Volume object. Usually called internally. .ie n .SS "$manager\->unregister_volume($volume)" .el .SS "\f(CW$manager\fP\->unregister_volume($volume)" .IX Subsection "$manager->unregister_volume($volume)" Forget about a VM::EC2::Staging::Volume object. Usually called internally. .ie n .SS "$pid = $manager\->pid([$new_pid])" .el .SS "\f(CW$pid\fP = \f(CW$manager\fP\->pid([$new_pid])" .IX Subsection "$pid = $manager->pid([$new_pid])" Get or set the process \s-1ID\s0 of the script that is running the manager. This is used internally to detect the case in which the script has forked, in which case we do not want to invoke the manager class's destructor in the child process (because it may stop or terminate servers still in use by the parent process). .ie n .SS "$path = $manager\->dotdir([$new_dotdir])" .el .SS "\f(CW$path\fP = \f(CW$manager\fP\->dotdir([$new_dotdir])" .IX Subsection "$path = $manager->dotdir([$new_dotdir])" Low-level version of \fIdot_directory()\fR, differing only in the fact that dot_directory will automatically create the path, including subdirectories. .ie n .SS "$manager\->scan_region" .el .SS "\f(CW$manager\fP\->scan_region" .IX Subsection "$manager->scan_region" Synchronize internal list of managed servers and volumes with the \s-1EC2\s0 region. Called automatically during \fInew()\fR and needed only if servers & volumes are changed from outside the module while it is running. 
.ie n .SS "$group = $manager\->security_group" .el .SS "\f(CW$group\fP = \f(CW$manager\fP\->security_group" .IX Subsection "$group = $manager->security_group" Returns or creates a security group with the permissions needed used to manage staging servers. Usually called internally. .ie n .SS "$keypair = $manager\->keypair" .el .SS "\f(CW$keypair\fP = \f(CW$manager\fP\->keypair" .IX Subsection "$keypair = $manager->keypair" Returns or creates the ssh keypair used internally by the manager to to access staging servers. Usually called internally. .ie n .SS "$name = $manager\->new_volume_name" .el .SS "\f(CW$name\fP = \f(CW$manager\fP\->new_volume_name" .IX Subsection "$name = $manager->new_volume_name" Returns a new random name for volumes provisioned without a \-name argument. Currently names are in of the format \*(L"volume\-12345678\*(R", where the numeric part are 8 random hex digits. Although no attempt is made to prevent naming collisions, the large number of possible names makes this unlikely. .ie n .SS "$name = $manager\->new_server_name" .el .SS "\f(CW$name\fP = \f(CW$manager\fP\->new_server_name" .IX Subsection "$name = $manager->new_server_name" Returns a new random name for server provisioned without a \-name argument. Currently names are in of the format \*(L"server\-12345678\*(R", where the numeric part are 8 random hex digits. Although no attempt is made to prevent naming collisions, the large number of possible names makes this unlikely. .ie n .SS "$description = $manager\->volume_description($volume)" .el .SS "\f(CW$description\fP = \f(CW$manager\fP\->volume_description($volume)" .IX Subsection "$description = $manager->volume_description($volume)" This method is called to assign a description to newly-created volumes. The current format is \*(L"Staging volume for Foo created by VM::EC2::Staging::Manager\*(R", where Foo is the volume's symbolic name. .ie n .SS "$manager\->debug(""Debugging message\en"")" .el .SS "\f(CW$manager\fP\->debug(``Debugging message\en'')" .IX Subsection "$manager->debug(Debugging messagen)" .ie n .SS "$manager\->info(""Informational message\en"")" .el .SS "\f(CW$manager\fP\->info(``Informational message\en'')" .IX Subsection "$manager->info(Informational messagen)" .ie n .SS "$manager\->warn(""Warning message\en"")" .el .SS "\f(CW$manager\fP\->warn(``Warning message\en'')" .IX Subsection "$manager->warn(Warning messagen)" Prints an informational message to standard error if current \&\fIverbosity()\fR level allows. .ie n .SS "$verbosity = $manager\->verbosity([$new_value])" .el .SS "\f(CW$verbosity\fP = \f(CW$manager\fP\->verbosity([$new_value])" .IX Subsection "$verbosity = $manager->verbosity([$new_value])" The \fIverbosity()\fR method get/sets a flag that sets the level of informational messages. .SH "SEE ALSO" .IX Header "SEE ALSO" \&\s-1VM::EC2\s0 VM::EC2::Staging::Server VM::EC2::Staging::Volume migrate\-ebs\-image.pl .SH "AUTHOR" .IX Header "AUTHOR" Lincoln Stein . .PP Copyright (c) 2012 Ontario Institute for Cancer Research .PP This package and its accompanying libraries is free software; you can redistribute it and/or modify it under the terms of the \s-1GPL \s0(either version 1, or at your option, any later version) or the Artistic License 2.0. Refer to \s-1LICENSE\s0 for the full license text. In addition, please see \s-1DISCLAIMER\s0.txt for disclaimers of warranty.