.\" Automatically generated by Pod::Man 2.28 (Pod::Simple 3.28) .\" .\" Standard preamble: .\" ======================================================================== .de Sp \" Vertical space (when we can't use .PP) .if t .sp .5v .if n .sp .. .de Vb \" Begin verbatim text .ft CW .nf .ne \\$1 .. .de Ve \" End verbatim text .ft R .fi .. .\" Set up some character translations and predefined strings. \*(-- will .\" give an unbreakable dash, \*(PI will give pi, \*(L" will give a left .\" double quote, and \*(R" will give a right double quote. \*(C+ will .\" give a nicer C++. Capital omega is used to do unbreakable dashes and .\" therefore won't be available. \*(C` and \*(C' expand to `' in nroff, .\" nothing in troff, for use with C<>. .tr \(*W- .ds C+ C\v'-.1v'\h'-1p'\s-2+\h'-1p'+\s0\v'.1v'\h'-1p' .ie n \{\ . ds -- \(*W- . ds PI pi . if (\n(.H=4u)&(1m=24u) .ds -- \(*W\h'-12u'\(*W\h'-12u'-\" diablo 10 pitch . if (\n(.H=4u)&(1m=20u) .ds -- \(*W\h'-12u'\(*W\h'-8u'-\" diablo 12 pitch . ds L" "" . ds R" "" . ds C` "" . ds C' "" 'br\} .el\{\ . ds -- \|\(em\| . ds PI \(*p . ds L" `` . ds R" '' . ds C` . ds C' 'br\} .\" .\" Escape single quotes in literal strings from groff's Unicode transform. .ie \n(.g .ds Aq \(aq .el .ds Aq ' .\" .\" If the F register is turned on, we'll generate index entries on stderr for .\" titles (.TH), headers (.SH), subsections (.SS), items (.Ip), and index .\" entries marked with X<> in POD. Of course, you'll have to process the .\" output yourself in some meaningful fashion. .\" .\" Avoid warning from groff about undefined register 'F'. .de IX .. .nr rF 0 .if \n(.g .if rF .nr rF 1 .if (\n(rF:(\n(.g==0)) \{ . if \nF \{ . de IX . tm Index:\\$1\t\\n%\t"\\$2" .. . if !\nF==2 \{ . nr % 0 . nr F 2 . \} . \} .\} .rr rF .\" .\" Accent mark definitions (@(#)ms.acc 1.5 88/02/08 SMI; from UCB 4.2). .\" Fear. Run. Save yourself. No user-serviceable parts. . \" fudge factors for nroff and troff .if n \{\ . ds #H 0 . ds #V .8m . ds #F .3m . ds #[ \f1 . 
ds #] \fP .\} .if t \{\ . ds #H ((1u-(\\\\n(.fu%2u))*.13m) . ds #V .6m . ds #F 0 . ds #[ \& . ds #] \& .\} . \" simple accents for nroff and troff .if n \{\ . ds ' \& . ds ` \& . ds ^ \& . ds , \& . ds ~ ~ . ds / .\} .if t \{\ . ds ' \\k:\h'-(\\n(.wu*8/10-\*(#H)'\'\h"|\\n:u" . ds ` \\k:\h'-(\\n(.wu*8/10-\*(#H)'\`\h'|\\n:u' . ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'^\h'|\\n:u' . ds , \\k:\h'-(\\n(.wu*8/10)',\h'|\\n:u' . ds ~ \\k:\h'-(\\n(.wu-\*(#H-.1m)'~\h'|\\n:u' . ds / \\k:\h'-(\\n(.wu*8/10-\*(#H)'\z\(sl\h'|\\n:u' .\} . \" troff and (daisy-wheel) nroff accents .ds : \\k:\h'-(\\n(.wu*8/10-\*(#H+.1m+\*(#F)'\v'-\*(#V'\z.\h'.2m+\*(#F'.\h'|\\n:u'\v'\*(#V' .ds 8 \h'\*(#H'\(*b\h'-\*(#H' .ds o \\k:\h'-(\\n(.wu+\w'\(de'u-\*(#H)/2u'\v'-.3n'\*(#[\z\(de\v'.3n'\h'|\\n:u'\*(#] .ds d- \h'\*(#H'\(pd\h'-\w'~'u'\v'-.25m'\f2\(hy\fP\v'.25m'\h'-\*(#H' .ds D- D\\k:\h'-\w'D'u'\v'-.11m'\z\(hy\v'.11m'\h'|\\n:u' .ds th \*(#[\v'.3m'\s+1I\s-1\v'-.3m'\h'-(\w'I'u*2/3)'\s-1o\s+1\*(#] .ds Th \*(#[\s+2I\s-2\h'-\w'I'u*3/5'\v'-.3m'o\v'.3m'\*(#] .ds ae a\h'-(\w'a'u*4/10)'e .ds Ae A\h'-(\w'A'u*4/10)'E . \" corrections for vroff .if v .ds ~ \\k:\h'-(\\n(.wu*9/10-\*(#H)'\s-2\u~\d\s+2\h'|\\n:u' .if v .ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'\v'-.4m'^\v'.4m'\h'|\\n:u' . \" for low resolution devices (crt and lpr) .if \n(.H>23 .if \n(.V>19 \ \{\ . ds : e . ds 8 ss . ds o a . ds d- d\h'-1'\(ga . ds D- D\h'-1'\(hy . ds th \o'bp' . ds Th \o'LP' . ds ae ae . ds Ae AE .\} .rm #[ #] #H #V #F C .\" ======================================================================== .\" .IX Title "Web::Scraper 3pm" .TH Web::Scraper 3pm "2014-10-22" "perl v5.20.1" "User Contributed Perl Documentation" .\" For nroff, turn off justification. Always turn off hyphenation; it makes .\" way too many mistakes in technical documents. 
.if n .ad l .nh .SH "NAME" Web::Scraper \- Web Scraping Toolkit using HTML and CSS Selectors or XPath expressions .SH "SYNOPSIS" .IX Header "SYNOPSIS" .Vb 3 \& use URI; \& use Web::Scraper; \& use Encode; \& \& # First, create your scraper block \& my $authors = scraper { \& # Parse all TDs inside \*(Aqtable[width="100%"]\*(Aq, store them into \& # an array \*(Aqauthors\*(Aq. We embed other scrapers for each TD. \& process \*(Aqtable[width="100%"] td\*(Aq, "authors[]" => scraper { \& # And, in each TD, \& # get the URI of "a" element \& process "a", uri => \*(Aq@href\*(Aq; \& # get text inside "small" element \& process "small", fullname => \*(AqTEXT\*(Aq; \& }; \& }; \& \& my $res = $authors\->scrape( URI\->new("http://search.cpan.org/author/?A") ); \& \& # iterate the array \*(Aqauthors\*(Aq \& for my $author (@{$res\->{authors}}) { \& # output is like: \& # Andy Adler http://search.cpan.org/~aadler/ \& # Aaron K Dancygier http://search.cpan.org/~aakd/ \& # Aamer Akhter http://search.cpan.org/~aakhter/ \& print Encode::encode("utf8", "$author\->{fullname}\et$author\->{uri}\en"); \& } .Ve .PP The structure would resemble this (visually) { authors => [ { fullname => \f(CW$fullname\fR, uri => \f(CW$uri\fR }, { fullname => \f(CW$fullname\fR, uri => \f(CW$uri\fR }, ] } .SH "DESCRIPTION" .IX Header "DESCRIPTION" Web::Scraper is a web scraper toolkit, inspired by Ruby's equivalent Scrapi. It provides a DSL-ish interface for traversing \s-1HTML\s0 documents and returning a neatly arranged Perl data structure. .PP The \fIscraper\fR and \fIprocess\fR blocks provide a method to define what segments of a document to extract. It understands \s-1HTML\s0 and \s-1CSS\s0 Selectors as well as XPath expressions. .SH "METHODS" .IX Header "METHODS" .SS "scraper" .IX Subsection "scraper" .Vb 1 \& $scraper = scraper { ... }; .Ve .PP Creates a new Web::Scraper object by wrapping the \s-1DSL\s0 code that will be fired when the \fIscrape\fR method is called. 
.SS "scrape" .IX Subsection "scrape" .Vb 5 \& $res = $scraper\->scrape(URI\->new($uri)); \& $res = $scraper\->scrape($html_content); \& $res = $scraper\->scrape(\e$html_content); \& $res = $scraper\->scrape($http_response); \& $res = $scraper\->scrape($html_element); .Ve .PP Retrieves the \s-1HTML\s0 from \s-1URI,\s0 HTTP::Response, HTML::Tree or text strings and creates a \s-1DOM\s0 object, then fires the callback scraper code to retrieve the data structure. .PP If you pass a \s-1URI\s0 or HTTP::Response object, Web::Scraper will automatically guess the encoding of the content by looking at Content-Type headers and \s-1META\s0 tags. Otherwise you need to decode the \&\s-1HTML\s0 to Unicode before passing it to the \fIscrape\fR method. .PP You can optionally pass the base \s-1URL\s0 when you pass the \s-1HTML\s0 content as a string instead of \s-1URI\s0 or HTTP::Response. .PP .Vb 1 \& $res = $scraper\->scrape($html_content, "http://example.com/foo"); .Ve .PP This way Web::Scraper can resolve the relative links found in the document. .SS "process" .IX Subsection "process" .Vb 5 \& scraper { \& process "tag.class", key => \*(AqTEXT\*(Aq; \& process \*(Aq//tag[contains(@foo, "bar")]\*(Aq, key2 => \*(Aq@attr\*(Aq; \& process \*(Aq//comment()\*(Aq, \*(Aqcomments[]\*(Aq => \*(AqTEXT\*(Aq; \& }; .Ve .PP \&\fIprocess\fR is the method to find matching elements from \s-1HTML\s0 with a \s-1CSS\s0 selector or XPath expression, then extract text or attributes into the result stash. .PP If the first argument begins with \*(L"//\*(R" or \*(L"id(\*(R" it's treated as an XPath expression and otherwise as a \s-1CSS\s0 selector. .PP .Vb 3 \& # 2008/12/21 \& # date => "2008/12/21" \& process ".date", date => \*(AqTEXT\*(Aq; \& \& #
\& # link => URI\->new("http://example.com/") \& process ".body > a", link => \*(Aq@href\*(Aq; \& \& # \& # comment => " HTML Comment here " \& # \& # NOTES: Comment nodes are accessible when \& # HTML::TreeBuilder::XPath (version >= 0.14) and/or \& # HTML::TreeBuilder::LibXML (version >= 0.13) is installed \& process "//div[contains(@class, \*(Aqbody\*(Aq)]/comment()", comment => \*(AqTEXT\*(Aq; \& \& # \& # link => URI\->new("http://example.com/"), text => "foo" \& process ".body > a", link => \*(Aq@href\*(Aq, text => \*(AqTEXT\*(Aq; \& \& #