.\" Automatically generated by Pod::Man 2.25 (Pod::Simple 3.16) .\" .\" Standard preamble: .\" ======================================================================== .de Sp \" Vertical space (when we can't use .PP) .if t .sp .5v .if n .sp .. .de Vb \" Begin verbatim text .ft CW .nf .ne \\$1 .. .de Ve \" End verbatim text .ft R .fi .. .\" Set up some character translations and predefined strings. \*(-- will .\" give an unbreakable dash, \*(PI will give pi, \*(L" will give a left .\" double quote, and \*(R" will give a right double quote. \*(C+ will .\" give a nicer C++. Capital omega is used to do unbreakable dashes and .\" therefore won't be available. \*(C` and \*(C' expand to `' in nroff, .\" nothing in troff, for use with C<>. .tr \(*W- .ds C+ C\v'-.1v'\h'-1p'\s-2+\h'-1p'+\s0\v'.1v'\h'-1p' .ie n \{\ . ds -- \(*W- . ds PI pi . if (\n(.H=4u)&(1m=24u) .ds -- \(*W\h'-12u'\(*W\h'-12u'-\" diablo 10 pitch . if (\n(.H=4u)&(1m=20u) .ds -- \(*W\h'-12u'\(*W\h'-8u'-\" diablo 12 pitch . ds L" "" . ds R" "" . ds C` "" . ds C' "" 'br\} .el\{\ . ds -- \|\(em\| . ds PI \(*p . ds L" `` . ds R" '' 'br\} .\" .\" Escape single quotes in literal strings from groff's Unicode transform. .ie \n(.g .ds Aq \(aq .el .ds Aq ' .\" .\" If the F register is turned on, we'll generate index entries on stderr for .\" titles (.TH), headers (.SH), subsections (.SS), items (.Ip), and index .\" entries marked with X<> in POD. Of course, you'll have to process the .\" output yourself in some meaningful fashion. .ie \nF \{\ . de IX . tm Index:\\$1\t\\n%\t"\\$2" .. . nr % 0 . rr F .\} .el \{\ . de IX .. .\} .\" .\" Accent mark definitions (@(#)ms.acc 1.5 88/02/08 SMI; from UCB 4.2). .\" Fear. Run. Save yourself. No user-serviceable parts. . \" fudge factors for nroff and troff .if n \{\ . ds #H 0 . ds #V .8m . ds #F .3m . ds #[ \f1 . ds #] \fP .\} .if t \{\ . ds #H ((1u-(\\\\n(.fu%2u))*.13m) . ds #V .6m . ds #F 0 . ds #[ \& . ds #] \& .\} . \" simple accents for nroff and troff .if n \{\ . ds ' \& . ds ` \& . ds ^ \& . ds , \& . ds ~ ~ . ds / .\} .if t \{\ . ds ' \\k:\h'-(\\n(.wu*8/10-\*(#H)'\'\h"|\\n:u" . ds ` \\k:\h'-(\\n(.wu*8/10-\*(#H)'\`\h'|\\n:u' . ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'^\h'|\\n:u' . ds , \\k:\h'-(\\n(.wu*8/10)',\h'|\\n:u' . ds ~ \\k:\h'-(\\n(.wu-\*(#H-.1m)'~\h'|\\n:u' . ds / \\k:\h'-(\\n(.wu*8/10-\*(#H)'\z\(sl\h'|\\n:u' .\} . \" troff and (daisy-wheel) nroff accents .ds : \\k:\h'-(\\n(.wu*8/10-\*(#H+.1m+\*(#F)'\v'-\*(#V'\z.\h'.2m+\*(#F'.\h'|\\n:u'\v'\*(#V' .ds 8 \h'\*(#H'\(*b\h'-\*(#H' .ds o \\k:\h'-(\\n(.wu+\w'\(de'u-\*(#H)/2u'\v'-.3n'\*(#[\z\(de\v'.3n'\h'|\\n:u'\*(#] .ds d- \h'\*(#H'\(pd\h'-\w'~'u'\v'-.25m'\f2\(hy\fP\v'.25m'\h'-\*(#H' .ds D- D\\k:\h'-\w'D'u'\v'-.11m'\z\(hy\v'.11m'\h'|\\n:u' .ds th \*(#[\v'.3m'\s+1I\s-1\v'-.3m'\h'-(\w'I'u*2/3)'\s-1o\s+1\*(#] .ds Th \*(#[\s+2I\s-2\h'-\w'I'u*3/5'\v'-.3m'o\v'.3m'\*(#] .ds ae a\h'-(\w'a'u*4/10)'e .ds Ae A\h'-(\w'A'u*4/10)'E . \" corrections for vroff .if v .ds ~ \\k:\h'-(\\n(.wu*9/10-\*(#H)'\s-2\u~\d\s+2\h'|\\n:u' .if v .ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'\v'-.4m'^\v'.4m'\h'|\\n:u' . \" for low resolution devices (crt and lpr) .if \n(.H>23 .if \n(.V>19 \ \{\ . ds : e . ds 8 ss . ds o a . ds d- d\h'-1'\(ga . ds D- D\h'-1'\(hy . ds th \o'bp' . ds Th \o'LP' . ds ae ae . ds Ae AE .\} .rm #[ #] #H #V #F C .\" ======================================================================== .\" .IX Title "Web::Scraper 3pm" .TH Web::Scraper 3pm "2011-11-19" "perl v5.14.2" "User Contributed Perl Documentation" .\" For nroff, turn off justification. 
.\" way too many mistakes in technical documents.
.if n .ad l
.nh
.SH "NAME"
Web::Scraper \- Web Scraping Toolkit using HTML and CSS Selectors or XPath
expressions
.SH "SYNOPSIS"
.IX Header "SYNOPSIS"
.Vb 2
\& use URI;
\& use Web::Scraper;
\&
\& # First, create your scraper block
\& my $tweets = scraper {
\&     # Parse all LIs with the class "status", store them into a resulting
\&     # array \*(Aqtweets\*(Aq. We embed another scraper for each tweet.
\&     process "li.status", "tweets[]" => scraper {
\&         # And, in that array, pull in the elements with the classes
\&         # "entry\-content" and "entry\-date", and the link
\&         process ".entry\-content", body => \*(AqTEXT\*(Aq;
\&         process ".entry\-date", when => \*(AqTEXT\*(Aq;
\&         process \*(Aqa[rel="bookmark"]\*(Aq, link => \*(Aq@href\*(Aq;
\&     };
\& };
\&
\& my $res = $tweets\->scrape( URI\->new("http://twitter.com/miyagawa") );
\&
\& # The result has the populated tweets array
\& for my $tweet (@{$res\->{tweets}}) {
\&     print "$tweet\->{body} $tweet\->{when} (link: $tweet\->{link})\en";
\& }
.Ve
.PP
The structure would resemble this (visually):
.PP
.Vb 4
\& { tweets => [
\&     { body => $body, when => $date, link => $uri },
\&     { body => $body, when => $date, link => $uri },
\& ] }
.Ve
.SH "DESCRIPTION"
.IX Header "DESCRIPTION"
Web::Scraper is a web scraper toolkit, inspired by Ruby's scrAPI. It provides
a DSL-ish interface for traversing \s-1HTML\s0 documents and returning a
neatly arranged Perl data structure.
.PP
The \fIscraper\fR and \fIprocess\fR blocks provide a way to define which
segments of a document to extract. It understands \s-1HTML\s0 and \s-1CSS\s0
Selectors as well as XPath expressions.
.SH "METHODS"
.IX Header "METHODS"
.SS "scraper"
.IX Subsection "scraper"
.Vb 1
\& $scraper = scraper { ... };
.Ve
.PP
Creates a new Web::Scraper object by wrapping the \s-1DSL\s0 code that will be
fired when the \fIscrape\fR method is called.
.SS "scrape"
.IX Subsection "scrape"
.Vb 5
\& $res = $scraper\->scrape(URI\->new($uri));
\& $res = $scraper\->scrape($html_content);
\& $res = $scraper\->scrape(\e$html_content);
\& $res = $scraper\->scrape($http_response);
\& $res = $scraper\->scrape($html_element);
.Ve
.PP
Retrieves the \s-1HTML\s0 from a \s-1URI\s0, an HTTP::Response, an HTML::Tree
object or a text string, creates a \s-1DOM\s0 object, and then fires the
callback scraper code to build and return the data structure.
.PP
If you pass a \s-1URI\s0 or an HTTP::Response object, Web::Scraper
automatically guesses the encoding of the content by looking at the
Content-Type header and \s-1META\s0 tags. Otherwise you need to decode the
\&\s-1HTML\s0 to Unicode before passing it to the \fIscrape\fR method.
.PP
You can optionally pass the base \s-1URL\s0 when you pass the \s-1HTML\s0
content as a string instead of a \s-1URI\s0 or HTTP::Response object.
.PP
.Vb 1
\& $res = $scraper\->scrape($html_content, "http://example.com/foo");
.Ve
.PP
This way Web::Scraper can resolve the relative links found in the document.
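.PP
For example, if the \s-1HTML\s0 arrives as raw bytes (say, read from a file),
one way to prepare it is to decode it to Unicode yourself and pass the base
\&\s-1URL\s0 along. This is only an illustrative sketch, assuming
\&\f(CW$scraper\fR was built with \fIscraper\fR as above; the file name,
charset and \s-1URL\s0 below are made up:
.PP
.Vb 11
\& use Encode;
\& use Web::Scraper;
\&
\& open my $fh, "<", "page.html" or die $!;
\& my $bytes = do { local $/; <$fh> };
\&
\& # decode the raw bytes to a Unicode string first
\& my $html = decode("utf\-8", $bytes);
\&
\& # pass the base URL so relative @href results become absolute URIs
\& my $res = $scraper\->scrape($html, "http://example.com/foo");
.Ve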
.SS "process"
.IX Subsection "process"
.Vb 5
\& scraper {
\&     process "tag.class", key => \*(AqTEXT\*(Aq;
\&     process \*(Aq//tag[contains(@foo, "bar")]\*(Aq, key2 => \*(Aq@attr\*(Aq;
\&     process \*(Aq//comment()\*(Aq, \*(Aqcomments[]\*(Aq => \*(AqTEXT\*(Aq;
\& };
.Ve
.PP
\&\fIprocess\fR is the method used to find matching elements in the
\&\s-1HTML\s0 with a \s-1CSS\s0 selector or an XPath expression, and to
extract text or attributes into the result stash.
.PP
If the first argument begins with \*(L"//\*(R" or \*(L"id(\*(R" it is treated
as an XPath expression; otherwise it is treated as a \s-1CSS\s0 selector.
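.PP
For instance, the following two rules are intended to pick out the same link,
first with a \s-1CSS\s0 selector and then with an XPath expression. This is an
illustrative sketch; the \f(CW\*(C`entry\*(C'\fR class name is made up:
.PP
.Vb 5
\& # CSS selector
\& process "div.entry > a", link => \*(Aq@href\*(Aq;
\&
\& # roughly equivalent XPath expression
\& process \*(Aq//div[@class="entry"]/a\*(Aq, link => \*(Aq@href\*(Aq;
.Ve
.PP
More \fIprocess\fR examples: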
.PP
.Vb 3
\& # <span class="date">2008/12/21</span>
\& # date => "2008/12/21"
\& process ".date", date => \*(AqTEXT\*(Aq;
\&
\& # <div class="body"><a href="http://example.com/">foo</a></div>
\& # link => URI\->new("http://example.com/")
\& process ".body > a", link => \*(Aq@href\*(Aq;
\&
\& # <div class="body"><!\-\- HTML Comment here \-\-><a href="http://example.com/">foo</a></div>
\& # comment => " HTML Comment here "
\& #
\& # NOTE: comment nodes are only accessible when
\& # HTML::TreeBuilder::XPath (version >= 0.14) and/or
\& # HTML::TreeBuilder::LibXML (version >= 0.13) is installed
\& process "//div[contains(@class, \*(Aqbody\*(Aq)]/comment()", comment => \*(AqTEXT\*(Aq;
\&
\& # <div class="body"><a href="http://example.com/">foo</a></div>
\& # link => URI\->new("http://example.com/"), text => "foo"
\& process ".body > a", link => \*(Aq@href\*(Aq, text => \*(AqTEXT\*(Aq;
\&
\& # <ul><li>foo</li><li>bar</li></ul>
\& # list => [ "foo", "bar" ]
\& process "li", "list[]" => "TEXT";
\&
\& # <ul><li id="1">foo</li><li id="2">bar</li></ul>
\& # list => [ { id => "1", text => "foo" }, { id => "2", text => "bar" } ];
\& process "li", "list[]" => { id => \*(Aq@id\*(Aq, text => "TEXT" };
.Ve
.SH "EXAMPLES"
.IX Header "EXAMPLES"
There are many examples in the \f(CW\*(C`eg/\*(C'\fR dir packaged in this
distribution. It is recommended to look through these.
.SH "NESTED SCRAPERS"
.IX Header "NESTED SCRAPERS"
\&\s-1TBD\s0
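.PP
Until this section is written, the \s-1SYNOPSIS\s0 above is the best
reference: a \fIscraper\fR block can itself be given as the value of a
\&\fIprocess\fR rule, and it is then run against every matched element. A
minimal sketch; the \f(CW\*(C`li.item\*(C'\fR markup and the key names here
are made up:
.PP
.Vb 9
\& my $list = scraper {
\&     # run the inner scraper once for every matching list item
\&     process "li.item", "items[]" => scraper {
\&         process "a", url => \*(Aq@href\*(Aq, title => \*(AqTEXT\*(Aq;
\&         process "p.desc", desc => \*(AqTEXT\*(Aq;
\&     };
\& };
\&
\& # $res\->{items} is an array reference of hash references
\& my $res = $list\->scrape($html);
.Ve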
\& # link => URI\->new("http://example.com/"), text => "foo" \& process ".body > a", link => \*(Aq@href\*(Aq, text => \*(AqTEXT\*(Aq; \& \& # \& # list => [ "foo", "bar" ] \& process "li", "list[]" => "TEXT"; \& \& # \& # list => [ { id => "1", text => "foo" }, { id => "2", text => "bar" } ]; \& process "li", "list[]" => { id => \*(Aq@id\*(Aq, text => "TEXT" }; .Ve .SH "EXAMPLES" .IX Header "EXAMPLES" There are many examples in the \f(CW\*(C`eg/\*(C'\fR dir packaged in this distribution. It is recommended to look through these. .SH "NESTED SCRAPERS" .IX Header "NESTED SCRAPERS" \&\s-1TBD\s0 .SH "FILTERS" .IX Header "FILTERS" \&\s-1TBD\s0 .SH "AUTHOR" .IX Header "AUTHOR" Tatsuhiko Miyagawa .SH "LICENSE" .IX Header "LICENSE" This library is free software; you can redistribute it and/or modify it under the same terms as Perl itself. .SH "SEE ALSO" .IX Header "SEE ALSO" .PP HTML::TreeBuilder::XPath