Posted to common-user@hadoop.apache.org by jibjoice <su...@hotmail.com> on 2007/12/14 01:43:27 UTC

Nutch crawl problem

I use nutch-0.9 and hadoop-0.12.2. When I run the command "bin/nutch crawl
urls -dir crawled -depth 3" I get this error:

- crawl started in: crawled
- rootUrlDir = input
- threads = 10
- depth = 3
- Injector: starting
- Injector: crawlDb: crawled/crawldb
- Injector: urlDir: input
- Injector: Converting injected urls to crawl db entries.
- Total input paths to process : 1
- Running job: job_0001
- map 0% reduce 0%
- map 100% reduce 0%
- map 100% reduce 100%
- Job complete: job_0001
- Counters: 6
- Map-Reduce Framework
- Map input records=3
- Map output records=1
- Map input bytes=22
- Map output bytes=52
- Reduce input records=1
- Reduce output records=1
- Injector: Merging injected urls into crawl db.
- Total input paths to process : 2
- Running job: job_0002
- map 0% reduce 0%
- map 100% reduce 0%
- map 100% reduce 58%
- map 100% reduce 100%
- Job complete: job_0002
- Counters: 6
- Map-Reduce Framework
- Map input records=3
- Map output records=1
- Map input bytes=60
- Map output bytes=52
- Reduce input records=1
- Reduce output records=1
- Injector: done
- Generator: Selecting best-scoring urls due for fetch.
- Generator: starting
- Generator: segment: crawled/segments/25501213164325
- Generator: filtering: false
- Generator: topN: 2147483647
- Total input paths to process : 2
- Running job: job_0003
- map 0% reduce 0%
- map 100% reduce 0%
- map 100% reduce 100%
- Job complete: job_0003
- Counters: 6
- Map-Reduce Framework
- Map input records=3
- Map output records=1
- Map input bytes=59
- Map output bytes=77
- Reduce input records=1
- Reduce output records=1
- Generator: 0 records selected for fetching, exiting ...
- Stopping at depth=0 - no more URLs to fetch.
- No URLs to fetch - check your seed list and URL filters.
- crawl finished: crawled

but sometimes, when I crawl certain URLs, it fails at indexing time with this error:

- Indexer: done
- Dedup: starting
- Dedup: adding indexes in: crawled/indexes
- Total input paths to process : 2
- Running job: job_0025
- map 0% reduce 0%
- Task Id : task_0025_m_000001_0, Status : FAILED
task_0025_m_000001_0: - Error running child
task_0025_m_000001_0: java.lang.ArrayIndexOutOfBoundsException: -1
task_0025_m_000001_0: at
org.apache.lucene.index.MultiReader.isDeleted(MultiReader.java:113)
task_0025_m_000001_0: at
org.apache.nutch.indexer.DeleteDuplicates$InputFormat$DDRecordReade
r.next(DeleteDuplicates.java:176)
task_0025_m_000001_0: at
org.apache.hadoop.mapred.MapTask$1.next(MapTask.java:157)
task_0025_m_000001_0: at
org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:46)
task_0025_m_000001_0: at org.apache.hadoop.mapred.MapTask.run
(MapTask.java:175)
task_0025_m_000001_0: at
org.apache.hadoop.mapred.TaskTracker$Child.main
(TaskTracker.java:1445)
- Task Id : task_0025_m_000000_0, Status : FAILED
task_0025_m_000000_0: - Error running child
task_0025_m_000000_0: java.lang.ArrayIndexOutOfBoundsException: -1
task_0025_m_000000_0: at
org.apache.lucene.index.MultiReader.isDeleted(MultiReader.java:113)
task_0025_m_000000_0: at
org.apache.nutch.indexer.DeleteDuplicates$InputFormat$DDRecordReade
r.next(DeleteDuplicates.java:176)
task_0025_m_000000_0: at
org.apache.hadoop.mapred.MapTask$1.next(MapTask.java:157)
task_0025_m_000000_0: at
org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:46)
task_0025_m_000000_0: at org.apache.hadoop.mapred.MapTask.run
(MapTask.java:175)
task_0025_m_000000_0: at
org.apache.hadoop.mapred.TaskTracker$Child.main
(TaskTracker.java:1445)
- Task Id : task_0025_m_000000_1, Status : FAILED
task_0025_m_000000_1: - Error running child
task_0025_m_000000_1: java.lang.ArrayIndexOutOfBoundsException: -1
task_0025_m_000000_1: at
org.apache.lucene.index.MultiReader.isDeleted(MultiReader.java:113)
task_0025_m_000000_1: at
org.apache.nutch.indexer.DeleteDuplicates$InputFormat$DDRecordReade
r.next(DeleteDuplicates.java:176)
task_0025_m_000000_1: at
org.apache.hadoop.mapred.MapTask$1.next(MapTask.java:157)
task_0025_m_000000_1: at
org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:46)
task_0025_m_000000_1: at org.apache.hadoop.mapred.MapTask.run
(MapTask.java:175)
task_0025_m_000000_1: at
org.apache.hadoop.mapred.TaskTracker$Child.main
(TaskTracker.java:1445)
- Task Id : task_0025_m_000001_1, Status : FAILED
task_0025_m_000001_1: - Error running child
task_0025_m_000001_1: java.lang.ArrayIndexOutOfBoundsException: -1
task_0025_m_000001_1: at
org.apache.lucene.index.MultiReader.isDeleted(MultiReader.java:113)
task_0025_m_000001_1: at
org.apache.nutch.indexer.DeleteDuplicates$InputFormat$DDRecordReade
r.next(DeleteDuplicates.java:176)
task_0025_m_000001_1: at
org.apache.hadoop.mapred.MapTask$1.next(MapTask.java:157)
task_0025_m_000001_1: at
org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:46)
task_0025_m_000001_1: at org.apache.hadoop.mapred.MapTask.run
(MapTask.java:175)
task_0025_m_000001_1: at
org.apache.hadoop.mapred.TaskTracker$Child.main
(TaskTracker.java:1445)
- Task Id : task_0025_m_000001_2, Status : FAILED
task_0025_m_000001_2: - Error running child
task_0025_m_000001_2: java.lang.ArrayIndexOutOfBoundsException: -1
task_0025_m_000001_2: at
org.apache.lucene.index.MultiReader.isDeleted(MultiReader.java:113)
task_0025_m_000001_2: at
org.apache.nutch.indexer.DeleteDuplicates$InputFormat$DDRecordReade
r.next(DeleteDuplicates.java:176)
task_0025_m_000001_2: at
org.apache.hadoop.mapred.MapTask$1.next(MapTask.java:157)
task_0025_m_000001_2: at
org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:46)
task_0025_m_000001_2: at org.apache.hadoop.mapred.MapTask.run
(MapTask.java:175)
task_0025_m_000001_2: at
org.apache.hadoop.mapred.TaskTracker$Child.main
(TaskTracker.java:1445)
- Task Id : task_0025_m_000000_2, Status : FAILED
task_0025_m_000000_2: - Error running child
task_0025_m_000000_2: java.lang.ArrayIndexOutOfBoundsException: -1
task_0025_m_000000_2: at
org.apache.lucene.index.MultiReader.isDeleted(MultiReader.java:113)
task_0025_m_000000_2: at
org.apache.nutch.indexer.DeleteDuplicates$InputFormat$DDRecordReade
r.next(DeleteDuplicates.java:176)
task_0025_m_000000_2: at
org.apache.hadoop.mapred.MapTask$1.next(MapTask.java:157)
task_0025_m_000000_2: at
org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:46)
task_0025_m_000000_2: at org.apache.hadoop.mapred.MapTask.run
(MapTask.java:175)
task_0025_m_000000_2: at
org.apache.hadoop.mapred.TaskTracker$Child.main
(TaskTracker.java:1445)
- map 100% reduce 100%
- Task Id : task_0025_m_000001_3, Status : FAILED
task_0025_m_000001_3: - Error running child
task_0025_m_000001_3: java.lang.ArrayIndexOutOfBoundsException: -1
task_0025_m_000001_3: at
org.apache.lucene.index.MultiReader.isDeleted(MultiReader.java:113)
task_0025_m_000001_3: at
org.apache.nutch.indexer.DeleteDuplicates$InputFormat$DDRecordReade
r.next(DeleteDuplicates.java:176)
task_0025_m_000001_3: at
org.apache.hadoop.mapred.MapTask$1.next(MapTask.java:157)
task_0025_m_000001_3: at
org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:46)
task_0025_m_000001_3: at org.apache.hadoop.mapred.MapTask.run
(MapTask.java:175)
task_0025_m_000001_3: at
org.apache.hadoop.mapred.TaskTracker$Child.main
(TaskTracker.java:1445)
- Task Id : task_0025_m_000000_3, Status : FAILED
task_0025_m_000000_3: - Error running child
task_0025_m_000000_3: java.lang.ArrayIndexOutOfBoundsException: -1
task_0025_m_000000_3: at
org.apache.lucene.index.MultiReader.isDeleted(MultiReader.java:113)
task_0025_m_000000_3: at
org.apache.nutch.indexer.DeleteDuplicates$InputFormat$DDRecordReade
r.next(DeleteDuplicates.java:176)
task_0025_m_000000_3: at
org.apache.hadoop.mapred.MapTask$1.next(MapTask.java:157)
task_0025_m_000000_3: at
org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:46)
task_0025_m_000000_3: at org.apache.hadoop.mapred.MapTask.run
(MapTask.java:175)
task_0025_m_000000_3: at
org.apache.hadoop.mapred.TaskTracker$Child.main
(TaskTracker.java:1445)
Exception in thread "main" java.io.IOException: Job failed!
at org.apache.hadoop.mapred.JobClient.runJob(JobClient.java:604)
at org.apache.nutch.indexer.DeleteDuplicates.dedup
(DeleteDuplicates.java:439)
at org.apache.nutch.crawl.Crawl.main(Crawl.java:135)

How do I solve it?
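
For reference, the "No URLs to fetch" result above usually means the seed list was
empty or every seed was rejected by the URL filters. A minimal seed setup that this
command expects might look like the following sketch (the file name seed.txt, the
local directory name, and the example URL are only assumptions):

# create a local seed list with one URL per line
mkdir urls
echo "http://lucene.apache.org/" > urls/seed.txt

# copy it into HDFS so the crawl can read it as its rootUrlDir
bin/hadoop dfs -put urls urls

# make sure conf/crawl-urlfilter.txt has an accept rule for the domain, e.g.
#   +^http://([a-z0-9]*\.)*apache.org/

bin/nutch crawl urls -dir crawled -depth 3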

-- 
View this message in context: http://www.nabble.com/Nutch-crawl-problem-tp14327978p14327978.html
Sent from the Hadoop Users mailing list archive at Nabble.com.


Re: Nutch crawl problem

Posted by jibjoice <su...@hotmail.com>.
i crawl "http://lucene.apache.org" and in conf/crawl-urlfilter.txt i set that
"+^http://([a-z0-9]*\.)*apache.org/" when i use command "bin/nutch crawl
urls -dir crawled -depth 3" have error that

- crawl started in: crawled
- rootUrlDir = urls
- threads = 10
- depth = 3
- Injector: starting
- Injector: crawlDb: crawled/crawldb
- Injector: urlDir: urls
- Injector: Converting injected urls to crawl db entries.
Exception in thread "main" org.apache.hadoop.mapred.InvalidInputException:
Input path doesnt exist : /user/nutch/urls
        at
org.apache.hadoop.mapred.InputFormatBase.validateInput(InputFormatBase.java:138)
        at org.apache.hadoop.mapred.JobClient.submitJob(JobClient.java:326)
        at org.apache.hadoop.mapred.JobClient.runJob(JobClient.java:543)
        at org.apache.nutch.crawl.Injector.inject(Injector.java:162)
        at org.apache.nutch.crawl.Crawl.main(Crawl.java:115)
-bash-3.1$ bin/nutch crawl inputs -dir crawled -depth 3
- crawl started in: crawled
- rootUrlDir = inputs
- threads = 10
- depth = 3
- Injector: starting
- Injector: crawlDb: crawled/crawldb
- Injector: urlDir: inputs
- Injector: Converting injected urls to crawl db entries.
- Total input paths to process : 1
- Running job: job_0001
-  map 0% reduce 0%
-  map 100% reduce 0%
-  map 100% reduce 16%
-  map 100% reduce 58%
-  map 100% reduce 100%
- Job complete: job_0001
- Counters: 6
-   Map-Reduce Framework
-     Map input records=3
-     Map output records=1
-     Map input bytes=25
-     Map output bytes=55
-     Reduce input records=1
-     Reduce output records=1
- Injector: Merging injected urls into crawl db.
- Total input paths to process : 2
- Running job: job_0002
-  map 0% reduce 0%
- Task Id : task_0002_m_000000_0, Status : FAILED
task_0002_m_000000_0: - Plugins: looking in: /nutch/search/build/plugins
task_0002_m_000000_0: - Plugin Auto-activation mode: [true]
task_0002_m_000000_0: - Registered Plugins:
task_0002_m_000000_0: -         the nutch core extension points
(nutch-extensionpoints)
task_0002_m_000000_0: -         Basic Query Filter (query-basic)
task_0002_m_000000_0: -         Basic URL Normalizer (urlnormalizer-basic)
task_0002_m_000000_0: -         Basic Indexing Filter (index-basic)
task_0002_m_000000_0: -         Html Parse Plug-in (parse-html)
task_0002_m_000000_0: -         Basic Summarizer Plug-in (summary-basic)
task_0002_m_000000_0: -         Site Query Filter (query-site)
task_0002_m_000000_0: -         HTTP Framework (lib-http)
task_0002_m_000000_0: -         Text Parse Plug-in (parse-text)
task_0002_m_000000_0: -         Regex URL Filter (urlfilter-regex)
task_0002_m_000000_0: -         Pass-through URL Normalizer
(urlnormalizer-pass)
task_0002_m_000000_0: -         Http Protocol Plug-in (protocol-http)
task_0002_m_000000_0: -         Regex URL Normalizer (urlnormalizer-regex)
task_0002_m_000000_0: -         OPIC Scoring Plug-in (scoring-opic)
task_0002_m_000000_0: -         CyberNeko HTML Parser (lib-nekohtml)
task_0002_m_000000_0: -         JavaScript Parser (parse-js)
task_0002_m_000000_0: -         URL Query Filter (query-url)
task_0002_m_000000_0: -         Regex URL Filter Framework
(lib-regex-filter)
task_0002_m_000000_0: - Registered Extension-Points:
task_0002_m_000000_0: -         Nutch Summarizer
(org.apache.nutch.searcher.Summarizer)
task_0002_m_000000_0: -         Nutch URL Normalizer
(org.apache.nutch.net.URLNormalizer)
task_0002_m_000000_0: -         Nutch Protocol
(org.apache.nutch.protocol.Protocol)
task_0002_m_000000_0: -         Nutch Analysis
(org.apache.nutch.analysis.NutchAnalyzer)
task_0002_m_000000_0: -         Nutch URL Filter
(org.apache.nutch.net.URLFilter)
task_0002_m_000000_0: -         Nutch Indexing Filter
(org.apache.nutch.indexer.IndexingFilter)
task_0002_m_000000_0: -         Nutch Online Search Results Clustering
Plugin (org.apache.nutch.clustering.OnlineClusterer)
task_0002_m_000000_0: -         HTML Parse Filter
(org.apache.nutch.parse.HtmlParseFilter)
task_0002_m_000000_0: -         Nutch Content Parser
(org.apache.nutch.parse.Parser)
task_0002_m_000000_0: -         Nutch Scoring
(org.apache.nutch.scoring.ScoringFilter)
task_0002_m_000000_0: -         Nutch Query Filter
(org.apache.nutch.searcher.QueryFilter)
task_0002_m_000000_0: -         Ontology Model Loader
(org.apache.nutch.ontology.Ontology)
task_0002_m_000000_0: - found resource crawl-urlfilter.txt at
file:/nutch/search/conf/crawl-urlfilter.txt
-  map 50% reduce 0%
-  map 100% reduce 0%
-  map 100% reduce 8%
-  map 100% reduce 25%
-  map 100% reduce 58%
-  map 100% reduce 100%
- Job complete: job_0002
- Counters: 6
-   Map-Reduce Framework
-     Map input records=3
-     Map output records=1
-     Map input bytes=63
-     Map output bytes=55
-     Reduce input records=1
-     Reduce output records=1
- Injector: done
- Generator: Selecting best-scoring urls due for fetch.
- Generator: starting
- Generator: segment: crawled/segments/25510102165746
- Generator: filtering: false
- Generator: topN: 2147483647
- Total input paths to process : 2
- Running job: job_0003
-  map 0% reduce 0%
-  map 50% reduce 0%
-  map 100% reduce 0%
-  map 100% reduce 8%
-  map 100% reduce 16%
-  map 100% reduce 58%
-  map 100% reduce 100%
- Job complete: job_0003
- Counters: 6
-   Map-Reduce Framework
-     Map input records=3
-     Map output records=1
-     Map input bytes=62
-     Map output bytes=80
-     Reduce input records=1
-     Reduce output records=1
- Generator: Partitioning selected urls by host, for politeness.
- Total input paths to process : 2
- Running job: job_0004
-  map 0% reduce 0%
-  map 50% reduce 0%
-  map 100% reduce 0%
- Task Id : task_0004_r_000000_0, Status : FAILED
- Task Id : task_0004_r_000001_0, Status : FAILED
-  map 100% reduce 8%
-  map 100% reduce 0%
- Task Id : task_0004_r_000000_1, Status : FAILED
- Task Id : task_0004_r_000001_1, Status : FAILED
-  map 100% reduce 8%
-  map 100% reduce 0%
- Task Id : task_0004_r_000000_2, Status : FAILED

I am now using hadoop-0.12.2, nutch-0.9, and Java JDK 1.6.0. Why does this fail? I
have not been able to solve it for a month.
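
The first failure above, "Input path doesnt exist : /user/nutch/urls", suggests the
seed directory was never copied into the DFS under the nutch user's home. A quick
way to check, assuming the standard dfs shell:

# the urls directory should show up under the nutch user's home in HDFS
bin/hadoop dfs -ls /user/nutch
# if it is missing, upload the local seed directory
bin/hadoop dfs -put urls /user/nutch/urls

The later job_0004 reduce failures print no stack trace in this output; the
tasktracker logs on the worker nodes would be the place to look for the actual
exception.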
-- 
View this message in context: http://www.nabble.com/Nutch-crawl-problem-tp14327978p14575918.html
Sent from the Hadoop Users mailing list archive at Nabble.com.


Re: Nutch crawl problem

Posted by jibjoice <su...@hotmail.com>.
Why can I crawl http://game.search.com but not http://www.search.com? My
conf/crawl-urlfilter.txt is:

# skip file:, ftp:, & mailto: urls
-^(file|ftp|mailto):

# skip image and other suffixes we can't yet parse
#-\.(png|PNG|ico|ICO|css|sit|eps|wmf|zip|mpg|gz|rpm|tgz|mov|MOV|exe|bmp|BMP)$

# skip URLs containing certain characters as probable queries, etc.
-[?*!@=]

# skip URLs with slash-delimited segment that repeats 3+ times, to break loops
-.*(/.+?)/.*?\1/.*?\1/

# accept hosts in MY.DOMAIN.NAME
#+^http://([a-z0-9]*\.)*search.com/

# skip everything else
+.
 
Also, there are some hosts I cannot crawl at all because of the error "Generator: 0
records selected for fetching, exiting ...", even though I use the same
configuration for every host. Why?
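
As shown, the accept rule for search.com is commented out and the final "+." accepts
everything, so this filter by itself should pass http://www.search.com; the
"-[?*!@=]" rule would only drop seeds containing a query string or one of those
characters. If the goal is to stay inside search.com, a minimal filter might look
like this sketch (only the uncommented accept line and the final deny differ from
the file above):

# skip file:, ftp:, & mailto: urls
-^(file|ftp|mailto):

# skip URLs containing certain characters as probable queries, etc.
-[?*!@=]

# accept hosts in search.com
+^http://([a-z0-9]*\.)*search.com/

# skip everything else
-.

For the hosts that stop with "Generator: 0 records selected for fetching", the
Injector counters earlier in the same run are worth a look: if the reduce output
records counter there is 0, the seed never made it into the crawldb in the first
place.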
-- 
View this message in context: http://www.nabble.com/Nutch-crawl-problem-tp14327978p14657080.html
Sent from the Hadoop Users mailing list archive at Nabble.com.


Re: Nutch crawl problem

Posted by pvvpr <pv...@research.iiit.ac.in>.
Since it is complaining about logging, check that the path log4j is trying to write
to is valid and that the hadoop user has permission to access it.
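
The quoted trace fails in FileAppender.setFile with "/nutch/search/logs (Is a
directory)", which suggests the DRFA appender's File option resolved to the log
directory with no file name appended. Assuming the stock conf/log4j.properties that
ships with Nutch/Hadoop, the relevant lines to verify look roughly like this,
together with a permissions check on the directory:

# conf/log4j.properties (stock layout, shown here as an assumption)
log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
log4j.appender.DRFA.DatePattern=.yyyy-MM-dd

# the log directory itself must exist and be writable by the hadoop user
ls -ld /nutch/search/logs

If hadoop.log.file comes through empty for the child task, the File option collapses
to just the directory and the appender fails exactly as above.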

>
> i can't solve it now
>
> jibjoice wrote:
>>
>> i follow this link "http://wiki.apache.org/nutch/NutchHadoopTutorial" so
>> i
>> think it's not about the conf/crawl-urlfilter.txt file when i use this
>> command "bin/nutch crawl urls -dir crawled -depth 3" again it shows :
>> Generator: Selecting best-scoring urls due for fetch.
>> Generator: starting
>> Generator: segment: crawled/segments/25501221110712
>> Generator: filtering: false
>> Generator: topN: 2147483647
>> Generator: Partitioning selected urls by host, for politeness.
>> Generator: done.
>> Fetcher: starting
>> Fetcher: segment: crawled/segments/25501221110712
>> Fetcher: done
>> CrawlDb update: starting
>> CrawlDb update: db: crawled/crawldb
>> CrawlDb update: segments: [crawled/segments/25501221110712]
>> CrawlDb update: additions allowed: true
>> CrawlDb update: URL normalizing: true
>> CrawlDb update: URL filtering: true
>> CrawlDb update: Merging segment data into db.
>> CrawlDb update: done
>> Generator: Selecting best-scoring urls due for fetch.
>> Generator: starting
>> Generator: segment: crawled/segments/25501221110908
>> Generator: filtering: false
>> Generator: topN: 2147483647
>> Generator: Partitioning selected urls by host, for politeness.
>> Generator: done.
>> Fetcher: starting
>> Fetcher: segment: crawled/segments/25501221110908
>> Fetcher: done
>> CrawlDb update: starting
>> CrawlDb update: db: crawled/crawldb
>> CrawlDb update: segments: [crawled/segments/25501221110908]
>> CrawlDb update: additions allowed: true
>> CrawlDb update: URL normalizing: true
>> CrawlDb update: URL filtering: true
>> CrawlDb update: Merging segment data into db.
>> CrawlDb update: done
>> LinkDb: starting
>> LinkDb: linkdb: crawled/linkdb
>> LinkDb: URL normalize: true
>> LinkDb: URL filter: true
>> LinkDb: adding segment: /user/nutch/crawled/segments/25501221110519
>> LinkDb: adding segment: /user/nutch/crawled/segments/25501221110712
>> LinkDb: adding segment: /user/nutch/crawled/segments/25501221110908
>> LinkDb: done
>> Indexer: starting
>> Indexer: linkdb: crawled/linkdb
>> Indexer: adding segment: /user/nutch/crawled/segments/25501221110519
>> Indexer: adding segment: /user/nutch/crawled/segments/25501221110712
>> Indexer: adding segment: /user/nutch/crawled/segments/25501221110908
>> Indexer: done
>> Dedup: starting
>> Dedup: adding indexes in: crawled/indexes
>> task_0017_m_000000_0: log4j:ERROR Either File or DatePattern options are
>> not set for appender [DRFA].
>> task_0017_m_000001_0: log4j:ERROR setFile(null,true) call failed.
>> task_0017_m_000001_0: java.io.FileNotFoundException: /nutch/search/logs
>> (Is a directory)
>> task_0017_m_000001_0:   at java.io.FileOutputStream.openAppend(Native
>> Method)
>> task_0017_m_000001_0:   at
>> java.io.FileOutputStream.<init>(FileOutputStream.java:177)
>> task_0017_m_000001_0:   at
>> java.io.FileOutputStream.<init>(FileOutputStream.java:102)
>> task_0017_m_000001_0:   at
>> org.apache.log4j.FileAppender.setFile(FileAppender.java:289)
>> task_0017_m_000001_0:   at
>> org.apache.log4j.FileAppender.activateOptions(FileAppender.java:163)
>> task_0017_m_000001_0:   at
>> org.apache.log4j.DailyRollingFileAppender.activateOptions(DailyRollingFileAppender.java:215)
>> task_0017_m_000001_0:   at
>> org.apache.log4j.config.PropertySetter.activate(PropertySetter.java:256)
>> task_0017_m_000001_0:   at
>> org.apache.log4j.config.PropertySetter.setProperties(PropertySetter.java:132)
>> task_0017_m_000001_0:   at
>> org.apache.log4j.config.PropertySetter.setProperties(PropertySetter.java:96)
>> task_0017_m_000001_0:   at
>> org.apache.log4j.PropertyConfigurator.parseAppender(PropertyConfigurator.java:654)
>> task_0017_m_000001_0:   at
>> org.apache.log4j.PropertyConfigurator.parseCategory(PropertyConfigurator.java:612)
>> task_0017_m_000001_0:   at
>> org.apache.log4j.PropertyConfigurator.configureRootCategory(PropertyConfigurator.java:509)
>> task_0017_m_000001_0:   at
>> org.apache.log4j.PropertyConfigurator.doConfigure(PropertyConfigurator.java:415)
>> task_0017_m_000001_0:   at
>> org.apache.log4j.PropertyConfigurator.doConfigure(PropertyConfigurator.java:441)
>> task_0017_m_000001_0:   at
>> org.apache.log4j.helpers.OptionConverter.selectAndConfigure(OptionConverter.java:468)
>> task_0017_m_000001_0:   at
>> org.apache.log4j.LogManager.<clinit>(LogManager.java:122)
>> task_0017_m_000001_0:   at
>> org.apache.log4j.Logger.getLogger(Logger.java:104)
>> task_0017_m_000001_0:   at
>> org.apache.commons.logging.impl.Log4JLogger.getLogger(Log4JLogger.java:229)
>> task_0017_m_000001_0:   at
>> org.apache.commons.logging.impl.Log4JLogger.<init>(Log4JLogger.java:65)
>> task_0017_m_000001_0:   at
>> sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
>> task_0017_m_000001_0:   at
>> sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:39)
>> task_0017_m_000001_0:   at
>> sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:27)
>> task_0017_m_000001_0:   at
>> java.lang.reflect.Constructor.newInstance(Constructor.java:513)
>> task_0017_m_000001_0:   at
>> org.apache.commons.logging.impl.LogFactoryImpl.newInstance(LogFactoryImpl.java:529)
>> task_0017_m_000001_0:   at
>> org.apache.commons.logging.impl.LogFactoryImpl.getInstance(LogFactoryImpl.java:235)
>> task_0017_m_000001_0:   at
>> org.apache.commons.logging.LogFactory.getLog(LogFactory.java:370)
>> task_0017_m_000001_0:   at
>> org.apache.hadoop.mapred.TaskTracker.<clinit>(TaskTracker.java:82)
>> task_0017_m_000001_0:   at
>> org.apache.hadoop.mapred.TaskTracker$Child.main(TaskTracker.java:1423)
>> task_0017_m_000001_0: log4j:ERROR Either File or DatePattern options are
>> not set for appender [DRFA].
>> task_0017_m_000000_1: log4j:ERROR setFile(null,true) call failed.
>> task_0017_m_000000_1: java.io.FileNotFoundException: /nutch/search/logs
>> (Is a directory)
>> task_0017_m_000000_1:   at java.io.FileOutputStream.openAppend(Native
>> Method)
>> task_0017_m_000000_1:   at
>> java.io.FileOutputStream.<init>(FileOutputStream.java:177)
>> task_0017_m_000000_1:   at
>> java.io.FileOutputStream.<init>(FileOutputStream.java:102)
>> task_0017_m_000000_1:   at
>> org.apache.log4j.FileAppender.setFile(FileAppender.java:289)
>> task_0017_m_000000_1:   at
>> org.apache.log4j.FileAppender.activateOptions(FileAppender.java:163)
>> task_0017_m_000000_1:   at
>> org.apache.log4j.DailyRollingFileAppender.activateOptions(DailyRollingFileAppender.java:215)
>> task_0017_m_000000_1:   at
>> org.apache.log4j.config.PropertySetter.activate(PropertySetter.java:256)
>> task_0017_m_000000_1:   at
>> org.apache.log4j.config.PropertySetter.setProperties(PropertySetter.java:132)
>> task_0017_m_000000_1:   at
>> org.apache.log4j.config.PropertySetter.setProperties(PropertySetter.java:96)
>> task_0017_m_000000_1:   at
>> org.apache.log4j.PropertyConfigurator.parseAppender(PropertyConfigurator.java:654)
>> task_0017_m_000000_1:   at
>> org.apache.log4j.PropertyConfigurator.parseCategory(PropertyConfigurator.java:612)
>> task_0017_m_000000_1:   at
>> org.apache.log4j.PropertyConfigurator.configureRootCategory(PropertyConfigurator.java:509)
>> task_0017_m_000000_1:   at
>> org.apache.log4j.PropertyConfigurator.doConfigure(PropertyConfigurator.java:415)
>> task_0017_m_000000_1:   at
>> org.apache.log4j.PropertyConfigurator.doConfigure(PropertyConfigurator.java:441)
>> task_0017_m_000000_1:   at
>> org.apache.log4j.helpers.OptionConverter.selectAndConfigure(OptionConverter.java:468)
>> task_0017_m_000000_1:   at
>> org.apache.log4j.LogManager.<clinit>(LogManager.java:122)
>> task_0017_m_000000_1:   at
>> org.apache.log4j.Logger.getLogger(Logger.java:104)
>> task_0017_m_000000_1:   at
>> org.apache.commons.logging.impl.Log4JLogger.getLogger(Log4JLogger.java:229)
>> task_0017_m_000000_1:   at
>> org.apache.commons.logging.impl.Log4JLogger.<init>(Log4JLogger.java:65)
>> task_0017_m_000000_1:   at
>> sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
>> task_0017_m_000000_1:   at
>> sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:39)
>> task_0017_m_000000_1:   at
>> sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:27)
>> task_0017_m_000000_1:   at
>> java.lang.reflect.Constructor.newInstance(Constructor.java:513)
>> task_0017_m_000000_1:   at
>> org.apache.commons.logging.impl.LogFactoryImpl.newInstance(LogFactoryImpl.java:529)
>> task_0017_m_000000_1:   at
>> org.apache.commons.logging.impl.LogFactoryImpl.getInstance(LogFactoryImpl.java:235)
>> task_0017_m_000000_3:   at
>> org.apache.commons.logging.LogFactory.getLog(LogFactory.java:370)
>> task_0017_m_000000_3:   at
>> org.apache.hadoop.mapred.TaskTracker.<clinit>(TaskTracker.java:82)
>> task_0017_m_000000_3:   at
>> org.apache.hadoop.mapred.TaskTracker$Child.main(TaskTracker.java:1423)
>> task_0017_m_000000_3: log4j:ERROR Either File or DatePattern options are
>> not set for appender [DRFA].
>> Exception in thread "main" java.io.IOException: Job failed!
>>         at org.apache.hadoop.mapred.JobClient.runJob(JobClient.java:604)
>>         at
>> org.apache.nutch.indexer.DeleteDuplicates.dedup(DeleteDuplicates.java:439)
>>         at org.apache.nutch.crawl.Crawl.main(Crawl.java:135)
>>
>> i don't know what happen?
>>
>>
>> pvvpr wrote:
>>>
>>> I think you need to check the conf/crawl-urlfilter.txt file
>>>
>>> On Thursday 20 December 2007 04:55, jibjoice wrote:
>>>> please, help me to solve it
>>>>
>>>> jibjoice wrote:
>>>> > where i should solve this? why it generated 0 records?
>>>> >
>>>> > pvvpr wrote:
>>>> >> basically your indexes are empty since no URLs were generated and
>>>> >> fetched. See
>>>> >> this,
>>>> >>
>>>> >>> > - Generator: 0 records selected for fetching, exiting ...
>>>> >>> > - Stopping at depth=0 - no more URLs to fetch.
>>>> >>> > - No URLs to fetch - check your seed list and URL filters.
>>>> >>> > - crawl finished: crawled
>>>> >>
>>>> >> when no pages are indexed, dedup throws Exception
>>>> >>
>>>> >> On Tuesday 18 December 2007 21:33, jibjoice wrote:
>>>> >>> i can't solve it now, pls help me
>>>> >>>
>>>> >>> jibjoice wrote:
>>>> >>> > i use nutch-0.9, hadoop-0.12.2 and i use this command "bin/nutch
>>>> >>> > crawl urls -dir crawled -depth 3" have error :
>>>> >>> >
>>>> >>> > - crawl started in: crawled
>>>> >>> > - rootUrlDir = input
>>>> >>> > - threads = 10
>>>> >>> > - depth = 3
>>>> >>> > - Injector: starting
>>>> >>> > - Injector: crawlDb: crawled/crawldb
>>>> >>> > - Injector: urlDir: input
>>>> >>> > - Injector: Converting injected urls to crawl db entries.
>>>> >>> > - Total input paths to process : 1
>>>> >>> > - Running job: job_0001
>>>> >>> > - map 0% reduce 0%
>>>> >>> > - map 100% reduce 0%
>>>> >>> > - map 100% reduce 100%
>>>> >>> > - Job complete: job_0001
>>>> >>> > - Counters: 6
>>>> >>> > - Map-Reduce Framework
>>>> >>> > - Map input records=3
>>>> >>> > - Map output records=1
>>>> >>> > - Map input bytes=22
>>>> >>> > - Map output bytes=52
>>>> >>> > - Reduce input records=1
>>>> >>> > - Reduce output records=1
>>>> >>> > - Injector: Merging injected urls into crawl db.
>>>> >>> > - Total input paths to process : 2
>>>> >>> > - Running job: job_0002
>>>> >>> > - map 0% reduce 0%
>>>> >>> > - map 100% reduce 0%
>>>> >>> > - map 100% reduce 58%
>>>> >>> > - map 100% reduce 100%
>>>> >>> > - Job complete: job_0002
>>>> >>> > - Counters: 6
>>>> >>> > - Map-Reduce Framework
>>>> >>> > - Map input records=3
>>>> >>> > - Map output records=1
>>>> >>> > - Map input bytes=60
>>>> >>> > - Map output bytes=52
>>>> >>> > - Reduce input records=1
>>>> >>> > - Reduce output records=1
>>>> >>> > - Injector: done
>>>> >>> > - Generator: Selecting best-scoring urls due for fetch.
>>>> >>> > - Generator: starting
>>>> >>> > - Generator: segment: crawled/segments/25501213164325
>>>> >>> > - Generator: filtering: false
>>>> >>> > - Generator: topN: 2147483647
>>>> >>> > - Total input paths to process : 2
>>>> >>> > - Running job: job_0003
>>>> >>> > - map 0% reduce 0%
>>>> >>> > - map 100% reduce 0%
>>>> >>> > - map 100% reduce 100%
>>>> >>> > - Job complete: job_0003
>>>> >>> > - Counters: 6
>>>> >>> > - Map-Reduce Framework
>>>> >>> > - Map input records=3
>>>> >>> > - Map output records=1
>>>> >>> > - Map input bytes=59
>>>> >>> > - Map output bytes=77
>>>> >>> > - Reduce input records=1
>>>> >>> > - Reduce output records=1
>>>> >>> > - Generator: 0 records selected for fetching, exiting ...
>>>> >>> > - Stopping at depth=0 - no more URLs to fetch.
>>>> >>> > - No URLs to fetch - check your seed list and URL filters.
>>>> >>> > - crawl finished: crawled
>>>> >>> >
>>>> >>> > but sometime i crawl some url it has error indexes time that
>>>> >>> >
>>>> >>> > - Indexer: done
>>>> >>> > - Dedup: starting
>>>> >>> > - Dedup: adding indexes in: crawled/indexes
>>>> >>> > - Total input paths to process : 2
>>>> >>> > - Running job: job_0025
>>>> >>> > - map 0% reduce 0%
>>>> >>> > - Task Id : task_0025_m_000001_0, Status : FAILED
>>>> >>> > task_0025_m_000001_0: - Error running child
>>>> >>> > task_0025_m_000001_0: java.lang.ArrayIndexOutOfBoundsException:
>>>> -1
>>>> >>> > task_0025_m_000001_0: at
>>>> >>> >
>>>> org.apache.lucene.index.MultiReader.isDeleted(MultiReader.java:113)
>>>> >>> > task_0025_m_000001_0: at
>>>> >>> >
>>>> org.apache.nutch.indexer.DeleteDuplicates$InputFormat$DDRecordReade
>>>> >>> > r.next(DeleteDuplicates.java:176)
>>>> >>> > task_0025_m_000001_0: at
>>>> >>> > org.apache.hadoop.mapred.MapTask$1.next(MapTask.java:157)
>>>> >>> > task_0025_m_000001_0: at
>>>> >>> > org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:46)
>>>> >>> > task_0025_m_000001_0: at org.apache.hadoop.mapred.MapTask.run
>>>> >>> > (MapTask.java:175)
>>>> >>> > task_0025_m_000001_0: at
>>>> >>> > org.apache.hadoop.mapred.TaskTracker$Child.main
>>>> >>> > (TaskTracker.java:1445)
>>>> >>> > - Task Id : task_0025_m_000000_0, Status : FAILED
>>>> >>> > task_0025_m_000000_0: - Error running child
>>>> >>> > task_0025_m_000000_0: java.lang.ArrayIndexOutOfBoundsException:
>>>> -1
>>>> >>> > task_0025_m_000000_0: at
>>>> >>> >
>>>> org.apache.lucene.index.MultiReader.isDeleted(MultiReader.java:113)
>>>> >>> > task_0025_m_000000_0: at
>>>> >>> >
>>>> org.apache.nutch.indexer.DeleteDuplicates$InputFormat$DDRecordReade
>>>> >>> > r.next(DeleteDuplicates.java:176)
>>>> >>> > task_0025_m_000000_0: at
>>>> >>> > org.apache.hadoop.mapred.MapTask$1.next(MapTask.java:157)
>>>> >>> > task_0025_m_000000_0: at
>>>> >>> > org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:46)
>>>> >>> > task_0025_m_000000_0: at org.apache.hadoop.mapred.MapTask.run
>>>> >>> > (MapTask.java:175)
>>>> >>> > task_0025_m_000000_0: at
>>>> >>> > org.apache.hadoop.mapred.TaskTracker$Child.main
>>>> >>> > (TaskTracker.java:1445)
>>>> >>> > - Task Id : task_0025_m_000000_1, Status : FAILED
>>>> >>> > task_0025_m_000000_1: - Error running child
>>>> >>> > task_0025_m_000000_1: java.lang.ArrayIndexOutOfBoundsException:
>>>> -1
>>>> >>> > task_0025_m_000000_1: at
>>>> >>> >
>>>> org.apache.lucene.index.MultiReader.isDeleted(MultiReader.java:113)
>>>> >>> > task_0025_m_000000_1: at
>>>> >>> >
>>>> org.apache.nutch.indexer.DeleteDuplicates$InputFormat$DDRecordReade
>>>> >>> > r.next(DeleteDuplicates.java:176)
>>>> >>> > task_0025_m_000000_1: at
>>>> >>> > org.apache.hadoop.mapred.MapTask$1.next(MapTask.java:157)
>>>> >>> > task_0025_m_000000_1: at
>>>> >>> > org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:46)
>>>> >>> > task_0025_m_000000_1: at org.apache.hadoop.mapred.MapTask.run
>>>> >>> > (MapTask.java:175)
>>>> >>> > task_0025_m_000000_1: at
>>>> >>> > org.apache.hadoop.mapred.TaskTracker$Child.main
>>>> >>> > (TaskTracker.java:1445)
>>>> >>> > - Task Id : task_0025_m_000001_1, Status : FAILED
>>>> >>> > task_0025_m_000001_1: - Error running child
>>>> >>> > task_0025_m_000001_1: java.lang.ArrayIndexOutOfBoundsException:
>>>> -1
>>>> >>> > task_0025_m_000001_1: at
>>>> >>> >
>>>> org.apache.lucene.index.MultiReader.isDeleted(MultiReader.java:113)
>>>> >>> > task_0025_m_000001_1: at
>>>> >>> >
>>>> org.apache.nutch.indexer.DeleteDuplicates$InputFormat$DDRecordReade
>>>> >>> > r.next(DeleteDuplicates.java:176)
>>>> >>> > task_0025_m_000001_1: at
>>>> >>> > org.apache.hadoop.mapred.MapTask$1.next(MapTask.java:157)
>>>> >>> > task_0025_m_000001_1: at
>>>> >>> > org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:46)
>>>> >>> > task_0025_m_000001_1: at org.apache.hadoop.mapred.MapTask.run
>>>> >>> > (MapTask.java:175)
>>>> >>> > task_0025_m_000001_1: at
>>>> >>> > org.apache.hadoop.mapred.TaskTracker$Child.main
>>>> >>> > (TaskTracker.java:1445)
>>>> >>> > - Task Id : task_0025_m_000001_2, Status : FAILED
>>>> >>> > task_0025_m_000001_2: - Error running child
>>>> >>> > task_0025_m_000001_2: java.lang.ArrayIndexOutOfBoundsException:
>>>> -1
>>>> >>> > task_0025_m_000001_2: at
>>>> >>> >
>>>> org.apache.lucene.index.MultiReader.isDeleted(MultiReader.java:113)
>>>> >>> > task_0025_m_000001_2: at
>>>> >>> >
>>>> org.apache.nutch.indexer.DeleteDuplicates$InputFormat$DDRecordReade
>>>> >>> > r.next(DeleteDuplicates.java:176)
>>>> >>> > task_0025_m_000001_2: at
>>>> >>> > org.apache.hadoop.mapred.MapTask$1.next(MapTask.java:157)
>>>> >>> > task_0025_m_000001_2: at
>>>> >>> > org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:46)
>>>> >>> > task_0025_m_000001_2: at org.apache.hadoop.mapred.MapTask.run
>>>> >>> > (MapTask.java:175)
>>>> >>> > task_0025_m_000001_2: at
>>>> >>> > org.apache.hadoop.mapred.TaskTracker$Child.main
>>>> >>> > (TaskTracker.java:1445)
>>>> >>> > - Task Id : task_0025_m_000000_2, Status : FAILED
>>>> >>> > task_0025_m_000000_2: - Error running child
>>>> >>> > task_0025_m_000000_2: java.lang.ArrayIndexOutOfBoundsException:
>>>> -1
>>>> >>> > task_0025_m_000000_2: at
>>>> >>> >
>>>> org.apache.lucene.index.MultiReader.isDeleted(MultiReader.java:113)
>>>> >>> > task_0025_m_000000_2: at
>>>> >>> >
>>>> org.apache.nutch.indexer.DeleteDuplicates$InputFormat$DDRecordReade
>>>> >>> > r.next(DeleteDuplicates.java:176)
>>>> >>> > task_0025_m_000000_2: at
>>>> >>> > org.apache.hadoop.mapred.MapTask$1.next(MapTask.java:157)
>>>> >>> > task_0025_m_000000_2: at
>>>> >>> > org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:46)
>>>> >>> > task_0025_m_000000_2: at org.apache.hadoop.mapred.MapTask.run
>>>> >>> > (MapTask.java:175)
>>>> >>> > task_0025_m_000000_2: at
>>>> >>> > org.apache.hadoop.mapred.TaskTracker$Child.main
>>>> >>> > (TaskTracker.java:1445)
>>>> >>> > - map 100% reduce 100%
>>>> >>> > - Task Id : task_0025_m_000001_3, Status : FAILED
>>>> >>> > task_0025_m_000001_3: - Error running child
>>>> >>> > task_0025_m_000001_3: java.lang.ArrayIndexOutOfBoundsException:
>>>> -1
>>>> >>> > task_0025_m_000001_3: at
>>>> >>> >
>>>> org.apache.lucene.index.MultiReader.isDeleted(MultiReader.java:113)
>>>> >>> > task_0025_m_000001_3: at
>>>> >>> >
>>>> org.apache.nutch.indexer.DeleteDuplicates$InputFormat$DDRecordReade
>>>> >>> > r.next(DeleteDuplicates.java:176)
>>>> >>> > task_0025_m_000001_3: at
>>>> >>> > org.apache.hadoop.mapred.MapTask$1.next(MapTask.java:157)
>>>> >>> > task_0025_m_000001_3: at
>>>> >>> > org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:46)
>>>> >>> > task_0025_m_000001_3: at org.apache.hadoop.mapred.MapTask.run
>>>> >>> > (MapTask.java:175)
>>>> >>> > task_0025_m_000001_3: at
>>>> >>> > org.apache.hadoop.mapred.TaskTracker$Child.main
>>>> >>> > (TaskTracker.java:1445)
>>>> >>> > - Task Id : task_0025_m_000000_3, Status : FAILED
>>>> >>> > task_0025_m_000000_3: - Error running child
>>>> >>> > task_0025_m_000000_3: java.lang.ArrayIndexOutOfBoundsException:
>>>> -1
>>>> >>> > task_0025_m_000000_3: at
>>>> >>> >
>>>> org.apache.lucene.index.MultiReader.isDeleted(MultiReader.java:113)
>>>> >>> > task_0025_m_000000_3: at
>>>> >>> >
>>>> org.apache.nutch.indexer.DeleteDuplicates$InputFormat$DDRecordReade
>>>> >>> > r.next(DeleteDuplicates.java:176)
>>>> >>> > task_0025_m_000000_3: at
>>>> >>> > org.apache.hadoop.mapred.MapTask$1.next(MapTask.java:157)
>>>> >>> > task_0025_m_000000_3: at
>>>> >>> > org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:46)
>>>> >>> > task_0025_m_000000_3: at org.apache.hadoop.mapred.MapTask.run
>>>> >>> > (MapTask.java:175)
>>>> >>> > task_0025_m_000000_3: at
>>>> >>> > org.apache.hadoop.mapred.TaskTracker$Child.main
>>>> >>> > (TaskTracker.java:1445)
>>>> >>> > Exception in thread "main" java.io.IOException: Job failed!
>>>> >>> > at org.apache.hadoop.mapred.JobClient.runJob(JobClient.java:604)
>>>> >>> > at org.apache.nutch.indexer.DeleteDuplicates.dedup
>>>> >>> > (DeleteDuplicates.java:439)
>>>> >>> > at org.apache.nutch.crawl.Crawl.main(Crawl.java:135)
>>>> >>> >
>>>> >>> > how i solve it?
>>>
>>>
>>>
>>>
>>
>>
>
> --
> View this message in context:
> http://www.nabble.com/Nutch-crawl-problem-tp14327978p14492766.html
> Sent from the Hadoop Users mailing list archive at Nabble.com.
>
>
>




Re: Nutch crawl problem

Posted by jibjoice <su...@hotmail.com>.
I still can't solve it.
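
Given the earlier point that Dedup throws an exception when no pages were indexed,
one quick check before rerunning is whether the indexer actually wrote anything into
the crawl directory (paths follow the crawl dir used above):

# both should be non-empty after the Indexer and segment steps
bin/hadoop dfs -ls crawled/indexes
bin/hadoop dfs -ls crawled/segments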

jibjoice wrote:
> 
> i follow this link "http://wiki.apache.org/nutch/NutchHadoopTutorial" so i
> think it's not about the conf/crawl-urlfilter.txt file when i use this
> command "bin/nutch crawl urls -dir crawled -depth 3" again it shows :
> Generator: Selecting best-scoring urls due for fetch.
> Generator: starting
> Generator: segment: crawled/segments/25501221110712
> Generator: filtering: false
> Generator: topN: 2147483647
> Generator: Partitioning selected urls by host, for politeness.
> Generator: done.
> Fetcher: starting
> Fetcher: segment: crawled/segments/25501221110712
> Fetcher: done
> CrawlDb update: starting
> CrawlDb update: db: crawled/crawldb
> CrawlDb update: segments: [crawled/segments/25501221110712]
> CrawlDb update: additions allowed: true
> CrawlDb update: URL normalizing: true
> CrawlDb update: URL filtering: true
> CrawlDb update: Merging segment data into db.
> CrawlDb update: done
> Generator: Selecting best-scoring urls due for fetch.
> Generator: starting
> Generator: segment: crawled/segments/25501221110908
> Generator: filtering: false
> Generator: topN: 2147483647
> Generator: Partitioning selected urls by host, for politeness.
> Generator: done.
> Fetcher: starting
> Fetcher: segment: crawled/segments/25501221110908
> Fetcher: done
> CrawlDb update: starting
> CrawlDb update: db: crawled/crawldb
> CrawlDb update: segments: [crawled/segments/25501221110908]
> CrawlDb update: additions allowed: true
> CrawlDb update: URL normalizing: true
> CrawlDb update: URL filtering: true
> CrawlDb update: Merging segment data into db.
> CrawlDb update: done
> LinkDb: starting
> LinkDb: linkdb: crawled/linkdb
> LinkDb: URL normalize: true
> LinkDb: URL filter: true
> LinkDb: adding segment: /user/nutch/crawled/segments/25501221110519
> LinkDb: adding segment: /user/nutch/crawled/segments/25501221110712
> LinkDb: adding segment: /user/nutch/crawled/segments/25501221110908
> LinkDb: done
> Indexer: starting
> Indexer: linkdb: crawled/linkdb
> Indexer: adding segment: /user/nutch/crawled/segments/25501221110519
> Indexer: adding segment: /user/nutch/crawled/segments/25501221110712
> Indexer: adding segment: /user/nutch/crawled/segments/25501221110908
> Indexer: done
> Dedup: starting
> Dedup: adding indexes in: crawled/indexes
> task_0017_m_000000_0: log4j:ERROR Either File or DatePattern options are
> not set for appender [DRFA].
> task_0017_m_000001_0: log4j:ERROR setFile(null,true) call failed.
> task_0017_m_000001_0: java.io.FileNotFoundException: /nutch/search/logs
> (Is a directory)
> task_0017_m_000001_0:   at java.io.FileOutputStream.openAppend(Native
> Method)
> task_0017_m_000001_0:   at
> java.io.FileOutputStream.<init>(FileOutputStream.java:177)
> task_0017_m_000001_0:   at
> java.io.FileOutputStream.<init>(FileOutputStream.java:102)
> task_0017_m_000001_0:   at
> org.apache.log4j.FileAppender.setFile(FileAppender.java:289)
> task_0017_m_000001_0:   at
> org.apache.log4j.FileAppender.activateOptions(FileAppender.java:163)
> task_0017_m_000001_0:   at
> org.apache.log4j.DailyRollingFileAppender.activateOptions(DailyRollingFileAppender.java:215)
> task_0017_m_000001_0:   at
> org.apache.log4j.config.PropertySetter.activate(PropertySetter.java:256)
> task_0017_m_000001_0:   at
> org.apache.log4j.config.PropertySetter.setProperties(PropertySetter.java:132)
> task_0017_m_000001_0:   at
> org.apache.log4j.config.PropertySetter.setProperties(PropertySetter.java:96)
> task_0017_m_000001_0:   at
> org.apache.log4j.PropertyConfigurator.parseAppender(PropertyConfigurator.java:654)
> task_0017_m_000001_0:   at
> org.apache.log4j.PropertyConfigurator.parseCategory(PropertyConfigurator.java:612)
> task_0017_m_000001_0:   at
> org.apache.log4j.PropertyConfigurator.configureRootCategory(PropertyConfigurator.java:509)
> task_0017_m_000001_0:   at
> org.apache.log4j.PropertyConfigurator.doConfigure(PropertyConfigurator.java:415)
> task_0017_m_000001_0:   at
> org.apache.log4j.PropertyConfigurator.doConfigure(PropertyConfigurator.java:441)
> task_0017_m_000001_0:   at
> org.apache.log4j.helpers.OptionConverter.selectAndConfigure(OptionConverter.java:468)
> task_0017_m_000001_0:   at
> org.apache.log4j.LogManager.<clinit>(LogManager.java:122)
> task_0017_m_000001_0:   at
> org.apache.log4j.Logger.getLogger(Logger.java:104)
> task_0017_m_000001_0:   at
> org.apache.commons.logging.impl.Log4JLogger.getLogger(Log4JLogger.java:229)
> task_0017_m_000001_0:   at
> org.apache.commons.logging.impl.Log4JLogger.<init>(Log4JLogger.java:65)
> task_0017_m_000001_0:   at
> sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
> task_0017_m_000001_0:   at
> sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:39)
> task_0017_m_000001_0:   at
> sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:27)
> task_0017_m_000001_0:   at
> java.lang.reflect.Constructor.newInstance(Constructor.java:513)
> task_0017_m_000001_0:   at
> org.apache.commons.logging.impl.LogFactoryImpl.newInstance(LogFactoryImpl.java:529)
> task_0017_m_000001_0:   at
> org.apache.commons.logging.impl.LogFactoryImpl.getInstance(LogFactoryImpl.java:235)
> task_0017_m_000001_0:   at
> org.apache.commons.logging.LogFactory.getLog(LogFactory.java:370)
> task_0017_m_000001_0:   at
> org.apache.hadoop.mapred.TaskTracker.<clinit>(TaskTracker.java:82)
> task_0017_m_000001_0:   at
> org.apache.hadoop.mapred.TaskTracker$Child.main(TaskTracker.java:1423)
> task_0017_m_000001_0: log4j:ERROR Either File or DatePattern options are
> not set for appender [DRFA].
> task_0017_m_000000_1: log4j:ERROR setFile(null,true) call failed.
> task_0017_m_000000_1: java.io.FileNotFoundException: /nutch/search/logs
> (Is a directory)
> task_0017_m_000000_1:   at java.io.FileOutputStream.openAppend(Native
> Method)
> task_0017_m_000000_1:   at
> java.io.FileOutputStream.<init>(FileOutputStream.java:177)
> task_0017_m_000000_1:   at
> java.io.FileOutputStream.<init>(FileOutputStream.java:102)
> task_0017_m_000000_1:   at
> org.apache.log4j.FileAppender.setFile(FileAppender.java:289)
> task_0017_m_000000_1:   at
> org.apache.log4j.FileAppender.activateOptions(FileAppender.java:163)
> task_0017_m_000000_1:   at
> org.apache.log4j.DailyRollingFileAppender.activateOptions(DailyRollingFileAppender.java:215)
> task_0017_m_000000_1:   at
> org.apache.log4j.config.PropertySetter.activate(PropertySetter.java:256)
> task_0017_m_000000_1:   at
> org.apache.log4j.config.PropertySetter.setProperties(PropertySetter.java:132)
> task_0017_m_000000_1:   at
> org.apache.log4j.config.PropertySetter.setProperties(PropertySetter.java:96)
> task_0017_m_000000_1:   at
> org.apache.log4j.PropertyConfigurator.parseAppender(PropertyConfigurator.java:654)
> task_0017_m_000000_1:   at
> org.apache.log4j.PropertyConfigurator.parseCategory(PropertyConfigurator.java:612)
> task_0017_m_000000_1:   at
> org.apache.log4j.PropertyConfigurator.configureRootCategory(PropertyConfigurator.java:509)
> task_0017_m_000000_1:   at
> org.apache.log4j.PropertyConfigurator.doConfigure(PropertyConfigurator.java:415)
> task_0017_m_000000_1:   at
> org.apache.log4j.PropertyConfigurator.doConfigure(PropertyConfigurator.java:441)
> task_0017_m_000000_1:   at
> org.apache.log4j.helpers.OptionConverter.selectAndConfigure(OptionConverter.java:468)
> task_0017_m_000000_1:   at
> org.apache.log4j.LogManager.<clinit>(LogManager.java:122)
> task_0017_m_000000_1:   at
> org.apache.log4j.Logger.getLogger(Logger.java:104)
> task_0017_m_000000_1:   at
> org.apache.commons.logging.impl.Log4JLogger.getLogger(Log4JLogger.java:229)
> task_0017_m_000000_1:   at
> org.apache.commons.logging.impl.Log4JLogger.<init>(Log4JLogger.java:65)
> task_0017_m_000000_1:   at
> sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
> task_0017_m_000000_1:   at
> sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:39)
> task_0017_m_000000_1:   at
> sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:27)
> task_0017_m_000000_1:   at
> java.lang.reflect.Constructor.newInstance(Constructor.java:513)
> task_0017_m_000000_1:   at
> org.apache.commons.logging.impl.LogFactoryImpl.newInstance(LogFactoryImpl.java:529)
> task_0017_m_000000_1:   at
> org.apache.commons.logging.impl.LogFactoryImpl.getInstance(LogFactoryImpl.java:235)
> task_0017_m_000000_3:   at
> org.apache.commons.logging.LogFactory.getLog(LogFactory.java:370)
> task_0017_m_000000_3:   at
> org.apache.hadoop.mapred.TaskTracker.<clinit>(TaskTracker.java:82)
> task_0017_m_000000_3:   at
> org.apache.hadoop.mapred.TaskTracker$Child.main(TaskTracker.java:1423)
> task_0017_m_000000_3: log4j:ERROR Either File or DatePattern options are
> not set for appender [DRFA].
> Exception in thread "main" java.io.IOException: Job failed!
>         at org.apache.hadoop.mapred.JobClient.runJob(JobClient.java:604)
>         at
> org.apache.nutch.indexer.DeleteDuplicates.dedup(DeleteDuplicates.java:439)
>         at org.apache.nutch.crawl.Crawl.main(Crawl.java:135)
> 
> i don't know what happen?
> 
> 
> pvvpr wrote:
>> 
>> I think you need to check the conf/crawl-urlfilter.txt file
>> 
>> On Thursday 20 December 2007 04:55, jibjoice wrote:
>>> please, help me to solve it
>>>
>>> jibjoice wrote:
>>> > where i should solve this? why it generated 0 records?
>>> >
>>> > pvvpr wrote:
>>> >> basically your indexes are empty since no URLs were generated and
>>> >> fetched. See
>>> >> this,
>>> >>
>>> >>> > - Generator: 0 records selected for fetching, exiting ...
>>> >>> > - Stopping at depth=0 - no more URLs to fetch.
>>> >>> > - No URLs to fetch - check your seed list and URL filters.
>>> >>> > - crawl finished: crawled
>>> >>
>>> >> when no pages are indexed, dedup throws Exception
>>> >>
>>> >> On Tuesday 18 December 2007 21:33, jibjoice wrote:
>>> >>> i can't solve it now, pls help me
>>> >>>
>>> >>> jibjoice wrote:
>>> >>> > i use nutch-0.9, hadoop-0.12.2 and i use this command "bin/nutch
>>> >>> > crawl urls -dir crawled -depth 3" have error :
>>> >>> >
>>> >>> > - crawl started in: crawled
>>> >>> > - rootUrlDir = input
>>> >>> > - threads = 10
>>> >>> > - depth = 3
>>> >>> > - Injector: starting
>>> >>> > - Injector: crawlDb: crawled/crawldb
>>> >>> > - Injector: urlDir: input
>>> >>> > - Injector: Converting injected urls to crawl db entries.
>>> >>> > - Total input paths to process : 1
>>> >>> > - Running job: job_0001
>>> >>> > - map 0% reduce 0%
>>> >>> > - map 100% reduce 0%
>>> >>> > - map 100% reduce 100%
>>> >>> > - Job complete: job_0001
>>> >>> > - Counters: 6
>>> >>> > - Map-Reduce Framework
>>> >>> > - Map input records=3
>>> >>> > - Map output records=1
>>> >>> > - Map input bytes=22
>>> >>> > - Map output bytes=52
>>> >>> > - Reduce input records=1
>>> >>> > - Reduce output records=1
>>> >>> > - Injector: Merging injected urls into crawl db.
>>> >>> > - Total input paths to process : 2
>>> >>> > - Running job: job_0002
>>> >>> > - map 0% reduce 0%
>>> >>> > - map 100% reduce 0%
>>> >>> > - map 100% reduce 58%
>>> >>> > - map 100% reduce 100%
>>> >>> > - Job complete: job_0002
>>> >>> > - Counters: 6
>>> >>> > - Map-Reduce Framework
>>> >>> > - Map input records=3
>>> >>> > - Map output records=1
>>> >>> > - Map input bytes=60
>>> >>> > - Map output bytes=52
>>> >>> > - Reduce input records=1
>>> >>> > - Reduce output records=1
>>> >>> > - Injector: done
>>> >>> > - Generator: Selecting best-scoring urls due for fetch.
>>> >>> > - Generator: starting
>>> >>> > - Generator: segment: crawled/segments/25501213164325
>>> >>> > - Generator: filtering: false
>>> >>> > - Generator: topN: 2147483647
>>> >>> > - Total input paths to process : 2
>>> >>> > - Running job: job_0003
>>> >>> > - map 0% reduce 0%
>>> >>> > - map 100% reduce 0%
>>> >>> > - map 100% reduce 100%
>>> >>> > - Job complete: job_0003
>>> >>> > - Counters: 6
>>> >>> > - Map-Reduce Framework
>>> >>> > - Map input records=3
>>> >>> > - Map output records=1
>>> >>> > - Map input bytes=59
>>> >>> > - Map output bytes=77
>>> >>> > - Reduce input records=1
>>> >>> > - Reduce output records=1
>>> >>> > - Generator: 0 records selected for fetching, exiting ...
>>> >>> > - Stopping at depth=0 - no more URLs to fetch.
>>> >>> > - No URLs to fetch - check your seed list and URL filters.
>>> >>> > - crawl finished: crawled
>>> >>> >
>>> >>> > but sometime i crawl some url it has error indexes time that
>>> >>> >
>>> >>> > - Indexer: done
>>> >>> > - Dedup: starting
>>> >>> > - Dedup: adding indexes in: crawled/indexes
>>> >>> > - Total input paths to process : 2
>>> >>> > - Running job: job_0025
>>> >>> > - map 0% reduce 0%
>>> >>> > - Task Id : task_0025_m_000001_0, Status : FAILED
>>> >>> > task_0025_m_000001_0: - Error running child
>>> >>> > task_0025_m_000001_0: java.lang.ArrayIndexOutOfBoundsException: -1
>>> >>> > task_0025_m_000001_0: at
>>> >>> >
>>> org.apache.lucene.index.MultiReader.isDeleted(MultiReader.java:113)
>>> >>> > task_0025_m_000001_0: at
>>> >>> >
>>> org.apache.nutch.indexer.DeleteDuplicates$InputFormat$DDRecordReade
>>> >>> > r.next(DeleteDuplicates.java:176)
>>> >>> > task_0025_m_000001_0: at
>>> >>> > org.apache.hadoop.mapred.MapTask$1.next(MapTask.java:157)
>>> >>> > task_0025_m_000001_0: at
>>> >>> > org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:46)
>>> >>> > task_0025_m_000001_0: at org.apache.hadoop.mapred.MapTask.run
>>> >>> > (MapTask.java:175)
>>> >>> > task_0025_m_000001_0: at
>>> >>> > org.apache.hadoop.mapred.TaskTracker$Child.main
>>> >>> > (TaskTracker.java:1445)
>>> >>> > - Task Id : task_0025_m_000000_0, Status : FAILED
>>> >>> > task_0025_m_000000_0: - Error running child
>>> >>> > task_0025_m_000000_0: java.lang.ArrayIndexOutOfBoundsException: -1
>>> >>> > task_0025_m_000000_0: at
>>> >>> > org.apache.lucene.index.MultiReader.isDeleted(MultiReader.java:113)
>>> >>> > task_0025_m_000000_0: at
>>> >>> > org.apache.nutch.indexer.DeleteDuplicates$InputFormat$DDRecordReade
>>> >>> > r.next(DeleteDuplicates.java:176)
>>> >>> > task_0025_m_000000_0: at
>>> >>> > org.apache.hadoop.mapred.MapTask$1.next(MapTask.java:157)
>>> >>> > task_0025_m_000000_0: at
>>> >>> > org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:46)
>>> >>> > task_0025_m_000000_0: at org.apache.hadoop.mapred.MapTask.run
>>> >>> > (MapTask.java:175)
>>> >>> > task_0025_m_000000_0: at
>>> >>> > org.apache.hadoop.mapred.TaskTracker$Child.main
>>> >>> > (TaskTracker.java:1445)
>>> >>> > - Task Id : task_0025_m_000000_1, Status : FAILED
>>> >>> > task_0025_m_000000_1: - Error running child
>>> >>> > task_0025_m_000000_1: java.lang.ArrayIndexOutOfBoundsException: -1
>>> >>> > task_0025_m_000000_1: at
>>> >>> > org.apache.lucene.index.MultiReader.isDeleted(MultiReader.java:113)
>>> >>> > task_0025_m_000000_1: at
>>> >>> > org.apache.nutch.indexer.DeleteDuplicates$InputFormat$DDRecordReade
>>> >>> > r.next(DeleteDuplicates.java:176)
>>> >>> > task_0025_m_000000_1: at
>>> >>> > org.apache.hadoop.mapred.MapTask$1.next(MapTask.java:157)
>>> >>> > task_0025_m_000000_1: at
>>> >>> > org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:46)
>>> >>> > task_0025_m_000000_1: at org.apache.hadoop.mapred.MapTask.run
>>> >>> > (MapTask.java:175)
>>> >>> > task_0025_m_000000_1: at
>>> >>> > org.apache.hadoop.mapred.TaskTracker$Child.main
>>> >>> > (TaskTracker.java:1445)
>>> >>> > - Task Id : task_0025_m_000001_1, Status : FAILED
>>> >>> > task_0025_m_000001_1: - Error running child
>>> >>> > task_0025_m_000001_1: java.lang.ArrayIndexOutOfBoundsException: -1
>>> >>> > task_0025_m_000001_1: at
>>> >>> > org.apache.lucene.index.MultiReader.isDeleted(MultiReader.java:113)
>>> >>> > task_0025_m_000001_1: at
>>> >>> > org.apache.nutch.indexer.DeleteDuplicates$InputFormat$DDRecordReade
>>> >>> > r.next(DeleteDuplicates.java:176)
>>> >>> > task_0025_m_000001_1: at
>>> >>> > org.apache.hadoop.mapred.MapTask$1.next(MapTask.java:157)
>>> >>> > task_0025_m_000001_1: at
>>> >>> > org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:46)
>>> >>> > task_0025_m_000001_1: at org.apache.hadoop.mapred.MapTask.run
>>> >>> > (MapTask.java:175)
>>> >>> > task_0025_m_000001_1: at
>>> >>> > org.apache.hadoop.mapred.TaskTracker$Child.main
>>> >>> > (TaskTracker.java:1445)
>>> >>> > - Task Id : task_0025_m_000001_2, Status : FAILED
>>> >>> > task_0025_m_000001_2: - Error running child
>>> >>> > task_0025_m_000001_2: java.lang.ArrayIndexOutOfBoundsException: -1
>>> >>> > task_0025_m_000001_2: at
>>> >>> > org.apache.lucene.index.MultiReader.isDeleted(MultiReader.java:113)
>>> >>> > task_0025_m_000001_2: at
>>> >>> > org.apache.nutch.indexer.DeleteDuplicates$InputFormat$DDRecordReade
>>> >>> > r.next(DeleteDuplicates.java:176)
>>> >>> > task_0025_m_000001_2: at
>>> >>> > org.apache.hadoop.mapred.MapTask$1.next(MapTask.java:157)
>>> >>> > task_0025_m_000001_2: at
>>> >>> > org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:46)
>>> >>> > task_0025_m_000001_2: at org.apache.hadoop.mapred.MapTask.run
>>> >>> > (MapTask.java:175)
>>> >>> > task_0025_m_000001_2: at
>>> >>> > org.apache.hadoop.mapred.TaskTracker$Child.main
>>> >>> > (TaskTracker.java:1445)
>>> >>> > - Task Id : task_0025_m_000000_2, Status : FAILED
>>> >>> > task_0025_m_000000_2: - Error running child
>>> >>> > task_0025_m_000000_2: java.lang.ArrayIndexOutOfBoundsException: -1
>>> >>> > task_0025_m_000000_2: at
>>> >>> > org.apache.lucene.index.MultiReader.isDeleted(MultiReader.java:113)
>>> >>> > task_0025_m_000000_2: at
>>> >>> > org.apache.nutch.indexer.DeleteDuplicates$InputFormat$DDRecordReade
>>> >>> > r.next(DeleteDuplicates.java:176)
>>> >>> > task_0025_m_000000_2: at
>>> >>> > org.apache.hadoop.mapred.MapTask$1.next(MapTask.java:157)
>>> >>> > task_0025_m_000000_2: at
>>> >>> > org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:46)
>>> >>> > task_0025_m_000000_2: at org.apache.hadoop.mapred.MapTask.run
>>> >>> > (MapTask.java:175)
>>> >>> > task_0025_m_000000_2: at
>>> >>> > org.apache.hadoop.mapred.TaskTracker$Child.main
>>> >>> > (TaskTracker.java:1445)
>>> >>> > - map 100% reduce 100%
>>> >>> > - Task Id : task_0025_m_000001_3, Status : FAILED
>>> >>> > task_0025_m_000001_3: - Error running child
>>> >>> > task_0025_m_000001_3: java.lang.ArrayIndexOutOfBoundsException: -1
>>> >>> > task_0025_m_000001_3: at
>>> >>> > org.apache.lucene.index.MultiReader.isDeleted(MultiReader.java:113)
>>> >>> > task_0025_m_000001_3: at
>>> >>> > org.apache.nutch.indexer.DeleteDuplicates$InputFormat$DDRecordReade
>>> >>> > r.next(DeleteDuplicates.java:176)
>>> >>> > task_0025_m_000001_3: at
>>> >>> > org.apache.hadoop.mapred.MapTask$1.next(MapTask.java:157)
>>> >>> > task_0025_m_000001_3: at
>>> >>> > org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:46)
>>> >>> > task_0025_m_000001_3: at org.apache.hadoop.mapred.MapTask.run
>>> >>> > (MapTask.java:175)
>>> >>> > task_0025_m_000001_3: at
>>> >>> > org.apache.hadoop.mapred.TaskTracker$Child.main
>>> >>> > (TaskTracker.java:1445)
>>> >>> > - Task Id : task_0025_m_000000_3, Status : FAILED
>>> >>> > task_0025_m_000000_3: - Error running child
>>> >>> > task_0025_m_000000_3: java.lang.ArrayIndexOutOfBoundsException: -1
>>> >>> > task_0025_m_000000_3: at
>>> >>> > org.apache.lucene.index.MultiReader.isDeleted(MultiReader.java:113)
>>> >>> > task_0025_m_000000_3: at
>>> >>> > org.apache.nutch.indexer.DeleteDuplicates$InputFormat$DDRecordReade
>>> >>> > r.next(DeleteDuplicates.java:176)
>>> >>> > task_0025_m_000000_3: at
>>> >>> > org.apache.hadoop.mapred.MapTask$1.next(MapTask.java:157)
>>> >>> > task_0025_m_000000_3: at
>>> >>> > org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:46)
>>> >>> > task_0025_m_000000_3: at org.apache.hadoop.mapred.MapTask.run
>>> >>> > (MapTask.java:175)
>>> >>> > task_0025_m_000000_3: at
>>> >>> > org.apache.hadoop.mapred.TaskTracker$Child.main
>>> >>> > (TaskTracker.java:1445)
>>> >>> > Exception in thread "main" java.io.IOException: Job failed!
>>> >>> > at org.apache.hadoop.mapred.JobClient.runJob(JobClient.java:604)
>>> >>> > at org.apache.nutch.indexer.DeleteDuplicates.dedup
>>> >>> > (DeleteDuplicates.java:439)
>>> >>> > at org.apache.nutch.crawl.Crawl.main(Crawl.java:135)
>>> >>> >
>>> >>> > how i solve it?
>> 
>> 
>> 
>> 
> 
> 

-- 
View this message in context: http://www.nabble.com/Nutch-crawl-problem-tp14327978p14492766.html
Sent from the Hadoop Users mailing list archive at Nabble.com.


Re: Nutch crawl problem

Posted by jibjoice <su...@hotmail.com>.
I followed this link, "http://wiki.apache.org/nutch/NutchHadoopTutorial", so I
don't think it is a problem with the conf/crawl-urlfilter.txt file. When I run
the command "bin/nutch crawl urls -dir crawled -depth 3" again, it shows:
Generator: Selecting best-scoring urls due for fetch.
Generator: starting
Generator: segment: crawled/segments/25501221110712
Generator: filtering: false
Generator: topN: 2147483647
Generator: Partitioning selected urls by host, for politeness.
Generator: done.
Fetcher: starting
Fetcher: segment: crawled/segments/25501221110712
Fetcher: done
CrawlDb update: starting
CrawlDb update: db: crawled/crawldb
CrawlDb update: segments: [crawled/segments/25501221110712]
CrawlDb update: additions allowed: true
CrawlDb update: URL normalizing: true
CrawlDb update: URL filtering: true
CrawlDb update: Merging segment data into db.
CrawlDb update: done
Generator: Selecting best-scoring urls due for fetch.
Generator: starting
Generator: segment: crawled/segments/25501221110908
Generator: filtering: false
Generator: topN: 2147483647
Generator: Partitioning selected urls by host, for politeness.
Generator: done.
Fetcher: starting
Fetcher: segment: crawled/segments/25501221110908
Fetcher: done
CrawlDb update: starting
CrawlDb update: db: crawled/crawldb
CrawlDb update: segments: [crawled/segments/25501221110908]
CrawlDb update: additions allowed: true
CrawlDb update: URL normalizing: true
CrawlDb update: URL filtering: true
CrawlDb update: Merging segment data into db.
CrawlDb update: done
LinkDb: starting
LinkDb: linkdb: crawled/linkdb
LinkDb: URL normalize: true
LinkDb: URL filter: true
LinkDb: adding segment: /user/nutch/crawled/segments/25501221110519
LinkDb: adding segment: /user/nutch/crawled/segments/25501221110712
LinkDb: adding segment: /user/nutch/crawled/segments/25501221110908
LinkDb: done
Indexer: starting
Indexer: linkdb: crawled/linkdb
Indexer: adding segment: /user/nutch/crawled/segments/25501221110519
Indexer: adding segment: /user/nutch/crawled/segments/25501221110712
Indexer: adding segment: /user/nutch/crawled/segments/25501221110908
Indexer: done
Dedup: starting
Dedup: adding indexes in: crawled/indexes
task_0017_m_000000_0: log4j:ERROR Either File or DatePattern options are not
set for appender [DRFA].
task_0017_m_000001_0: log4j:ERROR setFile(null,true) call failed.
task_0017_m_000001_0: java.io.FileNotFoundException: /nutch/search/logs (Is
a directory)
task_0017_m_000001_0:   at java.io.FileOutputStream.openAppend(Native
Method)
task_0017_m_000001_0:   at
java.io.FileOutputStream.<init>(FileOutputStream.java:177)
task_0017_m_000001_0:   at
java.io.FileOutputStream.<init>(FileOutputStream.java:102)
task_0017_m_000001_0:   at
org.apache.log4j.FileAppender.setFile(FileAppender.java:289)
task_0017_m_000001_0:   at
org.apache.log4j.FileAppender.activateOptions(FileAppender.java:163)
task_0017_m_000001_0:   at
org.apache.log4j.DailyRollingFileAppender.activateOptions(DailyRollingFileAppender.java:215)
task_0017_m_000001_0:   at
org.apache.log4j.config.PropertySetter.activate(PropertySetter.java:256)
task_0017_m_000001_0:   at
org.apache.log4j.config.PropertySetter.setProperties(PropertySetter.java:132)
task_0017_m_000001_0:   at
org.apache.log4j.config.PropertySetter.setProperties(PropertySetter.java:96)
task_0017_m_000001_0:   at
org.apache.log4j.PropertyConfigurator.parseAppender(PropertyConfigurator.java:654)
task_0017_m_000001_0:   at
org.apache.log4j.PropertyConfigurator.parseCategory(PropertyConfigurator.java:612)
task_0017_m_000001_0:   at
org.apache.log4j.PropertyConfigurator.configureRootCategory(PropertyConfigurator.java:509)
task_0017_m_000001_0:   at
org.apache.log4j.PropertyConfigurator.doConfigure(PropertyConfigurator.java:415)
task_0017_m_000001_0:   at
org.apache.log4j.PropertyConfigurator.doConfigure(PropertyConfigurator.java:441)
task_0017_m_000001_0:   at
org.apache.log4j.helpers.OptionConverter.selectAndConfigure(OptionConverter.java:468)
task_0017_m_000001_0:   at
org.apache.log4j.LogManager.<clinit>(LogManager.java:122)
task_0017_m_000001_0:   at
org.apache.log4j.Logger.getLogger(Logger.java:104)
task_0017_m_000001_0:   at
org.apache.commons.logging.impl.Log4JLogger.getLogger(Log4JLogger.java:229)
task_0017_m_000001_0:   at
org.apache.commons.logging.impl.Log4JLogger.<init>(Log4JLogger.java:65)
task_0017_m_000001_0:   at
sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
task_0017_m_000001_0:   at
sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:39)
task_0017_m_000001_0:   at
sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:27)
task_0017_m_000001_0:   at
java.lang.reflect.Constructor.newInstance(Constructor.java:513)
task_0017_m_000001_0:   at
org.apache.commons.logging.impl.LogFactoryImpl.newInstance(LogFactoryImpl.java:529)
task_0017_m_000001_0:   at
org.apache.commons.logging.impl.LogFactoryImpl.getInstance(LogFactoryImpl.java:235)
task_0017_m_000001_0:   at
org.apache.commons.logging.LogFactory.getLog(LogFactory.java:370)
task_0017_m_000001_0:   at
org.apache.hadoop.mapred.TaskTracker.<clinit>(TaskTracker.java:82)
task_0017_m_000001_0:   at
org.apache.hadoop.mapred.TaskTracker$Child.main(TaskTracker.java:1423)
task_0017_m_000001_0: log4j:ERROR Either File or DatePattern options are not
set for appender [DRFA].
task_0017_m_000000_1: log4j:ERROR setFile(null,true) call failed.
task_0017_m_000000_1: java.io.FileNotFoundException: /nutch/search/logs (Is
a directory)
task_0017_m_000000_1:   at java.io.FileOutputStream.openAppend(Native
Method)
task_0017_m_000000_1:   at
java.io.FileOutputStream.<init>(FileOutputStream.java:177)
task_0017_m_000000_1:   at
java.io.FileOutputStream.<init>(FileOutputStream.java:102)
task_0017_m_000000_1:   at
org.apache.log4j.FileAppender.setFile(FileAppender.java:289)
task_0017_m_000000_1:   at
org.apache.log4j.FileAppender.activateOptions(FileAppender.java:163)
task_0017_m_000000_1:   at
org.apache.log4j.DailyRollingFileAppender.activateOptions(DailyRollingFileAppender.java:215)
task_0017_m_000000_1:   at
org.apache.log4j.config.PropertySetter.activate(PropertySetter.java:256)
task_0017_m_000000_1:   at
org.apache.log4j.config.PropertySetter.setProperties(PropertySetter.java:132)
task_0017_m_000000_1:   at
org.apache.log4j.config.PropertySetter.setProperties(PropertySetter.java:96)
task_0017_m_000000_1:   at
org.apache.log4j.PropertyConfigurator.parseAppender(PropertyConfigurator.java:654)
task_0017_m_000000_1:   at
org.apache.log4j.PropertyConfigurator.parseCategory(PropertyConfigurator.java:612)
task_0017_m_000000_1:   at
org.apache.log4j.PropertyConfigurator.configureRootCategory(PropertyConfigurator.java:509)
task_0017_m_000000_1:   at
org.apache.log4j.PropertyConfigurator.doConfigure(PropertyConfigurator.java:415)
task_0017_m_000000_1:   at
org.apache.log4j.PropertyConfigurator.doConfigure(PropertyConfigurator.java:441)
task_0017_m_000000_1:   at
org.apache.log4j.helpers.OptionConverter.selectAndConfigure(OptionConverter.java:468)
task_0017_m_000000_1:   at
org.apache.log4j.LogManager.<clinit>(LogManager.java:122)
task_0017_m_000000_1:   at
org.apache.log4j.Logger.getLogger(Logger.java:104)
task_0017_m_000000_1:   at
org.apache.commons.logging.impl.Log4JLogger.getLogger(Log4JLogger.java:229)
task_0017_m_000000_1:   at
org.apache.commons.logging.impl.Log4JLogger.<init>(Log4JLogger.java:65)
task_0017_m_000000_1:   at
sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
task_0017_m_000000_1:   at
sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:39)
task_0017_m_000000_1:   at
sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:27)
task_0017_m_000000_1:   at
java.lang.reflect.Constructor.newInstance(Constructor.java:513)
task_0017_m_000000_1:   at
org.apache.commons.logging.impl.LogFactoryImpl.newInstance(LogFactoryImpl.java:529)
task_0017_m_000000_1:   at
org.apache.commons.logging.impl.LogFactoryImpl.getInstance(LogFactoryImpl.java:235)
task_0017_m_000000_3:   at
org.apache.commons.logging.LogFactory.getLog(LogFactory.java:370)
task_0017_m_000000_3:   at
org.apache.hadoop.mapred.TaskTracker.<clinit>(TaskTracker.java:82)
task_0017_m_000000_3:   at
org.apache.hadoop.mapred.TaskTracker$Child.main(TaskTracker.java:1423)
task_0017_m_000000_3: log4j:ERROR Either File or DatePattern options are not
set for appender [DRFA].
Exception in thread "main" java.io.IOException: Job failed!
        at org.apache.hadoop.mapred.JobClient.runJob(JobClient.java:604)
        at
org.apache.nutch.indexer.DeleteDuplicates.dedup(DeleteDuplicates.java:439)
        at org.apache.nutch.crawl.Crawl.main(Crawl.java:135)

I don't know what is happening here.
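
For reference, the [DRFA] appender named in that error is the one Hadoop
normally sets up in conf/log4j.properties, roughly like this (illustrative,
from a stock configuration rather than my own cluster):

  log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
  log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
  log4j.appender.DRFA.DatePattern=.yyyy-MM-dd

The FileNotFoundException on /nutch/search/logs (a directory) looks like
hadoop.log.file resolving to nothing in the task JVMs, so the File option ends
up pointing at the logs directory itself; that would explain the log4j errors,
though it may not be what makes the dedup job fail.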


pvvpr wrote:
> 
> I think you need to check the conf/crawl-urlfilter.txt file
> 
> On Thursday 20 December 2007 04:55, jibjoice wrote:
>> please, help me to solve it
>>
>> jibjoice wrote:
>> > where i should solve this? why it generated 0 records?
>> >
>> > pvvpr wrote:
>> >> basically your indexes are empty since no URLs were generated and
>> >> fetched. See
>> >> this,
>> >>
>> >>> > - Generator: 0 records selected for fetching, exiting ...
>> >>> > - Stopping at depth=0 - no more URLs to fetch.
>> >>> > - No URLs to fetch - check your seed list and URL filters.
>> >>> > - crawl finished: crawled
>> >>
>> >> when no pages are indexed, dedup throws Exception
>> >>
>> >> On Tuesday 18 December 2007 21:33, jibjoice wrote:
>> >>> i can't solve it now, pls help me
>> >>>
>> >>> jibjoice wrote:
>> >>> > i use nutch-0.9, hadoop-0.12.2 and i use this command "bin/nutch
>> >>> > crawl urls -dir crawled -depth 3" have error :
>> >>> >
>> >>> > - crawl started in: crawled
>> >>> > - rootUrlDir = input
>> >>> > - threads = 10
>> >>> > - depth = 3
>> >>> > - Injector: starting
>> >>> > - Injector: crawlDb: crawled/crawldb
>> >>> > - Injector: urlDir: input
>> >>> > - Injector: Converting injected urls to crawl db entries.
>> >>> > - Total input paths to process : 1
>> >>> > - Running job: job_0001
>> >>> > - map 0% reduce 0%
>> >>> > - map 100% reduce 0%
>> >>> > - map 100% reduce 100%
>> >>> > - Job complete: job_0001
>> >>> > - Counters: 6
>> >>> > - Map-Reduce Framework
>> >>> > - Map input records=3
>> >>> > - Map output records=1
>> >>> > - Map input bytes=22
>> >>> > - Map output bytes=52
>> >>> > - Reduce input records=1
>> >>> > - Reduce output records=1
>> >>> > - Injector: Merging injected urls into crawl db.
>> >>> > - Total input paths to process : 2
>> >>> > - Running job: job_0002
>> >>> > - map 0% reduce 0%
>> >>> > - map 100% reduce 0%
>> >>> > - map 100% reduce 58%
>> >>> > - map 100% reduce 100%
>> >>> > - Job complete: job_0002
>> >>> > - Counters: 6
>> >>> > - Map-Reduce Framework
>> >>> > - Map input records=3
>> >>> > - Map output records=1
>> >>> > - Map input bytes=60
>> >>> > - Map output bytes=52
>> >>> > - Reduce input records=1
>> >>> > - Reduce output records=1
>> >>> > - Injector: done
>> >>> > - Generator: Selecting best-scoring urls due for fetch.
>> >>> > - Generator: starting
>> >>> > - Generator: segment: crawled/segments/25501213164325
>> >>> > - Generator: filtering: false
>> >>> > - Generator: topN: 2147483647
>> >>> > - Total input paths to process : 2
>> >>> > - Running job: job_0003
>> >>> > - map 0% reduce 0%
>> >>> > - map 100% reduce 0%
>> >>> > - map 100% reduce 100%
>> >>> > - Job complete: job_0003
>> >>> > - Counters: 6
>> >>> > - Map-Reduce Framework
>> >>> > - Map input records=3
>> >>> > - Map output records=1
>> >>> > - Map input bytes=59
>> >>> > - Map output bytes=77
>> >>> > - Reduce input records=1
>> >>> > - Reduce output records=1
>> >>> > - Generator: 0 records selected for fetching, exiting ...
>> >>> > - Stopping at depth=0 - no more URLs to fetch.
>> >>> > - No URLs to fetch - check your seed list and URL filters.
>> >>> > - crawl finished: crawled
>> >>> >
>> >>> > but sometime i crawl some url it has error indexes time that
>> >>> >
>> >>> > - Indexer: done
>> >>> > - Dedup: starting
>> >>> > - Dedup: adding indexes in: crawled/indexes
>> >>> > - Total input paths to process : 2
>> >>> > - Running job: job_0025
>> >>> > - map 0% reduce 0%
>> >>> > - Task Id : task_0025_m_000001_0, Status : FAILED
>> >>> > task_0025_m_000001_0: - Error running child
>> >>> > task_0025_m_000001_0: java.lang.ArrayIndexOutOfBoundsException: -1
>> >>> > task_0025_m_000001_0: at
>> >>> > org.apache.lucene.index.MultiReader.isDeleted(MultiReader.java:113)
>> >>> > task_0025_m_000001_0: at
>> >>> > org.apache.nutch.indexer.DeleteDuplicates$InputFormat$DDRecordReade
>> >>> > r.next(DeleteDuplicates.java:176)
>> >>> > task_0025_m_000001_0: at
>> >>> > org.apache.hadoop.mapred.MapTask$1.next(MapTask.java:157)
>> >>> > task_0025_m_000001_0: at
>> >>> > org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:46)
>> >>> > task_0025_m_000001_0: at org.apache.hadoop.mapred.MapTask.run
>> >>> > (MapTask.java:175)
>> >>> > task_0025_m_000001_0: at
>> >>> > org.apache.hadoop.mapred.TaskTracker$Child.main
>> >>> > (TaskTracker.java:1445)
>> >>> > - Task Id : task_0025_m_000000_0, Status : FAILED
>> >>> > task_0025_m_000000_0: - Error running child
>> >>> > task_0025_m_000000_0: java.lang.ArrayIndexOutOfBoundsException: -1
>> >>> > task_0025_m_000000_0: at
>> >>> > org.apache.lucene.index.MultiReader.isDeleted(MultiReader.java:113)
>> >>> > task_0025_m_000000_0: at
>> >>> > org.apache.nutch.indexer.DeleteDuplicates$InputFormat$DDRecordReade
>> >>> > r.next(DeleteDuplicates.java:176)
>> >>> > task_0025_m_000000_0: at
>> >>> > org.apache.hadoop.mapred.MapTask$1.next(MapTask.java:157)
>> >>> > task_0025_m_000000_0: at
>> >>> > org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:46)
>> >>> > task_0025_m_000000_0: at org.apache.hadoop.mapred.MapTask.run
>> >>> > (MapTask.java:175)
>> >>> > task_0025_m_000000_0: at
>> >>> > org.apache.hadoop.mapred.TaskTracker$Child.main
>> >>> > (TaskTracker.java:1445)
>> >>> > - Task Id : task_0025_m_000000_1, Status : FAILED
>> >>> > task_0025_m_000000_1: - Error running child
>> >>> > task_0025_m_000000_1: java.lang.ArrayIndexOutOfBoundsException: -1
>> >>> > task_0025_m_000000_1: at
>> >>> > org.apache.lucene.index.MultiReader.isDeleted(MultiReader.java:113)
>> >>> > task_0025_m_000000_1: at
>> >>> > org.apache.nutch.indexer.DeleteDuplicates$InputFormat$DDRecordReade
>> >>> > r.next(DeleteDuplicates.java:176)
>> >>> > task_0025_m_000000_1: at
>> >>> > org.apache.hadoop.mapred.MapTask$1.next(MapTask.java:157)
>> >>> > task_0025_m_000000_1: at
>> >>> > org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:46)
>> >>> > task_0025_m_000000_1: at org.apache.hadoop.mapred.MapTask.run
>> >>> > (MapTask.java:175)
>> >>> > task_0025_m_000000_1: at
>> >>> > org.apache.hadoop.mapred.TaskTracker$Child.main
>> >>> > (TaskTracker.java:1445)
>> >>> > - Task Id : task_0025_m_000001_1, Status : FAILED
>> >>> > task_0025_m_000001_1: - Error running child
>> >>> > task_0025_m_000001_1: java.lang.ArrayIndexOutOfBoundsException: -1
>> >>> > task_0025_m_000001_1: at
>> >>> > org.apache.lucene.index.MultiReader.isDeleted(MultiReader.java:113)
>> >>> > task_0025_m_000001_1: at
>> >>> > org.apache.nutch.indexer.DeleteDuplicates$InputFormat$DDRecordReade
>> >>> > r.next(DeleteDuplicates.java:176)
>> >>> > task_0025_m_000001_1: at
>> >>> > org.apache.hadoop.mapred.MapTask$1.next(MapTask.java:157)
>> >>> > task_0025_m_000001_1: at
>> >>> > org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:46)
>> >>> > task_0025_m_000001_1: at org.apache.hadoop.mapred.MapTask.run
>> >>> > (MapTask.java:175)
>> >>> > task_0025_m_000001_1: at
>> >>> > org.apache.hadoop.mapred.TaskTracker$Child.main
>> >>> > (TaskTracker.java:1445)
>> >>> > - Task Id : task_0025_m_000001_2, Status : FAILED
>> >>> > task_0025_m_000001_2: - Error running child
>> >>> > task_0025_m_000001_2: java.lang.ArrayIndexOutOfBoundsException: -1
>> >>> > task_0025_m_000001_2: at
>> >>> > org.apache.lucene.index.MultiReader.isDeleted(MultiReader.java:113)
>> >>> > task_0025_m_000001_2: at
>> >>> > org.apache.nutch.indexer.DeleteDuplicates$InputFormat$DDRecordReade
>> >>> > r.next(DeleteDuplicates.java:176)
>> >>> > task_0025_m_000001_2: at
>> >>> > org.apache.hadoop.mapred.MapTask$1.next(MapTask.java:157)
>> >>> > task_0025_m_000001_2: at
>> >>> > org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:46)
>> >>> > task_0025_m_000001_2: at org.apache.hadoop.mapred.MapTask.run
>> >>> > (MapTask.java:175)
>> >>> > task_0025_m_000001_2: at
>> >>> > org.apache.hadoop.mapred.TaskTracker$Child.main
>> >>> > (TaskTracker.java:1445)
>> >>> > - Task Id : task_0025_m_000000_2, Status : FAILED
>> >>> > task_0025_m_000000_2: - Error running child
>> >>> > task_0025_m_000000_2: java.lang.ArrayIndexOutOfBoundsException: -1
>> >>> > task_0025_m_000000_2: at
>> >>> > org.apache.lucene.index.MultiReader.isDeleted(MultiReader.java:113)
>> >>> > task_0025_m_000000_2: at
>> >>> > org.apache.nutch.indexer.DeleteDuplicates$InputFormat$DDRecordReade
>> >>> > r.next(DeleteDuplicates.java:176)
>> >>> > task_0025_m_000000_2: at
>> >>> > org.apache.hadoop.mapred.MapTask$1.next(MapTask.java:157)
>> >>> > task_0025_m_000000_2: at
>> >>> > org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:46)
>> >>> > task_0025_m_000000_2: at org.apache.hadoop.mapred.MapTask.run
>> >>> > (MapTask.java:175)
>> >>> > task_0025_m_000000_2: at
>> >>> > org.apache.hadoop.mapred.TaskTracker$Child.main
>> >>> > (TaskTracker.java:1445)
>> >>> > - map 100% reduce 100%
>> >>> > - Task Id : task_0025_m_000001_3, Status : FAILED
>> >>> > task_0025_m_000001_3: - Error running child
>> >>> > task_0025_m_000001_3: java.lang.ArrayIndexOutOfBoundsException: -1
>> >>> > task_0025_m_000001_3: at
>> >>> > org.apache.lucene.index.MultiReader.isDeleted(MultiReader.java:113)
>> >>> > task_0025_m_000001_3: at
>> >>> > org.apache.nutch.indexer.DeleteDuplicates$InputFormat$DDRecordReade
>> >>> > r.next(DeleteDuplicates.java:176)
>> >>> > task_0025_m_000001_3: at
>> >>> > org.apache.hadoop.mapred.MapTask$1.next(MapTask.java:157)
>> >>> > task_0025_m_000001_3: at
>> >>> > org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:46)
>> >>> > task_0025_m_000001_3: at org.apache.hadoop.mapred.MapTask.run
>> >>> > (MapTask.java:175)
>> >>> > task_0025_m_000001_3: at
>> >>> > org.apache.hadoop.mapred.TaskTracker$Child.main
>> >>> > (TaskTracker.java:1445)
>> >>> > - Task Id : task_0025_m_000000_3, Status : FAILED
>> >>> > task_0025_m_000000_3: - Error running child
>> >>> > task_0025_m_000000_3: java.lang.ArrayIndexOutOfBoundsException: -1
>> >>> > task_0025_m_000000_3: at
>> >>> > org.apache.lucene.index.MultiReader.isDeleted(MultiReader.java:113)
>> >>> > task_0025_m_000000_3: at
>> >>> > org.apache.nutch.indexer.DeleteDuplicates$InputFormat$DDRecordReade
>> >>> > r.next(DeleteDuplicates.java:176)
>> >>> > task_0025_m_000000_3: at
>> >>> > org.apache.hadoop.mapred.MapTask$1.next(MapTask.java:157)
>> >>> > task_0025_m_000000_3: at
>> >>> > org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:46)
>> >>> > task_0025_m_000000_3: at org.apache.hadoop.mapred.MapTask.run
>> >>> > (MapTask.java:175)
>> >>> > task_0025_m_000000_3: at
>> >>> > org.apache.hadoop.mapred.TaskTracker$Child.main
>> >>> > (TaskTracker.java:1445)
>> >>> > Exception in thread "main" java.io.IOException: Job failed!
>> >>> > at org.apache.hadoop.mapred.JobClient.runJob(JobClient.java:604)
>> >>> > at org.apache.nutch.indexer.DeleteDuplicates.dedup
>> >>> > (DeleteDuplicates.java:439)
>> >>> > at org.apache.nutch.crawl.Crawl.main(Crawl.java:135)
>> >>> >
>> >>> > how i solve it?
> 
> 
> 
> 

-- 
View this message in context: http://www.nabble.com/Nutch-crawl-problem-tp14327978p14450181.html
Sent from the Hadoop Users mailing list archive at Nabble.com.


Re: Nutch crawl problem

Posted by pv...@research.iiit.ac.in.
I think you need to check the conf/crawl-urlfilter.txt file.
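
For example, a minimal crawl-urlfilter.txt sketch (the domain below is only a
placeholder and has to be changed to whatever hosts are in your seed list)
accepts the seed domain before the final catch-all reject rule:

  # skip URLs containing certain characters as probable queries, etc.
  -[?*!@=]
  # accept everything under the seed domain (placeholder domain)
  +^http://([a-z0-9]*\.)*example.com/
  # reject everything else
  -.

If the accept pattern still contains MY.DOMAIN.NAME from the default file, or
does not match the URLs in your seed directory, the generator will keep
selecting 0 records exactly as in the log you posted.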

On Thursday 20 December 2007 04:55, jibjoice wrote:
> please, help me to solve it
>
> jibjoice wrote:
> > where i should solve this? why it generated 0 records?
> >
> > pvvpr wrote:
> >> basically your indexes are empty since no URLs were generated and
> >> fetched. See
> >> this,
> >>
> >>> > - Generator: 0 records selected for fetching, exiting ...
> >>> > - Stopping at depth=0 - no more URLs to fetch.
> >>> > - No URLs to fetch - check your seed list and URL filters.
> >>> > - crawl finished: crawled
> >>
> >> when no pages are indexed, dedup throws Exception
> >>
> >> On Tuesday 18 December 2007 21:33, jibjoice wrote:
> >>> i can't solve it now, pls help me
> >>>
> >>> jibjoice wrote:
> >>> > i use nutch-0.9, hadoop-0.12.2 and i use this command "bin/nutch
> >>> > crawl urls -dir crawled -depth 3" have error :
> >>> >
> >>> > - crawl started in: crawled
> >>> > - rootUrlDir = input
> >>> > - threads = 10
> >>> > - depth = 3
> >>> > - Injector: starting
> >>> > - Injector: crawlDb: crawled/crawldb
> >>> > - Injector: urlDir: input
> >>> > - Injector: Converting injected urls to crawl db entries.
> >>> > - Total input paths to process : 1
> >>> > - Running job: job_0001
> >>> > - map 0% reduce 0%
> >>> > - map 100% reduce 0%
> >>> > - map 100% reduce 100%
> >>> > - Job complete: job_0001
> >>> > - Counters: 6
> >>> > - Map-Reduce Framework
> >>> > - Map input records=3
> >>> > - Map output records=1
> >>> > - Map input bytes=22
> >>> > - Map output bytes=52
> >>> > - Reduce input records=1
> >>> > - Reduce output records=1
> >>> > - Injector: Merging injected urls into crawl db.
> >>> > - Total input paths to process : 2
> >>> > - Running job: job_0002
> >>> > - map 0% reduce 0%
> >>> > - map 100% reduce 0%
> >>> > - map 100% reduce 58%
> >>> > - map 100% reduce 100%
> >>> > - Job complete: job_0002
> >>> > - Counters: 6
> >>> > - Map-Reduce Framework
> >>> > - Map input records=3
> >>> > - Map output records=1
> >>> > - Map input bytes=60
> >>> > - Map output bytes=52
> >>> > - Reduce input records=1
> >>> > - Reduce output records=1
> >>> > - Injector: done
> >>> > - Generator: Selecting best-scoring urls due for fetch.
> >>> > - Generator: starting
> >>> > - Generator: segment: crawled/segments/25501213164325
> >>> > - Generator: filtering: false
> >>> > - Generator: topN: 2147483647
> >>> > - Total input paths to process : 2
> >>> > - Running job: job_0003
> >>> > - map 0% reduce 0%
> >>> > - map 100% reduce 0%
> >>> > - map 100% reduce 100%
> >>> > - Job complete: job_0003
> >>> > - Counters: 6
> >>> > - Map-Reduce Framework
> >>> > - Map input records=3
> >>> > - Map output records=1
> >>> > - Map input bytes=59
> >>> > - Map output bytes=77
> >>> > - Reduce input records=1
> >>> > - Reduce output records=1
> >>> > - Generator: 0 records selected for fetching, exiting ...
> >>> > - Stopping at depth=0 - no more URLs to fetch.
> >>> > - No URLs to fetch - check your seed list and URL filters.
> >>> > - crawl finished: crawled
> >>> >
> >>> > but sometime i crawl some url it has error indexes time that
> >>> >
> >>> > - Indexer: done
> >>> > - Dedup: starting
> >>> > - Dedup: adding indexes in: crawled/indexes
> >>> > - Total input paths to process : 2
> >>> > - Running job: job_0025
> >>> > - map 0% reduce 0%
> >>> > - Task Id : task_0025_m_000001_0, Status : FAILED
> >>> > task_0025_m_000001_0: - Error running child
> >>> > task_0025_m_000001_0: java.lang.ArrayIndexOutOfBoundsException: -1
> >>> > task_0025_m_000001_0: at
> >>> > org.apache.lucene.index.MultiReader.isDeleted(MultiReader.java:113)
> >>> > task_0025_m_000001_0: at
> >>> > org.apache.nutch.indexer.DeleteDuplicates$InputFormat$DDRecordReade
> >>> > r.next(DeleteDuplicates.java:176)
> >>> > task_0025_m_000001_0: at
> >>> > org.apache.hadoop.mapred.MapTask$1.next(MapTask.java:157)
> >>> > task_0025_m_000001_0: at
> >>> > org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:46)
> >>> > task_0025_m_000001_0: at org.apache.hadoop.mapred.MapTask.run
> >>> > (MapTask.java:175)
> >>> > task_0025_m_000001_0: at
> >>> > org.apache.hadoop.mapred.TaskTracker$Child.main
> >>> > (TaskTracker.java:1445)
> >>> > - Task Id : task_0025_m_000000_0, Status : FAILED
> >>> > task_0025_m_000000_0: - Error running child
> >>> > task_0025_m_000000_0: java.lang.ArrayIndexOutOfBoundsException: -1
> >>> > task_0025_m_000000_0: at
> >>> > org.apache.lucene.index.MultiReader.isDeleted(MultiReader.java:113)
> >>> > task_0025_m_000000_0: at
> >>> > org.apache.nutch.indexer.DeleteDuplicates$InputFormat$DDRecordReade
> >>> > r.next(DeleteDuplicates.java:176)
> >>> > task_0025_m_000000_0: at
> >>> > org.apache.hadoop.mapred.MapTask$1.next(MapTask.java:157)
> >>> > task_0025_m_000000_0: at
> >>> > org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:46)
> >>> > task_0025_m_000000_0: at org.apache.hadoop.mapred.MapTask.run
> >>> > (MapTask.java:175)
> >>> > task_0025_m_000000_0: at
> >>> > org.apache.hadoop.mapred.TaskTracker$Child.main
> >>> > (TaskTracker.java:1445)
> >>> > - Task Id : task_0025_m_000000_1, Status : FAILED
> >>> > task_0025_m_000000_1: - Error running child
> >>> > task_0025_m_000000_1: java.lang.ArrayIndexOutOfBoundsException: -1
> >>> > task_0025_m_000000_1: at
> >>> > org.apache.lucene.index.MultiReader.isDeleted(MultiReader.java:113)
> >>> > task_0025_m_000000_1: at
> >>> > org.apache.nutch.indexer.DeleteDuplicates$InputFormat$DDRecordReade
> >>> > r.next(DeleteDuplicates.java:176)
> >>> > task_0025_m_000000_1: at
> >>> > org.apache.hadoop.mapred.MapTask$1.next(MapTask.java:157)
> >>> > task_0025_m_000000_1: at
> >>> > org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:46)
> >>> > task_0025_m_000000_1: at org.apache.hadoop.mapred.MapTask.run
> >>> > (MapTask.java:175)
> >>> > task_0025_m_000000_1: at
> >>> > org.apache.hadoop.mapred.TaskTracker$Child.main
> >>> > (TaskTracker.java:1445)
> >>> > - Task Id : task_0025_m_000001_1, Status : FAILED
> >>> > task_0025_m_000001_1: - Error running child
> >>> > task_0025_m_000001_1: java.lang.ArrayIndexOutOfBoundsException: -1
> >>> > task_0025_m_000001_1: at
> >>> > org.apache.lucene.index.MultiReader.isDeleted(MultiReader.java:113)
> >>> > task_0025_m_000001_1: at
> >>> > org.apache.nutch.indexer.DeleteDuplicates$InputFormat$DDRecordReade
> >>> > r.next(DeleteDuplicates.java:176)
> >>> > task_0025_m_000001_1: at
> >>> > org.apache.hadoop.mapred.MapTask$1.next(MapTask.java:157)
> >>> > task_0025_m_000001_1: at
> >>> > org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:46)
> >>> > task_0025_m_000001_1: at org.apache.hadoop.mapred.MapTask.run
> >>> > (MapTask.java:175)
> >>> > task_0025_m_000001_1: at
> >>> > org.apache.hadoop.mapred.TaskTracker$Child.main
> >>> > (TaskTracker.java:1445)
> >>> > - Task Id : task_0025_m_000001_2, Status : FAILED
> >>> > task_0025_m_000001_2: - Error running child
> >>> > task_0025_m_000001_2: java.lang.ArrayIndexOutOfBoundsException: -1
> >>> > task_0025_m_000001_2: at
> >>> > org.apache.lucene.index.MultiReader.isDeleted(MultiReader.java:113)
> >>> > task_0025_m_000001_2: at
> >>> > org.apache.nutch.indexer.DeleteDuplicates$InputFormat$DDRecordReade
> >>> > r.next(DeleteDuplicates.java:176)
> >>> > task_0025_m_000001_2: at
> >>> > org.apache.hadoop.mapred.MapTask$1.next(MapTask.java:157)
> >>> > task_0025_m_000001_2: at
> >>> > org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:46)
> >>> > task_0025_m_000001_2: at org.apache.hadoop.mapred.MapTask.run
> >>> > (MapTask.java:175)
> >>> > task_0025_m_000001_2: at
> >>> > org.apache.hadoop.mapred.TaskTracker$Child.main
> >>> > (TaskTracker.java:1445)
> >>> > - Task Id : task_0025_m_000000_2, Status : FAILED
> >>> > task_0025_m_000000_2: - Error running child
> >>> > task_0025_m_000000_2: java.lang.ArrayIndexOutOfBoundsException: -1
> >>> > task_0025_m_000000_2: at
> >>> > org.apache.lucene.index.MultiReader.isDeleted(MultiReader.java:113)
> >>> > task_0025_m_000000_2: at
> >>> > org.apache.nutch.indexer.DeleteDuplicates$InputFormat$DDRecordReade
> >>> > r.next(DeleteDuplicates.java:176)
> >>> > task_0025_m_000000_2: at
> >>> > org.apache.hadoop.mapred.MapTask$1.next(MapTask.java:157)
> >>> > task_0025_m_000000_2: at
> >>> > org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:46)
> >>> > task_0025_m_000000_2: at org.apache.hadoop.mapred.MapTask.run
> >>> > (MapTask.java:175)
> >>> > task_0025_m_000000_2: at
> >>> > org.apache.hadoop.mapred.TaskTracker$Child.main
> >>> > (TaskTracker.java:1445)
> >>> > - map 100% reduce 100%
> >>> > - Task Id : task_0025_m_000001_3, Status : FAILED
> >>> > task_0025_m_000001_3: - Error running child
> >>> > task_0025_m_000001_3: java.lang.ArrayIndexOutOfBoundsException: -1
> >>> > task_0025_m_000001_3: at
> >>> > org.apache.lucene.index.MultiReader.isDeleted(MultiReader.java:113)
> >>> > task_0025_m_000001_3: at
> >>> > org.apache.nutch.indexer.DeleteDuplicates$InputFormat$DDRecordReade
> >>> > r.next(DeleteDuplicates.java:176)
> >>> > task_0025_m_000001_3: at
> >>> > org.apache.hadoop.mapred.MapTask$1.next(MapTask.java:157)
> >>> > task_0025_m_000001_3: at
> >>> > org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:46)
> >>> > task_0025_m_000001_3: at org.apache.hadoop.mapred.MapTask.run
> >>> > (MapTask.java:175)
> >>> > task_0025_m_000001_3: at
> >>> > org.apache.hadoop.mapred.TaskTracker$Child.main
> >>> > (TaskTracker.java:1445)
> >>> > - Task Id : task_0025_m_000000_3, Status : FAILED
> >>> > task_0025_m_000000_3: - Error running child
> >>> > task_0025_m_000000_3: java.lang.ArrayIndexOutOfBoundsException: -1
> >>> > task_0025_m_000000_3: at
> >>> > org.apache.lucene.index.MultiReader.isDeleted(MultiReader.java:113)
> >>> > task_0025_m_000000_3: at
> >>> > org.apache.nutch.indexer.DeleteDuplicates$InputFormat$DDRecordReade
> >>> > r.next(DeleteDuplicates.java:176)
> >>> > task_0025_m_000000_3: at
> >>> > org.apache.hadoop.mapred.MapTask$1.next(MapTask.java:157)
> >>> > task_0025_m_000000_3: at
> >>> > org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:46)
> >>> > task_0025_m_000000_3: at org.apache.hadoop.mapred.MapTask.run
> >>> > (MapTask.java:175)
> >>> > task_0025_m_000000_3: at
> >>> > org.apache.hadoop.mapred.TaskTracker$Child.main
> >>> > (TaskTracker.java:1445)
> >>> > Exception in thread "main" java.io.IOException: Job failed!
> >>> > at org.apache.hadoop.mapred.JobClient.runJob(JobClient.java:604)
> >>> > at org.apache.nutch.indexer.DeleteDuplicates.dedup
> >>> > (DeleteDuplicates.java:439)
> >>> > at org.apache.nutch.crawl.Crawl.main(Crawl.java:135)
> >>> >
> >>> > how i solve it?



Re: Nutch crawl problem

Posted by jibjoice <su...@hotmail.com>.
Please help me solve this.

jibjoice wrote:
> 
> where i should solve this? why it generated 0 records?
>  
> 
> pvvpr wrote:
>> 
>> basically your indexes are empty since no URLs were generated and
>> fetched. See 
>> this,
>> 
>>> > - Generator: 0 records selected for fetching, exiting ...
>>> > - Stopping at depth=0 - no more URLs to fetch.
>>> > - No URLs to fetch - check your seed list and URL filters.
>>> > - crawl finished: crawled
>> 
>> 
>> when no pages are indexed, dedup throws Exception
>> 
>> 
>> On Tuesday 18 December 2007 21:33, jibjoice wrote:
>>> i can't solve it now, pls help me
>>>
>>> jibjoice wrote:
>>> > i use nutch-0.9, hadoop-0.12.2 and i use this command "bin/nutch crawl
>>> > urls -dir crawled -depth 3" have error :
>>> >
>>> > - crawl started in: crawled
>>> > - rootUrlDir = input
>>> > - threads = 10
>>> > - depth = 3
>>> > - Injector: starting
>>> > - Injector: crawlDb: crawled/crawldb
>>> > - Injector: urlDir: input
>>> > - Injector: Converting injected urls to crawl db entries.
>>> > - Total input paths to process : 1
>>> > - Running job: job_0001
>>> > - map 0% reduce 0%
>>> > - map 100% reduce 0%
>>> > - map 100% reduce 100%
>>> > - Job complete: job_0001
>>> > - Counters: 6
>>> > - Map-Reduce Framework
>>> > - Map input records=3
>>> > - Map output records=1
>>> > - Map input bytes=22
>>> > - Map output bytes=52
>>> > - Reduce input records=1
>>> > - Reduce output records=1
>>> > - Injector: Merging injected urls into crawl db.
>>> > - Total input paths to process : 2
>>> > - Running job: job_0002
>>> > - map 0% reduce 0%
>>> > - map 100% reduce 0%
>>> > - map 100% reduce 58%
>>> > - map 100% reduce 100%
>>> > - Job complete: job_0002
>>> > - Counters: 6
>>> > - Map-Reduce Framework
>>> > - Map input records=3
>>> > - Map output records=1
>>> > - Map input bytes=60
>>> > - Map output bytes=52
>>> > - Reduce input records=1
>>> > - Reduce output records=1
>>> > - Injector: done
>>> > - Generator: Selecting best-scoring urls due for fetch.
>>> > - Generator: starting
>>> > - Generator: segment: crawled/segments/25501213164325
>>> > - Generator: filtering: false
>>> > - Generator: topN: 2147483647
>>> > - Total input paths to process : 2
>>> > - Running job: job_0003
>>> > - map 0% reduce 0%
>>> > - map 100% reduce 0%
>>> > - map 100% reduce 100%
>>> > - Job complete: job_0003
>>> > - Counters: 6
>>> > - Map-Reduce Framework
>>> > - Map input records=3
>>> > - Map output records=1
>>> > - Map input bytes=59
>>> > - Map output bytes=77
>>> > - Reduce input records=1
>>> > - Reduce output records=1
>>> > - Generator: 0 records selected for fetching, exiting ...
>>> > - Stopping at depth=0 - no more URLs to fetch.
>>> > - No URLs to fetch - check your seed list and URL filters.
>>> > - crawl finished: crawled
>>> >
>>> > but sometime i crawl some url it has error indexes time that
>>> >
>>> > - Indexer: done
>>> > - Dedup: starting
>>> > - Dedup: adding indexes in: crawled/indexes
>>> > - Total input paths to process : 2
>>> > - Running job: job_0025
>>> > - map 0% reduce 0%
>>> > - Task Id : task_0025_m_000001_0, Status : FAILED
>>> > task_0025_m_000001_0: - Error running child
>>> > task_0025_m_000001_0: java.lang.ArrayIndexOutOfBoundsException: -1
>>> > task_0025_m_000001_0: at
>>> > org.apache.lucene.index.MultiReader.isDeleted(MultiReader.java:113)
>>> > task_0025_m_000001_0: at
>>> > org.apache.nutch.indexer.DeleteDuplicates$InputFormat$DDRecordReade
>>> > r.next(DeleteDuplicates.java:176)
>>> > task_0025_m_000001_0: at
>>> > org.apache.hadoop.mapred.MapTask$1.next(MapTask.java:157)
>>> > task_0025_m_000001_0: at
>>> > org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:46)
>>> > task_0025_m_000001_0: at org.apache.hadoop.mapred.MapTask.run
>>> > (MapTask.java:175)
>>> > task_0025_m_000001_0: at
>>> > org.apache.hadoop.mapred.TaskTracker$Child.main
>>> > (TaskTracker.java:1445)
>>> > - Task Id : task_0025_m_000000_0, Status : FAILED
>>> > task_0025_m_000000_0: - Error running child
>>> > task_0025_m_000000_0: java.lang.ArrayIndexOutOfBoundsException: -1
>>> > task_0025_m_000000_0: at
>>> > org.apache.lucene.index.MultiReader.isDeleted(MultiReader.java:113)
>>> > task_0025_m_000000_0: at
>>> > org.apache.nutch.indexer.DeleteDuplicates$InputFormat$DDRecordReade
>>> > r.next(DeleteDuplicates.java:176)
>>> > task_0025_m_000000_0: at
>>> > org.apache.hadoop.mapred.MapTask$1.next(MapTask.java:157)
>>> > task_0025_m_000000_0: at
>>> > org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:46)
>>> > task_0025_m_000000_0: at org.apache.hadoop.mapred.MapTask.run
>>> > (MapTask.java:175)
>>> > task_0025_m_000000_0: at
>>> > org.apache.hadoop.mapred.TaskTracker$Child.main
>>> > (TaskTracker.java:1445)
>>> > - Task Id : task_0025_m_000000_1, Status : FAILED
>>> > task_0025_m_000000_1: - Error running child
>>> > task_0025_m_000000_1: java.lang.ArrayIndexOutOfBoundsException: -1
>>> > task_0025_m_000000_1: at
>>> > org.apache.lucene.index.MultiReader.isDeleted(MultiReader.java:113)
>>> > task_0025_m_000000_1: at
>>> > org.apache.nutch.indexer.DeleteDuplicates$InputFormat$DDRecordReade
>>> > r.next(DeleteDuplicates.java:176)
>>> > task_0025_m_000000_1: at
>>> > org.apache.hadoop.mapred.MapTask$1.next(MapTask.java:157)
>>> > task_0025_m_000000_1: at
>>> > org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:46)
>>> > task_0025_m_000000_1: at org.apache.hadoop.mapred.MapTask.run
>>> > (MapTask.java:175)
>>> > task_0025_m_000000_1: at
>>> > org.apache.hadoop.mapred.TaskTracker$Child.main
>>> > (TaskTracker.java:1445)
>>> > - Task Id : task_0025_m_000001_1, Status : FAILED
>>> > task_0025_m_000001_1: - Error running child
>>> > task_0025_m_000001_1: java.lang.ArrayIndexOutOfBoundsException: -1
>>> > task_0025_m_000001_1: at
>>> > org.apache.lucene.index.MultiReader.isDeleted(MultiReader.java:113)
>>> > task_0025_m_000001_1: at
>>> > org.apache.nutch.indexer.DeleteDuplicates$InputFormat$DDRecordReade
>>> > r.next(DeleteDuplicates.java:176)
>>> > task_0025_m_000001_1: at
>>> > org.apache.hadoop.mapred.MapTask$1.next(MapTask.java:157)
>>> > task_0025_m_000001_1: at
>>> > org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:46)
>>> > task_0025_m_000001_1: at org.apache.hadoop.mapred.MapTask.run
>>> > (MapTask.java:175)
>>> > task_0025_m_000001_1: at
>>> > org.apache.hadoop.mapred.TaskTracker$Child.main
>>> > (TaskTracker.java:1445)
>>> > - Task Id : task_0025_m_000001_2, Status : FAILED
>>> > task_0025_m_000001_2: - Error running child
>>> > task_0025_m_000001_2: java.lang.ArrayIndexOutOfBoundsException: -1
>>> > task_0025_m_000001_2: at
>>> > org.apache.lucene.index.MultiReader.isDeleted(MultiReader.java:113)
>>> > task_0025_m_000001_2: at
>>> > org.apache.nutch.indexer.DeleteDuplicates$InputFormat$DDRecordReade
>>> > r.next(DeleteDuplicates.java:176)
>>> > task_0025_m_000001_2: at
>>> > org.apache.hadoop.mapred.MapTask$1.next(MapTask.java:157)
>>> > task_0025_m_000001_2: at
>>> > org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:46)
>>> > task_0025_m_000001_2: at org.apache.hadoop.mapred.MapTask.run
>>> > (MapTask.java:175)
>>> > task_0025_m_000001_2: at
>>> > org.apache.hadoop.mapred.TaskTracker$Child.main
>>> > (TaskTracker.java:1445)
>>> > - Task Id : task_0025_m_000000_2, Status : FAILED
>>> > task_0025_m_000000_2: - Error running child
>>> > task_0025_m_000000_2: java.lang.ArrayIndexOutOfBoundsException: -1
>>> > task_0025_m_000000_2: at
>>> > org.apache.lucene.index.MultiReader.isDeleted(MultiReader.java:113)
>>> > task_0025_m_000000_2: at
>>> > org.apache.nutch.indexer.DeleteDuplicates$InputFormat$DDRecordReade
>>> > r.next(DeleteDuplicates.java:176)
>>> > task_0025_m_000000_2: at
>>> > org.apache.hadoop.mapred.MapTask$1.next(MapTask.java:157)
>>> > task_0025_m_000000_2: at
>>> > org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:46)
>>> > task_0025_m_000000_2: at org.apache.hadoop.mapred.MapTask.run
>>> > (MapTask.java:175)
>>> > task_0025_m_000000_2: at
>>> > org.apache.hadoop.mapred.TaskTracker$Child.main
>>> > (TaskTracker.java:1445)
>>> > - map 100% reduce 100%
>>> > - Task Id : task_0025_m_000001_3, Status : FAILED
>>> > task_0025_m_000001_3: - Error running child
>>> > task_0025_m_000001_3: java.lang.ArrayIndexOutOfBoundsException: -1
>>> > task_0025_m_000001_3: at
>>> > org.apache.lucene.index.MultiReader.isDeleted(MultiReader.java:113)
>>> > task_0025_m_000001_3: at
>>> > org.apache.nutch.indexer.DeleteDuplicates$InputFormat$DDRecordReade
>>> > r.next(DeleteDuplicates.java:176)
>>> > task_0025_m_000001_3: at
>>> > org.apache.hadoop.mapred.MapTask$1.next(MapTask.java:157)
>>> > task_0025_m_000001_3: at
>>> > org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:46)
>>> > task_0025_m_000001_3: at org.apache.hadoop.mapred.MapTask.run
>>> > (MapTask.java:175)
>>> > task_0025_m_000001_3: at
>>> > org.apache.hadoop.mapred.TaskTracker$Child.main
>>> > (TaskTracker.java:1445)
>>> > - Task Id : task_0025_m_000000_3, Status : FAILED
>>> > task_0025_m_000000_3: - Error running child
>>> > task_0025_m_000000_3: java.lang.ArrayIndexOutOfBoundsException: -1
>>> > task_0025_m_000000_3: at
>>> > org.apache.lucene.index.MultiReader.isDeleted(MultiReader.java:113)
>>> > task_0025_m_000000_3: at
>>> > org.apache.nutch.indexer.DeleteDuplicates$InputFormat$DDRecordReade
>>> > r.next(DeleteDuplicates.java:176)
>>> > task_0025_m_000000_3: at
>>> > org.apache.hadoop.mapred.MapTask$1.next(MapTask.java:157)
>>> > task_0025_m_000000_3: at
>>> > org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:46)
>>> > task_0025_m_000000_3: at org.apache.hadoop.mapred.MapTask.run
>>> > (MapTask.java:175)
>>> > task_0025_m_000000_3: at
>>> > org.apache.hadoop.mapred.TaskTracker$Child.main
>>> > (TaskTracker.java:1445)
>>> > Exception in thread "main" java.io.IOException: Job failed!
>>> > at org.apache.hadoop.mapred.JobClient.runJob(JobClient.java:604)
>>> > at org.apache.nutch.indexer.DeleteDuplicates.dedup
>>> > (DeleteDuplicates.java:439)
>>> > at org.apache.nutch.crawl.Crawl.main(Crawl.java:135)
>>> >
>>> > how i solve it?
>> 
>> 
>> 
>> 
> 
> 

-- 
View this message in context: http://www.nabble.com/Nutch-crawl-problem-tp14327978p14433510.html
Sent from the Hadoop Users mailing list archive at Nabble.com.


Re: Nutch crawl problem

Posted by jibjoice <su...@hotmail.com>.
Where should I fix this? Why did it generate 0 records?
 

pvvpr wrote:
> 
> basically your indexes are empty since no URLs were generated and fetched.
> See 
> this,
> 
>> > - Generator: 0 records selected for fetching, exiting ...
>> > - Stopping at depth=0 - no more URLs to fetch.
>> > - No URLs to fetch - check your seed list and URL filters.
>> > - crawl finished: crawled
> 
> 
> when no pages are indexed, dedup throws Exception
> 
> 
> On Tuesday 18 December 2007 21:33, jibjoice wrote:
>> i can't solve it now, pls help me
>>
>> jibjoice wrote:
>> > i use nutch-0.9, hadoop-0.12.2 and i use this command "bin/nutch crawl
>> > urls -dir crawled -depth 3" have error :
>> >
>> > - crawl started in: crawled
>> > - rootUrlDir = input
>> > - threads = 10
>> > - depth = 3
>> > - Injector: starting
>> > - Injector: crawlDb: crawled/crawldb
>> > - Injector: urlDir: input
>> > - Injector: Converting injected urls to crawl db entries.
>> > - Total input paths to process : 1
>> > - Running job: job_0001
>> > - map 0% reduce 0%
>> > - map 100% reduce 0%
>> > - map 100% reduce 100%
>> > - Job complete: job_0001
>> > - Counters: 6
>> > - Map-Reduce Framework
>> > - Map input records=3
>> > - Map output records=1
>> > - Map input bytes=22
>> > - Map output bytes=52
>> > - Reduce input records=1
>> > - Reduce output records=1
>> > - Injector: Merging injected urls into crawl db.
>> > - Total input paths to process : 2
>> > - Running job: job_0002
>> > - map 0% reduce 0%
>> > - map 100% reduce 0%
>> > - map 100% reduce 58%
>> > - map 100% reduce 100%
>> > - Job complete: job_0002
>> > - Counters: 6
>> > - Map-Reduce Framework
>> > - Map input records=3
>> > - Map output records=1
>> > - Map input bytes=60
>> > - Map output bytes=52
>> > - Reduce input records=1
>> > - Reduce output records=1
>> > - Injector: done
>> > - Generator: Selecting best-scoring urls due for fetch.
>> > - Generator: starting
>> > - Generator: segment: crawled/segments/25501213164325
>> > - Generator: filtering: false
>> > - Generator: topN: 2147483647
>> > - Total input paths to process : 2
>> > - Running job: job_0003
>> > - map 0% reduce 0%
>> > - map 100% reduce 0%
>> > - map 100% reduce 100%
>> > - Job complete: job_0003
>> > - Counters: 6
>> > - Map-Reduce Framework
>> > - Map input records=3
>> > - Map output records=1
>> > - Map input bytes=59
>> > - Map output bytes=77
>> > - Reduce input records=1
>> > - Reduce output records=1
>> > - Generator: 0 records selected for fetching, exiting ...
>> > - Stopping at depth=0 - no more URLs to fetch.
>> > - No URLs to fetch - check your seed list and URL filters.
>> > - crawl finished: crawled
>> >
>> > but sometime i crawl some url it has error indexes time that
>> >
>> > - Indexer: done
>> > - Dedup: starting
>> > - Dedup: adding indexes in: crawled/indexes
>> > - Total input paths to process : 2
>> > - Running job: job_0025
>> > - map 0% reduce 0%
>> > - Task Id : task_0025_m_000001_0, Status : FAILED
>> > task_0025_m_000001_0: - Error running child
>> > task_0025_m_000001_0: java.lang.ArrayIndexOutOfBoundsException: -1
>> > task_0025_m_000001_0: at
>> > org.apache.lucene.index.MultiReader.isDeleted(MultiReader.java:113)
>> > task_0025_m_000001_0: at
>> > org.apache.nutch.indexer.DeleteDuplicates$InputFormat$DDRecordReade
>> > r.next(DeleteDuplicates.java:176)
>> > task_0025_m_000001_0: at
>> > org.apache.hadoop.mapred.MapTask$1.next(MapTask.java:157)
>> > task_0025_m_000001_0: at
>> > org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:46)
>> > task_0025_m_000001_0: at org.apache.hadoop.mapred.MapTask.run
>> > (MapTask.java:175)
>> > task_0025_m_000001_0: at
>> > org.apache.hadoop.mapred.TaskTracker$Child.main
>> > (TaskTracker.java:1445)
>> > - Task Id : task_0025_m_000000_0, Status : FAILED
>> > task_0025_m_000000_0: - Error running child
>> > task_0025_m_000000_0: java.lang.ArrayIndexOutOfBoundsException: -1
>> > task_0025_m_000000_0: at
>> > org.apache.lucene.index.MultiReader.isDeleted(MultiReader.java:113)
>> > task_0025_m_000000_0: at
>> > org.apache.nutch.indexer.DeleteDuplicates$InputFormat$DDRecordReade
>> > r.next(DeleteDuplicates.java:176)
>> > task_0025_m_000000_0: at
>> > org.apache.hadoop.mapred.MapTask$1.next(MapTask.java:157)
>> > task_0025_m_000000_0: at
>> > org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:46)
>> > task_0025_m_000000_0: at org.apache.hadoop.mapred.MapTask.run
>> > (MapTask.java:175)
>> > task_0025_m_000000_0: at
>> > org.apache.hadoop.mapred.TaskTracker$Child.main
>> > (TaskTracker.java:1445)
>> > - Task Id : task_0025_m_000000_1, Status : FAILED
>> > task_0025_m_000000_1: - Error running child
>> > task_0025_m_000000_1: java.lang.ArrayIndexOutOfBoundsException: -1
>> > task_0025_m_000000_1: at
>> > org.apache.lucene.index.MultiReader.isDeleted(MultiReader.java:113)
>> > task_0025_m_000000_1: at
>> > org.apache.nutch.indexer.DeleteDuplicates$InputFormat$DDRecordReade
>> > r.next(DeleteDuplicates.java:176)
>> > task_0025_m_000000_1: at
>> > org.apache.hadoop.mapred.MapTask$1.next(MapTask.java:157)
>> > task_0025_m_000000_1: at
>> > org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:46)
>> > task_0025_m_000000_1: at org.apache.hadoop.mapred.MapTask.run
>> > (MapTask.java:175)
>> > task_0025_m_000000_1: at
>> > org.apache.hadoop.mapred.TaskTracker$Child.main
>> > (TaskTracker.java:1445)
>> > - Task Id : task_0025_m_000001_1, Status : FAILED
>> > task_0025_m_000001_1: - Error running child
>> > task_0025_m_000001_1: java.lang.ArrayIndexOutOfBoundsException: -1
>> > task_0025_m_000001_1: at
>> > org.apache.lucene.index.MultiReader.isDeleted(MultiReader.java:113)
>> > task_0025_m_000001_1: at
>> > org.apache.nutch.indexer.DeleteDuplicates$InputFormat$DDRecordReade
>> > r.next(DeleteDuplicates.java:176)
>> > task_0025_m_000001_1: at
>> > org.apache.hadoop.mapred.MapTask$1.next(MapTask.java:157)
>> > task_0025_m_000001_1: at
>> > org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:46)
>> > task_0025_m_000001_1: at org.apache.hadoop.mapred.MapTask.run
>> > (MapTask.java:175)
>> > task_0025_m_000001_1: at
>> > org.apache.hadoop.mapred.TaskTracker$Child.main
>> > (TaskTracker.java:1445)
>> > - Task Id : task_0025_m_000001_2, Status : FAILED
>> > task_0025_m_000001_2: - Error running child
>> > task_0025_m_000001_2: java.lang.ArrayIndexOutOfBoundsException: -1
>> > task_0025_m_000001_2: at
>> > org.apache.lucene.index.MultiReader.isDeleted(MultiReader.java:113)
>> > task_0025_m_000001_2: at
>> > org.apache.nutch.indexer.DeleteDuplicates$InputFormat$DDRecordReade
>> > r.next(DeleteDuplicates.java:176)
>> > task_0025_m_000001_2: at
>> > org.apache.hadoop.mapred.MapTask$1.next(MapTask.java:157)
>> > task_0025_m_000001_2: at
>> > org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:46)
>> > task_0025_m_000001_2: at org.apache.hadoop.mapred.MapTask.run
>> > (MapTask.java:175)
>> > task_0025_m_000001_2: at
>> > org.apache.hadoop.mapred.TaskTracker$Child.main
>> > (TaskTracker.java:1445)
>> > - Task Id : task_0025_m_000000_2, Status : FAILED
>> > task_0025_m_000000_2: - Error running child
>> > task_0025_m_000000_2: java.lang.ArrayIndexOutOfBoundsException: -1
>> > task_0025_m_000000_2: at
>> > org.apache.lucene.index.MultiReader.isDeleted(MultiReader.java:113)
>> > task_0025_m_000000_2: at
>> > org.apache.nutch.indexer.DeleteDuplicates$InputFormat$DDRecordReade
>> > r.next(DeleteDuplicates.java:176)
>> > task_0025_m_000000_2: at
>> > org.apache.hadoop.mapred.MapTask$1.next(MapTask.java:157)
>> > task_0025_m_000000_2: at
>> > org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:46)
>> > task_0025_m_000000_2: at org.apache.hadoop.mapred.MapTask.run
>> > (MapTask.java:175)
>> > task_0025_m_000000_2: at
>> > org.apache.hadoop.mapred.TaskTracker$Child.main
>> > (TaskTracker.java:1445)
>> > - map 100% reduce 100%
>> > - Task Id : task_0025_m_000001_3, Status : FAILED
>> > task_0025_m_000001_3: - Error running child
>> > task_0025_m_000001_3: java.lang.ArrayIndexOutOfBoundsException: -1
>> > task_0025_m_000001_3: at
>> > org.apache.lucene.index.MultiReader.isDeleted(MultiReader.java:113)
>> > task_0025_m_000001_3: at
>> > org.apache.nutch.indexer.DeleteDuplicates$InputFormat$DDRecordReade
>> > r.next(DeleteDuplicates.java:176)
>> > task_0025_m_000001_3: at
>> > org.apache.hadoop.mapred.MapTask$1.next(MapTask.java:157)
>> > task_0025_m_000001_3: at
>> > org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:46)
>> > task_0025_m_000001_3: at org.apache.hadoop.mapred.MapTask.run
>> > (MapTask.java:175)
>> > task_0025_m_000001_3: at
>> > org.apache.hadoop.mapred.TaskTracker$Child.main
>> > (TaskTracker.java:1445)
>> > - Task Id : task_0025_m_000000_3, Status : FAILED
>> > task_0025_m_000000_3: - Error running child
>> > task_0025_m_000000_3: java.lang.ArrayIndexOutOfBoundsException: -1
>> > task_0025_m_000000_3: at
>> > org.apache.lucene.index.MultiReader.isDeleted(MultiReader.java:113)
>> > task_0025_m_000000_3: at
>> > org.apache.nutch.indexer.DeleteDuplicates$InputFormat$DDRecordReade
>> > r.next(DeleteDuplicates.java:176)
>> > task_0025_m_000000_3: at
>> > org.apache.hadoop.mapred.MapTask$1.next(MapTask.java:157)
>> > task_0025_m_000000_3: at
>> > org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:46)
>> > task_0025_m_000000_3: at org.apache.hadoop.mapred.MapTask.run
>> > (MapTask.java:175)
>> > task_0025_m_000000_3: at
>> > org.apache.hadoop.mapred.TaskTracker$Child.main
>> > (TaskTracker.java:1445)
>> > Exception in thread "main" java.io.IOException: Job failed!
>> > at org.apache.hadoop.mapred.JobClient.runJob(JobClient.java:604)
>> > at org.apache.nutch.indexer.DeleteDuplicates.dedup
>> > (DeleteDuplicates.java:439)
>> > at org.apache.nutch.crawl.Crawl.main(Crawl.java:135)
>> >
>> > how i solve it?
> 
> 
> 
> 

-- 
View this message in context: http://www.nabble.com/Nutch-crawl-problem-tp14327978p14412659.html
Sent from the Hadoop Users mailing list archive at Nabble.com.


Re: Nutch crawl problem

Posted by pv...@research.iiit.ac.in.
Basically, your indexes are empty because no URLs were generated and fetched. See
this:

> > - Generator: 0 records selected for fetching, exiting ...
> > - Stopping at depth=0 - no more URLs to fetch.
> > - No URLs to fetch - check your seed list and URL filters.
> > - crawl finished: crawled


When no pages are indexed, the Dedup step throws this exception, so the crawl problem has to be fixed first: check your seed list and your URL filters so that the generator actually selects URLs to fetch.
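
As a quick sanity check (a minimal sketch, assuming the stock Nutch 0.9 intranet-crawl setup; the seed file name and the apache.org domain below are only examples), the seed directory you pass to "bin/nutch crawl" should contain one plain URL per line, and the accept pattern in conf/crawl-urlfilter.txt must match those URLs:

    $ cat urls/seed.txt
    http://lucene.apache.org/nutch/

    # conf/crawl-urlfilter.txt
    # accept hosts in the domain you are crawling (apache.org is only an example)
    +^http://([a-z0-9]*\.)*apache.org/
    # skip everything else
    -.

You can also confirm whether anything was indexed before Dedup runs, for example with "bin/hadoop dfs -ls crawled/indexes"; an empty crawl is exactly the case in which DeleteDuplicates fails with the ArrayIndexOutOfBoundsException shown above.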


On Tuesday 18 December 2007 21:33, jibjoice wrote:
> I can't solve it yet; please help me.



Re: Nutch crawl problem

Posted by jibjoice <su...@hotmail.com>.
I can't solve it yet; please help me.



-- 
View this message in context: http://www.nabble.com/Nutch-crawl-problem-tp14327978p14410062.html
Sent from the Hadoop Users mailing list archive at Nabble.com.