-
Notifications
You must be signed in to change notification settings - Fork 1.3k
NUTCH-3162 Latency metrics to properly merge data from all threads and tasks #906
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: master
Are you sure you want to change the base?
Changes from all commits
cc7b93a
117ed9a
5028b15
ba9c15a
f1d5dc3
f11a945
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -30,13 +30,15 @@ | |
|
|
||
| import org.apache.commons.lang3.time.StopWatch; | ||
| import org.apache.nutch.metadata.Nutch; | ||
| import org.apache.nutch.metrics.ErrorTracker; | ||
| import org.apache.nutch.metrics.NutchMetrics; | ||
| import org.apache.nutch.segment.SegmentChecker; | ||
| import org.apache.hadoop.conf.Configuration; | ||
| import org.apache.hadoop.fs.FileStatus; | ||
| import org.apache.hadoop.fs.FileSystem; | ||
| import org.apache.hadoop.fs.Path; | ||
| import org.apache.hadoop.mapreduce.Job; | ||
| import org.apache.hadoop.mapreduce.lib.input.FileInputFormat; | ||
| import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; | ||
| import org.apache.hadoop.mapreduce.Counter; | ||
| import org.apache.hadoop.util.StringUtils; | ||
|
|
@@ -143,6 +145,8 @@ public void index(Path crawlDb, Path linkDb, List<Path> segments, | |
| + RANDOM.nextInt()); | ||
|
|
||
| FileOutputFormat.setOutputPath(job, tmp); | ||
| // Driver-level error tracking: categorization + LOG.error only (no job counters; see ErrorTracker Javadoc). | ||
| ErrorTracker errorTracker = new ErrorTracker(NutchMetrics.GROUP_INDEXER); | ||
| try { | ||
| try{ | ||
| boolean success = job.waitForCompletion(true); | ||
|
|
@@ -155,6 +159,25 @@ public void index(Path crawlDb, Path linkDb, List<Path> segments, | |
| LOG.error(StringUtils.stringifyException(e)); | ||
| throw e; | ||
| } | ||
| Path latencyDir = new Path(tmp, "_latency"); | ||
| FileSystem fs = tmp.getFileSystem(conf); | ||
| if (fs.exists(latencyDir)) { | ||
| try (Job mergeJob = IndexerMapReduce.createLatencyMergeJob(conf, latencyDir)) { | ||
| FileOutputFormat.setOutputPath(mergeJob, new Path(tmp, "_latency_merge_out")); | ||
| boolean mergeSuccess = mergeJob.waitForCompletion(true); | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. when running
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Or when running on a single-node cluster (indexing to Solr): This only affects the latency-merge job.
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Looked into SequenceFileInputFormat.listStatus: it either takes a single file or a directory tree with sequence files
Member
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Yes, I wondered about this. I am not a huge fan of the intermediate output being written for IndexerJob either. I think we could even remove the changes for this job and address them separately. This will NOT have an impact on the job execution; however, the counters will not be accurate. |
||
| if (!mergeSuccess) { | ||
| LOG.error("Indexer Latency Merge job failed"); | ||
| errorTracker.recordError(ErrorTracker.ErrorType.OTHER); | ||
| } | ||
| } catch (IOException | InterruptedException | ClassNotFoundException e) { | ||
| if (e instanceof InterruptedException) { | ||
| Thread.currentThread().interrupt(); | ||
| } | ||
| LOG.error("Indexer Latency Merge job failed: {}", e.getMessage()); | ||
| errorTracker.recordError(e); | ||
| throw e; | ||
| } | ||
| } | ||
| LOG.info("Indexer: number of documents indexed, deleted, or skipped:"); | ||
| for (Counter counter : job.getCounters() | ||
| .getGroup(NutchMetrics.GROUP_INDEXER)) { | ||
|
|
||
Uh oh!
There was an error while loading. Please reload this page.