如何在 node.js 流中写入数据且不产生重复?

时间:2021-11-30 02:30:53

This question is about a URL-crawler in node.js. On the start_url URL he looks for links and "pushes" them to a .json-file (output.json).

这个问题是关于 node.js 中的 URL 爬虫。该爬虫从 start_url 指定的 URL 开始查找链接,并将它们“推送”到一个 .json 文件(output.json)中。

How can I make sure that he does not "push" or "write" domains twice to output.json (so that I do not get duplicates)? I've been using the hash function but this has caused problems.

我怎样才能确保它不会把同一个域名重复“推送”或“写入”到 output.json(以免产生重复项)?我尝试过使用哈希函数,但这带来了一些问题。

// Third-party dependencies: HTTP client and server-side HTML/DOM parsing.
// NOTE(review): the `request` package is deprecated — consider a maintained HTTP client.
var fs = require('fs');
var request = require('request');
var cheerio = require('cheerio');

// Crawl queue of URLs still to visit, seeded with the start page.
var start_url = ["http://blog.codinghorror.com/"]
// Output sink; discovered domains are appended as quoted, comma-separated strings.
var wstream = fs.createWriteStream("output.json");

// Extract root domain name from string
// Extract the root domain name (host without protocol, path or port)
// from a URL string.
// The result is lowercased: DNS names are case-insensitive, so without
// canonicalization "Example.com" and "example.com" would be treated as
// two different domains by any later duplicate check.
function extractDomain(url) {
    var domain;
    if (url.indexOf("://") > -1) { // has a protocol (http(s), ftp, ...): host is the 3rd '/'-separated segment
        domain = url.split('/')[2];
    } else {
        domain = url.split('/')[0]; // no protocol: host is everything before the first '/'
    }
    domain = domain.split(':')[0]; // find & remove a port number such as ":8080"
    return domain.toLowerCase();   // canonicalize case for reliable dedup
}

// Crawl `url`, write every newly-seen linked domain to the output stream,
// enqueue it for crawling, then recurse onto the next queued URL until the
// queue is empty.
// Fixes over the original: a closure-scoped Set records domains already
// written, so no domain is pushed to output.json twice; <a> tags without an
// href no longer crash extractDomain(); request errors are logged instead of
// being silently swallowed.
var req = (function () {
    var seenDomains = new Set(); // domains already written/enqueued

    return function req(url) {
        request(url, function (error, response, html) {
            if (!error) {
                var $ = cheerio.load(html);

                $("a").each(function () {
                    var link = $(this).attr("href");
                    if (!link) return; // anchor without href — nothing to extract

                    // lowercase locally so dedup works even if extractDomain
                    // does not canonicalize case itself
                    var domain = extractDomain(link).toLowerCase();
                    if (seenDomains.has(domain)) return; // already recorded

                    seenDomains.add(domain);
                    start_url.push("http://" + domain);
                    wstream.write('"http://' + domain + '",');
                });
            } else {
                // don't swallow failures silently — report and move on
                console.error("request failed for " + url + ": " + error);
            }

            // Done with the current URL; continue with the next queued one.
            start_url.shift();

            if (start_url.length > 0) {
                return req(start_url[0]);
            }

            wstream.end(); // queue exhausted — close the output file
        });
    };
})();

req(start_url[0]);

1 个解决方案

#1


2  

You can just keep track of the previously seen domains in a Set object like this:

您可以像这样跟踪Set对象中以前看到的域:

// Third-party dependencies: HTTP client and server-side HTML/DOM parsing.
var fs = require('fs');
var request = require('request');
var cheerio = require('cheerio');

// Set of domains already seen — guarantees each domain is written only once.
var domainList = new Set();
// Crawl queue of URLs still to visit, seeded with the start page.
var start_url = ["http://blog.codinghorror.com/"]
// Output sink; discovered domains are appended as quoted, comma-separated strings.
var wstream = fs.createWriteStream("output.json");

// Extract root domain name from string
// Extract the root domain name from a URL string.
// Strips the protocol prefix (if any) and a trailing :port, then
// lowercases the result — domain names are case-insensitive, but the
// Set used for duplicate detection is not.
function extractDomain(url) {
    // with a protocol the host is the third '/'-separated segment,
    // otherwise it is everything up to the first '/'
    var hasProtocol = url.indexOf("://") > -1;
    var host = hasProtocol ? url.split('/')[2] : url.split('/')[0];
    // drop a port number such as ":8080"
    host = host.split(':')[0];
    // canonicalize case
    return host.toLowerCase();
}

// Fetch `url`, record every not-yet-seen linked domain (write it to the
// output stream and enqueue it for crawling), then recurse onto the next
// queued URL until the queue is empty.
var req = function (url) {
    request(url, function (error, response, html) {
        if (!error) {
            var $ = cheerio.load(html);

            $("a").each(function () {
                var link = $(this).attr("href");
                // skip anchors that carry no href attribute
                if (!link) {
                    return;
                }
                var domain = extractDomain(link);
                // only brand-new domains are written and enqueued
                if (domainList.has(domain)) {
                    return;
                }
                domainList.add(domain);
                start_url.push("http://" + domain);
                wstream.write('"http://' + domain + '",');
            });
        }

        // current URL handled — drop it and continue with the next one
        start_url.shift();
        if (start_url.length > 0) {
            return req(start_url[0]);
        }
        // queue drained: close the output file
        wstream.end();
    });
};

req(start_url[0]);

Note: I also added a .toLowerCase() to the extractDomain() function since domains are not case sensitive, but a Set object is. This will make sure that even domains that differ only in case are recognized as the same domain.

注意:我还在 extractDomain() 函数中添加了 .toLowerCase(),因为域名不区分大小写,而 Set 对象是区分大小写的。这能确保仅大小写不同的域名也会被识别为同一个域名。

(此处原有一份与上文 #1 完全相同的重复答案内容,已删除重复。)