Earlier I used a crawler to imitate 追书神器 (a popular novel-reader app); of course, I never finished it.
Now I'm picking it back up as a refresher.
Reference:
小说爬虫 (a novel-crawler write-up)
Following that approach, I also built a Chrome-extension-based version of the crawler.
Source:
```js
var cheerio = require('cheerio')
var fs = require('fs')
const request = require('superagent')
require('superagent-charset')(request) // patches superagent with .charset() so GBK pages decode correctly
var async = require('async')

let listURL = 'http://www.biquguo.com/10_10315/'
var concurrencyCount = 0

// Fetch one chapter page and extract its title and body text.
var fetchUrl = function (url, callback) {
  concurrencyCount++
  let tempURL = listURL + url
  console.log('current concurrency:', concurrencyCount, ', now fetching:', tempURL)
  request.get(tempURL)
    .charset('gbk')
    .end(function (err, res) {
      concurrencyCount-- // decrement on both the error and success paths
      if (err) { return callback(err) }
      var $ = cheerio.load(res.text)
      var title = $('.bookname h1').text().trim()
      // .text() already strips tags; the extra replace only cleans up any
      // <br> remnants before whitespace is normalized into paragraph breaks
      var tempStr = $('#content').text().trim().replace(/<br\s*\/?>/gi, '\r\n')
      var data = '\r\n' + title + '\r\n' + tempStr.replace(/\s+/g, '\r\n\r\n')
      callback(null, data)
    })
}

getList(listURL) // function declarations are hoisted, so this call works here

// Fetch the chapter index, then crawl every chapter with a concurrency limit of 5.
function getList (url) {
  request.get(url)
    .charset('gbk')
    .end(function (err, res) {
      if (err) { throw err }
      var $ = cheerio.load(res.text)
      let list = []
      $('#list dd a').each(function (i, e) {
        list.push($(this).attr('href'))
      })
      list.splice(0, 128) // skip the first 128 chapter links in the index
      console.log(list, list.length)
      async.mapLimit(list, 5, function (url, callback) {
        fetchUrl(url, callback)
      }, function (err, result) {
        if (err) throw err
        console.log('final:')
        console.log(result)
        // result is an array of chapter strings, so join it before writing
        fs.appendFile('cr.txt', result.join(''), 'utf-8', function (err) {
          if (err) throw err
          console.log('chapters written to cr.txt successfully')
        })
      })
    })
}
```
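The piece that makes this work is `async.mapLimit`: it keeps at most five requests in flight at once, yet still returns the results in the order of the input array, so the chapters land in `cr.txt` in sequence no matter which request finishes first. Here is a minimal sketch of that pattern in isolation; `fakeFetch` is a stand-in worker invented for illustration, not part of the crawler above.

```js
var async = require('async')

// Stand-in worker: pretends to fetch a chapter after a random delay.
// In the real crawler this role is played by fetchUrl().
function fakeFetch (id, callback) {
  setTimeout(function () {
    callback(null, 'chapter ' + id)
  }, Math.random() * 1000)
}

// Run at most 5 workers at a time over 20 ids. Even though the workers
// finish in random order, `results` matches the order of the input array.
var ids = Array.from({ length: 20 }, (_, i) => i + 1)
async.mapLimit(ids, 5, fakeFetch, function (err, results) {
  if (err) throw err
  console.log(results) // ['chapter 1', 'chapter 2', ..., 'chapter 20']
})
```

That ordering guarantee is why the final callback can append `result` to the file in one shot, without re-sorting chapters afterwards.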