First commit
src/crawler/services/bid-crawler.service.ts (Normal file, 46 lines)
@@ -0,0 +1,46 @@
import { Injectable, Logger } from '@nestjs/common';
import * as puppeteer from 'puppeteer';
import { BidsService } from '../../bids/services/bid.service';
import { ChdtpCrawler } from './chdtp_target';

@Injectable()
export class BidCrawlerService {
  private readonly logger = new Logger(BidCrawlerService.name);

  constructor(
    private bidsService: BidsService,
  ) {}

  async crawlAll() {
    this.logger.log('Starting crawl task with Puppeteer...');

    const browser = await puppeteer.launch({
      headless: true,
      args: ['--no-sandbox', '--disable-setuid-sandbox'],
    });

    try {
      // Currently only supports ChdtpCrawler, but can be extended to a list of crawlers
      const crawler = ChdtpCrawler;
      this.logger.log(`Crawling: ${crawler.name}`);

      const results = await crawler.crawl(browser);
      this.logger.log(`Extracted ${results.length} items from ${crawler.name}`);

      for (const item of results) {
        // ChdtpResult carries no `type` field, so attribute each item to the crawler's name
        await this.bidsService.createOrUpdate({
          title: item.title,
          url: item.url,
          publishDate: item.publishDate,
          source: crawler.name,
        });
      }
    } catch (error) {
      this.logger.error(`Crawl task failed: ${error.message}`);
    } finally {
      await browser.close();
      this.logger.log('Crawl task finished.');
    }
  }
}
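The comment in crawlAll notes that only ChdtpCrawler is wired up today but that the service should eventually walk a list of crawlers. A minimal sketch of that extension, assuming every target shares the same name/crawl contract; CrawlTarget and crawlAllTargets are illustrative names, not part of this commit:

import * as puppeteer from 'puppeteer';
import { ChdtpCrawler, ChdtpResult } from './chdtp_target';

// Hypothetical contract shared by all crawl targets (assumption for illustration).
interface CrawlTarget {
  name: string;
  crawl(browser: puppeteer.Browser): Promise<ChdtpResult[]>;
}

// crawlAll could then iterate a registry instead of one hard-coded crawler:
const targets: CrawlTarget[] = [ChdtpCrawler /*, future targets */];

async function crawlAllTargets(browser: puppeteer.Browser): Promise<ChdtpResult[]> {
  const all: ChdtpResult[] = [];
  for (const crawler of targets) {
    try {
      // Isolate failures so one broken site does not abort the whole run.
      all.push(...(await crawler.crawl(browser)));
    } catch (error) {
      console.error(`Crawler ${crawler.name} failed: ${error.message}`);
    }
  }
  return all;
}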
src/crawler/services/chdtp_target.spec.ts (Normal file, 51 lines)
@@ -0,0 +1,51 @@
import { ChdtpCrawler } from './chdtp_target';
import * as puppeteer from 'puppeteer';

// Increase timeout to 60 seconds for network operations
jest.setTimeout(60000);

describe('ChdtpCrawler Real Site Test', () => {
  let browser: puppeteer.Browser;

  beforeAll(async () => {
    browser = await puppeteer.launch({
      headless: true, // Change to false to see the browser UI
      args: ['--no-sandbox', '--disable-setuid-sandbox'],
    });
  });

  afterAll(async () => {
    if (browser) {
      await browser.close();
    }
  });

  it('should visit the website and list all found bid information', async () => {
    console.log(`\nStarting crawl for: ${ChdtpCrawler.name}`);
    console.log(`Target URL: ${ChdtpCrawler.url}`);

    const results = await ChdtpCrawler.crawl(browser);

    console.log(`\nSuccessfully found ${results.length} items:\n`);
    console.log('----------------------------------------');
    results.forEach((item, index) => {
      console.log(`${index + 1}. [${item.publishDate.toLocaleDateString()}] ${item.title}`);
      console.log(`   Link: ${item.url}`);
      console.log('----------------------------------------');
    });

    // Basic assertions to ensure the crawler is working
    expect(results).toBeDefined();
    expect(Array.isArray(results)).toBeTruthy();
    // Warn but don't fail if the site returns 0 items (could be empty or changed structure)
    if (results.length === 0) {
      console.warn('Warning: No items found. Check if the website structure has changed or if the list is currently empty.');
    } else {
      // Check data integrity of the first item
      const firstItem = results[0];
      expect(firstItem.title).toBeTruthy();
      expect(firstItem.url).toMatch(/^https?:\/\//);
      expect(firstItem.publishDate).toBeInstanceOf(Date);
    }
  });
});
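The spec's inline comment suggests flipping headless to false to watch the browser. A hedged variant of the beforeAll launch for local debugging; slowMo is a standard puppeteer.launch option, and the values here are illustrative:

// Illustrative local-debug launch, not part of this commit:
browser = await puppeteer.launch({
  headless: false, // show the browser UI
  slowMo: 250,     // pause 250 ms between Puppeteer operations so each step is visible
  args: ['--no-sandbox', '--disable-setuid-sandbox'],
});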
src/crawler/services/chdtp_target.ts (Normal file, 110 lines)
@@ -0,0 +1,110 @@
import * as puppeteer from 'puppeteer';
import { Logger } from '@nestjs/common';

export interface ChdtpResult {
  title: string;
  publishDate: Date;
  url: string; // Necessary for system uniqueness
}

export const ChdtpCrawler = {
  name: '中国华能集团',
  url: 'https://www.chdtp.com/webs/queryWebZbgg.action?zbggType=1',
  baseUrl: 'https://www.chdtp.com/webs/',

  async crawl(browser: puppeteer.Browser): Promise<ChdtpResult[]> {
    const logger = new Logger('ChdtpCrawler');
    const page = await browser.newPage();
    await page.setUserAgent('Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36');

    const allResults: ChdtpResult[] = [];
    let currentPage = 1;
    const maxPages = 5; // Safety limit to prevent infinite loops during testing

    try {
      logger.log(`Navigating to ${this.url}...`);
      await page.goto(this.url, { waitUntil: 'networkidle2', timeout: 60000 });

      while (currentPage <= maxPages) {
        const content = await page.content();
        const pageResults = this.extract(content);

        if (pageResults.length === 0) {
          logger.warn(`No results found on page ${currentPage}, stopping.`);
          break;
        }

        allResults.push(...pageResults);
        logger.log(`Extracted ${pageResults.length} items from page ${currentPage}`);

        // Find the "Next Page" button.
        // Using a partial match on src to be robust against path variations.
        const nextButtonSelector = 'input[type="image"][src*="page-next.png"]';
        const nextButton = await page.$(nextButtonSelector);

        if (!nextButton) {
          logger.log('Next page button not found. Reached end of list.');
          break;
        }

        // Image inputs usually aren't "disabled" in the conventional sense,
        // so for this specific site we simply attempt the click.
        logger.log(`Navigating to page ${currentPage + 1}...`);

        try {
          await Promise.all([
            page.waitForNavigation({ waitUntil: 'networkidle2', timeout: 60000 }),
            nextButton.click(),
          ]);
        } catch (navError) {
          logger.error(`Navigation to page ${currentPage + 1} failed: ${navError.message}`);
          break;
        }

        currentPage++;

        // Random delay of 1-3 seconds between pages
        const delay = Math.floor(Math.random() * (3000 - 1000 + 1)) + 1000;
        await new Promise(resolve => setTimeout(resolve, delay));
      }

      return allResults;

    } catch (error) {
      logger.error(`Failed to crawl ${this.name}: ${error.message}`);
      return allResults; // Return what we have so far
    } finally {
      await page.close();
    }
  },

  extract(html: string): ChdtpResult[] {
    const results: ChdtpResult[] = [];
    /**
     * Regex groups for chdtp.com:
     * 1: Status
     * 2: URL suffix
     * 3: Title
     * 4: Business Type
     * 5: Date
     */
    const regex = /<tr[^>]*>\s*<td class="td_1">.*?<span[^>]*>\s*(.*?)\s*<\/span>.*?<\/td>\s*<td class="td_2">\s*<a[^>]*href="javascript:toGetContent\('(.*?)'\)" title="(.*?)">.*?<\/a><\/td>\s*<td class="td_3">\s*<a[^>]*>\s*(.*?)\s*<\/a>\s*<\/td>\s*<td class="td_4"><span>\[(.*?)\]<\/span><\/td>/gs;

    let match;
    while ((match = regex.exec(html)) !== null) {
      const urlSuffix = match[2]?.trim();
      const title = match[3]?.trim();
      const dateStr = match[5]?.trim();

      if (title && urlSuffix) {
        results.push({
          title,
          publishDate: dateStr ? new Date(dateStr) : new Date(),
          url: this.baseUrl + urlSuffix,
        });
      }
    }
    return results;
  },
};
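The extract method matches one table row per regex iteration, pulling the five groups its comment documents. A self-contained sketch of a matching row; the HTML below mirrors the td_1..td_4 structure the regex expects but is invented for illustration, not captured from chdtp.com:

import { ChdtpCrawler } from './chdtp_target';

// Hypothetical row in the documented structure (status, link, business type, date).
const sampleHtml = `
<tr class="row">
  <td class="td_1"><span class="status"> 正在进行 </span></td>
  <td class="td_2">
    <a href="javascript:toGetContent('viewZbgg.action?id=12345')" title="某项目招标公告">某项目招标公告</a></td>
  <td class="td_3"> <a href="#"> 货物 </a> </td>
  <td class="td_4"><span>[2024-01-15]</span></td>
</tr>`;

const items = ChdtpCrawler.extract(sampleHtml);
// items[0].title       === '某项目招标公告'                                      (group 3)
// items[0].url         === 'https://www.chdtp.com/webs/viewZbgg.action?id=12345' (baseUrl + group 2)
// items[0].publishDate equals new Date('2024-01-15')                             (group 5)
console.log(items);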