0

Nick: ignoreSitemap, better crawling algo

This commit is contained in:
Nicolas 2024-06-10 18:12:41 -07:00
parent 1bd0327e1a
commit f6b06ac27a
4 changed files with 57 additions and 41 deletions

View File

@ -35,20 +35,23 @@ export type SearchOptions = {
location?: string; location?: string;
}; };
// Options controlling how the crawler discovers and filters links.
// Extracted to a named type so it can be shared between
// WebScraperOptions.crawlerOptions and WebCrawler.start().
export type CrawlerOptions = {
// presumably: return discovered URLs without scraping page content — TODO confirm against caller
returnOnlyUrls?: boolean;
// Regex patterns a URL path must match to be kept (see matchesIncludes).
includes?: string[];
// Regex patterns that exclude a URL path from the crawl (see matchesExcludes).
excludes?: string[];
maxCrawledLinks?: number;
// Maximum link depth from the initial URL, enforced via filterLinks.
maxDepth?: number;
// Cap on the number of links returned, enforced via filterLinks.
limit?: number;
generateImgAltText?: boolean;
replaceAllPathsWithAbsolutePaths?: boolean;
// When true, skip the sitemap.xml fast-path and crawl links directly.
ignoreSitemap?: boolean;
mode?: "default" | "fast"; // have a mode of some sort
}
export type WebScraperOptions = { export type WebScraperOptions = {
urls: string[]; urls: string[];
mode: "single_urls" | "sitemap" | "crawl"; mode: "single_urls" | "sitemap" | "crawl";
crawlerOptions?: { crawlerOptions?: CrawlerOptions;
returnOnlyUrls?: boolean;
includes?: string[];
excludes?: string[];
maxCrawledLinks?: number;
maxDepth?: number;
limit?: number;
generateImgAltText?: boolean;
replaceAllPathsWithAbsolutePaths?: boolean;
mode?: "default" | "fast"; // have a mode of some sort
};
pageOptions?: PageOptions; pageOptions?: PageOptions;
extractorOptions?: ExtractorOptions; extractorOptions?: ExtractorOptions;
concurrentRequests?: number; concurrentRequests?: number;

View File

@ -3,7 +3,7 @@ import cheerio, { load } from "cheerio";
import { URL } from "url"; import { URL } from "url";
import { getLinksFromSitemap } from "./sitemap"; import { getLinksFromSitemap } from "./sitemap";
import async from "async"; import async from "async";
import { PageOptions, Progress } from "../../lib/entities"; import { CrawlerOptions, PageOptions, Progress } from "../../lib/entities";
import { scrapSingleUrl, scrapWithScrapingBee } from "./single_url"; import { scrapSingleUrl, scrapWithScrapingBee } from "./single_url";
import robotsParser from "robots-parser"; import robotsParser from "robots-parser";
@ -109,6 +109,7 @@ export class WebCrawler {
public async start( public async start(
inProgress?: (progress: Progress) => void, inProgress?: (progress: Progress) => void,
pageOptions?: PageOptions, pageOptions?: PageOptions,
crawlerOptions?: CrawlerOptions,
concurrencyLimit: number = 5, concurrencyLimit: number = 5,
limit: number = 10000, limit: number = 10000,
maxDepth: number = 10 maxDepth: number = 10
@ -123,10 +124,12 @@ export class WebCrawler {
} }
const sitemapLinks = await this.tryFetchSitemapLinks(this.initialUrl); if(!crawlerOptions?.ignoreSitemap){
if (sitemapLinks.length > 0) { const sitemapLinks = await this.tryFetchSitemapLinks(this.initialUrl);
let filteredLinks = this.filterLinks(sitemapLinks, limit, maxDepth); if (sitemapLinks.length > 0) {
return filteredLinks.map(link => ({ url: link, html: "" })); let filteredLinks = this.filterLinks(sitemapLinks, limit, maxDepth);
return filteredLinks.map(link => ({ url: link, html: "" }));
}
} }
const urls = await this.crawlUrls( const urls = await this.crawlUrls(
@ -135,6 +138,7 @@ export class WebCrawler {
concurrencyLimit, concurrencyLimit,
inProgress inProgress
); );
if ( if (
urls.length === 0 && urls.length === 0 &&
this.filterLinks([this.initialUrl], limit, this.maxCrawledDepth).length > 0 this.filterLinks([this.initialUrl], limit, this.maxCrawledDepth).length > 0
@ -142,9 +146,9 @@ export class WebCrawler {
return [{ url: this.initialUrl, html: "" }]; return [{ url: this.initialUrl, html: "" }];
} }
// make sure to run include exclude here again // make sure to run include exclude here again
const filteredUrls = this.filterLinks(urls.map(urlObj => urlObj.url), limit, this.maxCrawledDepth); const filteredUrls = this.filterLinks(urls.map(urlObj => urlObj.url), limit, this.maxCrawledDepth);
return filteredUrls.map(url => ({ url, html: urls.find(urlObj => urlObj.url === url)?.html || "" })); return filteredUrls.map(url => ({ url, html: urls.find(urlObj => urlObj.url === url)?.html || "" }));
} }
@ -211,19 +215,17 @@ export class WebCrawler {
} }
async crawl(url: string, pageOptions: PageOptions): Promise<{url: string, html: string}[]> { async crawl(url: string, pageOptions: PageOptions): Promise<{url: string, html: string}[]> {
if (this.visited.has(url) || !this.robots.isAllowed(url, "FireCrawlAgent")){ const normalizedUrl = this.normalizeCrawlUrl(url);
if (this.visited.has(normalizedUrl) || !this.robots.isAllowed(url, "FireCrawlAgent")) {
return []; return [];
} }
this.visited.add(url); this.visited.add(normalizedUrl);
if (!url.startsWith("http")) { if (!url.startsWith("http")) {
url = "https://" + url; url = "https://" + url;
} }
if (url.endsWith("/")) { if (url.endsWith("/")) {
url = url.slice(0, -1); url = url.slice(0, -1);
} }
if (this.isFile(url) || this.isSocialMediaOrEmail(url)) { if (this.isFile(url) || this.isSocialMediaOrEmail(url)) {
@ -231,26 +233,23 @@ export class WebCrawler {
} }
try { try {
let content : string = ""; let content: string = "";
// If it is the first link, fetch with single url // If it is the first link, fetch with single url
if (this.visited.size === 1) { if (this.visited.size === 1) {
console.log(pageOptions) const page = await scrapSingleUrl(url, { ...pageOptions, includeHtml: true });
const page = await scrapSingleUrl(url, {...pageOptions, includeHtml: true}); content = page.html ?? "";
content = page.html ?? ""
} else { } else {
const response = await axios.get(url); const response = await axios.get(url);
content = response.data ?? ""; content = response.data ?? "";
} }
const $ = load(content); const $ = load(content);
let links: {url: string, html: string}[] = []; let links: { url: string, html: string }[] = [];
// Add the initial URL to the list of links // Add the initial URL to the list of links
if(this.visited.size === 1) if (this.visited.size === 1) {
{ links.push({ url, html: content });
links.push({url, html: content});
} }
$("a").each((_, element) => { $("a").each((_, element) => {
const href = $(element).attr("href"); const href = $(element).attr("href");
if (href) { if (href) {
@ -258,32 +257,43 @@ export class WebCrawler {
if (!href.startsWith("http")) { if (!href.startsWith("http")) {
fullUrl = new URL(href, this.baseUrl).toString(); fullUrl = new URL(href, this.baseUrl).toString();
} }
const url = new URL(fullUrl); const urlObj = new URL(fullUrl);
const path = url.pathname; const path = urlObj.pathname;
if ( if (
this.isInternalLink(fullUrl) && this.isInternalLink(fullUrl) &&
this.matchesPattern(fullUrl) && this.matchesPattern(fullUrl) &&
this.noSections(fullUrl) && this.noSections(fullUrl) &&
this.matchesIncludes(path) && // The idea here to comment this out is to allow wider website coverage as we filter this anyway afterwards
// this.matchesIncludes(path) &&
!this.matchesExcludes(path) && !this.matchesExcludes(path) &&
this.robots.isAllowed(fullUrl, "FireCrawlAgent") this.robots.isAllowed(fullUrl, "FireCrawlAgent")
) { ) {
links.push({url: fullUrl, html: content}); links.push({ url: fullUrl, html: content });
} }
} }
}); });
if(this.visited.size === 1){ if (this.visited.size === 1) {
return links; return links;
} }
// Create a new list to return to avoid modifying the visited list // Create a new list to return to avoid modifying the visited list
return links.filter((link) => !this.visited.has(link.url)); return links.filter((link) => !this.visited.has(this.normalizeCrawlUrl(link.url)));
} catch (error) { } catch (error) {
return []; return [];
} }
} }
// Canonicalize a URL for visited-set bookkeeping: sorting the query
// parameters makes e.g. "?a=1&b=2" and "?b=2&a=1" compare equal, so the
// same page is not crawled twice. A string that fails to parse as a URL
// is returned unchanged rather than throwing.
private normalizeCrawlUrl(url: string): string {
  try {
    const parsed = new URL(url);
    parsed.searchParams.sort(); // normalize query-parameter order
    return parsed.toString();
  } catch {
    return url; // not a parseable URL — leave it as-is
  }
}
private matchesIncludes(url: string): boolean { private matchesIncludes(url: string): boolean {
if (this.includes.length === 0 || this.includes[0] == "") return true; if (this.includes.length === 0 || this.includes[0] == "") return true;
return this.includes.some((pattern) => new RegExp(pattern).test(url)); return this.includes.some((pattern) => new RegExp(pattern).test(url));
@ -392,7 +402,6 @@ export class WebCrawler {
// Normalize and check if the URL is present in any of the sitemaps // Normalize and check if the URL is present in any of the sitemaps
const normalizedUrl = normalizeUrl(url); const normalizedUrl = normalizeUrl(url);
const normalizedSitemapLinks = sitemapLinks.map(link => normalizeUrl(link)); const normalizedSitemapLinks = sitemapLinks.map(link => normalizeUrl(link));
// has to be greater than 0 to avoid adding the initial URL to the sitemap links, and preventing crawler to crawl // has to be greater than 0 to avoid adding the initial URL to the sitemap links, and preventing crawler to crawl

View File

@ -31,6 +31,7 @@ export class WebScraperDataProvider {
private limit: number = 10000; private limit: number = 10000;
private concurrentRequests: number = 20; private concurrentRequests: number = 20;
private generateImgAltText: boolean = false; private generateImgAltText: boolean = false;
private ignoreSitemap: boolean = false;
private pageOptions?: PageOptions; private pageOptions?: PageOptions;
private extractorOptions?: ExtractorOptions; private extractorOptions?: ExtractorOptions;
private replaceAllPathsWithAbsolutePaths?: boolean = false; private replaceAllPathsWithAbsolutePaths?: boolean = false;
@ -38,6 +39,7 @@ export class WebScraperDataProvider {
"gpt-4-turbo"; "gpt-4-turbo";
private crawlerMode: string = "default"; private crawlerMode: string = "default";
authorize(): void { authorize(): void {
throw new Error("Method not implemented."); throw new Error("Method not implemented.");
} }
@ -174,6 +176,9 @@ export class WebScraperDataProvider {
let links = await crawler.start( let links = await crawler.start(
inProgress, inProgress,
this.pageOptions, this.pageOptions,
{
ignoreSitemap: this.ignoreSitemap,
},
5, 5,
this.limit, this.limit,
this.maxCrawledDepth this.maxCrawledDepth
@ -474,6 +479,7 @@ export class WebScraperDataProvider {
//! @nicolas, for some reason this was being injected and breaking everything. Don't have time to find source of the issue so adding this check //! @nicolas, for some reason this was being injected and breaking everything. Don't have time to find source of the issue so adding this check
this.excludes = this.excludes.filter((item) => item !== ""); this.excludes = this.excludes.filter((item) => item !== "");
this.crawlerMode = options.crawlerOptions?.mode ?? "default"; this.crawlerMode = options.crawlerOptions?.mode ?? "default";
this.ignoreSitemap = options.crawlerOptions?.ignoreSitemap ?? false;
// make sure all urls start with https:// // make sure all urls start with https://
this.urls = this.urls.map((url) => { this.urls = this.urls.map((url) => {

View File

@ -12,7 +12,6 @@ export async function getLinksFromSitemap(
content = response.data; content = response.data;
} catch (error) { } catch (error) {
console.error(`Request failed for ${sitemapUrl}: ${error}`); console.error(`Request failed for ${sitemapUrl}: ${error}`);
console.log(allUrls)
return allUrls; return allUrls;
} }
@ -36,7 +35,6 @@ export async function getLinksFromSitemap(
} catch (error) { } catch (error) {
console.error(`Error processing ${sitemapUrl}: ${error}`); console.error(`Error processing ${sitemapUrl}: ${error}`);
} }
console.log(allUrls)
return allUrls; return allUrls;
} }