
Merge pull request #269 from mendableai/feat/allowbackwardcrawling-option

[Feat] Added allowBackwardCrawling option
Rafael Miller 2024-06-12 11:34:39 -03:00 committed by GitHub
commit 48f6c19a05
5 changed files with 81 additions and 16 deletions
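
For orientation, here is a minimal sketch of how a caller might opt into the new behaviour via the /v0/crawl endpoint, mirroring the request shape exercised by the E2E tests below. The FIRECRAWL_URL and FIRECRAWL_API_KEY environment variables are placeholders for this sketch, not names introduced by this change:

// Hedged sketch: enable backward crawling so same-host links outside the
// initial URL's path may also be crawled.
// FIRECRAWL_URL and FIRECRAWL_API_KEY are placeholder environment variables.
const res = await fetch(`${process.env.FIRECRAWL_URL}/v0/crawl`, {
  method: "POST",
  headers: {
    "Authorization": `Bearer ${process.env.FIRECRAWL_API_KEY}`,
    "Content-Type": "application/json",
  },
  body: JSON.stringify({
    url: "https://mendable.ai/blog",
    pageOptions: { includeHtml: true },
    crawlerOptions: { allowBackwardCrawling: true },
  }),
});
const { jobId } = await res.json(); // poll GET /v0/crawl/status/:jobId with this id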


@@ -596,7 +596,7 @@ describe("E2E Tests for API Routes", () => {
        .post("/v0/crawl")
        .set("Authorization", `Bearer ${process.env.TEST_API_KEY}`)
        .set("Content-Type", "application/json")
-       .send({ url: "https://roastmywebsite.ai" });
+       .send({ url: "https://mendable.ai/blog" });
      expect(crawlResponse.statusCode).toBe(200);
      let isCompleted = false;
@@ -622,7 +622,13 @@ describe("E2E Tests for API Routes", () => {
      expect(completedResponse.body.data[0]).toHaveProperty("content");
      expect(completedResponse.body.data[0]).toHaveProperty("markdown");
      expect(completedResponse.body.data[0]).toHaveProperty("metadata");
-     expect(completedResponse.body.data[0].content).toContain("_Roast_");
+     expect(completedResponse.body.data[0].content).toContain("Mendable");
+     const childrenLinks = completedResponse.body.data.filter(doc =>
+       doc.metadata && doc.metadata.sourceURL && doc.metadata.sourceURL.includes("mendable.ai/blog")
+     );
+     expect(childrenLinks.length).toBe(completedResponse.body.data.length);
    }, 120000); // 120 seconds
    it.concurrent('should return a successful response for a valid crawl job with PDF files without explicit .pdf extension', async () => {
@@ -757,34 +763,82 @@ describe("E2E Tests for API Routes", () => {
    }, 60000);
  }); // 60 seconds
+   it.concurrent("should return a successful response for a valid crawl job with allowBackwardCrawling set to true option", async () => {
+     const crawlResponse = await request(TEST_URL)
+       .post("/v0/crawl")
+       .set("Authorization", `Bearer ${process.env.TEST_API_KEY}`)
+       .set("Content-Type", "application/json")
+       .send({
+         url: "https://mendable.ai/blog",
+         pageOptions: { includeHtml: true },
+         crawlerOptions: { allowBackwardCrawling: true },
+       });
+     expect(crawlResponse.statusCode).toBe(200);
+     let isFinished = false;
+     let completedResponse;
+     while (!isFinished) {
+       const response = await request(TEST_URL)
+         .get(`/v0/crawl/status/${crawlResponse.body.jobId}`)
+         .set("Authorization", `Bearer ${process.env.TEST_API_KEY}`);
+       expect(response.statusCode).toBe(200);
+       expect(response.body).toHaveProperty("status");
+       if (response.body.status === "completed") {
+         isFinished = true;
+         completedResponse = response;
+       } else {
+         await new Promise((r) => setTimeout(r, 1000)); // Wait for 1 second before checking again
+       }
+     }
+     expect(completedResponse.statusCode).toBe(200);
+     expect(completedResponse.body).toHaveProperty("status");
+     expect(completedResponse.body.status).toBe("completed");
+     expect(completedResponse.body).toHaveProperty("data");
+     expect(completedResponse.body.data[0]).toHaveProperty("content");
+     expect(completedResponse.body.data[0]).toHaveProperty("markdown");
+     expect(completedResponse.body.data[0]).toHaveProperty("metadata");
+     expect(completedResponse.body.data[0]).toHaveProperty("html");
+     expect(completedResponse.body.data[0].content).toContain("Mendable");
+     expect(completedResponse.body.data[0].markdown).toContain("Mendable");
+     const onlyChildrenLinks = completedResponse.body.data.filter(doc => {
+       return doc.metadata && doc.metadata.sourceURL && doc.metadata.sourceURL.includes("mendable.ai/blog")
+     });
+     expect(completedResponse.body.data.length).toBeGreaterThan(onlyChildrenLinks.length);
+   }, 60000);
    it.concurrent("If someone cancels a crawl job, it should turn into failed status", async () => {
      const crawlResponse = await request(TEST_URL)
        .post("/v0/crawl")
        .set("Authorization", `Bearer ${process.env.TEST_API_KEY}`)
        .set("Content-Type", "application/json")
        .send({ url: "https://jestjs.io" });
      expect(crawlResponse.statusCode).toBe(200);
      // wait for 30 seconds
      await new Promise((r) => setTimeout(r, 20000));
-     const response = await request(TEST_URL)
+     const responseCancel = await request(TEST_URL)
        .delete(`/v0/crawl/cancel/${crawlResponse.body.jobId}`)
        .set("Authorization", `Bearer ${process.env.TEST_API_KEY}`);
-     expect(response.statusCode).toBe(200);
-     expect(response.body).toHaveProperty("status");
-     expect(response.body.status).toBe("cancelled");
+     expect(responseCancel.statusCode).toBe(200);
+     expect(responseCancel.body).toHaveProperty("status");
+     expect(responseCancel.body.status).toBe("cancelled");
      await new Promise((r) => setTimeout(r, 10000));
      const completedResponse = await request(TEST_URL)
        .get(`/v0/crawl/status/${crawlResponse.body.jobId}`)
        .set("Authorization", `Bearer ${process.env.TEST_API_KEY}`);
      expect(completedResponse.statusCode).toBe(200);
      expect(completedResponse.body).toHaveProperty("status");
      expect(completedResponse.body.status).toBe("failed");
      expect(completedResponse.body).toHaveProperty("data");
-     expect(completedResponse.body.data).toEqual(null);
+     expect(completedResponse.body.data).toBeNull();
      expect(completedResponse.body).toHaveProperty("partial_data");
      expect(completedResponse.body.partial_data[0]).toHaveProperty("content");
      expect(completedResponse.body.partial_data[0]).toHaveProperty("markdown");


@@ -55,7 +55,7 @@ export async function crawlController(req: Request, res: Response) {
    }
    const mode = req.body.mode ?? "crawl";
-   const crawlerOptions = req.body.crawlerOptions ?? {};
+   const crawlerOptions = req.body.crawlerOptions ?? { allowBackwardCrawling: false };
    const pageOptions = req.body.pageOptions ?? { onlyMainContent: false, includeHtml: false };
    if (mode === "single_urls" && !url.includes(",")) {
@@ -64,9 +64,7 @@ export async function crawlController(req: Request, res: Response) {
      await a.setOptions({
        mode: "single_urls",
        urls: [url],
-       crawlerOptions: {
-         returnOnlyUrls: true,
-       },
+       crawlerOptions: { ...crawlerOptions, returnOnlyUrls: true },
        pageOptions: pageOptions,
      });
@@ -91,7 +89,7 @@ export async function crawlController(req: Request, res: Response) {
    const job = await addWebScraperJob({
      url: url,
      mode: mode ?? "crawl", // fix for single urls not working
-     crawlerOptions: { ...crawlerOptions },
+     crawlerOptions: crawlerOptions,
      team_id: team_id,
      pageOptions: pageOptions,
      origin: req.body.origin ?? "api",
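
The practical effect of the controller changes above is that caller-supplied crawler options are no longer discarded on the preview ("single_urls") path: they are preserved and returnOnlyUrls is forced on top. A tiny sketch of that merge, using an assumed caller payload:

// Assumed caller payload; only the object-spread merge is being illustrated.
const crawlerOptions = { allowBackwardCrawling: true };
// Previously the preview branch hard-coded { returnOnlyUrls: true }, dropping the caller's options.
const previewOptions = { ...crawlerOptions, returnOnlyUrls: true };
console.log(previewOptions); // { allowBackwardCrawling: true, returnOnlyUrls: true }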


@@ -47,6 +47,7 @@ export type CrawlerOptions = {
   replaceAllPathsWithAbsolutePaths?: boolean;
   ignoreSitemap?: boolean;
   mode?: "default" | "fast"; // have a mode of some sort
+  allowBackwardCrawling?: boolean;
 }
 export type WebScraperOptions = {


@@ -20,6 +20,7 @@ export class WebCrawler {
   private robotsTxtUrl: string;
   private robots: any;
   private generateImgAltText: boolean;
+  private allowBackwardCrawling: boolean;
   constructor({
     initialUrl,
@@ -29,6 +30,7 @@ export class WebCrawler {
     limit = 10000,
     generateImgAltText = false,
     maxCrawledDepth = 10,
+    allowBackwardCrawling = false
   }: {
     initialUrl: string;
     includes?: string[];
@@ -37,6 +39,7 @@ export class WebCrawler {
     limit?: number;
     generateImgAltText?: boolean;
     maxCrawledDepth?: number;
+    allowBackwardCrawling?: boolean;
   }) {
     this.initialUrl = initialUrl;
     this.baseUrl = new URL(initialUrl).origin;
@@ -49,6 +52,7 @@ export class WebCrawler {
     this.maxCrawledLinks = maxCrawledLinks ?? limit;
     this.maxCrawledDepth = maxCrawledDepth ?? 10;
     this.generateImgAltText = generateImgAltText ?? false;
+    this.allowBackwardCrawling = allowBackwardCrawling ?? false;
   }
   private filterLinks(sitemapLinks: string[], limit: number, maxDepth: number): string[] {
@@ -90,10 +94,16 @@ export class WebCrawler {
        const linkHostname = normalizedLink.hostname.replace(/^www\./, '');
        // Ensure the protocol and hostname match, and the path starts with the initial URL's path
-       if (linkHostname !== initialHostname || !normalizedLink.pathname.startsWith(normalizedInitialUrl.pathname)) {
+       if (linkHostname !== initialHostname) {
          return false;
        }
+       if (!this.allowBackwardCrawling) {
+         if (!normalizedLink.pathname.startsWith(normalizedInitialUrl.pathname)) {
+           return false;
+         }
+       }
        const isAllowed = this.robots.isAllowed(link, "FireCrawlAgent") ?? true;
        // Check if the link is disallowed by robots.txt
        if (!isAllowed) {
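
To make the new scoping rule concrete, the following standalone sketch (a hypothetical helper, not the WebCrawler method itself) reproduces the check the diff relaxes: the hostname must always match the initial URL, while the path-prefix restriction now applies only when allowBackwardCrawling is off. The example paths other than /blog are illustrative only:

// Hypothetical helper for illustration; mirrors the logic added in the hunk above.
function isWithinScope(initialUrl: string, link: string, allowBackwardCrawling: boolean): boolean {
  const normalizedInitialUrl = new URL(initialUrl);
  const normalizedLink = new URL(link);
  const initialHostname = normalizedInitialUrl.hostname.replace(/^www\./, "");
  const linkHostname = normalizedLink.hostname.replace(/^www\./, "");

  // The hostname must always match the initial URL's hostname.
  if (linkHostname !== initialHostname) return false;

  // Without backward crawling, the link must stay under the initial URL's path.
  if (!allowBackwardCrawling && !normalizedLink.pathname.startsWith(normalizedInitialUrl.pathname)) {
    return false;
  }
  return true;
}

isWithinScope("https://mendable.ai/blog", "https://mendable.ai/blog/launch-week", false); // true
isWithinScope("https://mendable.ai/blog", "https://mendable.ai/pricing", false);          // false
isWithinScope("https://mendable.ai/blog", "https://mendable.ai/pricing", true);           // true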


@@ -38,8 +38,8 @@ export class WebScraperDataProvider {
   private generateImgAltTextModel: "gpt-4-turbo" | "claude-3-opus" =
     "gpt-4-turbo";
   private crawlerMode: string = "default";
+  private allowBackwardCrawling: boolean = false;
   authorize(): void {
     throw new Error("Method not implemented.");
   }
@@ -171,6 +171,7 @@ export class WebScraperDataProvider {
       maxCrawledDepth: this.maxCrawledDepth,
       limit: this.limit,
       generateImgAltText: this.generateImgAltText,
+      allowBackwardCrawling: this.allowBackwardCrawling,
     });
     let links = await crawler.start(
@@ -481,6 +482,7 @@ export class WebScraperDataProvider {
     this.excludes = this.excludes.filter((item) => item !== "");
     this.crawlerMode = options.crawlerOptions?.mode ?? "default";
     this.ignoreSitemap = options.crawlerOptions?.ignoreSitemap ?? false;
+    this.allowBackwardCrawling = options.crawlerOptions?.allowBackwardCrawling ?? false;
     // make sure all urls start with https://
     this.urls = this.urls.map((url) => {