Merge branch 'main' into feat/removeTags-regex
commit f5a9acc4c6

.github/workflows/ci.yml (vendored, 1 change)
@@ -27,6 +27,7 @@ env:
   TEST_API_KEY: ${{ secrets.TEST_API_KEY }}
   HYPERDX_API_KEY: ${{ secrets.HYPERDX_API_KEY }}
   HDX_NODE_BETA_MODE: 1
+  FIRE_ENGINE_BETA_URL: ${{ secrets.FIRE_ENGINE_BETA_URL }}


 jobs:
.github/workflows/fly-direct.yml (vendored, 2 changes)
@@ -1,7 +1,7 @@
 name: Fly Deploy Direct
 on:
   schedule:
-    - cron: '0 * * * *'
+    - cron: '0 */2 * * *'

 env:
   ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
.github/workflows/fly.yml (vendored, 2 changes)
@@ -93,7 +93,7 @@ jobs:
         working-directory: ./apps/test-suite
       - name: Run E2E tests
         run: |
-          npm run test
+          npm run test:suite
         working-directory: ./apps/test-suite

   python-sdk-tests:
@@ -12,7 +12,7 @@
     "build": "tsc",
     "test": "npx jest --detectOpenHandles --forceExit --openHandlesTimeout=120000 --watchAll=false --testPathIgnorePatterns='src/__tests__/e2e_noAuth/*'",
     "test:local-no-auth": "npx jest --detectOpenHandles --forceExit --openHandlesTimeout=120000 --watchAll=false --testPathIgnorePatterns='src/__tests__/e2e_withAuth/*'",
-    "test:prod": "npx jest --detectOpenHandles --forceExit --openHandlesTimeout=120000 --watchAll=false --testPathIgnorePatterns='src/__tests__/e2e_noAuth/*'",
+    "test:prod": "npx jest --detectOpenHandles --forceExit --openHandlesTimeout=120000 --watchAll=false --testPathIgnorePatterns='(src/__tests__/e2e_noAuth|src/__tests__/e2e_full_withAuth)'",
    "workers": "nodemon --exec ts-node src/services/queue-worker.ts",
     "worker:production": "node dist/src/services/queue-worker.js",
     "mongo-docker": "docker run -d -p 2717:27017 -v ./mongo-data:/data/db --name mongodb mongo:latest",
apps/api/src/__tests__/e2e_full_withAuth/index.test.ts (new file, 1390 lines)
File diff suppressed because it is too large
apps/api/src/controllers/__tests__/crawl.test.ts (new file, 47 lines)
@@ -0,0 +1,47 @@
+import { crawlController } from '../crawl'
+import { Request, Response } from 'express';
+import { authenticateUser } from '../auth'; // Ensure this import is correct
+import { createIdempotencyKey } from '../../services/idempotency/create';
+import { validateIdempotencyKey } from '../../services/idempotency/validate';
+import { v4 as uuidv4 } from 'uuid';
+
+jest.mock('../auth', () => ({
+  authenticateUser: jest.fn().mockResolvedValue({
+    success: true,
+    team_id: 'team123',
+    error: null,
+    status: 200
+  }),
+  reduce: jest.fn()
+}));
+jest.mock('../../services/idempotency/validate');
+
+describe('crawlController', () => {
+  it('should prevent duplicate requests using the same idempotency key', async () => {
+    const req = {
+      headers: {
+        'x-idempotency-key': await uuidv4(),
+        'Authorization': `Bearer ${process.env.TEST_API_KEY}`
+      },
+      body: {
+        url: 'https://mendable.ai'
+      }
+    } as unknown as Request;
+    const res = {
+      status: jest.fn().mockReturnThis(),
+      json: jest.fn()
+    } as unknown as Response;
+
+    // Mock the idempotency key validation to return false for the second call
+    (validateIdempotencyKey as jest.Mock).mockResolvedValueOnce(true).mockResolvedValueOnce(false);
+
+    // First request should succeed
+    await crawlController(req, res);
+    expect(res.status).not.toHaveBeenCalledWith(409);
+
+    // Second request with the same key should fail
+    await crawlController(req, res);
+    expect(res.status).toHaveBeenCalledWith(409);
+    expect(res.json).toHaveBeenCalledWith({ error: 'Idempotency key already used' });
+  });
+});
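Note: the 409 path this test pins down implies the controller consults validateIdempotencyKey before doing any work. A minimal sketch of such a guard, assuming a hypothetical helper name and call shape (the real crawlController body is not part of this diff):

```ts
// Hedged sketch, not the actual controller: it illustrates the behavior the
// test asserts, i.e. a reused idempotency key is rejected with 409 up front.
import { Request, Response } from 'express';
import { validateIdempotencyKey } from '../../services/idempotency/validate';

async function idempotencyGuard(req: Request, res: Response): Promise<boolean> {
  const key = req.headers['x-idempotency-key'] as string | undefined;
  if (key && !(await validateIdempotencyKey(key))) {
    res.status(409).json({ error: 'Idempotency key already used' });
    return false; // caller stops here; nothing was enqueued
  }
  return true; // safe to proceed with the crawl
}
```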
@@ -91,6 +91,7 @@ export async function searchHelper(
   });
+
   const docs = await a.getDocuments(false);

   if (docs.length === 0) {
     return { success: true, error: "No search results found", returnCode: 200 };
   }
@@ -7,7 +7,7 @@ import { getAdjustedMaxDepth } from '../utils/maxDepthUtils';
 jest.mock('axios');
 jest.mock('robots-parser');

-describe('WebCrawler maxDepth and filterLinks', () => {
+describe('WebCrawler', () => {
   let crawler: WebCrawler;
   const mockAxios = axios as jest.Mocked<typeof axios>;
   const mockRobotsParser = robotsParser as jest.MockedFunction<typeof robotsParser>;
@@ -156,8 +156,37 @@ describe('WebCrawler maxDepth and filterLinks', () => {
     ]);
   });

-  // Add more tests to cover other scenarios, such as checking includes and excludes
+  it('should handle allowBackwardCrawling option correctly', async () => {
+    const initialUrl = 'https://mendable.ai/blog';
+
+    // Setup the crawler with the specific test case options
+    const crawler = new WebCrawler({
+      initialUrl: initialUrl,
+      includes: [],
+      excludes: [],
+      limit: 100,
+      maxCrawledDepth: 3, // Example depth
+      allowBackwardCrawling: true
+    });
+
+    // Mock the sitemap fetching function to simulate backward crawling
+    crawler['tryFetchSitemapLinks'] = jest.fn().mockResolvedValue([
+      initialUrl,
+      'https://mendable.ai', // backward link
+      initialUrl + '/page1',
+      initialUrl + '/page1/page2'
+    ]);
+
+    const results = await crawler.start();
+    expect(results).toEqual([
+      { url: initialUrl, html: '' },
+      { url: 'https://mendable.ai', html: '' }, // Expect the backward link to be included
+      { url: initialUrl + '/page1', html: '' },
+      { url: initialUrl + '/page1/page2', html: '' }
+    ]);
+
+    // Check that the backward link is included if allowBackwardCrawling is true
+    expect(results.some(r => r.url === 'https://mendable.ai')).toBe(true);
+  });
 });
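Note: the expected results treat https://mendable.ai as a valid link even though it sits above the starting path. A small sketch of one plausible reading of the option (this helper is hypothetical, not WebCrawler internals):

```ts
// Hypothetical helper illustrating allowBackwardCrawling: a "backward" link
// (https://mendable.ai) sits above the initial URL's path (https://mendable.ai/blog).
function keepLink(link: string, initialUrl: string, allowBackwardCrawling: boolean): boolean {
  if (allowBackwardCrawling) return true; // also keep links above the starting path
  return link.startsWith(initialUrl);     // otherwise require the starting prefix
}

console.log(keepLink('https://mendable.ai', 'https://mendable.ai/blog', true));  // true
console.log(keepLink('https://mendable.ai', 'https://mendable.ai/blog', false)); // false
```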
apps/api/src/scraper/WebScraper/__tests__/single_url.test.ts (new file, 24 lines)
@@ -0,0 +1,24 @@
+jest.mock('../single_url', () => {
+  const originalModule = jest.requireActual('../single_url');
+  originalModule.fetchHtmlContent = jest.fn().mockResolvedValue('<html><head><title>Test</title></head><body><h1>Roast</h1></body></html>');
+
+  return originalModule;
+});
+
+import { scrapSingleUrl } from '../single_url';
+import { PageOptions } from '../../../lib/entities';
+
+describe('scrapSingleUrl', () => {
+  it('should handle includeHtml option correctly', async () => {
+    const url = 'https://roastmywebsite.ai';
+    const pageOptionsWithHtml: PageOptions = { includeHtml: true };
+    const pageOptionsWithoutHtml: PageOptions = { includeHtml: false };
+
+    const resultWithHtml = await scrapSingleUrl(url, pageOptionsWithHtml);
+    const resultWithoutHtml = await scrapSingleUrl(url, pageOptionsWithoutHtml);
+
+    expect(resultWithHtml.html).toBeDefined();
+    expect(resultWithoutHtml.html).toBeUndefined();
+  }, 10000);
+});
+
@@ -0,0 +1,89 @@
+import { isUrlBlocked } from '../blocklist';
+
+describe('Blocklist Functionality', () => {
+  describe('isUrlBlocked', () => {
+    test.each([
+      'https://facebook.com/fake-test',
+      'https://x.com/user-profile',
+      'https://twitter.com/home',
+      'https://instagram.com/explore',
+      'https://linkedin.com/in/johndoe',
+      'https://pinterest.com/pin/create',
+      'https://snapchat.com/add/johndoe',
+      'https://tiktok.com/@johndoe',
+      'https://reddit.com/r/funny',
+      'https://tumblr.com/dashboard',
+      'https://flickr.com/photos/johndoe',
+      'https://whatsapp.com/download',
+      'https://wechat.com/features',
+      'https://telegram.org/apps'
+    ])('should return true for blocklisted URL %s', (url) => {
+      expect(isUrlBlocked(url)).toBe(true);
+    });
+
+    test.each([
+      'https://facebook.com/policy',
+      'https://twitter.com/tos',
+      'https://instagram.com/about/legal/terms',
+      'https://linkedin.com/legal/privacy-policy',
+      'https://pinterest.com/about/privacy',
+      'https://snapchat.com/legal/terms',
+      'https://tiktok.com/legal/privacy-policy',
+      'https://reddit.com/policies',
+      'https://tumblr.com/policy/en/privacy',
+      'https://flickr.com/help/terms',
+      'https://whatsapp.com/legal',
+      'https://wechat.com/en/privacy-policy',
+      'https://telegram.org/tos'
+    ])('should return false for allowed URLs with keywords %s', (url) => {
+      expect(isUrlBlocked(url)).toBe(false);
+    });
+
+    test('should return false for non-blocklisted domain', () => {
+      const url = 'https://example.com';
+      expect(isUrlBlocked(url)).toBe(false);
+    });
+
+    test('should handle invalid URLs gracefully', () => {
+      const url = 'htp://invalid-url';
+      expect(isUrlBlocked(url)).toBe(false);
+    });
+  });
+
+  test.each([
+    'https://subdomain.facebook.com',
+    'https://facebook.com.someotherdomain.com',
+    'https://www.facebook.com/profile',
+    'https://api.twitter.com/info',
+    'https://instagram.com/accounts/login'
+  ])('should return true for URLs with blocklisted domains in subdomains or paths %s', (url) => {
+    expect(isUrlBlocked(url)).toBe(true);
+  });
+
+  test.each([
+    'https://example.com/facebook.com',
+    'https://example.com/redirect?url=https://twitter.com',
+    'https://facebook.com.policy.example.com'
+  ])('should return false for URLs where blocklisted domain is part of another domain or path %s', (url) => {
+    expect(isUrlBlocked(url)).toBe(false);
+  });
+
+  test.each([
+    'https://FACEBOOK.com',
+    'https://INSTAGRAM.com/@something'
+  ])('should handle case variations %s', (url) => {
+    expect(isUrlBlocked(url)).toBe(true);
+  });
+
+  test.each([
+    'https://facebook.com?redirect=https://example.com',
+    'https://twitter.com?query=something'
+  ])('should handle query parameters %s', (url) => {
+    expect(isUrlBlocked(url)).toBe(true);
+  });
+
+  test('should handle internationalized domain names', () => {
+    const url = 'https://xn--d1acpjx3f.xn--p1ai';
+    expect(isUrlBlocked(url)).toBe(false);
+  });
+});
@@ -9,41 +9,11 @@ describe('PDF Processing Module - Integration Test', () => {
     expect(pageError).toBeUndefined();
   });

-  // We're hitting the LLAMAPARSE rate limit 🫠
-  // it('should download and read a simple PDF file by URL', async () => {
-  //   const pdfContent = await pdfProcessor.fetchAndProcessPdf('https://s3.us-east-1.amazonaws.com/storage.mendable.ai/rafa-testing/test%20%281%29.pdf');
-  //   expect(pdfContent).toEqual("Dummy PDF file");
-  // });
-
-  // it('should download and read a complex PDF file by URL', async () => {
-  //   const pdfContent = await pdfProcessor.fetchAndProcessPdf('https://arxiv.org/pdf/2307.06435.pdf');
-
-  //   const expectedContent = 'A Comprehensive Overview of Large Language Models\n' +
-  //     ' a a,∗ b,∗ c,d,∗ e,f e,f g,i\n' +
-  //     ' Humza Naveed , Asad Ullah Khan , Shi Qiu , Muhammad Saqib , Saeed Anwar , Muhammad Usman , Naveed Akhtar ,\n' +
-  //     ' Nick Barnes h, Ajmal Mian i\n' +
-  //     ' aUniversity of Engineering and Technology (UET), Lahore, Pakistan\n' +
-  //     ' bThe Chinese University of Hong Kong (CUHK), HKSAR, China\n' +
-  //     ' cUniversity of Technology Sydney (UTS), Sydney, Australia\n' +
-  //     ' dCommonwealth Scientific and Industrial Research Organisation (CSIRO), Sydney, Australia\n' +
-  //     ' eKing Fahd University of Petroleum and Minerals (KFUPM), Dhahran, Saudi Arabia\n' +
-  //     ' fSDAIA-KFUPM Joint Research Center for Artificial Intelligence (JRCAI), Dhahran, Saudi Arabia\n' +
-  //     ' gThe University of Melbourne (UoM), Melbourne, Australia\n' +
-  //     ' hAustralian National University (ANU), Canberra, Australia\n' +
-  //     ' iThe University of Western Australia (UWA), Perth, Australia\n' +
-  //     ' Abstract\n' +
-  //     ' Large Language Models (LLMs) have recently demonstrated remarkable capabilities in natural language processing tasks and\n' +
-  //     ' beyond. This success of LLMs has led to a large influx of research contributions in this direction. These works encompass diverse\n' +
-  //     ' topics such as architectural innovations, better training strategies, context length improvements, fine-tuning, multi-modal LLMs,\n' +
-  //     ' robotics, datasets, benchmarking, efficiency, and more. With the rapid development of techniques and regular breakthroughs in\n' +
-  //     ' LLM research, it has become considerably challenging to perceive the bigger picture of the advances in this direction. Considering\n' +
-  //     ' the rapidly emerging plethora of literature on LLMs, it is imperative that the research community is able to benefit from a concise\n' +
-  //     ' yet comprehensive overview of the recent developments in this field. This article provides an overview of the existing literature\n' +
-  //     ' on a broad range of LLM-related concepts. Our self-contained comprehensive overview of LLMs discusses relevant background\n' +
-  //     ' concepts along with covering the advanced topics at the frontier of research in LLMs. This review article is intended to not only\n' +
-  //     ' provide a systematic survey but also a quick comprehensive reference for the researchers and practitioners to draw insights from\n' +
-  //     ' extensive informative summaries of the existing works to advance the LLM research.\n'
-  //   expect(pdfContent).toContain(expectedContent);
-  // }, 60000);
+  it('should return a successful response for a valid scrape with PDF file and parsePDF set to false', async () => {
+    const { content, pageStatusCode, pageError } = await pdfProcessor.fetchAndProcessPdf('https://arxiv.org/pdf/astro-ph/9301001.pdf', false);
+    expect(pageStatusCode).toBe(200);
+    expect(pageError).toBeUndefined();
+    expect(content).toContain('/Title(arXiv:astro-ph/9301001v1 7 Jan 1993)>>endobj');
+  }, 60000); // 60 seconds
 });
@@ -23,6 +23,7 @@ const allowedKeywords = [
   'user-agreement',
   'legal',
   'help',
+  'policies',
   'support',
   'contact',
   'about',
@@ -30,25 +31,31 @@ const allowedKeywords = [
   'blog',
   'press',
   'conditions',
+  'tos'
 ];

 export function isUrlBlocked(url: string): boolean {
-  // Check if the URL contains any allowed keywords
-  if (allowedKeywords.some(keyword => url.includes(keyword))) {
+  const lowerCaseUrl = url.toLowerCase();
+
+  // Check if the URL contains any allowed keywords as whole words
+  if (allowedKeywords.some(keyword => new RegExp(`\\b${keyword}\\b`, 'i').test(lowerCaseUrl))) {
     return false;
   }

   try {
+    const urlObj = new URL(url);
+    const hostname = urlObj.hostname.toLowerCase();
+
     // Check if the URL matches any domain in the blocklist
-    return socialMediaBlocklist.some(domain => {
-      // Create a regular expression to match the exact domain
-      const domainPattern = new RegExp(`(^|\\.)${domain.replace('.', '\\.')}$`);
-      // Test the hostname of the URL against the pattern
-      return domainPattern.test(new URL(url).hostname);
+    const isBlocked = socialMediaBlocklist.some(domain => {
+      const domainPattern = new RegExp(`(^|\\.)${domain.replace('.', '\\.')}(\\.|$)`, 'i');
+      return domainPattern.test(hostname);
     });
+
+    return isBlocked;
   } catch (e) {
     // If an error occurs (e.g., invalid URL), return false
+    console.error(`Error processing URL: ${url}`, e);
     return false;
   }
 }
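Note: the substantive fix above is matching the parsed hostname against a label-anchored pattern instead of substring-testing the raw URL. A self-contained sketch of the new rule (the two-entry blocklist is a stand-in for the real socialMediaBlocklist):

```ts
// Standalone sketch of the new matching rule with an illustrative blocklist.
const blocklist = ['facebook.com', 'twitter.com'];

function isHostnameBlocked(url: string): boolean {
  const hostname = new URL(url).hostname.toLowerCase();
  return blocklist.some(domain => {
    // (^|\.) starts the match at a label boundary and (\.|$) ends it at one,
    // so 'www.facebook.com' matches while 'notfacebook.com' does not.
    const domainPattern = new RegExp(`(^|\\.)${domain.replace('.', '\\.')}(\\.|$)`, 'i');
    return domainPattern.test(hostname);
  });
}

console.log(isHostnameBlocked('https://www.facebook.com/profile')); // true
console.log(isHostnameBlocked('https://example.com/facebook.com')); // false: host is example.com
```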
apps/api/src/services/rate-limiter.test.ts (new file, 87 lines)
@@ -0,0 +1,87 @@
+import { getRateLimiter, serverRateLimiter, testSuiteRateLimiter, redisClient } from "./rate-limiter";
+import { RateLimiterMode } from "../../src/types";
+import { RateLimiterRedis } from "rate-limiter-flexible";
+
+describe("Rate Limiter Service", () => {
+  beforeAll(async () => {
+    await redisClient.connect();
+  });
+
+  afterAll(async () => {
+    await redisClient.disconnect();
+  });
+
+  it("should return the testSuiteRateLimiter for specific tokens", () => {
+    const limiter = getRateLimiter("crawl" as RateLimiterMode, "a01ccae");
+    expect(limiter).toBe(testSuiteRateLimiter);
+
+    const limiter2 = getRateLimiter("scrape" as RateLimiterMode, "6254cf9");
+    expect(limiter2).toBe(testSuiteRateLimiter);
+  });
+
+  it("should return the serverRateLimiter if mode is not found", () => {
+    const limiter = getRateLimiter("nonexistent" as RateLimiterMode, "someToken");
+    expect(limiter).toBe(serverRateLimiter);
+  });
+
+  it("should return the correct rate limiter based on mode and plan", () => {
+    const limiter = getRateLimiter("crawl" as RateLimiterMode, "someToken", "free");
+    expect(limiter.points).toBe(2);
+
+    const limiter2 = getRateLimiter("scrape" as RateLimiterMode, "someToken", "standard");
+    expect(limiter2.points).toBe(50);
+
+    const limiter3 = getRateLimiter("search" as RateLimiterMode, "someToken", "growth");
+    expect(limiter3.points).toBe(500);
+
+    const limiter4 = getRateLimiter("crawlStatus" as RateLimiterMode, "someToken", "growth");
+    expect(limiter4.points).toBe(150);
+  });
+
+  it("should return the default rate limiter if plan is not provided", () => {
+    const limiter = getRateLimiter("crawl" as RateLimiterMode, "someToken");
+    expect(limiter.points).toBe(3);
+
+    const limiter2 = getRateLimiter("scrape" as RateLimiterMode, "someToken");
+    expect(limiter2.points).toBe(20);
+  });
+
+  it("should create a new RateLimiterRedis instance with correct parameters", () => {
+    const keyPrefix = "test-prefix";
+    const points = 10;
+    const limiter = new RateLimiterRedis({
+      storeClient: redisClient,
+      keyPrefix,
+      points,
+      duration: 60,
+    });
+
+    expect(limiter.keyPrefix).toBe(keyPrefix);
+    expect(limiter.points).toBe(points);
+    expect(limiter.duration).toBe(60);
+  });
+
+  it("should return the correct rate limiter for 'preview' mode", () => {
+    const limiter = getRateLimiter("preview" as RateLimiterMode, "someToken", "free");
+    expect(limiter.points).toBe(5);
+
+    const limiter2 = getRateLimiter("preview" as RateLimiterMode, "someToken");
+    expect(limiter2.points).toBe(5);
+  });
+
+  it("should return the correct rate limiter for 'account' mode", () => {
+    const limiter = getRateLimiter("account" as RateLimiterMode, "someToken", "free");
+    expect(limiter.points).toBe(100);
+
+    const limiter2 = getRateLimiter("account" as RateLimiterMode, "someToken");
+    expect(limiter2.points).toBe(100);
+  });
+
+  it("should return the correct rate limiter for 'crawlStatus' mode", () => {
+    const limiter = getRateLimiter("crawlStatus" as RateLimiterMode, "someToken", "free");
+    expect(limiter.points).toBe(150);
+
+    const limiter2 = getRateLimiter("crawlStatus" as RateLimiterMode, "someToken");
+    expect(limiter2.points).toBe(150);
+  });
+});
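Note: taken together, the assertions above fix a small mode-by-plan points table (the RateLimiterRedis example uses a 60-second window). This map simply restates what the test asserts, with (default) meaning no plan argument:

```ts
// Values read directly off the assertions in the test above; only the
// combinations the test exercises are listed.
const assertedPoints: Record<string, number> = {
  'crawl/free': 2,          'crawl/(default)': 3,
  'scrape/standard': 50,    'scrape/(default)': 20,
  'search/growth': 500,
  'crawlStatus/free': 150,  'crawlStatus/growth': 150, 'crawlStatus/(default)': 150,
  'preview/free': 5,        'preview/(default)': 5,
  'account/free': 100,      'account/(default)': 100,
};
```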
@@ -1,4 +1,4 @@
-import { ExtractorOptions } from "./lib/entities";
+import { ExtractorOptions, Document } from "./lib/entities";

 export interface CrawlResult {
   source: string;
@@ -43,6 +43,34 @@ export interface FirecrawlJob {
   num_tokens?: number,
 }

+export interface FirecrawlScrapeResponse {
+  statusCode: number;
+  body: {
+    status: string;
+    data: Document;
+  };
+  error?: string;
+}
+
+export interface FirecrawlCrawlResponse {
+  statusCode: number;
+  body: {
+    status: string;
+    jobId: string;
+  };
+  error?: string;
+}
+
+export interface FirecrawlCrawlStatusResponse {
+  statusCode: number;
+  body: {
+    status: string;
+    data: Document[];
+  };
+  error?: string;
+}
+
 export enum RateLimiterMode {
   Crawl = "crawl",
   CrawlStatus = "crawlStatus",
@@ -1,11 +1,14 @@
-import FirecrawlApp, { JobStatusResponse } from '@mendable/firecrawl-js';
+import FirecrawlApp, { JobStatusResponse } from './firecrawl/src/index' //'@mendable/firecrawl-js';
 import { z } from "zod";

 const app = new FirecrawlApp({apiKey: "fc-YOUR_API_KEY"});

 // Scrape a website:
 const scrapeResult = await app.scrapeUrl('firecrawl.dev');
-console.log(scrapeResult.data.content)
+if (scrapeResult.data) {
+  console.log(scrapeResult.data.content)
+}

 // Crawl a website:
 const crawlResult = await app.crawlUrl('mendable.ai', {crawlerOptions: {excludes: ['blog/*'], limit: 5}}, false);
@@ -23,12 +26,13 @@ while (true) {
   await new Promise(resolve => setTimeout(resolve, 1000)); // wait 1 second
 }

-console.log(job.data[0].content);
+if (job.data) {
+  console.log(job.data[0].content);
+}

 // Search for a query:
 const query = 'what is mendable?'
 const searchResult = await app.search(query)
-console.log(searchResult)

 // LLM Extraction:
 // Define schema to extract contents into using zod schema
@@ -50,7 +54,9 @@ let llmExtractionResult = await app.scrapeUrl("https://news.ycombinator.com", {
   extractorOptions: { extractionSchema: zodSchema },
 });

-console.log(llmExtractionResult.data.llm_extraction);
+if (llmExtractionResult.data) {
+  console.log(llmExtractionResult.data.llm_extraction);
+}

 // Define schema to extract contents into using json schema
 const jsonSchema = {
@@ -80,4 +86,7 @@ llmExtractionResult = await app.scrapeUrl("https://news.ycombinator.com", {
   extractorOptions: { extractionSchema: jsonSchema },
 });

-console.log(llmExtractionResult.data.llm_extraction);
+if (llmExtractionResult.data) {
+  console.log(llmExtractionResult.data.llm_extraction);
+}
@@ -176,6 +176,11 @@ async function checkStatusExample(jobId) {
 checkStatusExample('your_job_id_here');
 ```

+## Running Locally
+To use the SDK when running Firecrawl locally, you can change the initial Firecrawl app instance to:
+```js
+const app = new FirecrawlApp({ apiKey: "YOUR_API_KEY", apiUrl: "http://localhost:3002" });
+```
+
 ## Error Handling

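Note: a short end-to-end sketch of the local setup the new README section describes, reusing only calls that appear elsewhere in this diff (the target URL is arbitrary):

```ts
// Sketch: scraping through a locally running Firecrawl instance via the
// apiUrl option added in this changeset. The target URL is arbitrary.
import FirecrawlApp from '@mendable/firecrawl-js';

const app = new FirecrawlApp({ apiKey: 'YOUR_API_KEY', apiUrl: 'http://localhost:3002' });

const result = await app.scrapeUrl('https://example.com');
if (result.data) {
  console.log(result.data.content);
}
```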
@@ -18,9 +18,9 @@ export default class FirecrawlApp {
      * Initializes a new instance of the FirecrawlApp class.
      * @param {FirecrawlAppConfig} config - Configuration options for the FirecrawlApp instance.
      */
-    constructor({ apiKey = null }) {
-        this.apiUrl = "https://api.firecrawl.dev";
+    constructor({ apiKey = null, apiUrl = null }) {
         this.apiKey = apiKey || "";
+        this.apiUrl = apiUrl || "https://api.firecrawl.dev";
         if (!this.apiKey) {
             throw new Error("No API key provided");
         }
apps/js-sdk/firecrawl/package-lock.json (generated, 11 changes)
@@ -1,12 +1,12 @@
 {
   "name": "@mendable/firecrawl-js",
-  "version": "0.0.22",
+  "version": "0.0.28",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
     "": {
       "name": "@mendable/firecrawl-js",
-      "version": "0.0.22",
+      "version": "0.0.28",
       "license": "MIT",
       "dependencies": {
         "axios": "^1.6.8",
@@ -20,6 +20,7 @@
         "@types/axios": "^0.14.0",
         "@types/dotenv": "^8.2.0",
         "@types/jest": "^29.5.12",
+        "@types/mocha": "^10.0.6",
         "@types/node": "^20.12.12",
         "@types/uuid": "^9.0.8",
         "jest": "^29.7.0",
@@ -1071,6 +1072,12 @@
         "pretty-format": "^29.0.0"
       }
     },
+    "node_modules/@types/mocha": {
+      "version": "10.0.6",
+      "resolved": "https://registry.npmjs.org/@types/mocha/-/mocha-10.0.6.tgz",
+      "integrity": "sha512-dJvrYWxP/UcXm36Qn36fxhUKu8A/xMRXVT2cliFF1Z7UA9liG5Psj3ezNSZw+5puH2czDXRLcXQxf8JbJt0ejg==",
+      "dev": true
+    },
     "node_modules/@types/node": {
       "version": "20.12.12",
       "resolved": "https://registry.npmjs.org/@types/node/-/node-20.12.12.tgz",
@@ -1,6 +1,6 @@
 {
   "name": "@mendable/firecrawl-js",
-  "version": "0.0.26",
+  "version": "0.0.28",
   "description": "JavaScript SDK for Firecrawl API",
   "main": "build/index.js",
   "types": "types/index.d.ts",
@@ -33,6 +33,7 @@
     "@types/axios": "^0.14.0",
     "@types/dotenv": "^8.2.0",
     "@types/jest": "^29.5.12",
+    "@types/mocha": "^10.0.6",
    "@types/node": "^20.12.12",
     "@types/uuid": "^9.0.8",
     "jest": "^29.7.0",
@@ -2,6 +2,7 @@ import FirecrawlApp from '../../index';
 import { v4 as uuidv4 } from 'uuid';
 import dotenv from 'dotenv';

+
 dotenv.config();

 const TEST_API_KEY = process.env.TEST_API_KEY;
@@ -29,14 +30,14 @@ describe('FirecrawlApp E2E Tests', () => {
     const app = new FirecrawlApp({ apiKey: "this_is_just_a_preview_token", apiUrl: API_URL });
     const response = await app.scrapeUrl('https://roastmywebsite.ai');
     expect(response).not.toBeNull();
-    expect(response.data.content).toContain("_Roast_");
+    expect(response.data?.content).toContain("_Roast_");
   }, 30000); // 30 seconds timeout

   test.concurrent('should return successful response for valid scrape', async () => {
     const app = new FirecrawlApp({ apiKey: TEST_API_KEY, apiUrl: API_URL });
     const response = await app.scrapeUrl('https://roastmywebsite.ai');
     expect(response).not.toBeNull();
-    expect(response.data.content).toContain("_Roast_");
+    expect(response.data?.content).toContain("_Roast_");
     expect(response.data).toHaveProperty('markdown');
     expect(response.data).toHaveProperty('metadata');
     expect(response.data).not.toHaveProperty('html');
@@ -46,23 +47,23 @@ describe('FirecrawlApp E2E Tests', () => {
     const app = new FirecrawlApp({ apiKey: TEST_API_KEY, apiUrl: API_URL });
     const response = await app.scrapeUrl('https://roastmywebsite.ai', { pageOptions: { includeHtml: true } });
     expect(response).not.toBeNull();
-    expect(response.data.content).toContain("_Roast_");
-    expect(response.data.markdown).toContain("_Roast_");
-    expect(response.data.html).toContain("<h1");
+    expect(response.data?.content).toContain("_Roast_");
+    expect(response.data?.markdown).toContain("_Roast_");
+    expect(response.data?.html).toContain("<h1");
   }, 30000); // 30 seconds timeout

   test.concurrent('should return successful response for valid scrape with PDF file', async () => {
     const app = new FirecrawlApp({ apiKey: TEST_API_KEY, apiUrl: API_URL });
     const response = await app.scrapeUrl('https://arxiv.org/pdf/astro-ph/9301001.pdf');
     expect(response).not.toBeNull();
-    expect(response.data.content).toContain('We present spectrophotometric observations of the Broad Line Radio Galaxy');
+    expect(response.data?.content).toContain('We present spectrophotometric observations of the Broad Line Radio Galaxy');
   }, 30000); // 30 seconds timeout

   test.concurrent('should return successful response for valid scrape with PDF file without explicit extension', async () => {
     const app = new FirecrawlApp({ apiKey: TEST_API_KEY, apiUrl: API_URL });
     const response = await app.scrapeUrl('https://arxiv.org/pdf/astro-ph/9301001');
     expect(response).not.toBeNull();
-    expect(response.data.content).toContain('We present spectrophotometric observations of the Broad Line Radio Galaxy');
+    expect(response.data?.content).toContain('We present spectrophotometric observations of the Broad Line Radio Galaxy');
   }, 30000); // 30 seconds timeout

   test.concurrent('should throw error for invalid API key on crawl', async () => {
@@ -112,15 +113,15 @@ describe('FirecrawlApp E2E Tests', () => {

     expect(statusResponse).not.toBeNull();
     expect(statusResponse.status).toBe('completed');
-    expect(statusResponse.data.length).toBeGreaterThan(0);
+    expect(statusResponse?.data?.length).toBeGreaterThan(0);
   }, 35000); // 35 seconds timeout

   test.concurrent('should return successful response for search', async () => {
     const app = new FirecrawlApp({ apiKey: TEST_API_KEY, apiUrl: API_URL });
     const response = await app.search("test query");
     expect(response).not.toBeNull();
-    expect(response.data[0].content).toBeDefined();
-    expect(response.data.length).toBeGreaterThan(2);
+    expect(response?.data?.[0]?.content).toBeDefined();
+    expect(response?.data?.length).toBeGreaterThan(2);
   }, 30000); // 30 seconds timeout

   test.concurrent('should throw error for invalid API key on search', async () => {
@@ -146,10 +147,10 @@ describe('FirecrawlApp E2E Tests', () => {
       }
     });
     expect(response).not.toBeNull();
-    expect(response.data.llm_extraction).toBeDefined();
-    const llmExtraction = response.data.llm_extraction;
-    expect(llmExtraction.company_mission).toBeDefined();
-    expect(typeof llmExtraction.supports_sso).toBe('boolean');
-    expect(typeof llmExtraction.is_open_source).toBe('boolean');
+    expect(response.data?.llm_extraction).toBeDefined();
+    const llmExtraction = response.data?.llm_extraction;
+    expect(llmExtraction?.company_mission).toBeDefined();
+    expect(typeof llmExtraction?.supports_sso).toBe('boolean');
+    expect(typeof llmExtraction?.is_open_source).toBe('boolean');
   }, 30000); // 30 seconds timeout
 });
@@ -43,6 +43,6 @@ describe('the firecrawl JS SDK', () => {
       expect.objectContaining({ headers: expect.objectContaining({'Authorization': `Bearer ${apiKey}`}) }),
     )
     expect(scrapedData.success).toBe(true);
-    expect(scrapedData.data.metadata.title).toEqual('Mendable');
+    expect(scrapedData?.data?.metadata.title).toEqual('Mendable');
   });
 })
@@ -9,6 +9,102 @@ export interface FirecrawlAppConfig {
   apiUrl?: string | null;
 }

+/**
+ * Metadata for a Firecrawl document.
+ */
+export interface FirecrawlDocumentMetadata {
+  title?: string;
+  description?: string;
+  language?: string;
+  keywords?: string;
+  robots?: string;
+  ogTitle?: string;
+  ogDescription?: string;
+  ogUrl?: string;
+  ogImage?: string;
+  ogAudio?: string;
+  ogDeterminer?: string;
+  ogLocale?: string;
+  ogLocaleAlternate?: string[];
+  ogSiteName?: string;
+  ogVideo?: string;
+  dctermsCreated?: string;
+  dcDateCreated?: string;
+  dcDate?: string;
+  dctermsType?: string;
+  dcType?: string;
+  dctermsAudience?: string;
+  dctermsSubject?: string;
+  dcSubject?: string;
+  dcDescription?: string;
+  dctermsKeywords?: string;
+  modifiedTime?: string;
+  publishedTime?: string;
+  articleTag?: string;
+  articleSection?: string;
+  sourceURL?: string;
+  pageStatusCode?: number;
+  pageError?: string;
+  [key: string]: any;
+}
+
+/**
+ * Document interface for Firecrawl.
+ */
+export interface FirecrawlDocument {
+  id?: string;
+  url?: string;
+  content: string;
+  markdown?: string;
+  html?: string;
+  llm_extraction?: Record<string, any>;
+  createdAt?: Date;
+  updatedAt?: Date;
+  type?: string;
+  metadata: FirecrawlDocumentMetadata;
+  childrenLinks?: string[];
+  provider?: string;
+  warning?: string;
+
+  index?: number;
+}
+
+/**
+ * Response interface for scraping operations.
+ */
+export interface ScrapeResponse {
+  success: boolean;
+  data?: FirecrawlDocument;
+  error?: string;
+}
+/**
+ * Response interface for searching operations.
+ */
+export interface SearchResponse {
+  success: boolean;
+  data?: FirecrawlDocument[];
+  error?: string;
+}
+/**
+ * Response interface for crawling operations.
+ */
+export interface CrawlResponse {
+  success: boolean;
+  jobId?: string;
+  data?: FirecrawlDocument[];
+  error?: string;
+}
+/**
+ * Response interface for job status checks.
+ */
+export interface JobStatusResponse {
+  success: boolean;
+  status: string;
+  jobId?: string;
+  data?: FirecrawlDocument[];
+  partial_data?: FirecrawlDocument[];
+  error?: string;
+}
 /**
  * Generic parameter interface.
  */
@@ -20,59 +116,20 @@ export interface Params {
     extractionPrompt?: string;
   };
 }
-
-/**
- * Response interface for scraping operations.
- */
-export interface ScrapeResponse {
-  success: boolean;
-  data?: any;
-  error?: string;
-}
-
-/**
- * Response interface for searching operations.
- */
-export interface SearchResponse {
-  success: boolean;
-  data?: any;
-  error?: string;
-}
-
-/**
- * Response interface for crawling operations.
- */
-export interface CrawlResponse {
-  success: boolean;
-  jobId?: string;
-  data?: any;
-  error?: string;
-}
-
-/**
- * Response interface for job status checks.
- */
-export interface JobStatusResponse {
-  success: boolean;
-  status: string;
-  jobId?: string;
-  data?: any;
-  partial_data?: any,
-  error?: string;
-}

 /**
  * Main class for interacting with the Firecrawl API.
  */
 export default class FirecrawlApp {
   private apiKey: string;
-  private apiUrl: string = "https://api.firecrawl.dev";
+  private apiUrl: string;

   /**
    * Initializes a new instance of the FirecrawlApp class.
    * @param {FirecrawlAppConfig} config - Configuration options for the FirecrawlApp instance.
    */
-  constructor({ apiKey = null }: FirecrawlAppConfig) {
+  constructor({ apiKey = null, apiUrl = null }: FirecrawlAppConfig) {
     this.apiKey = apiKey || "";
+    this.apiUrl = apiUrl || "https://api.firecrawl.dev";
     if (!this.apiKey) {
       throw new Error("No API key provided");
     }
@@ -112,7 +169,7 @@ export default class FirecrawlApp {
       const response: AxiosResponse = await axios.post(
         this.apiUrl + "/v0/scrape",
         jsonData,
-        { headers },
+        { headers }
       );
       if (response.status === 200) {
         const responseData = response.data;
@@ -231,7 +288,9 @@ export default class FirecrawlApp {
           success: true,
           status: response.data.status,
           data: response.data.data,
-          partial_data: !response.data.data ? response.data.partial_data : undefined,
+          partial_data: !response.data.data
+            ? response.data.partial_data
+            : undefined,
         };
       } else {
         this.handleError(response, "check crawl status");
@@ -252,10 +311,10 @@ export default class FirecrawlApp {
   */
   prepareHeaders(idempotencyKey?: string): AxiosRequestHeaders {
     return {
-      'Content-Type': 'application/json',
-      'Authorization': `Bearer ${this.apiKey}`,
-      ...(idempotencyKey ? { 'x-idempotency-key': idempotencyKey } : {}),
-    } as AxiosRequestHeaders & { 'x-idempotency-key'?: string };
+      "Content-Type": "application/json",
+      Authorization: `Bearer ${this.apiKey}`,
+      ...(idempotencyKey ? { "x-idempotency-key": idempotencyKey } : {}),
+    } as AxiosRequestHeaders & { "x-idempotency-key"?: string };
   }

   /**
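Note: only the quoting style changed here, not the resulting headers. A sketch of the object prepareHeaders builds (the idempotency key value is invented for illustration):

```ts
// Sketch of the header object produced after this change; the key value
// is invented, and the quoting change above does not alter the keys.
const app = new FirecrawlApp({ apiKey: 'fc-YOUR_API_KEY' });
const headers = app.prepareHeaders('my-idempotency-key');
// => {
//      "Content-Type": "application/json",
//      "Authorization": "Bearer fc-YOUR_API_KEY",
//      "x-idempotency-key": "my-idempotency-key"
//    }
```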
@@ -317,7 +376,9 @@ export default class FirecrawlApp {
         if (checkInterval < 2) {
           checkInterval = 2;
         }
-        await new Promise((resolve) => setTimeout(resolve, checkInterval * 1000)); // Wait for the specified timeout before checking again
+        await new Promise((resolve) =>
+          setTimeout(resolve, checkInterval * 1000)
+        ); // Wait for the specified timeout before checking again
       } else {
         throw new Error(
           `Crawl job failed or was stopped. Status: ${statusData.status}`
apps/js-sdk/firecrawl/types/index.d.ts (vendored, 133 changes)
@@ -8,8 +8,101 @@ export interface FirecrawlAppConfig {
     apiUrl?: string | null;
 }
 /**
- * Generic parameter interface.
+ * Metadata for a Firecrawl document.
  */
+export interface FirecrawlDocumentMetadata {
+    title?: string;
+    description?: string;
+    language?: string;
+    keywords?: string;
+    robots?: string;
+    ogTitle?: string;
+    ogDescription?: string;
+    ogUrl?: string;
+    ogImage?: string;
+    ogAudio?: string;
+    ogDeterminer?: string;
+    ogLocale?: string;
+    ogLocaleAlternate?: string[];
+    ogSiteName?: string;
+    ogVideo?: string;
+    dctermsCreated?: string;
+    dcDateCreated?: string;
+    dcDate?: string;
+    dctermsType?: string;
+    dcType?: string;
+    dctermsAudience?: string;
+    dctermsSubject?: string;
+    dcSubject?: string;
+    dcDescription?: string;
+    dctermsKeywords?: string;
+    modifiedTime?: string;
+    publishedTime?: string;
+    articleTag?: string;
+    articleSection?: string;
+    sourceURL?: string;
+    pageStatusCode?: number;
+    pageError?: string;
+    [key: string]: any;
+}
+/**
+ * Document interface for Firecrawl.
+ */
+export interface FirecrawlDocument {
+    id?: string;
+    url?: string;
+    content: string;
+    markdown?: string;
+    html?: string;
+    llm_extraction?: Record<string, any>;
+    createdAt?: Date;
+    updatedAt?: Date;
+    type?: string;
+    metadata: FirecrawlDocumentMetadata;
+    childrenLinks?: string[];
+    provider?: string;
+    warning?: string;
+    index?: number;
+}
+/**
+ * Response interface for scraping operations.
+ */
+export interface ScrapeResponse {
+    success: boolean;
+    data?: FirecrawlDocument;
+    error?: string;
+}
+/**
+ * Response interface for searching operations.
+ */
+export interface SearchResponse {
+    success: boolean;
+    data?: FirecrawlDocument[];
+    error?: string;
+}
+/**
+ * Response interface for crawling operations.
+ */
+export interface CrawlResponse {
+    success: boolean;
+    jobId?: string;
+    data?: FirecrawlDocument[];
+    error?: string;
+}
+/**
+ * Response interface for job status checks.
+ */
+export interface JobStatusResponse {
+    success: boolean;
+    status: string;
+    jobId?: string;
+    data?: FirecrawlDocument[];
+    partial_data?: FirecrawlDocument[];
+    error?: string;
+}
+/**
+ * Generic parameter interface.
+ */
 export interface Params {
     [key: string]: any;
     extractorOptions?: {
@@ -18,42 +111,6 @@ export interface Params {
         extractionPrompt?: string;
     };
 }
-/**
- * Response interface for scraping operations.
- */
-export interface ScrapeResponse {
-    success: boolean;
-    data?: any;
-    error?: string;
-}
-/**
- * Response interface for searching operations.
- */
-export interface SearchResponse {
-    success: boolean;
-    data?: any;
-    error?: string;
-}
-/**
- * Response interface for crawling operations.
- */
-export interface CrawlResponse {
-    success: boolean;
-    jobId?: string;
-    data?: any;
-    error?: string;
-}
-/**
- * Response interface for job status checks.
- */
-export interface JobStatusResponse {
-    success: boolean;
-    status: string;
-    jobId?: string;
-    data?: any;
-    partial_data?: any;
-    error?: string;
-}
 /**
  * Main class for interacting with the Firecrawl API.
  */
@@ -64,7 +121,7 @@ export default class FirecrawlApp {
      * Initializes a new instance of the FirecrawlApp class.
      * @param {FirecrawlAppConfig} config - Configuration options for the FirecrawlApp instance.
      */
-    constructor({ apiKey }: FirecrawlAppConfig);
+    constructor({ apiKey, apiUrl }: FirecrawlAppConfig);
     /**
      * Scrapes a URL using the Firecrawl API.
      * @param {string} url - The URL to scrape.
apps/js-sdk/package-lock.json (generated, 40 changes)
@@ -11,10 +11,8 @@
       "dependencies": {
         "@mendable/firecrawl-js": "^0.0.19",
         "axios": "^1.6.8",
-        "dotenv": "^16.4.5",
         "ts-node": "^10.9.2",
         "typescript": "^5.4.5",
-        "uuid": "^9.0.1",
         "zod": "^3.23.8"
       },
       "devDependencies": {
@@ -452,15 +450,6 @@
       "resolved": "https://registry.npmjs.org/@tsconfig/node16/-/node16-1.0.4.tgz",
       "integrity": "sha512-vxhUy4J8lyeyinH7Azl1pdd43GJhZH/tP2weN8TntQblOY+A0XbT8DJk1/oCPuOOyg/Ja757rG0CgHcWC8OfMA=="
     },
-    "node_modules/@types/node": {
-      "version": "20.12.11",
-      "resolved": "https://registry.npmjs.org/@types/node/-/node-20.12.11.tgz",
-      "integrity": "sha512-vDg9PZ/zi+Nqp6boSOT7plNuthRugEKixDv5sFTIpkE89MmNtEArAShI4mxuX2+UrLEe9pxC1vm2cjm9YlWbJw==",
-      "peer": true,
-      "dependencies": {
-        "undici-types": "~5.26.4"
-      }
-    },
     "node_modules/acorn": {
       "version": "8.11.3",
       "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.11.3.tgz",
@@ -532,17 +521,6 @@
         "node": ">=0.3.1"
       }
     },
-    "node_modules/dotenv": {
-      "version": "16.4.5",
-      "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-16.4.5.tgz",
-      "integrity": "sha512-ZmdL2rui+eB2YwhsWzjInR8LldtZHGDoQ1ugH85ppHKwpUHL7j7rN0Ti9NCnGiQbhaZ11FpR+7ao1dNsmduNUg==",
-      "engines": {
-        "node": ">=12"
-      },
-      "funding": {
-        "url": "https://dotenvx.com"
-      }
-    },
     "node_modules/esbuild": {
       "version": "0.20.2",
       "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.20.2.tgz",
@@ -750,24 +728,6 @@
         "node": ">=14.17"
       }
     },
-    "node_modules/undici-types": {
-      "version": "5.26.5",
-      "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz",
-      "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==",
-      "peer": true
-    },
-    "node_modules/uuid": {
-      "version": "9.0.1",
-      "resolved": "https://registry.npmjs.org/uuid/-/uuid-9.0.1.tgz",
-      "integrity": "sha512-b+1eJOlsR9K8HJpow9Ok3fiWOWSIcIzXodvv0rQjVoOVNpWMpxf1wZNpt4y9h10odCNrqnYp1OBzRktckBe3sA==",
-      "funding": [
-        "https://github.com/sponsors/broofa",
-        "https://github.com/sponsors/ctavan"
-      ],
-      "bin": {
-        "uuid": "dist/bin/uuid"
-      }
-    },
     "node_modules/v8-compile-cache-lib": {
       "version": "3.0.1",
       "resolved": "https://registry.npmjs.org/v8-compile-cache-lib/-/v8-compile-cache-lib-3.0.1.tgz",