diff --git a/docs.json b/docs.json
index c534233..85df8c6 100644
--- a/docs.json
+++ b/docs.json
@@ -54,16 +54,7 @@
"services/crawl",
"services/monitor",
"services/schema",
- "services/history",
- {
- "group": "Additional Parameters",
- "pages": [
- "services/additional-parameters/headers",
- "services/additional-parameters/pagination",
- "services/additional-parameters/proxy",
- "services/additional-parameters/wait-ms"
- ]
- }
+ "services/history"
]
},
{
@@ -92,8 +83,7 @@
"services/mcp-server/claude",
"services/mcp-server/smithery"
]
- },
- "services/toonify"
+ }
]
},
{
@@ -116,7 +106,14 @@
{
"group": "No-code",
"pages": [
- "integrations/n8n",
+ {
+ "group": "n8n",
+ "icon": "/logo/n8n.svg",
+ "pages": [
+ "integrations/n8n",
+ "integrations/n8n-transition-from-v1-to-v2"
+ ]
+ },
"integrations/make"
]
},
@@ -308,16 +305,6 @@
"v1/mcp-server/claude",
"v1/mcp-server/smithery"
]
- },
- "v1/toonify",
- {
- "group": "Additional Parameters",
- "pages": [
- "v1/additional-parameters/headers",
- "v1/additional-parameters/pagination",
- "v1/additional-parameters/proxy",
- "v1/additional-parameters/wait-ms"
- ]
}
]
}
diff --git a/install.md b/install.md
index d057bb3..0f7d3f8 100644
--- a/install.md
+++ b/install.md
@@ -1,15 +1,15 @@
---
-title: Installation
-description: 'Install and get started with ScrapeGraphAI v2 SDKs'
+title: "Installation"
+description: "Install and get started with ScrapeGraphAI v2 SDKs"
---
## Prerequisites
-- Obtain your **API key** by signing up on the [ScrapeGraphAI Dashboard](https://scrapegraphai.com/dashboard)
-
----
-
-## Python SDK
+- Obtain your **API key** by signing up on the [ScrapeGraphAI Dashboard](https://scrapegraphai.com/dashboard)
+
+---
+
+## Python SDK
Requires **Python ≥ 3.12**.
@@ -33,7 +29,7 @@ print(res.data.json_data if res.status == "success" else res.error)
```
-You can also set the `SGAI_API_KEY` environment variable and initialize the client without parameters: `sgai = ScrapeGraphAI()`.
+ You can also set the `SGAI_API_KEY` environment variable and initialize the client without parameters: `sgai = ScrapeGraphAI()`.
For more advanced usage, see the [Python SDK documentation](/sdks/python).
@@ -42,7 +38,7 @@ For more advanced usage, see the [Python SDK documentation](/sdks/python).
## JavaScript SDK
-Requires **Node.js >= 22**.
+Requires **Node.js \>= 22**.
Install using npm, pnpm, yarn, or bun:
@@ -76,7 +72,7 @@ console.log(data);
```
-Store your API keys securely in environment variables. Use `.env` files and libraries like `dotenv` to load them into your app.
+ Store your API keys securely in environment variables. Use `.env` files and libraries like `dotenv` to load them into your app.
For more advanced usage, see the [JavaScript SDK documentation](/sdks/javascript).
@@ -86,22 +82,29 @@ For more advanced usage, see the [JavaScript SDK documentation](/sdks/javascript
## Key Concepts
### Scrape (formerly Markdownify)
+
Convert any webpage into markdown, HTML, screenshot, or branding format. [Learn more](/services/scrape)
### Extract (formerly SmartScraper)
+
Extract specific information from any webpage using AI. Provide a URL and a prompt describing what you want to extract. [Learn more](/services/extract)
### Search (formerly SearchScraper)
+
Search and extract information from multiple web sources using AI. Start with just a query - Search will find relevant websites and extract the information you need. [Learn more](/services/search)
### Crawl (formerly SmartCrawler)
+
Multi-page website crawling with flexible output formats. Traverse multiple pages, follow links, and return content in your preferred format. [Learn more](/services/crawl)
### Monitor
+
Scheduled web monitoring with AI-powered extraction. Set up recurring scraping jobs that automatically extract data on a cron schedule. [Learn more](/services/monitor)
### Structured Output with Schemas
+
Both SDKs support structured output using schemas:
+
- **Python**: Use Pydantic models
- **JavaScript**: Use Zod schemas
@@ -165,4 +168,4 @@ console.log(data);
- Explore our [use cases](/use-cases/overview) to see how ScrapeGraphAI can help your projects
- Check out the [Cookbook](/cookbook/introduction) for real-world examples
- Read the [API Reference](/api-reference/introduction) for detailed endpoint documentation
-- Join our [Discord community](https://discord.gg/uJN7TYcpNa) for support and updates
+- Join our [Discord community](https://discord.gg/uJN7TYcpNa) for support and updates
\ No newline at end of file
diff --git a/integrations/images/make-crawl-output.png b/integrations/images/make-crawl-output.png
new file mode 100644
index 0000000..919b9be
Binary files /dev/null and b/integrations/images/make-crawl-output.png differ
diff --git a/integrations/images/make-crawl.png b/integrations/images/make-crawl.png
new file mode 100644
index 0000000..fc80a9e
Binary files /dev/null and b/integrations/images/make-crawl.png differ
diff --git a/integrations/images/make-get-crawl-status.png b/integrations/images/make-get-crawl-status.png
deleted file mode 100644
index 0fda62e..0000000
Binary files a/integrations/images/make-get-crawl-status.png and /dev/null differ
diff --git a/integrations/images/make-get-history.png b/integrations/images/make-get-history.png
new file mode 100644
index 0000000..ed99f30
Binary files /dev/null and b/integrations/images/make-get-history.png differ
diff --git a/integrations/images/make-start-crawl.png b/integrations/images/make-start-crawl.png
deleted file mode 100644
index e62875a..0000000
Binary files a/integrations/images/make-start-crawl.png and /dev/null differ
diff --git a/integrations/images/n8n/n8n-fetch-config.png b/integrations/images/n8n/n8n-fetch-config.png
new file mode 100644
index 0000000..495fd8f
Binary files /dev/null and b/integrations/images/n8n/n8n-fetch-config.png differ
diff --git a/integrations/images/n8n/n8n-output-modes.png b/integrations/images/n8n/n8n-output-modes.png
new file mode 100644
index 0000000..19540f0
Binary files /dev/null and b/integrations/images/n8n/n8n-output-modes.png differ
diff --git a/integrations/images/n8n/n8n-resources.png b/integrations/images/n8n/n8n-resources.png
new file mode 100644
index 0000000..9bc692b
Binary files /dev/null and b/integrations/images/n8n/n8n-resources.png differ
diff --git a/integrations/images/n8n/v1-resources.png b/integrations/images/n8n/v1-resources.png
new file mode 100644
index 0000000..5ced239
Binary files /dev/null and b/integrations/images/n8n/v1-resources.png differ
diff --git a/integrations/make.mdx b/integrations/make.mdx
index 6b1b2d6..498e3fe 100644
--- a/integrations/make.mdx
+++ b/integrations/make.mdx
@@ -1,6 +1,7 @@
---
-title: 'Make (Integromat)'
+title: 'Make'
description: 'Use ScrapeGraphAI inside Make.com scenarios to scrape, extract, search, crawl, and monitor web pages'
+icon: '/logo/make.png'
---
## Overview
@@ -94,12 +95,12 @@ This scenario runs daily, extracts all products from an Amazon search page, and
## Modules
-### Scrape
+### Scrape a URL
Fetch a URL and return its content in one or more formats: Markdown, HTML, links, images, a plain-text summary, or branding elements.
-
+
| Field | Description |
@@ -110,12 +111,12 @@ Fetch a URL and return its content in one or more formats: Markdown, HTML, links
---
-### Extract
+### Extract data from URL
Send a URL to ScrapeGraph and get back structured JSON — driven by a natural-language prompt and an optional JSON schema.
-
+
| Field | Description |
@@ -127,12 +128,12 @@ Send a URL to ScrapeGraph and get back structured JSON — driven by a natural-l
---
-### Search
+### Search web
Run a web search and get page content returned inline — optionally with AI extraction applied to each result.
-
+
| Field | Description |
@@ -146,12 +147,12 @@ Run a web search and get page content returned inline — optionally with AI ext
---
-### Start Crawl
+### Crawl a website
-Start an async multi-page crawl from an entry URL. Returns a **Crawl Job ID** to pass into **Get Crawl Status**.
+Start a multi-page crawl from an entry URL. The module polls internally and returns the **completed** crawl in a single bundle — a `pages` array with one entry per crawled page, each carrying a `scrapeRefId` you can pass to **Get a past result** to fetch its full content.
-
+
| Field | Description |
@@ -163,34 +164,26 @@ Start an async multi-page crawl from an entry URL. Returns a **Crawl Job ID** to
| Max Links Per Page | Maximum links to follow per page |
| Include / Exclude Patterns | URL glob patterns, e.g. `/blog/*` |
----
-
-### Get Crawl Status
-
-Poll the status and results of a crawl job started by **Start Crawl**.
+**Output:**
-
+
-| Field | Description |
-|-------|-------------|
-| Crawl Job ID | The `id` output from Start Crawl — map with `{{1.id}}` |
-
-Returns `status` (`running` / `completed` / `failed`) and a `pages` array when completed.
+The bundle includes the `Crawl Job ID`, a `Status` of `completed`, and a `pages[]` array. Each page has `url`, `depth`, `title`, `contentType`, `status`, and `scrapeRefId`.
-Add a **Tools → Sleep** module (60 seconds) between Start Crawl and Get Crawl Status to give the crawl time to finish before polling. For large crawls, use two separate scenarios with a Make Data Store to persist the job ID.
+Crawls can take a while on large sites. The module waits for completion before emitting its bundle — for very large crawls (hundreds of pages), increase your scenario's execution timeout in **Scenario settings**.
---
-### Create Monitor
+### Create monitor
Schedule ScrapeGraph to fetch a URL on a recurring cron schedule and detect changes between runs.
-
+
| Field | Description |
@@ -211,28 +204,48 @@ Schedule ScrapeGraph to fetch a URL on a recurring cron schedule and detect chan
| Weekly on Monday | `0 9 * * 1` |
-Run Create Monitor once manually to set up the monitor, then use Get Monitor Activity in a separate scheduled scenario to fetch what changed.
+Run Create monitor once manually to set up the monitor, then use Get monitor activity in a separate scheduled scenario to fetch what changed.
---
-### Get Monitor Activity
+### Get monitor activity
Fetch the latest activity ticks from an existing monitor.
-
+
| Field | Description |
|-------|-------------|
-| Monitor ID | The `id` returned by Create Monitor |
+| Monitor ID | The `id` returned by Create monitor |
| Limit | Number of ticks to return (1–100, default 20) |
Returns a `ticks` array where each entry has `changed` (boolean), `diffs`, `status`, and `createdAt`.
---
+### Get a past result
+
+Fetch a stored job result by its ID. Most useful for retrieving the full content of a crawled page using the `scrapeRefId` from **Crawl a website**.
+
+
+
+
+
+| Field | Description |
+|-------|-------------|
+| Entry ID | A job ID or `scrapeRefId` from a crawl page |
+
+Returns the full stored entry — `result` (the original response payload), `metadata` (content type and other run details), `params` (the inputs the job was run with), `service`, `status`, and `createdAt`.
+
+
+Combine **Crawl a website → Iterator → Get a past result** to crawl a site and retrieve the full markdown / HTML / extracted JSON for every page in one scenario. Map the iterator's `scrapeRefId` into the Entry ID field — the module runs once per crawled page.
+
+
+---
+
## Deprecated modules
The following modules from the v1 integration are still visible but no longer functional. Use the v2 modules above instead.
diff --git a/integrations/n8n-transition-from-v1-to-v2.mdx b/integrations/n8n-transition-from-v1-to-v2.mdx
new file mode 100644
index 0000000..964ac39
--- /dev/null
+++ b/integrations/n8n-transition-from-v1-to-v2.mdx
@@ -0,0 +1,142 @@
+---
+title: 'n8n: Transition Guide from v1 to v2'
+sidebarTitle: 'Transition Guide from v1 to v2'
+description: 'Move your n8n workflows from n8n-nodes-scrapegraphai 0.x to 1.0.2+'
+---
+
+## Transition from v1 to v2
+
+
+ v1 of the n8n node (`0.x`, last published `0.1.21`) calls the deprecated v1 API, which is scheduled to stop working within 7 days. Update to `1.0.2` and rebuild any workflows that use the renamed resources or fields below.
+
+
+If you're on `n8n-nodes-scrapegraphai@0.x`, this is your migration checkpoint.
+
+Before anything else, update the community node in n8n at **Settings → Community Nodes → `n8n-nodes-scrapegraphai` → Update to `1.0.2`** (or later). Your existing `SGAI-APIKEY` works as-is — no re-auth needed.
+
+## Method-by-method migration
+
+Use this table to map old resources to the new ones. Details and field changes follow below.
+
+| v1 | v2 | Notes |
+|----|-----|------|
+| `Markdownify` | [**`Scrape`**](/integrations/n8n) with format `Markdown` | One Scrape node with a Markdown format entry replaces Markdownify. |
+| `SmartScraper` | [**`Extract`**](/integrations/n8n) | Same job — structured extraction from a URL. |
+| `SearchScraper` | [**`Search`**](/integrations/n8n) | Renamed; the prompt-style query field is now called `Query`. |
+| `SmartCrawler` (single call) | [**`Crawl.Start`**](/integrations/n8n), then [**`Crawl.GetStatus`**](/integrations/n8n), [**`Crawl.Stop`**](/integrations/n8n), [**`Crawl.Resume`**](/integrations/n8n), [**`Crawl.Delete`**](/integrations/n8n) | Crawl is async — Start returns a job ID, poll Get Status. |
+| `Scrape` | [**`Scrape`**](/integrations/n8n) | Same name, expanded — multi-format per call (markdown, HTML, JSON, screenshot, links, summary, branding). |
+| `AgenticScraper` | **Removed** | Use `Extract` with **Fetch Config** (mode `JS`, stealth, wait) for hard pages. |
+| — | [**`Monitor`**](/integrations/n8n) (new) | Cron-scheduled fetches with diff detection and webhooks. |
+| — | [**`History`**](/integrations/n8n) (new) | Look up past results by `scrapeRefId`. |
+| — | [**`Credit`**](/integrations/n8n) (new) | Check remaining credits and plan. |
+
+The resource picker at a glance — **before** (v1, 6 resources):
+
+
+
+**After** (v2, 7 resources):
+
+
+
+## Step-by-step rebuild
+
+### 1. Markdownify → [`Scrape`](/integrations/n8n)
+
+**Before:** A dedicated `Markdownify` resource that always returned markdown.
+
+**After:** Use the **Scrape** resource with one `Markdown` format entry. Same job, more flexible — you can mix in HTML, Links, Summary, or Branding in the same call.
+
+### 2. SmartScraper → [`Extract`](/integrations/n8n)
+
+**Before (v1):** `Website URL` + `User Prompt`, plus optional flat fields like `Render Heavy JS` and `Number of Scrolls`.
+
+**After (v2):** `URL` + `Prompt`, optional `Schema (JSON)` behind a `Use JSON Schema` toggle. All fetch knobs move into a single **Fetch Config** collection shared across every resource.
+
+| v1 field | v2 field |
+|---|---|
+| `Website URL` (`websiteUrl`) | `URL` |
+| `User Prompt` (`userPrompt`) | `Prompt` |
+| `Output Schema` (`outputSchema`) | `Schema (JSON)` (behind `Use JSON Schema` toggle) |
+| `Render Heavy JS` (`renderHeavyJs`) | `Fetch Config → Mode` set to `JS` |
+| `Number of Scrolls` (`numberOfScrolls`) | `Fetch Config → Scrolls` |
+
+Fetch Config also adds knobs that didn't exist in v1: `Stealth`, `Wait (Ms)`, `Timeout (Ms)`, `Country`, `Headers (JSON)`, `Cookies (JSON)`.
+
+
+
+### 3. SearchScraper → [`Search`](/integrations/n8n)
+
+**Before:** `User Prompt` + a few flat options.
+
+**After:** `Query` (the search string) plus optional `Rollup Prompt` for AI extraction across all fetched results, optional `Schema (JSON)` behind a toggle, and new fields like `Time Range` and `Location (Country Code)`.
+
+| v1 field | v2 field |
+|---|---|
+| `User Prompt` (`userPrompt`) | `Query` |
+| `Output Schema` (`outputSchema`) | `Schema (JSON)` (behind `Use JSON Schema` toggle) |
+| — | `Rollup Prompt` (new — AI extraction across results) |
+| — | `Time Range` / `Location (Country Code)` (new) |
+
+### 4. SmartCrawler → [`Crawl`](/integrations/n8n) jobs
+
+**Before:** A single synchronous `SmartCrawler` operation.
+
+**After:** Crawl is explicitly async. Start the job, then poll. Five operations are exposed: `Start`, `Get Status`, `Stop`, `Resume`, `Delete`.
+
+A typical chain in n8n:
+
+1. **Crawl → Start** — returns a `cronId`
+2. **Wait** node (~60s)
+3. **Crawl → Get Status** — returns the `pages[]` array
+4. (Optional) **Split Out** + **History → Get** — fetch full content per crawled page
+
+See the full walkthrough on the [n8n integration page](/integrations/n8n#example-crawl-a-site-save-every-page-to-airtable).
+
+### 5. Output shape
+
+Downstream nodes (Set, IF, HTTP Request) that reference v1 paths like `$json.result.markdown` will break — v2 returns a different shape.
+
+The new node ships an **Output** parameter on every content-producing operation (Scrape, Extract, Search) with three modes: **Simplified**, **Raw**, **Selected Fields**. Pick **Simplified** when migrating — it's the closest match to v1.
+
+
+
+## What else changed in v2
+
+- **New Fetch Config knobs** that didn't exist in v1: `Stealth`, `Wait (Ms)`, `Timeout (Ms)`, `Country`, `Headers (JSON)`, `Cookies (JSON)`
+- **New resources**: `Monitor` (cron + diff + webhook), `History` (look up past results by `scrapeRefId`), `Credit` (check usage)
+- **Async crawl model** with five lifecycle ops instead of one synchronous call
+- **AI-Agent friendly** — every content-producing op exposes `Simplified` / `Raw` / `Selected Fields` output modes
+- **Cleaner credentials test** — n8n hits `GET /api/credits` to verify keys
+
+## Recommended path
+
+1. Update the community node: **Settings → Community Nodes → `n8n-nodes-scrapegraphai` → Update to `1.0.2`** (or later)
+2. Open each affected workflow — v1 ScrapeGraphAI nodes will surface as deprecated or fail to execute
+3. Drop in fresh **ScrapeGraphAI** nodes and pick the matching v2 resource from the [migration table](#method-by-method-migration)
+4. Re-map fields per the [step-by-step rebuild](#step-by-step-rebuild) above
+5. Set **Output** to `Simplified` (closest to v1)
+6. Test the node, fix downstream expressions, delete the v1 node
+
+## FAQ
+
+- **Will my workflows keep running until I touch them?** Yes — until the next execution opens the v1 node, which then fails against the deprecated v1 API.
+- **Can I run v1 and v2 side-by-side?** No — same package, version-pinned.
+- **Self-hosted vs n8n Cloud?** Self-hosted: bump the version in **Community Nodes**. n8n Cloud doesn't yet allow community nodes.
+- **I used Agentic Scraper — what now?** Use **Extract** with **Fetch Config → Mode = JS** plus **Stealth** and **Wait (Ms)**.
+
+## Related guides
+
+
+
+ Full reference for the v2 node — every resource, operation, and field
+
+
+ Python / JavaScript / REST migration — the underlying API changes
+
+
+ Source code, issue tracker, release notes
+
+
+ The last 0.x release (deprecated)
+
+
diff --git a/integrations/n8n.mdx b/integrations/n8n.mdx
index 6065858..48bac3b 100644
--- a/integrations/n8n.mdx
+++ b/integrations/n8n.mdx
@@ -1,6 +1,8 @@
---
title: 'n8n'
+sidebarTitle: 'Overview'
description: 'Use ScrapeGraphAI inside n8n workflows — scrape, extract, crawl, monitor, and more, with no code'
+icon: '/logo/n8n.svg'
---
## Overview
diff --git a/introduction.mdx b/introduction.mdx
index 82d32c1..da0e4d8 100644
--- a/introduction.mdx
+++ b/introduction.mdx
@@ -1,19 +1,19 @@
---
-title: Introduction
-description: 'Welcome to ScrapeGraphAI - AI-Powered Web Data Extraction'
+title: "Introduction"
+description: "Welcome to ScrapeGraphAI - AI-Powered Web Data Extraction"
---
## Overview
@@ -26,12 +26,15 @@ description: 'Welcome to ScrapeGraphAI - AI-Powered Web Data Extraction'
Feed your AI agents with structured web data for enhanced decision-making
+
Extract and structure web data for research and analysis
-
+
+
Build comprehensive datasets from web sources
+
Create scraping-powered platforms and applications
@@ -57,13 +60,15 @@ description: 'Welcome to ScrapeGraphAI - AI-Powered Web Data Extraction'
Learn how to manage your account, monitor jobs, and access your API keys
+
Explore our core services: SmartScraper, SearchScraper, and Markdownify
+
Implement with Python, JavaScript, or integrate with LangChain and LlamaIndex
-
+
Detailed API documentation for direct integration
@@ -71,22 +76,25 @@ description: 'Welcome to ScrapeGraphAI - AI-Powered Web Data Extraction'
## Core Services
-- **Scrape**: Fetch a page in markdown, HTML, screenshot, JSON, links, images, summary, or branding
-- **Extract**: AI-powered structured data extraction from any URL, HTML, or markdown
-- **Crawl**: Asynchronous multi-page site crawling with start / stop / resume controls
-- **Monitor**: Cron-scheduled jobs that track page changes and fire webhooks
-
-
-
+- **Scrape**
+ : Fetch a page in markdown, HTML, screenshot, JSON, links, images, summary, or branding
+- **Extract**
+ : AI-powered structured data extraction from any URL, HTML, or markdown
+- **Crawl**
+ : Asynchronous multi-page site crawling with start / stop / resume controls
+- **Monitor**
+ : Cron-scheduled jobs that track page changes and fire webhooks
## Implementation Options
### Official SDKs
+
- Production-ready SDKs for Python and JavaScript
- Comprehensive error handling and retry logic
- Type hints and full IDE support
### Integrations
+
- Seamless integration with LangChain
- Native support for LlamaIndex
- Perfect for AI agent workflows
@@ -94,16 +102,16 @@ description: 'Welcome to ScrapeGraphAI - AI-Powered Web Data Extraction'
## Examples & Use Cases
Visit our [Cookbook](/cookbook/introduction) to explore real-world examples and implementation patterns:
+
- E-commerce data extraction
- News article scraping
- Research data collection
- Content aggregation
-
- ScrapeGraphAI is built with transparency in mind. Check out our open-source core at:
- [github.com/scrapegraphai/scrapegraph-ai](https://github.com/scrapegraphai/scrapegraph-ai)
+
+ ScrapeGraphAI is built with transparency in mind. Check out our open-source core at: [github.com/scrapegraphai/scrapegraph-ai](https://github.com/scrapegraphai/scrapegraph-ai)
Get your API key and start extracting data in minutes!
-
+
\ No newline at end of file
diff --git a/logo/make.png b/logo/make.png
new file mode 100644
index 0000000..d99a7b1
Binary files /dev/null and b/logo/make.png differ
diff --git a/logo/n8n.svg b/logo/n8n.svg
new file mode 100644
index 0000000..5416dd3
--- /dev/null
+++ b/logo/n8n.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/script.js b/script.js
index b8f949e..3eb05d4 100644
--- a/script.js
+++ b/script.js
@@ -18,36 +18,30 @@
return el;
}
- function findTocAside() {
- var candidates = document.querySelectorAll('aside, nav');
- for (var i = 0; i < candidates.length; i++) {
- var node = candidates[i];
+ function findTocList() {
+ var asides = document.querySelectorAll('aside, nav');
+ for (var i = 0; i < asides.length; i++) {
+ var node = asides[i];
var label = (node.getAttribute('aria-label') || '').toLowerCase();
if (label.indexOf('table of contents') !== -1 || label.indexOf('on this page') !== -1) {
+ var ul = node.querySelector('ul');
+ if (ul) return ul;
return node;
}
}
var toc = document.querySelector('#table-of-contents, [data-toc], .toc');
- if (toc) return toc.closest('aside') || toc.parentElement;
+ if (toc) return toc.querySelector('ul') || toc;
return null;
}
- function relaxClipping(node) {
- var current = node;
- for (var i = 0; i < 4 && current; i++) {
- current.style.overflow = 'visible';
- current.style.maxHeight = 'none';
- current = current.parentElement;
- }
- }
-
function inject() {
if (document.getElementById(CTA_ID)) return;
- var host = findTocAside();
- if (!host) return;
+ var anchor = findTocList();
+ if (!anchor) return;
+ var parent = anchor.parentElement;
+ if (!parent) return;
var cta = buildCta();
- host.appendChild(cta);
- relaxClipping(host);
+ parent.insertBefore(cta, anchor);
}
function schedule() {
diff --git a/sdks/javascript.mdx b/sdks/javascript.mdx
index f348ca8..90eb761 100644
--- a/sdks/javascript.mdx
+++ b/sdks/javascript.mdx
@@ -10,11 +10,14 @@ icon: "js"
alt="ScrapeGraph API Banner"
/>
-
-
+
+
[](https://badge.fury.io/js/scrapegraph-js)
-
+
+ Issues, PRs, and the changelog
+
+
[](https://opensource.org/licenses/MIT)
diff --git a/sdks/python.mdx b/sdks/python.mdx
index 0cb66c2..1d94fb6 100644
--- a/sdks/python.mdx
+++ b/sdks/python.mdx
@@ -4,13 +4,16 @@ description: 'Official Python SDK for ScrapeGraphAI v2'
icon: 'python'
---
-
-
+
+
[](https://badge.fury.io/py/scrapegraph-py)
-
+
[](https://pypi.org/project/scrapegraph-py/)
+
+ Issues, PRs, and the changelog
+
diff --git a/style.css b/style.css
index 09f23cf..0d4b937 100644
--- a/style.css
+++ b/style.css
@@ -8,7 +8,7 @@
display: block;
box-sizing: border-box;
width: 100%;
- max-width: min(260px, 100%);
+ max-width: 200px;
margin-top: 24px;
padding: 14px;
border-radius: 12px;