Mirror of https://github.com/tnypxl/rollup.git (synced 2025-12-15 15:03:17 +00:00)
flatten scrape config to 'sites:'

* flatten scrape config to 'sites:'; update unit tests and readme
* remove check for file_extensions configuration
* show progress indication after 5 seconds
* add documentation to functions
* fix: remove MaxDepth and link extraction functionality
* fix: remove MaxDepth references from cmd/web.go
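The 'sites:' flattening itself happens in the config layer, which the hunks below do not show; only the scraper types and functions appear in this diff. As a purely hypothetical illustration of the shape a flattened config can map to in Go (the Config name and yaml tag are assumptions for illustration, not code from this repo):

	// Hypothetical sketch: site entries live directly under a top-level
	// "sites:" key instead of being nested under an intermediate section.
	type Config struct {
		Sites []SiteConfig `yaml:"sites"` // assumed yaml tag; not shown in this diff
	}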
@@ -45,7 +45,6 @@ type SiteConfig struct {
 	BaseURL          string
 	CSSLocator       string
 	ExcludeSelectors []string
-	MaxDepth         int
 	AllowedPaths     []string
 	ExcludePaths     []string
 	OutputAlias      string
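Reassembled from the context lines of this hunk, the struct now reads as follows (the hunk window covers only these seven lines, so any fields outside it are not shown):

	type SiteConfig struct {
		BaseURL          string
		CSSLocator       string
		ExcludeSelectors []string
		AllowedPaths     []string
		ExcludePaths     []string
		OutputAlias      string
	}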
@@ -156,57 +155,6 @@ func scrapeSingleURL(url string, site SiteConfig, results chan<- struct {
 	}{url, content, nil}
 }
 
-func scrapeSite(site SiteConfig, results chan<- struct {
-	url     string
-	content string
-	err     error
-}, limiter *rate.Limiter,
-) {
-	visited := make(map[string]bool)
-	queue := []string{site.BaseURL}
-
-	for len(queue) > 0 {
-		url := queue[0]
-		queue = queue[1:]
-
-		if visited[url] {
-			continue
-		}
-		visited[url] = true
-
-		if !isAllowedURL(url, site) {
-			continue
-		}
-
-		// Wait for rate limiter before making the request
-		err := limiter.Wait(context.Background())
-		if err != nil {
-			results <- struct {
-				url     string
-				content string
-				err     error
-			}{url, "", fmt.Errorf("rate limiter error: %v", err)}
-			continue
-		}
-
-		cssLocator, excludeSelectors := getOverrides(url, site)
-		content, err := scrapeURL(url, cssLocator, excludeSelectors)
-		results <- struct {
-			url     string
-			content string
-			err     error
-		}{url, content, err}
-
-		if len(visited) < site.MaxDepth {
-			links, _ := ExtractLinks(url)
-			for _, link := range links {
-				if !visited[link] && isAllowedURL(link, site) {
-					queue = append(queue, link)
-				}
-			}
-		}
-	}
-}
-
 func isAllowedURL(urlStr string, site SiteConfig) bool {
 	parsedURL, err := url.Parse(urlStr)
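With scrapeSite gone, BFS crawling from BaseURL via a visited set and a queue is dropped entirely; scrapeSingleURL (partially visible at the top of this hunk) remains as the per-URL scrape path. A minimal, self-contained sketch of that surviving pattern, using the same golang.org/x/time/rate limiter the removed code waited on (the URLs and the scrape stand-in are illustrative, not from this repo):

	package main

	import (
		"context"
		"fmt"

		"golang.org/x/time/rate"
	)

	func main() {
		// One request per second with a burst of 1 — illustrative values only.
		limiter := rate.NewLimiter(rate.Limit(1), 1)
		urls := []string{"https://example.com/a", "https://example.com/b"}

		for _, u := range urls {
			// Block until the limiter releases a token, mirroring the
			// limiter.Wait(context.Background()) call in the removed crawler.
			if err := limiter.Wait(context.Background()); err != nil {
				fmt.Printf("rate limiter error: %v\n", err)
				continue
			}
			fmt.Println("scraping", u) // stand-in for the real scrape call
		}
	}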
@@ -510,40 +458,6 @@ func scrollPage(page playwright.Page) error {
 	return nil
 }
 
-// ExtractLinks extracts all links from the given URL
-func ExtractLinks(urlStr string) ([]string, error) {
-	logger.Printf("Extracting links from URL: %s\n", urlStr)
-
-	page, err := browser.NewPage()
-	if err != nil {
-		return nil, fmt.Errorf("could not create page: %v", err)
-	}
-	defer page.Close()
-
-	if _, err = page.Goto(urlStr, playwright.PageGotoOptions{
-		WaitUntil: playwright.WaitUntilStateNetworkidle,
-	}); err != nil {
-		return nil, fmt.Errorf("could not go to page: %v", err)
-	}
-
-	links, err := page.Evaluate(`() => {
-		const anchors = document.querySelectorAll('a');
-		return Array.from(anchors).map(a => a.href);
-	}`)
-	if err != nil {
-		return nil, fmt.Errorf("could not extract links: %v", err)
-	}
-
-	var result []string
-	for _, link := range links.([]interface{}) {
-		// Normalize URL by removing trailing slash
-		normalizedLink := strings.TrimRight(link.(string), "/")
-		result = append(result, normalizedLink)
-	}
-
-	logger.Printf("Extracted %d links\n", len(result))
-	return result, nil
-}
-
 // ExtractContentWithCSS extracts content from HTML using a CSS selector
 func ExtractContentWithCSS(content, includeSelector string, excludeSelectors []string) (string, error) {
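Only the doc comment and signature of ExtractContentWithCSS fall inside this hunk; its body is unchanged and not shown. For orientation, here is a minimal sketch of what a selector-based extractor with exclude support can look like, written with goquery purely as an assumption (the repo's actual HTML parser and implementation are not visible in this diff):

	package main

	import (
		"fmt"
		"strings"

		"github.com/PuerkitoBio/goquery"
	)

	// extractWithCSS keeps the subtree matched by includeSelector and removes
	// any nodes matching excludeSelectors before returning the remaining HTML.
	func extractWithCSS(content, includeSelector string, excludeSelectors []string) (string, error) {
		doc, err := goquery.NewDocumentFromReader(strings.NewReader(content))
		if err != nil {
			return "", fmt.Errorf("could not parse HTML: %v", err)
		}
		sel := doc.Find(includeSelector)
		for _, ex := range excludeSelectors {
			sel.Find(ex).Remove()
		}
		return sel.Html() // inner HTML of the first match, exclusions removed
	}

	func main() {
		html := `<main><p>keep</p><aside>drop</aside></main>`
		out, _ := extractWithCSS(html, "main", []string{"aside"})
		fmt.Println(out) // <p>keep</p>
	}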