5 Commits

| Author | SHA1 | Message | Date |
| ------ | ---- | ------- | ---- |
|  | 333b9a366c | fix: Resolve playwright function deprecations and io/ioutil function deprecations. | 2024-09-24 15:13:36 -05:00 |
| Arik Jones (aider) | 1869dae89a | docs: update configuration section in README.md | 2024-09-22 18:36:17 -05:00 |
| Arik Jones (aider) | d3ff7cb862 | docs: Update README.md CLI flag documentation | 2024-09-22 18:33:24 -05:00 |
| Arik Jones (aider) | ea410e4abb | feat: Update README.md to reflect recent changes in functionality | 2024-09-22 18:31:06 -05:00 |
| Arik Jones (aider) | 7d8e25b1ad | docs: Add CHANGELOG.md with v0.0.3 release notes | 2024-09-22 18:20:25 -05:00 |
7 changed files with 343 additions and 287 deletions


@@ -4,16 +4,18 @@ Rollup aggregates the contents of text-based files and webpages into a markdown
 ## Features
-- File type filtering
-- Ignore patterns for excluding files
-- Support for code-generated file detection
-- Advanced web scraping functionality
-- Verbose logging option for detailed output
-- Exclusionary CSS selectors for web scraping
-- Support for multiple URLs in web scraping
+- File type filtering for targeted content aggregation
+- Ignore patterns for excluding specific files or directories
+- Support for code-generated file detection and exclusion
+- Advanced web scraping functionality with depth control
+- Verbose logging option for detailed operation insights
+- Exclusionary CSS selectors for precise web content extraction
+- Support for multiple URLs in web scraping operations
 - Configurable output format for web scraping (single file or separate files)
-- Configuration file support (YAML)
-- Generation of default configuration file
+- Flexible configuration file support (YAML)
+- Automatic generation of default configuration file
+- Custom output file naming
+- Concurrent processing for improved performance
 
 ## Installation
@@ -74,14 +76,27 @@ ignore:
 code_generated:
   - **/generated/**
 scrape:
-  urls:
-    - url: https://example.com
+  sites:
+    - base_url: https://example.com
       css_locator: .content
       exclude_selectors:
         - .ads
         - .navigation
+      max_depth: 2
+      allowed_paths:
+        - /blog
+        - /docs
+      exclude_paths:
+        - /admin
       output_alias: example
+      path_overrides:
+        - path: /special-page
+          css_locator: .special-content
+          exclude_selectors:
+            - .special-ads
   output_type: single
+  requests_per_second: 1.0
+  burst_limit: 3
 ```
 
 ## Examples
@@ -92,10 +107,10 @@ scrape:
 rollup files
 ```
 
-2. Web scraping with multiple URLs:
+2. Web scraping with multiple URLs and increased concurrency:
 ```bash
-rollup web --urls=https://example.com,https://another-example.com
+rollup web --urls=https://example.com,https://another-example.com --concurrent=8
 ```
 
 3. Generate a default configuration file:
@@ -104,15 +119,25 @@ scrape:
 rollup generate
 ```
 
-4. Use a custom configuration file:
+4. Use a custom configuration file and specify output:
 ```bash
-rollup files --config=my-config.yml
+rollup files --config=my-config.yml --output=project_summary.md
 ```
 
-5. Web scraping with separate output files:
+5. Web scraping with separate output files and custom timeout:
 ```bash
-rollup web --urls=https://example.com,https://another-example.com --output=separate
+rollup web --urls=https://example.com,https://another-example.com --output=separate --timeout=60
 ```
+
+6. Rollup files with specific types and ignore patterns:
+```bash
+rollup files --types=.go,.md --ignore=vendor/**,*_test.go
+```
+
+7. Web scraping with depth and CSS selector:
+```bash
+rollup web --urls=https://example.com --depth=2 --css=.main-content
+```
 
 ## Contributing
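
Note: the `requests_per_second` and `burst_limit` keys introduced above suggest a token-bucket limiter. Below is a minimal sketch of how those values would plausibly feed `golang.org/x/time/rate` (the package the scraper diff further down imports); the exact wiring is an assumption, not something shown in this diff.

```go
// Sketch only: assumes requests_per_second maps to the limiter rate and
// burst_limit to the bucket size passed to rate.NewLimiter.
package main

import (
	"context"
	"fmt"

	"golang.org/x/time/rate"
)

func main() {
	// 1.0 request per second with bursts of up to 3 (the README's example values).
	limiter := rate.NewLimiter(rate.Limit(1.0), 3)

	for i := 1; i <= 5; i++ {
		// Wait blocks until a token is available or the context is cancelled.
		if err := limiter.Wait(context.Background()); err != nil {
			fmt.Println("limiter error:", err)
			return
		}
		fmt.Println("request", i)
	}
}
```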


@@ -2,7 +2,7 @@ package cmd
 import (
 	"fmt"
-	"io/ioutil"
+	"io"
 	"log"
 	"net/url"
 	"os"
@@ -44,7 +44,7 @@ func runWeb(cmd *cobra.Command, args []string) error {
 	scraper.SetupLogger(verbose)
 	logger := log.New(os.Stdout, "WEB: ", log.LstdFlags)
 	if !verbose {
-		logger.SetOutput(ioutil.Discard)
+		logger.SetOutput(io.Discard)
 	}
 	logger.Printf("Starting web scraping process with verbose mode: %v", verbose)
 	scraperConfig.Verbose = verbose
@@ -139,7 +139,7 @@ func writeSingleFile(content map[string]string) error {
 	defer file.Close()
 	for url, c := range content {
-		_, err = fmt.Fprintf(file, "# Content from %s\n\n%s\n\n---\n\n", url, c)
+		_, err = fmt.Fprintf(file, "# ::: Content from %s\n\n%s\n\n---\n\n", url, c)
 		if err != nil {
 			return fmt.Errorf("error writing content to file: %v", err)
 		}
@@ -161,7 +161,7 @@ func writeMultipleFiles(content map[string]string) error {
return fmt.Errorf("error creating output file %s: %v", filename, err) return fmt.Errorf("error creating output file %s: %v", filename, err)
} }
_, err = file.WriteString(fmt.Sprintf("# Content from %s\n\n%s\n", url, c)) _, err = file.WriteString(fmt.Sprintf("# ::: Content from %s\n\n%s\n", url, c))
if err != nil { if err != nil {
file.Close() file.Close()
return fmt.Errorf("error writing content to file %s: %v", filename, err) return fmt.Errorf("error writing content to file %s: %v", filename, err)
@@ -215,8 +215,10 @@ func scrapeURL(urlStr string, depth int, visited map[string]bool) (string, error
 	return content, nil
 }
-var testExtractAndConvertContent = extractAndConvertContent
-var testExtractLinks = scraper.ExtractLinks
+var (
+	testExtractAndConvertContent = extractAndConvertContent
+	testExtractLinks             = scraper.ExtractLinks
+)
 func extractAndConvertContent(urlStr string) (string, error) {
 	content, err := scraper.FetchWebpageContent(urlStr)
@@ -240,7 +242,7 @@ func extractAndConvertContent(urlStr string) (string, error) {
 	if err != nil {
 		return "", fmt.Errorf("error parsing URL: %v", err)
 	}
-	header := fmt.Sprintf("# Content from %s\n\n", parsedURL.String())
+	header := fmt.Sprintf("# ::: Content from %s\n\n", parsedURL.String())
 	return header + markdown + "\n\n", nil
 }
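
Note: `ioutil.Discard` has been deprecated since Go 1.16, and `io.Discard` is its drop-in replacement; that substitution is all these `io/ioutil` hunks change. A minimal, runnable sketch of the verbose-logger pattern used in `runWeb` (the `newLogger` helper is illustrative, not a function from this codebase):

```go
package main

import (
	"io"
	"log"
	"os"
)

// newLogger mirrors the pattern above: log to stdout when verbose,
// otherwise route everything to io.Discard.
func newLogger(verbose bool) *log.Logger {
	logger := log.New(os.Stdout, "WEB: ", log.LstdFlags)
	if !verbose {
		logger.SetOutput(io.Discard) // silently drop all log output
	}
	return logger
}

func main() {
	newLogger(true).Println("verbose run") // printed
	newLogger(false).Println("quiet run")  // discarded
}
```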


@@ -103,7 +103,7 @@ func mockExtractAndConvertContent(urlStr string) (string, error) {
return "Mocked content for " + urlStr, nil return "Mocked content for " + urlStr, nil
} }
func mockExtractLinks(urlStr string) ([]string, error) { func mockExtractLinks() ([]string, error) {
return []string{"http://example.com/link1", "http://example.com/link2"}, nil return []string{"http://example.com/link1", "http://example.com/link2"}, nil
} }

docs/CHANGELOG.md (new file, 21 lines added)

@@ -0,0 +1,21 @@
+# Changelog
+
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
+and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+
+## [0.0.3] - 2024-09-22
+
+### Added
+- Implemented web scraping functionality using Playwright
+- Added support for CSS selectors to extract specific content
+- Introduced rate limiting for web requests
+- Created configuration options for scraping settings
+
+### Changed
+- Improved error handling and logging throughout the application
+- Enhanced URL parsing and validation
+
+### Fixed
+- Resolved issues with concurrent scraping operations
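
Note: the CSS-selector entry above presumably corresponds to the `css_locator` and `exclude_selectors` options shown in the README. A rough, self-contained sketch of that style of extraction using `github.com/PuerkitoBio/goquery` (an import the scraper package already has); `extractWithCSS` is an illustrative name, not the project's actual function:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/PuerkitoBio/goquery"
)

// extractWithCSS returns the inner HTML of the first node matching locator,
// after removing any nodes matching the exclude selectors.
func extractWithCSS(html, locator string, exclude []string) (string, error) {
	doc, err := goquery.NewDocumentFromReader(strings.NewReader(html))
	if err != nil {
		return "", err
	}
	sel := doc.Find(locator)
	for _, ex := range exclude {
		sel.Find(ex).Remove() // drop ads, navigation, etc.
	}
	return sel.Html()
}

func main() {
	html := `<div class="content">kept<div class="ads">dropped</div></div>`
	out, _ := extractWithCSS(html, ".content", []string{".ads"})
	fmt.Println(out) // kept
}
```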


@@ -45,10 +45,10 @@ scrape:
 	}
 	defer os.Remove(tmpfile.Name())
-	if _, err := tmpfile.Write(content); err != nil {
+	if _, err = tmpfile.Write(content); err != nil {
 		t.Fatalf("Failed to write to temp file: %v", err)
 	}
-	if err := tmpfile.Close(); err != nil {
+	if err = tmpfile.Close(); err != nil {
 		t.Fatalf("Failed to close temp file: %v", err)
 	}


@@ -1,21 +1,21 @@
 package scraper
 import (
+	"context"
 	"fmt"
-	"io/ioutil"
+	"io"
 	"log"
 	"math/rand"
 	"net/url"
 	"os"
 	"regexp"
 	"strings"
-	"time"
 	"sync"
-	"context"
+	"time"
+	md "github.com/JohannesKaufmann/html-to-markdown"
 	"github.com/PuerkitoBio/goquery"
 	"github.com/playwright-community/playwright-go"
-	md "github.com/JohannesKaufmann/html-to-markdown"
 	"golang.org/x/time/rate"
 )
@@ -81,7 +81,7 @@ func ScrapeSites(config Config) (map[string]string, error) {
 				fullURL := site.BaseURL + path
 				totalURLs++
 				logger.Printf("Queueing URL for scraping: %s\n", fullURL)
-				scrapeSingleURL(fullURL, site, config, results, limiter)
+				scrapeSingleURL(fullURL, site, results, limiter)
 			}
 		}(site)
 	}
@@ -108,11 +108,12 @@ func ScrapeSites(config Config) (map[string]string, error) {
 	return scrapedContent, nil
 }
-func scrapeSingleURL(url string, site SiteConfig, config Config, results chan<- struct {
+func scrapeSingleURL(url string, site SiteConfig, results chan<- struct {
 	url     string
 	content string
 	err     error
-}, limiter *rate.Limiter) {
+}, limiter *rate.Limiter,
+) {
 	logger.Printf("Starting to scrape URL: %s\n", url)
 	// Wait for rate limiter before making the request
@@ -155,11 +156,12 @@ func scrapeSingleURL(url string, site SiteConfig, config Config, results chan<-
 	}{url, content, nil}
 }
-func scrapeSite(site SiteConfig, config Config, results chan<- struct {
+func scrapeSite(site SiteConfig, results chan<- struct {
 	url     string
 	content string
 	err     error
-}, limiter *rate.Limiter) {
+}, limiter *rate.Limiter,
+) {
 	visited := make(map[string]bool)
 	queue := []string{site.BaseURL}
@@ -296,7 +298,7 @@ func SetupLogger(verbose bool) {
 	if verbose {
 		logger = log.New(os.Stdout, "SCRAPER: ", log.LstdFlags)
 	} else {
-		logger = log.New(ioutil.Discard, "", 0)
+		logger = log.New(io.Discard, "", 0)
 	}
 }
@@ -387,7 +389,9 @@ func FetchWebpageContent(urlStr string) (string, error) {
 	}
 	logger.Println("Waiting for body element")
-	_, err = page.WaitForSelector("body", playwright.PageWaitForSelectorOptions{
+
+	bodyElement := page.Locator("body")
+	err = bodyElement.WaitFor(playwright.LocatorWaitForOptions{
 		State: playwright.WaitForSelectorStateVisible,
 	})
 	if err != nil {
@@ -404,7 +408,7 @@ func FetchWebpageContent(urlStr string) (string, error) {
if content == "" { if content == "" {
logger.Println(" content is empty, falling back to body content") logger.Println(" content is empty, falling back to body content")
content, err = page.InnerHTML("body") content, err = bodyElement.InnerHTML()
if err != nil { if err != nil {
logger.Printf("Error getting body content: %v\n", err) logger.Printf("Error getting body content: %v\n", err)
return "", fmt.Errorf("could not get body content: %v", err) return "", fmt.Errorf("could not get body content: %v", err)
@@ -457,6 +461,8 @@ func scrollPage(page playwright.Page) error {
 		() => {
 			window.scrollTo(0, document.body.scrollHeight);
 			return document.body.scrollHeight;
+			// wait for 500 ms
+			new Promise(resolve => setTimeout(resolve, 500));
 		}
 	`
@@ -488,7 +494,9 @@ func scrollPage(page playwright.Page) error {
 		previousHeight = currentHeight
+
+		page.WaitForTimeout(500) // Wait for a while before scrolling again
 	}
 	logger.Println("Scrolling back to top")


@@ -1,13 +1,13 @@
 package scraper
 import (
-	"testing"
+	"io"
+	"log"
 	"net/http"
 	"net/http/httptest"
-	"strings"
 	"reflect"
-	"log"
-	"io/ioutil"
+	"strings"
+	"testing"
 )
func TestIsAllowedURL(t *testing.T) { func TestIsAllowedURL(t *testing.T) {
@@ -73,7 +73,7 @@ func TestGetOverrides(t *testing.T) {
 func TestExtractContentWithCSS(t *testing.T) {
 	// Initialize logger for testing
-	logger = log.New(ioutil.Discard, "", 0)
+	logger = log.New(io.Discard, "", 0)
 	html := `
 	<html>