# Webamon.com Robots.txt
# Updated: January 2025

# Allow all web crawlers to access the site
User-agent: *
Allow: /

# Specific rules for major search engines
User-agent: Googlebot
Allow: /
Crawl-delay: 1

User-agent: Bingbot
Allow: /
Crawl-delay: 1

User-agent: Slurp
Allow: /
Crawl-delay: 2

# AI/LLM Crawlers - Allow access for better AI search visibility
User-agent: GPTBot
Allow: /

User-agent: ChatGPT-User
Allow: /

User-agent: CCBot
Allow: /

User-agent: anthropic-ai
Allow: /

User-agent: Claude-Web
Allow: /

# Disallow access to potentially sensitive or unnecessary directories
Disallow: /admin/
Disallow: /private/
Disallow: /temp/
Disallow: /tmp/
Disallow: /cache/
Disallow: /logs/
Disallow: /backup/
Disallow: /config/
Disallow: /.git/
Disallow: /node_modules/
Disallow: /vendor/

# Disallow access to file types that shouldn't be indexed
Disallow: /*.log$
Disallow: /*.sql$
Disallow: /*.gz$
Disallow: /*.tar$
Disallow: /*.zip$
Disallow: /*.bak$

# Allow access to important assets
Allow: /css/
Allow: /js/
Allow: /img/
Allow: /fonts/

# Block access to potentially sensitive files
Disallow: /.htaccess
Disallow: /.htpasswd
Disallow: /wp-config.php
Disallow: /config.php
Disallow: /.env

# LLM.txt files for AI/LLM content discovery
# These files provide structured information for Large Language Models
Allow: /llm.txt
Allow: /llm-full.txt

# Sitemap location (update with your actual sitemap URL when created)
Sitemap: https://webamon.com/sitemap.xml

# Additional sitemaps for different content types
# Sitemap: https://webamon.com/sitemap-pages.xml
# Sitemap: https://webamon.com/sitemap-blog.xml
# Sitemap: https://webamon.com/sitemap-api-docs.xml

# Host declaration (helps with canonicalization)
Host: https://webamon.com