<?xml version="1.0" encoding="UTF-8"?>
<rss xmlns:content="http://purl.org/rss/1.0/modules/content/" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:taxo="http://purl.org/rss/1.0/modules/taxonomy/" version="2.0">
  <channel>
    <title>topic Re: HubSpot Links Crawler 2.0 user agent hitting protected pages in APIs &amp; Integrations</title>
    <link>https://community.hubspot.com/t5/APIs-Integrations/HubSpot-Links-Crawler-2-0-user-agent-hitting-protected-pages/m-p/229546#M8442</link>
    <description>&lt;P&gt;&lt;A class="mention" href="https://community.hubspot.com/u/derek_gervais"&gt;@Derek_Gervais&lt;/A&gt; Yes, they have been implemented since day 1 as the page path is accessing an authenticated account. Here is what it looks like:&lt;/P&gt;
&lt;PRE&gt;&lt;CODE class="lang-auto"&gt;# See http://www.robotstxt.org/wc/norobots.html for documentation on how to use the robots.txt file
#
# To ban all spiders from the entire site uncomment the next two lines:
User-agent: *
Disallow: /
User-agent: AdsBot-Google
Disallow: /
&lt;/CODE&gt;&lt;/PRE&gt;</description>
    <pubDate>Tue, 27 Feb 2018 15:46:08 GMT</pubDate>
    <dc:creator>terrance</dc:creator>
    <dc:date>2018-02-27T15:46:08Z</dc:date>
  </channel>
</rss>

