# See https://www.robotstxt.org/robotstxt.html for documentation on how to use the robots.txt file
#
# A basic robots.txt file which allows specific pages, plus wildcard-pattern matches of
# pages, and disallows crawling of every other page, for all user agents.
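#
# The * wildcard and the $ end-of-URL anchor used below are extensions honored by major
# crawlers such as Googlebot and Bingbot; they are not part of the original robots.txt
# standard, so stricter parsers may treat them as literal characters.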
User-agent: *
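# Allow only the homepage itself; $ anchors the pattern to the end of the URL.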
Allow: /$
Allow: /login
Allow: /register
Allow: /employer_registrations/new
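# In the following patterns, * matches any sequence of characters (typically a record ID or slug).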
Allow: /career_fairs/*/student_preview
Allow: /career_fairs/*/employer_preview
Allow: /events/*/share_preview
Allow: /jobs/*/share_preview
Allow: /employers
Allow: /job_role_groups
Allow: /questions
Allow: /favicon-32x32.png
Allow: /favicon-16x16.png
Allow: /favicon.png
Allow: /favicon.ico
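# Profile pages are crawlable, but their posts sub-pages are not. Googlebot-style parsers
# apply the most specific (longest) matching rule regardless of order; the Disallow is
# listed first anyway for parsers that stop at the first match.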
Disallow: /profiles/*/posts
Allow: /profiles/*
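# Block everything not explicitly allowed above.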
Disallow: /
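#
# To spot-check these rules locally, one option is Python's urllib.robotparser (a minimal
# sketch; example.com is a placeholder host). Note that it implements only the original
# standard, so rules using * and $ are not evaluated the way Googlebot evaluates them:
#
#   from urllib.robotparser import RobotFileParser
#   rp = RobotFileParser()
#   with open("robots.txt") as f:
#       rp.parse(f.read().splitlines())
#   rp.can_fetch("*", "https://example.com/login")    # True: explicitly allowed
#   rp.can_fetch("*", "https://example.com/private")  # False: caught by Disallow: /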