enabled: boolean

  • Default: true

Conditionally enable or disable the module.
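
For example, you might disable the module outside of production (a minimal sketch; the environment check is illustrative, not part of the module):

nuxt.config.ts
export default defineNuxtConfig({
  robots: {
    // only enable robots handling in production builds
    enabled: process.env.NODE_ENV === 'production'
  }
})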

allow: string[]

  • Default: []

Allow paths to be indexed for the * user-agent (all robots). A combined example with disallow is shown below.

disallow: string[]

  • Default: []

Disallow paths from being indexed for the * user-agent (all robots).
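
A minimal sketch combining both options (the paths are illustrative):

nuxt.config.ts
export default defineNuxtConfig({
  robots: {
    // applied to the * user-agent in the generated robots.txt
    allow: ['/public'],
    disallow: ['/internal', '/drafts']
  }
})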

metaTag: boolean

  • Default: true

Whether to add a <meta name="robots" ...> tag to the <head> of each page.
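
For example, to opt out of the meta tag and rely solely on robots.txt (a sketch):

nuxt.config.ts
export default defineNuxtConfig({
  robots: {
    metaTag: false
  }
})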

groups: RobotsGroupInput[]

  • Default: []

Define more granular rules for the robots.txt. Each group is a set of rules for specific user agent(s).

nuxt.config.ts
export default defineNuxtConfig({
  robots: {
    groups: [
      {
        userAgent: ['AdsBot-Google-Mobile', 'AdsBot-Google-Mobile-Apps'],
        disallow: ['/admin'],
        allow: ['/admin/login'],
        comments: 'Allow Google AdsBot to index the login page but not the admin pages'
      },
    ]
  }
})

sitemap: MaybeArray<string>

  • Default: []

The sitemap URL(s) for the site. If you have multiple sitemaps, you can provide an array of URLs.

You must either define the runtime config siteUrl or provide the sitemap as absolute URLs.

nuxt.config.ts
export default defineNuxtConfig({
  robots: {
    sitemap: [
      '/sitemap-one.xml',
      '/sitemap-two.xml',
    ],
  },
})
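
If siteUrl is not set, provide absolute URLs instead (a sketch; the domain is illustrative):

nuxt.config.ts
export default defineNuxtConfig({
  robots: {
    // a single string is also accepted (MaybeArray<string>)
    sitemap: 'https://example.com/sitemap.xml',
  },
})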

robotsEnabledValue: string

  • Default: 'index, follow, max-image-preview:large, max-snippet:-1, max-video-preview:-1'

The value to use when the page is indexable. A combined example with robotsDisabledValue is shown below.

robotsDisabledValue: string

  • Default: 'noindex, nofollow'

The value to use when the page is not indexable.
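
A sketch customizing both robotsEnabledValue and robotsDisabledValue (the values shown are illustrative):

nuxt.config.ts
export default defineNuxtConfig({
  robots: {
    // used for indexable pages
    robotsEnabledValue: 'index, follow',
    // used for non-indexable pages
    robotsDisabledValue: 'noindex, follow'
  }
})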

disallowNonIndexableRoutes: boolean

  • Default: false

Whether route rules that disallow indexing should be added to the /robots.txt file.
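
To opt in (a sketch):

nuxt.config.ts
export default defineNuxtConfig({
  robots: {
    disallowNonIndexableRoutes: true
  }
})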

mergeWithRobotsTxtPath: boolean | string

  • Default: true

Specify a robots.txt path to merge the config from, relative to the root directory.

When set to true, the default path of <publicDir>/robots.txt will be used.

When set to false, no merging will occur.
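
For example, merging from a custom file (the path is illustrative; it is resolved relative to the root directory):

nuxt.config.ts
export default defineNuxtConfig({
  robots: {
    mergeWithRobotsTxtPath: 'assets/custom-robots.txt'
  }
})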

blockNonSeoBots: boolean

  • Default: false

Blocks some non-SEO bots from crawling your site. This is not a replacement for a full-blown bot management solution, but it can help to reduce the load on your server.

See const.ts for the list of bots that are blocked.

nuxt.config.ts
export default defineNuxtConfig({
  robots: {
    blockNonSeoBots: true
  }
})

robotsTxt: boolean

  • Default: true

Whether to generate a robots.txt file. Useful to disable when your site is deployed under a base URL, since robots.txt is only valid at the root of a domain.
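
To disable generation (a sketch):

nuxt.config.ts
export default defineNuxtConfig({
  robots: {
    robotsTxt: false
  }
})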

cacheControl: string | false

  • Default: 'max-age=14400, must-revalidate'

Configure the Cache-Control header for the robots.txt file. By default it's cached for 4 hours and must be revalidated.

Providing false will set the header to 'no-store'.

nuxt.config.ts
export default defineNuxtConfig({
  robots: {
    cacheControl: 'max-age=14400, must-revalidate'
  }
})

disableNuxtContentIntegration: boolean

  • Default: undefined

Whether to disable the Nuxt Content integration.
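
To opt out of the integration (a sketch):

nuxt.config.ts
export default defineNuxtConfig({
  robots: {
    disableNuxtContentIntegration: true
  }
})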

debug: boolean

  • Default: false

Enables debug logs and a debug endpoint.
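
For example, enabling debug only during development (a sketch; the environment check is illustrative):

nuxt.config.ts
export default defineNuxtConfig({
  robots: {
    debug: process.env.NODE_ENV === 'development'
  }
})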

credits: boolean

  • Default: true

Control the module credit comment in the generated robots.txt file.

robots.txt
# START nuxt-robots (indexable) <- credits
# ...
# END nuxt-robots <- credits
nuxt.config.ts
export default defineNuxtConfig({
  robots: {
    credits: false
  }
})