From 96b30ae25ecf9b849522568496c305a8f62f8c66 Mon Sep 17 00:00:00 2001
From: Joshua Lochner
Date: Tue, 22 Oct 2024 05:26:26 +0000
Subject: [PATCH] Fix guide URLs in README

---
 README.md                          | 4 ++--
 docs/snippets/1_quick-tour.snippet | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/README.md b/README.md
index c2c0cb6a8..6f7ed3f70 100644
--- a/README.md
+++ b/README.md
@@ -108,7 +108,7 @@ const pipe = await pipeline('sentiment-analysis', 'Xenova/distilbert-base-uncase
 });
 ```
 
-For more information, check out the [WebGPU guide](./guides/webgpu).
+For more information, check out the [WebGPU guide](https://huggingface.co/docs/transformers.js/guides/webgpu).
 
 > [!WARNING]
 > The WebGPU API is still experimental in many browsers, so if you run into any issues,
@@ -118,7 +118,7 @@ In resource-constrained environments, such as web browsers, it is advisable to u
 the model to lower bandwidth and optimize performance. This can be achieved by adjusting the `dtype` option,
 which allows you to select the appropriate data type for your model. While the available options may vary
 depending on the specific model, typical choices include `"fp32"` (default for WebGPU), `"fp16"`, `"q8"`
-(default for WASM), and `"q4"`. For more information, check out the [quantization guide](./guides/dtypes).
+(default for WASM), and `"q4"`. For more information, check out the [quantization guide](https://huggingface.co/docs/transformers.js/guides/dtypes).
 ```javascript
 // Run the model at 4-bit quantization
 const pipe = await pipeline('sentiment-analysis', 'Xenova/distilbert-base-uncased-finetuned-sst-2-english', {
diff --git a/docs/snippets/1_quick-tour.snippet b/docs/snippets/1_quick-tour.snippet
index 27fdd2214..ddf4d5744 100644
--- a/docs/snippets/1_quick-tour.snippet
+++ b/docs/snippets/1_quick-tour.snippet
@@ -52,7 +52,7 @@ const pipe = await pipeline('sentiment-analysis', 'Xenova/distilbert-base-uncase
 });
 ```
 
-For more information, check out the [WebGPU guide](./guides/webgpu).
+For more information, check out the [WebGPU guide](/guides/webgpu).
 
 > [!WARNING]
 > The WebGPU API is still experimental in many browsers, so if you run into any issues,
@@ -62,7 +62,7 @@ In resource-constrained environments, such as web browsers, it is advisable to u
 the model to lower bandwidth and optimize performance. This can be achieved by adjusting the `dtype` option,
 which allows you to select the appropriate data type for your model. While the available options may vary
 depending on the specific model, typical choices include `"fp32"` (default for WebGPU), `"fp16"`, `"q8"`
-(default for WASM), and `"q4"`. For more information, check out the [quantization guide](./guides/dtypes).
+(default for WASM), and `"q4"`. For more information, check out the [quantization guide](/guides/dtypes).
 ```javascript
 // Run the model at 4-bit quantization
 const pipe = await pipeline('sentiment-analysis', 'Xenova/distilbert-base-uncased-finetuned-sst-2-english', {