diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 3a4b17e7356..77a078c5fe9 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -57,10 +57,10 @@ jobs: uses: korandoru/hawkeye@v1 - name: Cargo format run: cargo fmt --all -- --check - - name: Cargo doc - run: cargo doc --no-deps --all-features - name: Cargo clippy run: cargo clippy --all-targets --all-features --workspace -- -D warnings + - name: Cargo doc + run: cargo doc --lib --no-deps --all-features msrv_check: runs-on: ubuntu-latest @@ -103,7 +103,7 @@ jobs: - uses: actions/checkout@v3 - uses: Swatinem/rust-cache@v2 - name: Build - run: cargo build --workspace + run: cargo build -p opendal -p oli -p object_store_opendal build_all_features: runs-on: ${{ matrix.os }} @@ -129,7 +129,7 @@ jobs: - uses: Swatinem/rust-cache@v2 - name: Build - run: cargo build --all-features --workspace + run: cargo build --all-features unit: runs-on: ${{ matrix.os }} diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index d30bb9d3754..b71b35eb116 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -36,7 +36,7 @@ jobs: - uses: actions-rs/cargo@v1 with: command: doc - args: --no-deps --all-features -p opendal + args: --lib --no-deps --all-features -p opendal env: LD_LIBRARY_PATH: ${{ env.JAVA_HOME }}/lib/server:${{ env.LD_LIBRARY_PATH }} diff --git a/benches/ops/read.rs b/benches/ops/read.rs index 7e8df051c19..e9cbbe7eac5 100644 --- a/benches/ops/read.rs +++ b/benches/ops/read.rs @@ -55,8 +55,7 @@ fn bench_read_full(c: &mut Criterion, name: &str, op: Operator) { group.bench_with_input(size.to_string(), &(op.clone(), &path), |b, (op, path)| { b.to_async(&*TOKIO).iter(|| async { let r = op - .object(path) - .range_reader(0..=size.bytes() as u64) + .range_reader(path, 0..=size.bytes() as u64) .await .unwrap(); io::copy(r, &mut io::sink()).await.unwrap(); @@ -89,7 +88,7 @@ fn bench_read_part(c: &mut Criterion, name: &str, op: Operator) { 
group.throughput(criterion::Throughput::Bytes(size.bytes() as u64)); group.bench_with_input(size.to_string(), &(op.clone(), &path), |b, (op, path)| { b.to_async(&*TOKIO).iter(|| async { - let r = op.object(path).range_reader(offset..).await.unwrap(); + let r = op.range_reader(path, offset..).await.unwrap(); io::copy(r, &mut io::sink()).await.unwrap(); }) }); @@ -128,8 +127,7 @@ fn bench_read_parallel(c: &mut Criterion, name: &str, op: Operator) { .map(|_| async { let mut buf = buf.clone(); let mut r = op - .object(path) - .range_reader(offset..=offset + size.bytes() as u64) + .range_reader(path, offset..=offset + size.bytes() as u64) .await .unwrap(); r.read_exact(&mut buf).await.unwrap(); diff --git a/benches/ops/utils.rs b/benches/ops/utils.rs index fc7579006b3..4247f3e9f99 100644 --- a/benches/ops/utils.rs +++ b/benches/ops/utils.rs @@ -66,12 +66,7 @@ impl TempData { } pub fn generate(op: Operator, path: &str, content: Bytes) -> Self { - TOKIO.block_on(async { - op.object(path) - .write(content) - .await - .expect("create test data") - }); + TOKIO.block_on(async { op.write(path, content).await.expect("create test data") }); Self { op, @@ -83,11 +78,7 @@ impl TempData { impl Drop for TempData { fn drop(&mut self) { TOKIO.block_on(async { - self.op - .object(&self.path) - .delete() - .await - .expect("cleanup test data"); + self.op.delete(&self.path).await.expect("cleanup test data"); }) } } diff --git a/benches/ops/write.rs b/benches/ops/write.rs index 08a6cb3d677..a046f93ee15 100644 --- a/benches/ops/write.rs +++ b/benches/ops/write.rs @@ -53,7 +53,7 @@ fn bench_write_once(c: &mut Criterion, name: &str, op: Operator) { &(op.clone(), &path, content.clone()), |b, (op, path, content)| { b.to_async(&*TOKIO).iter(|| async { - op.object(path).write(content.clone()).await.unwrap(); + op.write(path, content.clone()).await.unwrap(); }) }, ); diff --git a/binaries/oli/src/commands/cp.rs b/binaries/oli/src/commands/cp.rs index 022b80bb9e3..0f44f95e160 100644 --- 
a/binaries/oli/src/commands/cp.rs +++ b/binaries/oli/src/commands/cp.rs @@ -27,15 +27,14 @@ pub async fn main(args: Option) -> Result<()> { .get_one::("source") .ok_or_else(|| anyhow!("missing source"))?; let (src_op, src_path) = parse_location(src)?; - let src_o = src_op.object(src_path); let dst = args .get_one::("destination") .ok_or_else(|| anyhow!("missing target"))?; let (dst_op, dst_path) = parse_location(dst)?; - let mut dst_w = dst_op.object(dst_path).writer().await?; + let mut dst_w = dst_op.writer(dst_path).await?; - let reader = src_o.reader().await?; + let reader = src_op.reader(src_path).await?; let buf_reader = futures::io::BufReader::with_capacity(8 * 1024 * 1024, reader); futures::io::copy_buf(buf_reader, &mut dst_w).await?; Ok(()) diff --git a/binaries/oli/src/utils/location.rs b/binaries/oli/src/utils/location.rs index a7f5b67b334..d525c741943 100644 --- a/binaries/oli/src/utils/location.rs +++ b/binaries/oli/src/utils/location.rs @@ -35,7 +35,7 @@ pub fn parse_location(s: &str) -> Result<(Operator, &str)> { None => s, }; - return Ok((Operator::create(fs)?.finish(), filename)); + return Ok((Operator::new(fs)?.finish(), filename)); } let s = s.splitn(2, "://").collect::>(); @@ -48,7 +48,7 @@ pub fn parse_location(s: &str) -> Result<(Operator, &str)> { let (bucket, location) = parse_s3_uri(s[1]); let mut builder = services::S3::default(); builder.bucket(bucket); - Ok((Operator::create(builder)?.finish(), location)) + Ok((Operator::new(builder)?.finish(), location)) } _ => todo!(), } diff --git a/bindings/nodejs/.gitignore b/bindings/nodejs/.gitignore index bf69358e301..d8c63114c5b 100644 --- a/bindings/nodejs/.gitignore +++ b/bindings/nodejs/.gitignore @@ -121,7 +121,7 @@ dist .AppleDouble .LSOverride -# Icon must end with two +# Icon must end with two Icon @@ -190,3 +190,5 @@ Cargo.lock .yarn *.node +generated.js +index.d.ts diff --git a/bindings/nodejs/Cargo.toml b/bindings/nodejs/Cargo.toml index 042e082dd11..9cfad46561e 100644 --- 
a/bindings/nodejs/Cargo.toml +++ b/bindings/nodejs/Cargo.toml @@ -22,7 +22,6 @@ version = "0.0.0" crate-type = ["cdylib"] [dependencies] -# Default enable napi4 feature, see https://nodejs.org/api/n-api.html#node-api-version-matrix futures = "0.3.26" napi = { version = "2.11.2", default-features = false, features = [ "napi6", diff --git a/bindings/nodejs/__test__/index.spec.mjs b/bindings/nodejs/__test__/index.spec.mjs index 885f694185b..d58f7e0582d 100644 --- a/bindings/nodejs/__test__/index.spec.mjs +++ b/bindings/nodejs/__test__/index.spec.mjs @@ -23,18 +23,16 @@ test('test memory write & read', async (t) => { let content = "hello world" let path = 'test' - let o = op.object(path) + await op.write(path, new TextEncoder().encode(content)) - await o.write(new TextEncoder().encode(content)) - - let meta = await o.stat() + let meta = await op.stat(path) t.is(meta.mode, 0) t.is(meta.contentLength, BigInt(content.length)) - let res = await o.read() + let res = await op.read(path) t.is(content, new TextDecoder().decode(res)) - await o.delete() + await op.delete(path) }) @@ -44,18 +42,16 @@ test('test memory write & read synchronously', (t) => { let content = "hello world" let path = 'test' - let o = op.object(path) - - o.writeSync(new TextEncoder().encode(content)) + op.writeSync(path, new TextEncoder().encode(content)) - let meta = o.statSync() + let meta = op.statSync(path) t.is(meta.mode, 0) t.is(meta.contentLength, BigInt(content.length)) - let res = o.readSync() + let res = op.readSync(path) t.is(content, new TextDecoder().decode(res)) - o.deleteSync() + op.deleteSync(path) }) test('test scan', async (t) => { @@ -63,28 +59,26 @@ test('test scan', async (t) => { let content = "hello world" let pathPrefix = 'test' let paths = new Array(10).fill(0).map((_, index) => pathPrefix + index) - let objects = paths.map(p => op.object(p)) - let writeTasks = objects.map((o) => new Promise(async (resolve, reject) => { - await o.write(new TextEncoder().encode(content)) + let 
writeTasks = paths.map((path) => new Promise(async (resolve, reject) => { + await op.write(path, new TextEncoder().encode(content)) resolve() })) await Promise.all(writeTasks) - let dir = op.object("") - let objList = await dir.scan() - let objectCount = 0 + let objList = await op.scan("") + let entryCount = 0 while (true) { - let o = await objList.next() - if (o === null) break - objectCount++ - t.is(new TextDecoder().decode(await o.read()), content) + let entry = await objList.next() + if (entry === null) break + entryCount++ + t.is(new TextDecoder().decode(await op.read(entry.path())), content) } - t.is(objectCount, paths.length) + t.is(entryCount, paths.length) - objects.forEach(async (o) => { - await o.delete() + paths.forEach(async (path) => { + await op.delete(path) }) }) diff --git a/bindings/nodejs/generated.js b/bindings/nodejs/generated.js deleted file mode 100644 index 7174a5bf813..00000000000 --- a/bindings/nodejs/generated.js +++ /dev/null @@ -1,256 +0,0 @@ -const { existsSync, readFileSync } = require('fs') -const { join } = require('path') - -const { platform, arch } = process - -let nativeBinding = null -let localFileExisted = false -let loadError = null - -function isMusl() { - // For Node 10 - if (!process.report || typeof process.report.getReport !== 'function') { - try { - const lddPath = require('child_process').execSync('which ldd').toString().trim(); - return readFileSync(lddPath, 'utf8').includes('musl') - } catch (e) { - return true - } - } else { - const { glibcVersionRuntime } = process.report.getReport().header - return !glibcVersionRuntime - } -} - -switch (platform) { - case 'android': - switch (arch) { - case 'arm64': - localFileExisted = existsSync(join(__dirname, 'opendal.android-arm64.node')) - try { - if (localFileExisted) { - nativeBinding = require('./opendal.android-arm64.node') - } else { - nativeBinding = require('opendal-android-arm64') - } - } catch (e) { - loadError = e - } - break - case 'arm': - localFileExisted = 
existsSync(join(__dirname, 'opendal.android-arm-eabi.node')) - try { - if (localFileExisted) { - nativeBinding = require('./opendal.android-arm-eabi.node') - } else { - nativeBinding = require('opendal-android-arm-eabi') - } - } catch (e) { - loadError = e - } - break - default: - throw new Error(`Unsupported architecture on Android ${arch}`) - } - break - case 'win32': - switch (arch) { - case 'x64': - localFileExisted = existsSync( - join(__dirname, 'opendal.win32-x64-msvc.node') - ) - try { - if (localFileExisted) { - nativeBinding = require('./opendal.win32-x64-msvc.node') - } else { - nativeBinding = require('opendal-win32-x64-msvc') - } - } catch (e) { - loadError = e - } - break - case 'ia32': - localFileExisted = existsSync( - join(__dirname, 'opendal.win32-ia32-msvc.node') - ) - try { - if (localFileExisted) { - nativeBinding = require('./opendal.win32-ia32-msvc.node') - } else { - nativeBinding = require('opendal-win32-ia32-msvc') - } - } catch (e) { - loadError = e - } - break - case 'arm64': - localFileExisted = existsSync( - join(__dirname, 'opendal.win32-arm64-msvc.node') - ) - try { - if (localFileExisted) { - nativeBinding = require('./opendal.win32-arm64-msvc.node') - } else { - nativeBinding = require('opendal-win32-arm64-msvc') - } - } catch (e) { - loadError = e - } - break - default: - throw new Error(`Unsupported architecture on Windows: ${arch}`) - } - break - case 'darwin': - localFileExisted = existsSync(join(__dirname, 'opendal.darwin-universal.node')) - try { - if (localFileExisted) { - nativeBinding = require('./opendal.darwin-universal.node') - } else { - nativeBinding = require('opendal-darwin-universal') - } - break - } catch {} - switch (arch) { - case 'x64': - localFileExisted = existsSync(join(__dirname, 'opendal.darwin-x64.node')) - try { - if (localFileExisted) { - nativeBinding = require('./opendal.darwin-x64.node') - } else { - nativeBinding = require('opendal-darwin-x64') - } - } catch (e) { - loadError = e - } - break - case 
'arm64': - localFileExisted = existsSync( - join(__dirname, 'opendal.darwin-arm64.node') - ) - try { - if (localFileExisted) { - nativeBinding = require('./opendal.darwin-arm64.node') - } else { - nativeBinding = require('opendal-darwin-arm64') - } - } catch (e) { - loadError = e - } - break - default: - throw new Error(`Unsupported architecture on macOS: ${arch}`) - } - break - case 'freebsd': - if (arch !== 'x64') { - throw new Error(`Unsupported architecture on FreeBSD: ${arch}`) - } - localFileExisted = existsSync(join(__dirname, 'opendal.freebsd-x64.node')) - try { - if (localFileExisted) { - nativeBinding = require('./opendal.freebsd-x64.node') - } else { - nativeBinding = require('opendal-freebsd-x64') - } - } catch (e) { - loadError = e - } - break - case 'linux': - switch (arch) { - case 'x64': - if (isMusl()) { - localFileExisted = existsSync( - join(__dirname, 'opendal.linux-x64-musl.node') - ) - try { - if (localFileExisted) { - nativeBinding = require('./opendal.linux-x64-musl.node') - } else { - nativeBinding = require('opendal-linux-x64-musl') - } - } catch (e) { - loadError = e - } - } else { - localFileExisted = existsSync( - join(__dirname, 'opendal.linux-x64-gnu.node') - ) - try { - if (localFileExisted) { - nativeBinding = require('./opendal.linux-x64-gnu.node') - } else { - nativeBinding = require('opendal-linux-x64-gnu') - } - } catch (e) { - loadError = e - } - } - break - case 'arm64': - if (isMusl()) { - localFileExisted = existsSync( - join(__dirname, 'opendal.linux-arm64-musl.node') - ) - try { - if (localFileExisted) { - nativeBinding = require('./opendal.linux-arm64-musl.node') - } else { - nativeBinding = require('opendal-linux-arm64-musl') - } - } catch (e) { - loadError = e - } - } else { - localFileExisted = existsSync( - join(__dirname, 'opendal.linux-arm64-gnu.node') - ) - try { - if (localFileExisted) { - nativeBinding = require('./opendal.linux-arm64-gnu.node') - } else { - nativeBinding = require('opendal-linux-arm64-gnu') - } 
- } catch (e) { - loadError = e - } - } - break - case 'arm': - localFileExisted = existsSync( - join(__dirname, 'opendal.linux-arm-gnueabihf.node') - ) - try { - if (localFileExisted) { - nativeBinding = require('./opendal.linux-arm-gnueabihf.node') - } else { - nativeBinding = require('opendal-linux-arm-gnueabihf') - } - } catch (e) { - loadError = e - } - break - default: - throw new Error(`Unsupported architecture on Linux: ${arch}`) - } - break - default: - throw new Error(`Unsupported OS: ${platform}, architecture: ${arch}`) -} - -if (!nativeBinding) { - if (loadError) { - throw loadError - } - throw new Error(`Failed to load native binding`) -} - -const { Operator, Scheme, ObjectMode, ObjectMetadata, ObjectLister, DataObject } = nativeBinding - -module.exports.Operator = Operator -module.exports.Scheme = Scheme -module.exports.ObjectMode = ObjectMode -module.exports.ObjectMetadata = ObjectMetadata -module.exports.ObjectLister = ObjectLister -module.exports.DataObject = DataObject diff --git a/bindings/nodejs/index.d.ts b/bindings/nodejs/index.d.ts deleted file mode 100644 index 20423f87f21..00000000000 --- a/bindings/nodejs/index.d.ts +++ /dev/null @@ -1,93 +0,0 @@ -/* tslint:disable */ -/* eslint-disable */ - -/* auto-generated by NAPI-RS */ - -export const enum Scheme { - /** [azblob][crate::services::Azblob]: Azure Storage Blob services. */ - Azblob = 0, - /** [azdfs][crate::services::Azdfs]: Azure Data Lake Storage Gen2. */ - Azdfs = 1, - /** [dashmap][crate::services::Dashmap]: dashmap backend support. */ - Dashmap = 2, - /** [fs][crate::services::Fs]: POSIX alike file system. */ - Fs = 3, - /** [gcs][crate::services::Gcs]: Google Cloud Storage backend. */ - Gcs = 4, - /** [ghac][crate::services::Ghac]: Github Action Cache services. */ - Ghac = 5, - /** [hdfs][crate::services::Hdfs]: Hadoop Distributed File System. */ - Hdfs = 6, - /** [http][crate::services::Http]: HTTP backend. */ - Http = 7, - /** [ftp][crate::services::Ftp]: FTP backend. 
*/ - Ftp = 8, - /** [ipmfs][crate::services::Ipfs]: IPFS HTTP Gateway */ - Ipfs = 9, - /** [ipmfs][crate::services::Ipmfs]: IPFS mutable file system */ - Ipmfs = 10, - /** [memcached][crate::services::Memcached]: Memcached service support. */ - Memcached = 11, - /** [memory][crate::services::Memory]: In memory backend support. */ - Memory = 12, - /** [moka][crate::services::Moka]: moka backend support. */ - Moka = 13, - /** [obs][crate::services::Obs]: Huawei Cloud OBS services. */ - Obs = 14, - /** [oss][crate::services::Oss]: Aliyun Object Storage Services */ - Oss = 15, - /** [redis][crate::services::Redis]: Redis services */ - Redis = 16, - /** [rocksdb][crate::services::Rocksdb]: RocksDB services */ - Rocksdb = 17, - /** [s3][crate::services::S3]: AWS S3 alike services. */ - S3 = 18, - /** [sled][crate::services::Sled]: Sled services */ - Sled = 19, - /** [webdav][crate::services::Webdav]: WebDAV support. */ - Webdav = 20, - /** [webhdfs][crate::services::Webhdfs]: WebHDFS RESTful API Services */ - Webhdfs = 21 -} -export const enum ObjectMode { - /** FILE means the object has data to read. */ - FILE = 0, - /** DIR means the object can be listed. */ - DIR = 1, - /** Unknown means we don't know what we can do on this object. */ - Unknown = 2 -} -export class Operator { - constructor(serviceType: Scheme, options?: Record | undefined | null) - object(path: string): DataObject -} -export class ObjectMetadata { - /** Mode of this object. */ - get mode(): ObjectMode - /** Content-Disposition of this object */ - get contentDisposition(): string | null - /** Content Length of this object */ - get contentLength(): bigint | null - /** Content MD5 of this object. */ - get contentMd5(): string | null - /** Content Type of this object. */ - get contentType(): string | null - /** ETag of this object. 
*/ - get etag(): string | null - /** Last Modified of this object.(UTC) */ - get lastModified(): string | null -} -export class ObjectLister { - next(): Promise -} -export class DataObject { - stat(): Promise - statSync(): ObjectMetadata - write(content: Buffer): Promise - writeSync(content: Buffer): void - read(): Promise - readSync(): Buffer - scan(): Promise - delete(): Promise - deleteSync(): void -} diff --git a/bindings/nodejs/src/lib.rs b/bindings/nodejs/src/lib.rs index f57a84ae77b..2574a7bc39a 100644 --- a/bindings/nodejs/src/lib.rs +++ b/bindings/nodejs/src/lib.rs @@ -84,12 +84,12 @@ impl Operator { let ops = options.unwrap_or_default(); match service_type { Scheme::Fs => Ok(Self( - opendal::Operator::create(opendal::services::Fs::from_map(ops)) + opendal::Operator::new(opendal::services::Fs::from_map(ops)) .unwrap() .finish(), )), Scheme::Memory => Ok(Self( - opendal::Operator::create(opendal::services::Memory::default()) + opendal::Operator::new(opendal::services::Memory::default()) .unwrap() .finish(), )), @@ -98,13 +98,79 @@ impl Operator { } #[napi] - pub fn object(&self, path: String) -> DataObject { - DataObject(self.0.object(&path)) + pub async fn stat(&self, path: String) -> Result { + let meta = self.0.stat(&path).await.map_err(format_napi_error).unwrap(); + + Ok(Metadata(meta)) + } + + #[napi(js_name = "statSync")] + pub fn blocking_stat(&self, path: String) -> Result { + let meta = self + .0 + .blocking() + .stat(&path) + .map_err(format_napi_error) + .unwrap(); + + Ok(Metadata(meta)) + } + + #[napi] + pub async fn write(&self, path: String, content: Buffer) -> Result<()> { + let c = content.as_ref().to_owned(); + self.0.write(&path, c).await.map_err(format_napi_error) + } + + #[napi(js_name = "writeSync")] + pub fn blocking_write(&self, path: String, content: Buffer) -> Result<()> { + let c = content.as_ref().to_owned(); + self.0.blocking().write(&path, c).map_err(format_napi_error) + } + + #[napi] + pub async fn read(&self, path: String) -> 
Result { + let res = self.0.read(&path).await.map_err(format_napi_error)?; + Ok(res.into()) + } + + #[napi(js_name = "readSync")] + pub fn blocking_read(&self, path: String) -> Result { + let res = self.0.blocking().read(&path).map_err(format_napi_error)?; + Ok(res.into()) + } + + #[napi] + pub async fn scan(&self, path: String) -> Result { + Ok(Lister( + self.0.scan(&path).await.map_err(format_napi_error).unwrap(), + )) + } + + #[napi] + pub async fn delete(&self, path: String) -> Result<()> { + self.0.delete(&path).await.map_err(format_napi_error) + } + + #[napi(js_name = "deleteSync")] + pub fn blocking_delete(&self, path: String) -> Result<()> { + self.0.blocking().delete(&path).map_err(format_napi_error) + } +} + +#[napi] +pub struct Entry(opendal::Entry); + +#[napi] +impl Entry { + #[napi] + pub fn path(&self) -> String { + self.0.path().to_string() } } #[napi] -pub enum ObjectMode { +pub enum EntryMode { /// FILE means the object has data to read. FILE, /// DIR means the object can be listed. @@ -115,17 +181,17 @@ pub enum ObjectMode { #[allow(dead_code)] #[napi] -pub struct ObjectMetadata(opendal::Metadata); +pub struct Metadata(opendal::Metadata); #[napi] -impl ObjectMetadata { +impl Metadata { /// Mode of this object. 
#[napi(getter)] - pub fn mode(&self) -> ObjectMode { + pub fn mode(&self) -> EntryMode { match self.0.mode() { - opendal::EntryMode::DIR => ObjectMode::DIR, - opendal::EntryMode::FILE => ObjectMode::FILE, - opendal::EntryMode::Unknown => ObjectMode::Unknown, + opendal::EntryMode::DIR => EntryMode::DIR, + opendal::EntryMode::FILE => EntryMode::FILE, + opendal::EntryMode::Unknown => EntryMode::Unknown, } } @@ -176,10 +242,10 @@ impl ObjectMetadata { } #[napi] -pub struct ObjectLister(opendal::Lister); +pub struct Lister(opendal::Lister); #[napi] -impl ObjectLister { +impl Lister { /// # Safety /// /// > &mut self in async napi methods should be marked as unsafe @@ -187,75 +253,14 @@ impl ObjectLister { /// napi will make sure the function is safe, and we didn't do unsafe /// thing internally. #[napi] - pub async unsafe fn next(&mut self) -> Result> { + pub async unsafe fn next(&mut self) -> Result> { Ok(self .0 .try_next() .await .map_err(format_napi_error) .unwrap() - .map(DataObject)) - } -} - -#[napi] -pub struct DataObject(opendal::Object); - -#[napi] -impl DataObject { - #[napi] - pub async fn stat(&self) -> Result { - let meta = self.0.stat().await.map_err(format_napi_error).unwrap(); - - Ok(ObjectMetadata(meta)) - } - - #[napi(js_name = "statSync")] - pub fn blocking_stat(&self) -> Result { - let meta = self.0.blocking_stat().map_err(format_napi_error).unwrap(); - - Ok(ObjectMetadata(meta)) - } - - #[napi] - pub async fn write(&self, content: Buffer) -> Result<()> { - let c = content.as_ref().to_owned(); - self.0.write(c).await.map_err(format_napi_error) - } - - #[napi(js_name = "writeSync")] - pub fn blocking_write(&self, content: Buffer) -> Result<()> { - let c = content.as_ref().to_owned(); - self.0.blocking_write(c).map_err(format_napi_error) - } - - #[napi] - pub async fn read(&self) -> Result { - let res = self.0.read().await.map_err(format_napi_error)?; - Ok(res.into()) - } - - #[napi(js_name = "readSync")] - pub fn blocking_read(&self) -> Result { - 
let res = self.0.blocking_read().map_err(format_napi_error)?; - Ok(res.into()) - } - - #[napi] - pub async fn scan(&self) -> Result { - Ok(ObjectLister( - self.0.scan().await.map_err(format_napi_error).unwrap(), - )) - } - - #[napi] - pub async fn delete(&self) -> Result<()> { - self.0.delete().await.map_err(format_napi_error) - } - - #[napi(js_name = "deleteSync")] - pub fn blocking_delete(&self) -> Result<()> { - self.0.blocking_delete().map_err(format_napi_error) + .map(Entry)) } } diff --git a/bindings/object_store/src/lib.rs b/bindings/object_store/src/lib.rs index 5438f5fc362..71df71e2ef2 100644 --- a/bindings/object_store/src/lib.rs +++ b/bindings/object_store/src/lib.rs @@ -56,8 +56,9 @@ impl std::fmt::Display for OpendalStore { #[async_trait] impl ObjectStore for OpendalStore { async fn put(&self, location: &Path, bytes: Bytes) -> Result<()> { - let o = self.inner.object(location.as_ref()); - Ok(o.write(bytes) + Ok(self + .inner + .write(location.as_ref(), bytes) .await .map_err(|err| format_object_store_error(err, location.as_ref()))?) 
} @@ -84,9 +85,9 @@ impl ObjectStore for OpendalStore { } async fn get(&self, location: &Path) -> Result { - let o = self.inner.object(location.as_ref()); - let r = o - .reader() + let r = self + .inner + .reader(location.as_ref()) .await .map_err(|err| format_object_store_error(err, location.as_ref()))?; @@ -94,9 +95,9 @@ impl ObjectStore for OpendalStore { } async fn get_range(&self, location: &Path, range: Range) -> Result { - let o = self.inner.object(location.as_ref()); - let bs = o - .range_read(range.start as u64..range.end as u64) + let bs = self + .inner + .range_read(location.as_ref(), range.start as u64..range.end as u64) .await .map_err(|err| format_object_store_error(err, location.as_ref()))?; @@ -104,9 +105,9 @@ impl ObjectStore for OpendalStore { } async fn head(&self, location: &Path) -> Result { - let o = self.inner.object(location.as_ref()); - let meta = o - .stat() + let meta = self + .inner + .stat(location.as_ref()) .await .map_err(|err| format_object_store_error(err, location.as_ref()))?; @@ -127,8 +128,8 @@ impl ObjectStore for OpendalStore { } async fn delete(&self, location: &Path) -> Result<()> { - let o = self.inner.object(location.as_ref()); - o.delete() + self.inner + .delete(location.as_ref()) .await .map_err(|err| format_object_store_error(err, location.as_ref()))?; @@ -174,14 +175,14 @@ impl ObjectStore for OpendalStore { fn format_object_store_error(err: opendal::Error, path: &str) -> object_store::Error { use opendal::ErrorKind; match err.kind() { - ErrorKind::ObjectNotFound => object_store::Error::NotFound { + ErrorKind::NotFound => object_store::Error::NotFound { path: path.to_string(), source: Box::new(err), }, ErrorKind::Unsupported => object_store::Error::NotSupported { source: Box::new(err), }, - ErrorKind::ObjectAlreadyExists => object_store::Error::AlreadyExists { + ErrorKind::AlreadyExists => object_store::Error::AlreadyExists { path: path.to_string(), source: Box::new(err), }, @@ -223,9 +224,7 @@ mod tests { 
#[tokio::test] async fn test_basic() { - let op = Operator::create(services::Memory::default()) - .unwrap() - .finish(); + let op = Operator::new(services::Memory::default()).unwrap().finish(); let object_store: Arc = Arc::new(OpendalStore::new(op)); // Retrieve a specific file diff --git a/bindings/python/src/lib.rs b/bindings/python/src/lib.rs index cdd45b02298..9afca223e92 100644 --- a/bindings/python/src/lib.rs +++ b/bindings/python/src/lib.rs @@ -25,50 +25,35 @@ struct Operator(od::Operator); impl Operator { #[new] pub fn new() -> Self { - let op = od::Operator::create(od::services::Memory::default()) + let op = od::Operator::new(od::services::Memory::default()) .unwrap() .finish(); Operator(op) } - pub fn object(&self, path: &str) -> Object { - let o = self.0.object(path); - - Object(o) - } -} - -#[pyclass] -struct Object(od::Object); - -#[pymethods] -impl Object { - pub fn path(&self) -> &str { - self.0.path() - } - - pub fn blocking_read(&self) -> PyResult> { - self.0.blocking_read().map_err(format_pyerr) + pub fn blocking_read(&self, path: &str) -> PyResult> { + self.0.blocking().read(path).map_err(format_pyerr) } - pub fn read<'p>(&'p self, py: Python<'p>) -> PyResult<&'p PyAny> { + pub fn read<'p>(&'p self, py: Python<'p>, path: &'p str) -> PyResult<&'p PyAny> { let this = self.0.clone(); + let path = path.to_string(); future_into_py(py, async move { - let res: Vec = this.read().await.map_err(format_pyerr)?; + let res: Vec = this.read(&path).await.map_err(format_pyerr)?; Ok(res) }) } - pub fn blocking_write(&self, bs: Vec) -> PyResult<()> { - self.0.blocking_write(bs).map_err(format_pyerr) + pub fn blocking_write(&self, path: &str, bs: Vec) -> PyResult<()> { + self.0.blocking().write(path, bs).map_err(format_pyerr) } } fn format_pyerr(err: od::Error) -> PyErr { use od::ErrorKind::*; match err.kind() { - ObjectNotFound => PyFileNotFoundError::new_err(err.to_string()), + NotFound => PyFileNotFoundError::new_err(err.to_string()), _ => 
PyBaseException::new_err(err.to_string()), } } @@ -76,6 +61,5 @@ fn format_pyerr(err: od::Error) -> PyErr { #[pymodule] fn opendal(_py: Python, m: &PyModule) -> PyResult<()> { m.add_class::()?; - m.add_class::()?; Ok(()) } diff --git a/examples/object.rs b/examples/object.rs index e6a3516faaf..35c3c7ca197 100644 --- a/examples/object.rs +++ b/examples/object.rs @@ -31,25 +31,22 @@ async fn main() -> Result<()> { let builder = services::Memory::default(); // Init an operator - let op = Operator::create(builder)? + let op = Operator::new(builder)? // Init with logging layer enabled. .layer(LoggingLayer::default()) .finish(); debug!("operator: {op:?}"); - // Create an object handler. - let o = op.object("test"); - // Write data into object test. - o.write("Hello, World!").await?; + op.write("test", "Hello, World!").await?; // Read data from object. - let bs = o.read().await?; + let bs = op.read("test").await?; info!("content: {}", String::from_utf8_lossy(&bs)); // Get object metadata. - let meta = o.stat().await?; + let meta = op.stat("test").await?; info!("meta: {:?}", meta); // Have fun! diff --git a/src/docs/concepts.rs b/src/docs/concepts.rs index fb19aa6abf0..dc16b8fc22c 100644 --- a/src/docs/concepts.rs +++ b/src/docs/concepts.rs @@ -20,7 +20,6 @@ //! //! - [`Builder`]: Build an instance of underlying services. //! - [`Operator`]: A bridge between underlying implementation detail and unified abstraction. -//! - [`Object`]: The smallest unit representing a file/dir/... with path in specified services. //! //! If you are interested in internal implementation details, please have a look at [`internals`][super::internals]. //! @@ -74,7 +73,7 @@ //! builder.bucket("example"); //! builder.root("/path/to/file"); //! -//! let op = Operator::create(builder)?.finish(); +//! let op = Operator::new(builder)?.finish(); //! # Ok(()) //! # } //! ``` @@ -83,31 +82,24 @@ //! - `Operator` doesn't have generic parameters or lifetimes, so it's **easy** to use it everywhere. //! 
- `Operator` implements `Send` and `Sync`, so it's **safe** to send it between threads. //! -//! # Object -//! -//! [`Object`] is the smallest unit representing a file/dir/... with path in specified services. All actual operations will happen on `Object`: +//! After getting an `Operator`, we can do operations on different paths. //! //! //! ```text -//! ┌────────────┐ -//! abc │ Object ├───► read() -//! ┌────────►│ path: abc │ -//! │ └────────────┘ +//! ┌──────────────┐ +//! ┌────────►│ read("abc") │ +//! │ └──────────────┘ //! ┌───────────┐ │ -//! │ Operator │ │ def ┌────────────┐ -//! │ ┌───────┐ ├────┼────────►│ Object ├───► write() -//! │ │Service│ │ │ │ path: def │ -//! └─┴───────┴─┘ │ └────────────┘ -//! │ -//! │ ghi/ ┌────────────┐ -//! └────────►│ Object ├───► list() -//! │ path: ghi/ │ -//! └────────────┘ +//! │ Operator │ │ ┌──────────────┐ +//! │ ┌───────┐ ├────┼────────►│ write("def") │ +//! │ │Service│ │ │ └──────────────┘ +//! └─┴───────┴─┘ │ +//! │ ┌──────────────┐ +//! └────────►│ list("ghi/") │ +//! └──────────────┘ //! ``` //! -//! Every `Object` is coresbonding to a `path` which is related to `Operator`'s root. `Object` is the same as `Operator` which is **cheap**, **easy** and **safe** to use everywhere. -//! -//! We can read data from an object in this way: +//! We can read data with given path in this way: //! //! ```no_run //! # use opendal::Result; //! # use opendal::Result; //! # use opendal::services; //! # use opendal::Operator; //! # #[tokio::main] //! # async fn main() -> Result<()> { //! let mut builder = services::S3::default(); //! builder.bucket("example"); //! builder.root("/path/to/file"); //! -//! let op = Operator::create(builder)?.finish(); -//! let o = op.object("abc"); -//! let bs: Vec = o.read().await?; +//! let op = Operator::new(builder)?.finish(); +//! let bs: Vec = op.read("abc").await?; //! # Ok(()) //! # } //! ``` //! //! [`Builder`]: crate::Builder //! [`Operator`]: crate::Operator -//!
[`Object`]: crate::Object diff --git a/src/docs/internals/accessor.rs b/src/docs/internals/accessor.rs index 8ccc7c187b0..149beffe9d6 100644 --- a/src/docs/internals/accessor.rs +++ b/src/docs/internals/accessor.rs @@ -116,14 +116,14 @@ //! - Most APIs accept `path` and `OpXxx`, and returns `RpXxx`. //! - Most APIs have `async` and `blocking` variants, they share the same semantics but may have different underlying implementations. //! -//! [`Accessor`] can declare their capabilities via [`AccessorMetadata`]'s `set_capabilities`: +//! [`Accessor`] can declare their capabilities via [`AccessorInfo`]'s `set_capabilities`: //! //! ```ignore //! impl Accessor for MyBackend { -//! fn metadata(&self) -> AccessorMetadata { +//! fn metadata(&self) -> AccessorInfo { //! use AccessorCapability::*; //! -//! let mut am = AccessorMetadata::default(); +//! let mut am = AccessorInfo::default(); //! am.set_capabilities(Read | Write | List | Scan | Presign | Batch); //! //! am @@ -205,7 +205,7 @@ //! /// // NOTE: the root must be absolute path. //! /// builder.root("/path/to/dir"); //! /// -//! /// let op: Operator = Operator::create(builder)?.finish(); +//! /// let op: Operator = Operator::new(builder)?.finish(); //! /// //! /// // Create an object handle to start operation on object. //! /// let _: Object = op.object("test_file"); @@ -291,11 +291,11 @@ //! type Pager = (); //! type BlockingPager = (); //! -//! fn metadata(&self) -> AccessorMetadata { +//! fn metadata(&self) -> AccessorInfo { //! use AccessorCapability::*; //! use AccessorHint::*; //! -//! let mut am = AccessorMetadata::default(); +//! let mut am = AccessorInfo::default(); //! am.set_scheme(Scheme::Duck) //! .set_root(&self.root) //! .set_capabilities(Read); @@ -319,6 +319,6 @@ //! [`Accessor`]: crate::raw::Accessor //! [`Operation`]: crate::raw::Operation //! [`AccessorCapability`]: crate::raw::AccessorCapability -//! [`AccessorMetadata`]: crate::raw::AccessorMetadata +//! 
[`AccessorInfo`]: crate::raw::AccessorInfo //! [`Scheme`]: crate::Scheme //! [`Builder`]: crate::Builder diff --git a/src/docs/internals/mod.rs b/src/docs/internals/mod.rs index 5349e3a1629..b678e930eef 100644 --- a/src/docs/internals/mod.rs +++ b/src/docs/internals/mod.rs @@ -16,7 +16,7 @@ //! //! OpenDAL has provides unified abstraction via two-level API sets: //! -//! - Public API like [`Operator`], [`Object`] provides user level API. +//! - Public API like [`Operator`] provides user level API. //! - Raw API like [`Accessor`], [`Layer`] provides developer level API. //! //! OpenDAL tries it's best to keep the public API stable. But raw APIs @@ -52,7 +52,6 @@ //! //! [`Builder`]: crate::Builder //! [`Operator`]: crate::Operator -//! [`Object`]: crate::Object //! [`Accessor`]: crate::raw::Accessor //! [`Layer`]: crate::raw::Layer diff --git a/src/docs/mod.rs b/src/docs/mod.rs index ee117e822d1..37c6dc56724 100644 --- a/src/docs/mod.rs +++ b/src/docs/mod.rs @@ -14,7 +14,7 @@ //! This module holds documentation for OpenDAL. //! -//! It's highly recommended that you start by reading [`concepts'] first. +//! It's highly recommended that you start by reading [`concepts`] first. pub mod comparisons; diff --git a/src/docs/upgrade.md b/src/docs/upgrade.md index c86e6b0cc2e..d85e8342212 100644 --- a/src/docs/upgrade.md +++ b/src/docs/upgrade.md @@ -27,7 +27,7 @@ In v0.28, we introduced [Query Based Metadata][crate::docs::rfcs::rfc_1398_query + ) -> Result>; ``` -Please visit [`Object::metadata()`][crate::Object::metadata()]'s example for more details. +Please visit `Object::metadata()`'s example for more details. 
# Upgrade to v0.27 diff --git a/src/layers/chaos.rs b/src/layers/chaos.rs index b17c0b7b929..94aaac08bec 100644 --- a/src/layers/chaos.rs +++ b/src/layers/chaos.rs @@ -52,7 +52,7 @@ use crate::*; /// use opendal::Operator; /// use opendal::Scheme; /// -/// let _ = Operator::create(services::Memory::default()) +/// let _ = Operator::new(services::Memory::default()) /// .expect("must init") /// .layer(ChaosLayer::new(0.1)) /// .finish(); diff --git a/src/layers/complete.rs b/src/layers/complete.rs index 63ed3e52ad3..43f442f2f5c 100644 --- a/src/layers/complete.rs +++ b/src/layers/complete.rs @@ -122,7 +122,7 @@ impl Layer for CompleteLayer { type LayeredAccessor = CompleteReaderAccessor; fn layer(&self, inner: A) -> Self::LayeredAccessor { - let meta = inner.metadata(); + let meta = inner.info(); CompleteReaderAccessor { meta, inner: Arc::new(inner), @@ -132,7 +132,7 @@ impl Layer for CompleteLayer { /// Provide reader wrapper for backend. pub struct CompleteReaderAccessor { - meta: AccessorMetadata, + meta: AccessorInfo, inner: Arc, } diff --git a/src/layers/concurrent_limit.rs b/src/layers/concurrent_limit.rs index 3f0c9eb2b33..0d19633ea85 100644 --- a/src/layers/concurrent_limit.rs +++ b/src/layers/concurrent_limit.rs @@ -43,7 +43,7 @@ use crate::*; /// use opendal::Operator; /// use opendal::Scheme; /// -/// let _ = Operator::create(services::Memory::default()) +/// let _ = Operator::new(services::Memory::default()) /// .expect("must init") /// .layer(ConcurrentLimitLayer::new(1024)) /// .finish(); diff --git a/src/layers/error_context.rs b/src/layers/error_context.rs index 668b2210b34..8da01ac7ba5 100644 --- a/src/layers/error_context.rs +++ b/src/layers/error_context.rs @@ -44,14 +44,14 @@ impl Layer for ErrorContextLayer { type LayeredAccessor = ErrorContextAccessor; fn layer(&self, inner: A) -> Self::LayeredAccessor { - let meta = inner.metadata(); + let meta = inner.info(); ErrorContextAccessor { meta, inner } } } /// Provide error context wrapper for 
backend. pub struct ErrorContextAccessor { - meta: AccessorMetadata, + meta: AccessorInfo, inner: A, } @@ -75,7 +75,7 @@ impl LayeredAccessor for ErrorContextAccessor { &self.inner } - fn metadata(&self) -> AccessorMetadata { + fn metadata(&self) -> AccessorInfo { self.meta.clone() } diff --git a/src/layers/immutable_index.rs b/src/layers/immutable_index.rs index 6ed5e35122d..30953785b3a 100644 --- a/src/layers/immutable_index.rs +++ b/src/layers/immutable_index.rs @@ -144,8 +144,8 @@ impl LayeredAccessor for ImmutableIndexAccessor { } /// Add list capabilities for underlying storage services. - fn metadata(&self) -> AccessorMetadata { - let mut meta = self.inner.metadata(); + fn metadata(&self) -> AccessorInfo { + let mut meta = self.inner.info(); meta.set_capabilities( meta.capabilities() | AccessorCapability::List | AccessorCapability::Scan, ); @@ -288,7 +288,7 @@ mod tests { iil.insert(i.to_string()) } - let op = Operator::create(Http::from_iter( + let op = Operator::new(Http::from_iter( vec![("endpoint".to_string(), "https://xuanwo.io".to_string())].into_iter(), ))? .layer(LoggingLayer::default()) @@ -297,7 +297,7 @@ mod tests { let mut map = HashMap::new(); let mut set = HashSet::new(); - let mut ds = op.object("").list().await?; + let mut ds = op.list("").await?; while let Some(entry) = ds.try_next().await? { debug!("got entry: {}", entry.path()); assert!( @@ -307,7 +307,7 @@ mod tests { ); map.insert( entry.path().to_string(), - entry.metadata(Metakey::Mode).await?.mode(), + op.metadata(&entry, Metakey::Mode).await?.mode(), ); } @@ -326,14 +326,14 @@ mod tests { iil.insert(i.to_string()) } - let op = Operator::create(Http::from_iter( + let op = Operator::new(Http::from_iter( vec![("endpoint".to_string(), "https://xuanwo.io".to_string())].into_iter(), ))? 
.layer(LoggingLayer::default()) .layer(iil) .finish(); - let mut ds = op.object("/").scan().await?; + let mut ds = op.scan("/").await?; let mut set = HashSet::new(); let mut map = HashMap::new(); while let Some(entry) = ds.try_next().await? { @@ -345,7 +345,7 @@ mod tests { ); map.insert( entry.path().to_string(), - entry.metadata(Metakey::Mode).await?.mode(), + op.metadata(&entry, Metakey::Mode).await?.mode(), ); } @@ -370,7 +370,7 @@ mod tests { iil.insert(i.to_string()) } - let op = Operator::create(Http::from_iter( + let op = Operator::new(Http::from_iter( vec![("endpoint".to_string(), "https://xuanwo.io".to_string())].into_iter(), ))? .layer(LoggingLayer::default()) @@ -380,7 +380,7 @@ mod tests { // List / let mut map = HashMap::new(); let mut set = HashSet::new(); - let mut ds = op.object("/").list().await?; + let mut ds = op.list("/").await?; while let Some(entry) = ds.try_next().await? { assert!( set.insert(entry.path().to_string()), @@ -389,7 +389,7 @@ mod tests { ); map.insert( entry.path().to_string(), - entry.metadata(Metakey::Mode).await?.mode(), + op.metadata(&entry, Metakey::Mode).await?.mode(), ); } @@ -399,7 +399,7 @@ mod tests { // List dataset/stateful/ let mut map = HashMap::new(); let mut set = HashSet::new(); - let mut ds = op.object("dataset/stateful/").list().await?; + let mut ds = op.list("dataset/stateful/").await?; while let Some(entry) = ds.try_next().await? { assert!( set.insert(entry.path().to_string()), @@ -408,7 +408,7 @@ mod tests { ); map.insert( entry.path().to_string(), - entry.metadata(Metakey::Mode).await?.mode(), + op.metadata(&entry, Metakey::Mode).await?.mode(), ); } @@ -431,14 +431,14 @@ mod tests { iil.insert(i.to_string()) } - let op = Operator::create(Http::from_iter( + let op = Operator::new(Http::from_iter( vec![("endpoint".to_string(), "https://xuanwo.io".to_string())].into_iter(), ))? 
.layer(LoggingLayer::default()) .layer(iil) .finish(); - let mut ds = op.object("/").scan().await?; + let mut ds = op.scan("/").await?; let mut map = HashMap::new(); let mut set = HashSet::new(); @@ -450,7 +450,7 @@ mod tests { ); map.insert( entry.path().to_string(), - entry.metadata(Metakey::Mode).await?.mode(), + op.metadata(&entry, Metakey::Mode).await?.mode(), ); } diff --git a/src/layers/logging.rs b/src/layers/logging.rs index c4e8a9b8abe..41f73528f11 100644 --- a/src/layers/logging.rs +++ b/src/layers/logging.rs @@ -56,7 +56,7 @@ use crate::*; /// use opendal::Operator; /// use opendal::Scheme; /// -/// let _ = Operator::create(services::Memory::default()) +/// let _ = Operator::new(services::Memory::default()) /// .expect("must init") /// .layer(LoggingLayer::default()) /// .finish(); @@ -95,7 +95,7 @@ impl Default for LoggingLayer { impl LoggingLayer { /// Setting the log level while expected error happened. /// - /// For example: accessor returns ObjectNotFound. + /// For example: accessor returns NotFound. /// /// `None` means disable the log for error. 
pub fn with_error_level(mut self, level: Option) -> Self { @@ -118,7 +118,7 @@ impl Layer for LoggingLayer { type LayeredAccessor = LoggingAccessor; fn layer(&self, inner: A) -> Self::LayeredAccessor { - let meta = inner.metadata(); + let meta = inner.info(); LoggingAccessor { scheme: meta.scheme(), inner, @@ -175,19 +175,19 @@ impl LayeredAccessor for LoggingAccessor { &self.inner } - fn metadata(&self) -> AccessorMetadata { + fn metadata(&self) -> AccessorInfo { debug!( target: LOGGING_TARGET, "service={} operation={} -> started", self.scheme, - Operation::Metadata + Operation::Info ); - let result = self.inner.metadata(); + let result = self.inner.info(); debug!( target: LOGGING_TARGET, "service={} operation={} -> finished: {:?}", self.scheme, - Operation::Metadata, + Operation::Info, result ); diff --git a/src/layers/metrics.rs b/src/layers/metrics.rs index b17582abb73..35ffd0517a5 100644 --- a/src/layers/metrics.rs +++ b/src/layers/metrics.rs @@ -90,7 +90,7 @@ static LABEL_ERROR: &str = "error"; /// use opendal::services; /// use opendal::Operator; /// -/// let _ = Operator::create(services::Memory::default()) +/// let _ = Operator::new(services::Memory::default()) /// .expect("must init") /// .layer(MetricsLayer) /// .finish(); @@ -118,7 +118,7 @@ impl Layer for MetricsLayer { type LayeredAccessor = MetricsAccessor; fn layer(&self, inner: A) -> Self::LayeredAccessor { - let meta = inner.metadata(); + let meta = inner.info(); MetricsAccessor { inner, @@ -200,12 +200,12 @@ impl MetricsHandler { requests_total_metadata: register_counter!( METRIC_REQUESTS_TOTAL, LABEL_SERVICE => service, - LABEL_OPERATION => Operation::Metadata.into_static(), + LABEL_OPERATION => Operation::Info.into_static(), ), requests_duration_seconds_metadata: register_histogram!( METRIC_REQUESTS_DURATION_SECONDS, LABEL_SERVICE => service, - LABEL_OPERATION => Operation::Metadata.into_static(), + LABEL_OPERATION => Operation::Info.into_static(), ), requests_total_create: register_counter!( 
@@ -447,11 +447,11 @@ impl LayeredAccessor for MetricsAccessor { &self.inner } - fn metadata(&self) -> AccessorMetadata { + fn metadata(&self) -> AccessorInfo { self.handle.requests_total_metadata.increment(1); let start = Instant::now(); - let result = self.inner.metadata(); + let result = self.inner.info(); let dur = start.elapsed().as_secs_f64(); self.handle.requests_duration_seconds_metadata.record(dur); diff --git a/src/layers/retry.rs b/src/layers/retry.rs index e4258f28ac9..8008bd8357e 100644 --- a/src/layers/retry.rs +++ b/src/layers/retry.rs @@ -57,7 +57,7 @@ use crate::*; /// use opendal::Operator; /// use opendal::Scheme; /// -/// let _ = Operator::create(services::Memory::default()) +/// let _ = Operator::new(services::Memory::default()) /// .expect("must init") /// .layer(RetryLayer::new()) /// .finish(); @@ -76,7 +76,7 @@ impl RetryLayer { /// use opendal::Operator; /// use opendal::Scheme; /// - /// let _ = Operator::create(services::Memory::default()) + /// let _ = Operator::new(services::Memory::default()) /// .expect("must init") /// .layer(RetryLayer::new()); /// ``` @@ -756,6 +756,7 @@ impl oio::BlockingPage for RetryWrapper

{ #[cfg(test)] mod tests { + use std::collections::HashMap; use std::io; use std::sync::Arc; use std::sync::Mutex; @@ -769,6 +770,26 @@ mod tests { use super::*; + #[derive(Default, Clone)] + struct MockBuilder { + attempt: Arc>, + } + + impl Builder for MockBuilder { + const SCHEME: Scheme = Scheme::Custom("mock"); + type Accessor = MockService; + + fn from_map(_: HashMap) -> Self { + Self::default() + } + + fn build(&mut self) -> Result { + Ok(MockService { + attempt: self.attempt.clone(), + }) + } + } + #[derive(Debug, Clone, Default)] struct MockService { attempt: Arc>, @@ -783,8 +804,8 @@ mod tests { type Pager = MockPager; type BlockingPager = (); - fn metadata(&self) -> AccessorMetadata { - let mut am = AccessorMetadata::default(); + fn info(&self) -> AccessorInfo { + let mut am = AccessorInfo::default(); am.set_capabilities(AccessorCapability::List); am.set_hints(AccessorHint::ReadStreamable); @@ -872,7 +893,7 @@ mod tests { self.attempt += 1; match self.attempt { 1 => Err(Error::new( - ErrorKind::ObjectRateLimited, + ErrorKind::RateLimited, "retriable rate limited error from pager", ) .set_temporary()), @@ -906,10 +927,13 @@ mod tests { async fn test_retry_read() { let _ = env_logger::try_init(); - let srv = Arc::new(MockService::default()); - let op = Operator::new(srv.clone()).layer(RetryLayer::new()).finish(); + let builder = MockBuilder::default(); + let op = Operator::new(builder.clone()) + .unwrap() + .layer(RetryLayer::new()) + .finish(); - let mut r = op.object("retryable_error").reader().await.unwrap(); + let mut r = op.reader("retryable_error").await.unwrap(); let mut content = Vec::new(); let size = r .read_to_end(&mut content) @@ -918,21 +942,23 @@ mod tests { assert_eq!(size, 13); assert_eq!(content, "Hello, World!".as_bytes()); // The error is retryable, we should request it 1 + 10 times. 
- assert_eq!(*srv.attempt.lock().unwrap(), 5); + assert_eq!(*builder.attempt.lock().unwrap(), 5); } #[tokio::test] async fn test_retry_list() { let _ = env_logger::try_init(); - let srv = Arc::new(MockService::default()); - let op = Operator::new(srv.clone()).layer(RetryLayer::new()).finish(); + let builder = MockBuilder::default(); + let op = Operator::new(builder.clone()) + .unwrap() + .layer(RetryLayer::new()) + .finish(); let expected = vec!["hello", "world", "2023/", "0208/"]; let mut lister = op - .object("retryable_error/") - .list() + .list("retryable_error/") .await .expect("service must support list"); let mut actual = Vec::new(); diff --git a/src/layers/tracing.rs b/src/layers/tracing.rs index e04915185b5..9f7a1362afc 100644 --- a/src/layers/tracing.rs +++ b/src/layers/tracing.rs @@ -38,7 +38,7 @@ use crate::*; /// use opendal::services; /// use opendal::Operator; /// -/// let _ = Operator::create(services::Memory::default()) +/// let _ = Operator::new(services::Memory::default()) /// .expect("must init") /// .layer(TracingLayer) /// .finish(); @@ -84,8 +84,8 @@ use crate::*; /// .write("0".repeat(16 * 1024 * 1024).into_bytes()) /// .await /// .expect("must succeed"); -/// op.object("test").stat().await.expect("must succeed"); -/// op.object("test").read().await.expect("must succeed"); +/// op.stat("test").await.expect("must succeed"); +/// op.read("test").await.expect("must succeed"); /// }); /// /// // Shut down the current tracer provider. This will invoke the shutdown @@ -144,8 +144,8 @@ impl LayeredAccessor for TracingAccessor { } #[tracing::instrument(level = "debug")] - fn metadata(&self) -> AccessorMetadata { - self.inner.metadata() + fn metadata(&self) -> AccessorInfo { + self.inner.info() } #[tracing::instrument(level = "debug", skip(self))] diff --git a/src/lib.rs b/src/lib.rs index 915ac68c786..d2051518572 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -34,27 +34,24 @@ //! builder.bucket("test"); //! //! // Init an operator -//! 
let op = Operator::create(builder)? +//! let op = Operator::new(builder)? //! // Init with logging layer enabled. //! .layer(LoggingLayer::default()) //! .finish(); //! -//! // Create object handler. -//! let mut o = op.object("test_file"); -//! //! // Write data -//! o.write("Hello, World!").await?; +//! op.write("path/to/file", "Hello, World!").await?; //! //! // Read data -//! let bs = o.read().await?; +//! let bs = op.read("path/to/file").await?; //! //! // Fetch metadata -//! let meta = o.stat().await?; +//! let meta = op.stat("path/to/file").await?; //! let mode = meta.mode(); //! let length = meta.content_length(); //! //! // Delete -//! o.delete().await?; +//! op.delete("path/to/file").await?; //! //! Ok(()) //! } @@ -89,10 +86,9 @@ mod tests { /// unexpected struct/enum size change. #[test] fn assert_size() { - assert_eq!(88, size_of::()); - assert_eq!(16, size_of::()); - assert_eq!(112, size_of::()); - assert_eq!(32, size_of::()); + assert_eq!(88, size_of::()); + assert_eq!(24, size_of::()); + assert_eq!(216, size_of::()); assert_eq!(192, size_of::()); assert_eq!(1, size_of::()); assert_eq!(24, size_of::()); diff --git a/src/raw/accessor.rs b/src/raw/accessor.rs index 792ea01e272..d47a66939c0 100644 --- a/src/raw/accessor.rs +++ b/src/raw/accessor.rs @@ -61,19 +61,19 @@ pub trait Accessor: Send + Sync + Debug + Unpin + 'static { /// `blocking_list` or `scan` operation. type BlockingPager: oio::BlockingPage; - /// Invoke the `metadata` operation to get metadata of accessor. + /// Invoke the `info` operation to get metadata of accessor. /// /// # Notes /// /// This function is required to be implemented. /// - /// By returning AccessorMetadata, underlying services can declare + /// By returning AccessorInfo, underlying services can declare /// some useful information about it self. /// /// - scheme: declare the scheme of backend. /// - capabilities: declare the capabilities of current backend. 
/// - hints: declare the hints of current backend - fn metadata(&self) -> AccessorMetadata; + fn info(&self) -> AccessorInfo; /// Invoke the `create` operation on the specified path /// @@ -330,8 +330,8 @@ impl Accessor for Arc { type Pager = T::Pager; type BlockingPager = T::BlockingPager; - fn metadata(&self) -> AccessorMetadata { - self.as_ref().metadata() + fn info(&self) -> AccessorInfo { + self.as_ref().info() } async fn create(&self, path: &str, args: OpCreate) -> Result { @@ -403,7 +403,7 @@ pub type FusedAccessor = Arc< /// Metadata for accessor, users can use this metadata to get information of underlying backend. #[derive(Clone, Debug, Default)] -pub struct AccessorMetadata { +pub struct AccessorInfo { scheme: Scheme, root: String, name: String, @@ -411,7 +411,7 @@ pub struct AccessorMetadata { hints: FlagSet, } -impl AccessorMetadata { +impl AccessorInfo { /// [`Scheme`] of backend. pub fn scheme(&self) -> Scheme { self.scheme diff --git a/src/raw/adapters/kv/api.rs b/src/raw/adapters/kv/api.rs index 843e6a07d61..59153e01406 100644 --- a/src/raw/adapters/kv/api.rs +++ b/src/raw/adapters/kv/api.rs @@ -111,7 +111,7 @@ pub struct Metadata { } impl Metadata { - /// Create a new KeyValueAccessorMetadata. + /// Create a new KeyValueAccessorInfo. 
pub fn new( scheme: Scheme, name: &str, @@ -140,9 +140,9 @@ impl Metadata { } } -impl From for AccessorMetadata { - fn from(m: Metadata) -> AccessorMetadata { - let mut am = AccessorMetadata::default(); +impl From for AccessorInfo { + fn from(m: Metadata) -> AccessorInfo { + let mut am = AccessorInfo::default(); am.set_name(m.name()); am.set_scheme(m.scheme()); am.set_capabilities(m.capabilities()); diff --git a/src/raw/adapters/kv/backend.rs b/src/raw/adapters/kv/backend.rs index 5d34345ca5f..e4953c73fbc 100644 --- a/src/raw/adapters/kv/backend.rs +++ b/src/raw/adapters/kv/backend.rs @@ -57,8 +57,8 @@ impl Accessor for Backend { type Pager = KvPager; type BlockingPager = KvPager; - fn metadata(&self) -> AccessorMetadata { - let mut am: AccessorMetadata = self.kv.metadata().into(); + fn info(&self) -> AccessorInfo { + let mut am: AccessorInfo = self.kv.metadata().into(); am.set_root(&self.root) .set_hints(AccessorHint::ReadStreamable | AccessorHint::ReadSeekable); @@ -83,12 +83,7 @@ impl Accessor for Backend { let bs = match self.kv.get(&p).await? { Some(bs) => bs, - None => { - return Err(Error::new( - ErrorKind::ObjectNotFound, - "kv doesn't have this path", - )) - } + None => return Err(Error::new(ErrorKind::NotFound, "kv doesn't have this path")), }; let bs = self.apply_range(bs, args.range()); @@ -102,12 +97,7 @@ impl Accessor for Backend { let bs = match self.kv.blocking_get(&p)? 
{ Some(bs) => bs, - None => { - return Err(Error::new( - ErrorKind::ObjectNotFound, - "kv doesn't have this path", - )) - } + None => return Err(Error::new(ErrorKind::NotFound, "kv doesn't have this path")), }; let bs = self.apply_range(bs, args.range()); @@ -137,10 +127,7 @@ impl Accessor for Backend { Some(bs) => Ok(RpStat::new( Metadata::new(EntryMode::FILE).with_content_length(bs.len() as u64), )), - None => Err(Error::new( - ErrorKind::ObjectNotFound, - "kv doesn't have this path", - )), + None => Err(Error::new(ErrorKind::NotFound, "kv doesn't have this path")), } } } @@ -156,10 +143,7 @@ impl Accessor for Backend { Some(bs) => Ok(RpStat::new( Metadata::new(EntryMode::FILE).with_content_length(bs.len() as u64), )), - None => Err(Error::new( - ErrorKind::ObjectNotFound, - "kv doesn't have this path", - )), + None => Err(Error::new(ErrorKind::NotFound, "kv doesn't have this path")), } } } diff --git a/src/raw/layer.rs b/src/raw/layer.rs index 8c8531ca832..49222a198e8 100644 --- a/src/raw/layer.rs +++ b/src/raw/layer.rs @@ -144,8 +144,8 @@ pub trait LayeredAccessor: Send + Sync + Debug + Unpin + 'static { fn inner(&self) -> &Self::Inner; - fn metadata(&self) -> AccessorMetadata { - self.inner().metadata() + fn metadata(&self) -> AccessorInfo { + self.inner().info() } async fn create(&self, path: &str, args: OpCreate) -> Result { @@ -206,7 +206,7 @@ impl Accessor for L { type Pager = L::Pager; type BlockingPager = L::BlockingPager; - fn metadata(&self) -> AccessorMetadata { + fn info(&self) -> AccessorInfo { (self as &L).metadata() } @@ -311,8 +311,8 @@ mod tests { type Pager = (); type BlockingPager = (); - fn metadata(&self) -> AccessorMetadata { - let mut am = AccessorMetadata::default(); + fn info(&self) -> AccessorInfo { + let mut am = AccessorInfo::default(); am.set_scheme(Scheme::Custom("test")); am } @@ -335,12 +335,12 @@ mod tests { deleted: Arc::new(Mutex::new(false)), }; - let op = Operator::create(Memory::default()) + let op = 
Operator::new(Memory::default()) .unwrap() .layer(&test) .finish(); - op.object("xxxxx").delete().await.unwrap(); + op.delete("xxxxx").await.unwrap(); assert!(*test.deleted.clone().lock().await); } diff --git a/src/raw/mod.rs b/src/raw/mod.rs index 07f2d134388..37b2f9cfc56 100644 --- a/src/raw/mod.rs +++ b/src/raw/mod.rs @@ -27,7 +27,7 @@ mod accessor; pub use accessor::Accessor; pub use accessor::AccessorCapability; pub use accessor::AccessorHint; -pub use accessor::AccessorMetadata; +pub use accessor::AccessorInfo; pub use accessor::FusedAccessor; mod layer; diff --git a/src/raw/oio/entry.rs b/src/raw/oio/entry.rs index 2cc85b1853e..88f216fab48 100644 --- a/src/raw/oio/entry.rs +++ b/src/raw/oio/entry.rs @@ -12,13 +12,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::raw::FusedAccessor; -use crate::EntryMode; -use crate::Metadata; -use crate::Object; +use crate::*; -/// Entry is returned by `Page` or `BlockingPage` -/// during list operations. +/// Entry is returned by `Page` or `BlockingPage` during list operations. +/// +/// # Notes +/// +/// Differences between `crate::Entry` and `oio::Entry`: +/// +/// - `crate::Entry` is the user's public API and has fewer public methods. +/// - `oio::Entry` is the raw API and isn't exposed to users. #[derive(Debug, Clone, PartialEq, Eq)] pub struct Entry { path: String, @@ -49,7 +52,7 @@ impl Entry { self } - /// Get the path of object entry. + /// Get the path of entry. pub fn path(&self) -> &str { &self.path } @@ -69,8 +72,10 @@ impl Entry { self.meta.mode() } - /// Consume to convert into an object. - pub(crate) fn into_object(self, acc: FusedAccessor) -> Object { - Object::with(acc, &self.path, Some(self.meta)) + /// Consume self to convert into an Entry. + /// + /// NOTE: implement this by hand to avoid leaking raw entry to end users. 
+ pub(crate) fn into_entry(self) -> crate::Entry { + crate::Entry::new(self.path, self.meta) } } diff --git a/src/raw/oio/into_reader/by_range.rs b/src/raw/oio/into_reader/by_range.rs index 67cc7db0fcb..3157f853a5f 100644 --- a/src/raw/oio/into_reader/by_range.rs +++ b/src/raw/oio/into_reader/by_range.rs @@ -318,8 +318,8 @@ mod tests { type Pager = (); type BlockingPager = (); - fn metadata(&self) -> AccessorMetadata { - let mut am = AccessorMetadata::default(); + fn info(&self) -> AccessorInfo { + let mut am = AccessorInfo::default(); am.set_capabilities(AccessorCapability::Read); am diff --git a/src/raw/oio/to_flat_pager.rs b/src/raw/oio/to_flat_pager.rs index 2ccc423e7b0..77ad1287ef6 100644 --- a/src/raw/oio/to_flat_pager.rs +++ b/src/raw/oio/to_flat_pager.rs @@ -25,7 +25,7 @@ use crate::*; pub fn to_flat_pager(acc: A, path: &str, size: usize) -> ToFlatPager { #[cfg(debug_assertions)] { - let meta = acc.metadata(); + let meta = acc.info(); debug_assert!( !meta.capabilities().contains(AccessorCapability::Scan), "service already supports scan, call to_flat_pager must be a mistake" @@ -245,8 +245,8 @@ mod tests { type Pager = (); type BlockingPager = MockPager; - fn metadata(&self) -> AccessorMetadata { - let mut am = AccessorMetadata::default(); + fn info(&self) -> AccessorInfo { + let mut am = AccessorInfo::default(); am.set_capabilities(AccessorCapability::List); am diff --git a/src/raw/operation.rs b/src/raw/operation.rs index 10aeb88176e..d5f53b241ea 100644 --- a/src/raw/operation.rs +++ b/src/raw/operation.rs @@ -19,8 +19,8 @@ use std::fmt::Formatter; #[derive(Debug, Copy, Clone, Hash, Eq, PartialEq)] #[non_exhaustive] pub enum Operation { - /// Operation for [`crate::raw::Accessor::metadata`] - Metadata, + /// Operation for [`crate::raw::Accessor::info`] + Info, /// Operation for [`crate::raw::Accessor::create`] Create, /// Operation for [`crate::raw::Accessor::read`] @@ -64,7 +64,7 @@ impl Operation { impl Default for Operation { fn default() -> Self { - 
Operation::Metadata + Operation::Info } } @@ -77,7 +77,7 @@ impl Display for Operation { impl From for &'static str { fn from(v: Operation) -> &'static str { match v { - Operation::Metadata => "metadata", + Operation::Info => "metadata", Operation::Create => "create", Operation::Read => "read", Operation::Write => "write", diff --git a/src/services/azblob/backend.rs b/src/services/azblob/backend.rs index 571dc5d20b4..3d63c74debf 100644 --- a/src/services/azblob/backend.rs +++ b/src/services/azblob/backend.rs @@ -82,7 +82,6 @@ const X_MS_BLOB_TYPE: &str = "x-ms-blob-type"; /// /// use anyhow::Result; /// use opendal::services::Azblob; -/// use opendal::Object; /// use opendal::Operator; /// /// #[tokio::main] @@ -110,10 +109,7 @@ const X_MS_BLOB_TYPE: &str = "x-ms-blob-type"; /// builder.account_key("Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=="); /// /// // `Accessor` provides the low level APIs, we will use `Operator` normally. -/// let op: Operator = Operator::create(builder)?.finish(); -/// -/// // Create an object handle to start operation on object. 
-/// let _: Object = op.object("test_file"); +/// let op: Operator = Operator::new(builder)?.finish(); /// /// Ok(()) /// } @@ -282,7 +278,7 @@ impl AzblobBuilder { } else { let account_name = conn_map.get("AccountName").ok_or_else(|| { Error::new( - ErrorKind::BackendConfigInvalid, + ErrorKind::ConfigInvalid, "connection string must have AccountName", ) .with_operation("Builder::from_connection_string") @@ -290,7 +286,7 @@ impl AzblobBuilder { builder.account_name(account_name); let account_key = conn_map.get("AccountKey").ok_or_else(|| { Error::new( - ErrorKind::BackendConfigInvalid, + ErrorKind::ConfigInvalid, "connection string must have AccountKey", ) .with_operation("Builder::from_connection_string") @@ -307,7 +303,7 @@ impl AzblobBuilder { .as_ref() .ok_or_else(|| { Error::new( - ErrorKind::BackendConfigInvalid, + ErrorKind::ConfigInvalid, "connection string must have AccountName", ) .with_operation("Builder::from_connection_string") @@ -346,21 +342,17 @@ impl Builder for AzblobBuilder { // Handle endpoint, region and container name. 
let container = match self.container.is_empty() { false => Ok(&self.container), - true => Err( - Error::new(ErrorKind::BackendConfigInvalid, "container is empty") - .with_operation("Builder::build") - .with_context("service", Scheme::Azblob), - ), + true => Err(Error::new(ErrorKind::ConfigInvalid, "container is empty") + .with_operation("Builder::build") + .with_context("service", Scheme::Azblob)), }?; debug!("backend use container {}", &container); let endpoint = match &self.endpoint { Some(endpoint) => Ok(endpoint.clone()), - None => Err( - Error::new(ErrorKind::BackendConfigInvalid, "endpoint is empty") - .with_operation("Builder::build") - .with_context("service", Scheme::Azblob), - ), + None => Err(Error::new(ErrorKind::ConfigInvalid, "endpoint is empty") + .with_operation("Builder::build") + .with_context("service", Scheme::Azblob)), }?; debug!("backend use endpoint {}", &container); @@ -381,7 +373,7 @@ impl Builder for AzblobBuilder { } let signer = signer_builder.build().map_err(|e| { - Error::new(ErrorKind::BackendConfigInvalid, "build AzureStorageSigner") + Error::new(ErrorKind::ConfigInvalid, "build AzureStorageSigner") .with_operation("Builder::build") .with_context("service", Scheme::Azblob) .with_context("endpoint", &endpoint) @@ -422,11 +414,11 @@ impl Accessor for AzblobBackend { type Pager = AzblobPager; type BlockingPager = (); - fn metadata(&self) -> AccessorMetadata { + fn info(&self) -> AccessorInfo { use AccessorCapability::*; use AccessorHint::*; - let mut am = AccessorMetadata::default(); + let mut am = AccessorInfo::default(); am.set_scheme(Scheme::Azblob) .set_root(&self.root) .set_name(&self.container) diff --git a/src/services/azblob/error.rs b/src/services/azblob/error.rs index af7d0eb6382..54a40147e3d 100644 --- a/src/services/azblob/error.rs +++ b/src/services/azblob/error.rs @@ -63,8 +63,8 @@ pub async fn parse_error(resp: Response) -> Result { let bs = body.bytes().await?; let (kind, retryable) = match parts.status { - 
StatusCode::NOT_FOUND => (ErrorKind::ObjectNotFound, false), - StatusCode::FORBIDDEN => (ErrorKind::ObjectPermissionDenied, false), + StatusCode::NOT_FOUND => (ErrorKind::NotFound, false), + StatusCode::FORBIDDEN => (ErrorKind::PermissionDenied, false), StatusCode::INTERNAL_SERVER_ERROR | StatusCode::BAD_GATEWAY | StatusCode::SERVICE_UNAVAILABLE diff --git a/src/services/azdfs/backend.rs b/src/services/azdfs/backend.rs index 9db75d7fa04..e6033bb2a92 100644 --- a/src/services/azdfs/backend.rs +++ b/src/services/azdfs/backend.rs @@ -74,7 +74,6 @@ use crate::*; /// /// use anyhow::Result; /// use opendal::services::Azdfs; -/// use opendal::Object; /// use opendal::Operator; /// /// #[tokio::main] @@ -101,10 +100,7 @@ use crate::*; /// builder.account_key("account_key"); /// /// // `Accessor` provides the low level APIs, we will use `Operator` normally. -/// let op: Operator = Operator::create(builder)?.finish(); -/// -/// // Create an object handle to start operation on object. -/// let _: Object = op.object("test_file"); +/// let op: Operator = Operator::new(builder)?.finish(); /// /// Ok(()) /// } @@ -221,21 +217,17 @@ impl Builder for AzdfsBuilder { // Handle endpoint, region and container name. 
let filesystem = match self.filesystem.is_empty() { false => Ok(&self.filesystem), - true => Err( - Error::new(ErrorKind::BackendConfigInvalid, "filesystem is empty") - .with_operation("Builder::build") - .with_context("service", Scheme::Azdfs), - ), + true => Err(Error::new(ErrorKind::ConfigInvalid, "filesystem is empty") + .with_operation("Builder::build") + .with_context("service", Scheme::Azdfs)), }?; debug!("backend use filesystem {}", &filesystem); let endpoint = match &self.endpoint { Some(endpoint) => Ok(endpoint.clone()), - None => Err( - Error::new(ErrorKind::BackendConfigInvalid, "endpoint is empty") - .with_operation("Builder::build") - .with_context("service", Scheme::Azdfs), - ), + None => Err(Error::new(ErrorKind::ConfigInvalid, "endpoint is empty") + .with_operation("Builder::build") + .with_context("service", Scheme::Azdfs)), }?; debug!("backend use endpoint {}", &filesystem); @@ -254,7 +246,7 @@ impl Builder for AzdfsBuilder { } let signer = signer_builder.build().map_err(|e| { - Error::new(ErrorKind::BackendConfigInvalid, "build AzureStorageSigner") + Error::new(ErrorKind::ConfigInvalid, "build AzureStorageSigner") .with_operation("Builder::build") .with_context("service", Scheme::Azdfs) .with_context("endpoint", &endpoint) @@ -307,8 +299,8 @@ impl Accessor for AzdfsBackend { type Pager = AzdfsPager; type BlockingPager = (); - fn metadata(&self) -> AccessorMetadata { - let mut am = AccessorMetadata::default(); + fn info(&self) -> AccessorInfo { + let mut am = AccessorInfo::default(); am.set_scheme(Scheme::Azdfs) .set_root(&self.root) .set_name(&self.filesystem) diff --git a/src/services/azdfs/error.rs b/src/services/azdfs/error.rs index a90a9a9c5ba..0b24199f97a 100644 --- a/src/services/azdfs/error.rs +++ b/src/services/azdfs/error.rs @@ -63,8 +63,8 @@ pub async fn parse_error(resp: Response) -> Result { let bs = body.bytes().await?; let (kind, retryable) = match parts.status { - StatusCode::NOT_FOUND => (ErrorKind::ObjectNotFound, false), - 
StatusCode::FORBIDDEN => (ErrorKind::ObjectPermissionDenied, false), + StatusCode::NOT_FOUND => (ErrorKind::NotFound, false), + StatusCode::FORBIDDEN => (ErrorKind::PermissionDenied, false), StatusCode::INTERNAL_SERVER_ERROR | StatusCode::BAD_GATEWAY | StatusCode::SERVICE_UNAVAILABLE diff --git a/src/services/dashmap/backend.rs b/src/services/dashmap/backend.rs index 8c4c76fed9e..1c224f174e8 100644 --- a/src/services/dashmap/backend.rs +++ b/src/services/dashmap/backend.rs @@ -109,9 +109,9 @@ mod tests { #[test] fn test_accessor_metadata_name() { let b1 = DashmapBuilder::default().build().unwrap(); - assert_eq!(b1.metadata().name(), b1.metadata().name()); + assert_eq!(b1.info().name(), b1.info().name()); let b2 = DashmapBuilder::default().build().unwrap(); - assert_ne!(b1.metadata().name(), b2.metadata().name()) + assert_ne!(b1.info().name(), b2.info().name()) } } diff --git a/src/services/fs/backend.rs b/src/services/fs/backend.rs index b21dd07d007..8a2a99adb1e 100644 --- a/src/services/fs/backend.rs +++ b/src/services/fs/backend.rs @@ -61,7 +61,6 @@ use crate::*; /// /// use anyhow::Result; /// use opendal::services::Fs; -/// use opendal::Object; /// use opendal::Operator; /// /// #[tokio::main] @@ -74,10 +73,7 @@ use crate::*; /// builder.root("/tmp"); /// /// // `Accessor` provides the low level APIs, we will use `Operator` normally. -/// let op: Operator = Operator::create(builder)?.finish(); -/// -/// // Create an object handle to start operation on object. 
-/// let _: Object = op.object("test_file"); +/// let op: Operator = Operator::new(builder)?.finish(); /// /// Ok(()) /// } @@ -145,7 +141,7 @@ impl Builder for FsBuilder { let root = match self.root.take() { Some(root) => Ok(root), None => Err(Error::new( - ErrorKind::BackendConfigInvalid, + ErrorKind::ConfigInvalid, "root is not specified", )), }?; @@ -295,8 +291,8 @@ impl Accessor for FsBackend { type Pager = Option>; type BlockingPager = Option>; - fn metadata(&self) -> AccessorMetadata { - let mut am = AccessorMetadata::default(); + fn info(&self) -> AccessorInfo { + let mut am = AccessorInfo::default(); am.set_scheme(Scheme::Fs) .set_root(&self.root.to_string_lossy()) .set_capabilities( @@ -372,13 +368,13 @@ impl Accessor for FsBackend { let meta = f.metadata().await.map_err(parse_io_error)?; if meta.is_dir() != path.ends_with('/') { return Err(Error::new( - ErrorKind::ObjectNotFound, + ErrorKind::NotFound, "file mode is not match with its path", )); } if meta.is_dir() { return Err(Error::new( - ErrorKind::ObjectIsADirectory, + ErrorKind::IsADirectory, "given path is a directory", )); } @@ -449,7 +445,7 @@ impl Accessor for FsBackend { if self.enable_path_check && meta.is_dir() != path.ends_with('/') { return Err(Error::new( - ErrorKind::ObjectNotFound, + ErrorKind::NotFound, "file mode is not match with its path", )); } @@ -561,13 +557,13 @@ impl Accessor for FsBackend { let meta = f.metadata().map_err(parse_io_error)?; if meta.is_dir() != path.ends_with('/') { return Err(Error::new( - ErrorKind::ObjectNotFound, + ErrorKind::NotFound, "file mode is not match with its path", )); } if meta.is_dir() { return Err(Error::new( - ErrorKind::ObjectIsADirectory, + ErrorKind::IsADirectory, "given path is a directory", )); } @@ -635,7 +631,7 @@ impl Accessor for FsBackend { if self.enable_path_check && meta.is_dir() != path.ends_with('/') { return Err(Error::new( - ErrorKind::ObjectNotFound, + ErrorKind::NotFound, "file mode is not match with its path", )); } diff 
--git a/src/services/fs/error.rs b/src/services/fs/error.rs index d64f609ee7a..bdaa4fa2fc0 100644 --- a/src/services/fs/error.rs +++ b/src/services/fs/error.rs @@ -22,8 +22,8 @@ pub fn parse_io_error(err: io::Error) -> Error { use io::ErrorKind::*; let (kind, retryable) = match err.kind() { - NotFound => (ErrorKind::ObjectNotFound, false), - PermissionDenied => (ErrorKind::ObjectPermissionDenied, false), + NotFound => (ErrorKind::NotFound, false), + PermissionDenied => (ErrorKind::PermissionDenied, false), Interrupted | UnexpectedEof | TimedOut | WouldBlock => (ErrorKind::Unexpected, true), _ => (ErrorKind::Unexpected, true), }; diff --git a/src/services/ftp/backend.rs b/src/services/ftp/backend.rs index 901dbb88039..9a34c9767fd 100644 --- a/src/services/ftp/backend.rs +++ b/src/services/ftp/backend.rs @@ -83,7 +83,7 @@ use crate::*; /// /// builder.endpoint("127.0.0.1"); /// -/// let op: Operator = Operator::create(builder)?.finish(); +/// let op: Operator = Operator::new(builder)?.finish(); /// let _obj: Object = op.object("test_file"); /// Ok(()) /// } @@ -158,22 +158,15 @@ impl Builder for FtpBuilder { fn build(&mut self) -> Result { debug!("ftp backend build started: {:?}", &self); let endpoint = match &self.endpoint { - None => { - return Err(Error::new( - ErrorKind::BackendConfigInvalid, - "endpoint is empty", - )) - } + None => return Err(Error::new(ErrorKind::ConfigInvalid, "endpoint is empty")), Some(v) => v, }; let endpoint_uri = match endpoint.parse::() { Err(e) => { - return Err( - Error::new(ErrorKind::BackendConfigInvalid, "endpoint is invalid") - .with_context("endpoint", endpoint) - .set_source(e), - ); + return Err(Error::new(ErrorKind::ConfigInvalid, "endpoint is invalid") + .with_context("endpoint", endpoint) + .set_source(e)); } Ok(uri) => uri, }; @@ -191,7 +184,7 @@ impl Builder for FtpBuilder { Some(s) => { return Err(Error::new( - ErrorKind::BackendConfigInvalid, + ErrorKind::ConfigInvalid, "endpoint is unsupported or invalid", ) 
.with_context("endpoint", s)); @@ -318,8 +311,8 @@ impl Accessor for FtpBackend { type Pager = FtpPager; type BlockingPager = (); - fn metadata(&self) -> AccessorMetadata { - let mut am = AccessorMetadata::default(); + fn info(&self) -> AccessorInfo { + let mut am = AccessorInfo::default(); am.set_scheme(Scheme::Ftp) .set_root(&self.root) .set_capabilities( @@ -511,7 +504,7 @@ impl FtpBackend { if files.is_empty() { Err(Error::new( - ErrorKind::ObjectNotFound, + ErrorKind::NotFound, "file is not found during list", )) } else { @@ -551,6 +544,6 @@ mod build_test { let b = builder.build(); assert!(b.is_err()); let e = b.unwrap_err(); - assert_eq!(e.kind(), ErrorKind::BackendConfigInvalid); + assert_eq!(e.kind(), ErrorKind::ConfigInvalid); } } diff --git a/src/services/ftp/err.rs b/src/services/ftp/err.rs index 6aeacca609c..6ac634f35c6 100644 --- a/src/services/ftp/err.rs +++ b/src/services/ftp/err.rs @@ -28,7 +28,7 @@ impl From for Error { (ErrorKind::Unexpected, true) } FtpError::UnexpectedResponse(ref resp) if resp.status == Status::FileUnavailable => { - (ErrorKind::ObjectNotFound, false) + (ErrorKind::NotFound, false) } // Allow retry bad response. 
FtpError::BadResponse => (ErrorKind::Unexpected, true), diff --git a/src/services/gcs/backend.rs b/src/services/gcs/backend.rs index a5bb4f5f383..be632f5aaa6 100644 --- a/src/services/gcs/backend.rs +++ b/src/services/gcs/backend.rs @@ -71,7 +71,6 @@ const DEFAULT_GCS_SCOPE: &str = "https://www.googleapis.com/auth/devstorage.read /// ```no_run /// use anyhow::Result; /// use opendal::services::Gcs; -/// use opendal::Object; /// use opendal::Operator; /// /// #[tokio::main] @@ -87,8 +86,7 @@ const DEFAULT_GCS_SCOPE: &str = "https://www.googleapis.com/auth/devstorage.read /// // set the credentials for GCS OAUTH2 authentication /// builder.credential("authentication token"); /// -/// let op: Operator = Operator::create(builder)?.finish(); -/// let _: Object = op.object("test_file"); +/// let op: Operator = Operator::new(builder)?.finish(); /// Ok(()) /// } /// ``` @@ -251,11 +249,9 @@ impl Builder for GcsBuilder { // Handle endpoint and bucket name let bucket = match self.bucket.is_empty() { false => Ok(&self.bucket), - true => Err( - Error::new(ErrorKind::BackendConfigInvalid, "bucket is empty") - .with_operation("Builder::build") - .with_context("service", Scheme::Gcs), - ), + true => Err(Error::new(ErrorKind::ConfigInvalid, "bucket is empty") + .with_operation("Builder::build") + .with_context("service", Scheme::Gcs)), }?; // TODO: server side encryption @@ -296,7 +292,7 @@ impl Builder for GcsBuilder { signer_builder.credential_path(cred); } let signer = signer_builder.build().map_err(|e| { - Error::new(ErrorKind::BackendConfigInvalid, "build GoogleSigner") + Error::new(ErrorKind::ConfigInvalid, "build GoogleSigner") .with_operation("Builder::build") .with_context("service", Scheme::Gcs) .with_context("bucket", bucket) @@ -351,11 +347,11 @@ impl Accessor for GcsBackend { type Pager = GcsPager; type BlockingPager = (); - fn metadata(&self) -> AccessorMetadata { + fn info(&self) -> AccessorInfo { use AccessorCapability::*; use AccessorHint::*; - let mut am = 
AccessorMetadata::default(); + let mut am = AccessorInfo::default(); am.set_scheme(Scheme::Gcs) .set_root(&self.root) .set_name(&self.bucket) diff --git a/src/services/gcs/error.rs b/src/services/gcs/error.rs index 709e72fd6dd..6a7df6af378 100644 --- a/src/services/gcs/error.rs +++ b/src/services/gcs/error.rs @@ -52,8 +52,8 @@ pub async fn parse_error(resp: Response) -> Result { let bs = body.bytes().await?; let (kind, retryable) = match parts.status { - StatusCode::NOT_FOUND => (ErrorKind::ObjectNotFound, false), - StatusCode::FORBIDDEN => (ErrorKind::ObjectPermissionDenied, false), + StatusCode::NOT_FOUND => (ErrorKind::NotFound, false), + StatusCode::FORBIDDEN => (ErrorKind::PermissionDenied, false), StatusCode::INTERNAL_SERVER_ERROR | StatusCode::BAD_GATEWAY | StatusCode::SERVICE_UNAVAILABLE diff --git a/src/services/ghac/backend.rs b/src/services/ghac/backend.rs index 46f147fde82..ca47f16edff 100644 --- a/src/services/ghac/backend.rs +++ b/src/services/ghac/backend.rs @@ -124,7 +124,6 @@ const GITHUB_API_VERSION: &str = "2022-11-28"; /// /// use anyhow::Result; /// use opendal::services::Ghac; -/// use opendal::Object; /// use opendal::Operator; /// /// #[tokio::main] @@ -136,10 +135,7 @@ const GITHUB_API_VERSION: &str = "2022-11-28"; /// // NOTE: the root must be absolute path. /// builder.root("/path/to/dir"); /// -/// let op: Operator = Operator::create(builder)?.finish(); -/// -/// // Create an object handle to start operation on object. 
-/// let _: Object = op.object("test_file"); +/// let op: Operator = Operator::new(builder)?.finish(); /// /// Ok(()) /// } @@ -238,7 +234,7 @@ impl Builder for GhacBuilder { cache_url: env::var(ACTIONS_CACHE_URL).map_err(|err| { Error::new( - ErrorKind::BackendConfigInvalid, + ErrorKind::ConfigInvalid, "ACTIONS_CACHE_URL not found, maybe not in github action environment?", ) .with_operation("Builder::build") @@ -246,7 +242,7 @@ impl Builder for GhacBuilder { })?, catch_token: env::var(ACTIONS_RUNTIME_TOKEN).map_err(|err| { Error::new( - ErrorKind::BackendConfigInvalid, + ErrorKind::ConfigInvalid, "ACTIONS_RUNTIME_TOKEN not found, maybe not in github action environment?", ) .with_operation("Builder::build") @@ -296,8 +292,8 @@ impl Accessor for GhacBackend { type Pager = (); type BlockingPager = (); - fn metadata(&self) -> AccessorMetadata { - let mut am = AccessorMetadata::default(); + fn info(&self) -> AccessorInfo { + let mut am = AccessorInfo::default(); am.set_scheme(Scheme::Ghac) .set_root(&self.root) .set_name(&self.version) @@ -460,7 +456,7 @@ impl Accessor for GhacBackend { async fn delete(&self, path: &str, _: OpDelete) -> Result { if self.api_token.is_empty() { return Err(Error::new( - ErrorKind::ObjectPermissionDenied, + ErrorKind::PermissionDenied, "github token is not configured, delete is permission denied", )); } diff --git a/src/services/ghac/error.rs b/src/services/ghac/error.rs index 90d4f7d7c8a..275e3fd79aa 100644 --- a/src/services/ghac/error.rs +++ b/src/services/ghac/error.rs @@ -25,10 +25,10 @@ pub async fn parse_error(resp: Response) -> Result { let (parts, body) = resp.into_parts(); let (kind, retryable) = match parts.status { - StatusCode::NOT_FOUND | StatusCode::NO_CONTENT => (ErrorKind::ObjectNotFound, false), - StatusCode::CONFLICT => (ErrorKind::ObjectAlreadyExists, false), - StatusCode::FORBIDDEN => (ErrorKind::ObjectPermissionDenied, false), - StatusCode::TOO_MANY_REQUESTS => (ErrorKind::ObjectRateLimited, true), + 
StatusCode::NOT_FOUND | StatusCode::NO_CONTENT => (ErrorKind::NotFound, false), + StatusCode::CONFLICT => (ErrorKind::AlreadyExists, false), + StatusCode::FORBIDDEN => (ErrorKind::PermissionDenied, false), + StatusCode::TOO_MANY_REQUESTS => (ErrorKind::RateLimited, true), StatusCode::INTERNAL_SERVER_ERROR | StatusCode::BAD_GATEWAY | StatusCode::SERVICE_UNAVAILABLE diff --git a/src/services/hdfs/backend.rs b/src/services/hdfs/backend.rs index 9182cd08687..a5e3989bdb4 100644 --- a/src/services/hdfs/backend.rs +++ b/src/services/hdfs/backend.rs @@ -116,7 +116,7 @@ use crate::*; /// builder.root("/tmp"); /// /// // `Accessor` provides the low level APIs, we will use `Operator` normally. -/// let op: Operator = Operator::create(builder)?.finish(); +/// let op: Operator = Operator::new(builder)?.finish(); /// /// // Create an object handle to start operation on object. /// let _: Object = op.object("test_file"); @@ -179,10 +179,8 @@ impl Builder for HdfsBuilder { let name_node = match &self.name_node { Some(v) => v, None => { - return Err( - Error::new(ErrorKind::BackendConfigInvalid, "name node is empty") - .with_context("service", Scheme::Hdfs), - ) + return Err(Error::new(ErrorKind::ConfigInvalid, "name node is empty") + .with_context("service", Scheme::Hdfs)) } }; @@ -228,8 +226,8 @@ impl Accessor for HdfsBackend { type Pager = Option; type BlockingPager = Option; - fn metadata(&self) -> AccessorMetadata { - let mut am = AccessorMetadata::default(); + fn info(&self) -> AccessorInfo { + let mut am = AccessorInfo::default(); am.set_scheme(Scheme::Hdfs) .set_root(&self.root) .set_capabilities( diff --git a/src/services/hdfs/error.rs b/src/services/hdfs/error.rs index ff2d1e78dde..bb5cfc6db4a 100644 --- a/src/services/hdfs/error.rs +++ b/src/services/hdfs/error.rs @@ -26,8 +26,8 @@ pub fn parse_io_error(err: io::Error) -> Error { use io::ErrorKind::*; let (kind, retryable) = match err.kind() { - NotFound => (ErrorKind::ObjectNotFound, false), - PermissionDenied => 
(ErrorKind::ObjectPermissionDenied, false), + NotFound => (ErrorKind::NotFound, false), + PermissionDenied => (ErrorKind::PermissionDenied, false), Interrupted | UnexpectedEof | TimedOut | WouldBlock => (ErrorKind::Unexpected, true), _ => (ErrorKind::Unexpected, true), }; diff --git a/src/services/http/backend.rs b/src/services/http/backend.rs index 20e18dfc770..b2ba4a30cd0 100644 --- a/src/services/http/backend.rs +++ b/src/services/http/backend.rs @@ -60,7 +60,6 @@ use crate::*; /// ```no_run /// use anyhow::Result; /// use opendal::services::Http; -/// use opendal::Object; /// use opendal::Operator; /// /// #[tokio::main] @@ -70,8 +69,7 @@ use crate::*; /// /// builder.endpoint("127.0.0.1"); /// -/// let op: Operator = Operator::create(builder)?.finish(); -/// let _obj: Object = op.object("test_file"); +/// let op: Operator = Operator::new(builder)?.finish(); /// Ok(()) /// } /// ``` @@ -184,10 +182,8 @@ impl Builder for HttpBuilder { let endpoint = match &self.endpoint { Some(v) => v, None => { - return Err( - Error::new(ErrorKind::BackendConfigInvalid, "endpoint is empty") - .with_context("service", Scheme::Http), - ) + return Err(Error::new(ErrorKind::ConfigInvalid, "endpoint is empty") + .with_context("service", Scheme::Http)) } }; @@ -253,8 +249,8 @@ impl Accessor for HttpBackend { type Pager = (); type BlockingPager = (); - fn metadata(&self) -> AccessorMetadata { - let mut ma = AccessorMetadata::default(); + fn info(&self) -> AccessorInfo { + let mut ma = AccessorInfo::default(); ma.set_scheme(Scheme::Http) .set_root(&self.root) .set_capabilities(AccessorCapability::Read) @@ -373,9 +369,9 @@ mod tests { let mut builder = HttpBuilder::default(); builder.endpoint(&mock_server.uri()); builder.root("/"); - let op = Operator::create(builder)?.finish(); + let op = Operator::new(builder)?.finish(); - let bs = op.object("hello").read().await?; + let bs = op.read("hello").await?; assert_eq!(bs, b"Hello, World!"); Ok(()) @@ -403,9 +399,9 @@ mod tests { 
builder.endpoint(&mock_server.uri()); builder.root("/"); builder.username(username).password(password); - let op = Operator::create(builder)?.finish(); + let op = Operator::new(builder)?.finish(); - let bs = op.object("hello").read().await?; + let bs = op.read("hello").await?; assert_eq!(bs, b"Hello, World!"); Ok(()) @@ -433,9 +429,9 @@ mod tests { builder.endpoint(&mock_server.uri()); builder.root("/"); builder.token(token); - let op = Operator::create(builder)?.finish(); + let op = Operator::new(builder)?.finish(); - let bs = op.object("hello").read().await?; + let bs = op.read("hello").await?; assert_eq!(bs, b"Hello, World!"); Ok(()) @@ -455,10 +451,8 @@ mod tests { let mut builder = HttpBuilder::default(); builder.endpoint(&mock_server.uri()); builder.root("/"); - let op = Operator::create(builder)?.finish(); - - let o = op.object("hello"); - let bs = o.stat().await?; + let op = Operator::new(builder)?.finish(); + let bs = op.stat("hello").await?; assert_eq!(bs.mode(), EntryMode::FILE); assert_eq!(bs.content_length(), 128); diff --git a/src/services/http/error.rs b/src/services/http/error.rs index 42805ed3408..672b504d11d 100644 --- a/src/services/http/error.rs +++ b/src/services/http/error.rs @@ -26,8 +26,8 @@ pub async fn parse_error(resp: Response) -> Result { let bs = body.bytes().await?; let (kind, retryable) = match parts.status { - StatusCode::NOT_FOUND => (ErrorKind::ObjectNotFound, false), - StatusCode::FORBIDDEN => (ErrorKind::ObjectPermissionDenied, false), + StatusCode::NOT_FOUND => (ErrorKind::NotFound, false), + StatusCode::FORBIDDEN => (ErrorKind::PermissionDenied, false), StatusCode::INTERNAL_SERVER_ERROR | StatusCode::BAD_GATEWAY | StatusCode::SERVICE_UNAVAILABLE diff --git a/src/services/ipfs/backend.rs b/src/services/ipfs/backend.rs index c00b712024c..d1ec2a354d8 100644 --- a/src/services/ipfs/backend.rs +++ b/src/services/ipfs/backend.rs @@ -70,7 +70,7 @@ use crate::*; /// // set the root for OpenDAL /// 
builder.root("/ipfs/QmPpCt1aYGb9JWJRmXRUnmJtVgeFFTJGzWFYEEX7bo9zGJ"); /// -/// let op: Operator = Operator::create(builder)?.finish(); +/// let op: Operator = Operator::new(builder)?.finish(); /// /// // Create an object handle to start operation on object. /// let _: Object = op.object("test_file"); @@ -151,7 +151,7 @@ impl Builder for IpfsBuilder { let root = normalize_root(&self.root.take().unwrap_or_default()); if !root.starts_with("/ipfs/") && !root.starts_with("/ipns/") { return Err(Error::new( - ErrorKind::BackendConfigInvalid, + ErrorKind::ConfigInvalid, "root must start with /ipfs/ or /ipns/", ) .with_context("service", Scheme::Ipfs) @@ -161,11 +161,9 @@ impl Builder for IpfsBuilder { let endpoint = match &self.endpoint { Some(endpoint) => Ok(endpoint.clone()), - None => Err( - Error::new(ErrorKind::BackendConfigInvalid, "endpoint is empty") - .with_context("service", Scheme::Ipfs) - .with_context("root", &root), - ), + None => Err(Error::new(ErrorKind::ConfigInvalid, "endpoint is empty") + .with_context("service", Scheme::Ipfs) + .with_context("root", &root)), }?; debug!("backend use endpoint {}", &endpoint); @@ -214,8 +212,8 @@ impl Accessor for IpfsBackend { type Pager = DirStream; type BlockingPager = (); - fn metadata(&self) -> AccessorMetadata { - let mut ma = AccessorMetadata::default(); + fn info(&self) -> AccessorInfo { + let mut ma = AccessorInfo::default(); ma.set_scheme(Scheme::Ipfs) .set_root(&self.root) .set_capabilities(AccessorCapability::Read | AccessorCapability::List) diff --git a/src/services/ipfs/error.rs b/src/services/ipfs/error.rs index 52d80f49543..bdd8914d5c0 100644 --- a/src/services/ipfs/error.rs +++ b/src/services/ipfs/error.rs @@ -26,8 +26,8 @@ pub async fn parse_error(resp: Response) -> Result { let bs = body.bytes().await?; let (kind, retryable) = match parts.status { - StatusCode::NOT_FOUND => (ErrorKind::ObjectNotFound, false), - StatusCode::FORBIDDEN => (ErrorKind::ObjectPermissionDenied, false), + StatusCode::NOT_FOUND 
=> (ErrorKind::NotFound, false), + StatusCode::FORBIDDEN => (ErrorKind::PermissionDenied, false), StatusCode::INTERNAL_SERVER_ERROR | StatusCode::BAD_GATEWAY | StatusCode::SERVICE_UNAVAILABLE diff --git a/src/services/ipmfs/backend.rs b/src/services/ipmfs/backend.rs index 04a0a196413..11d8624ee8b 100644 --- a/src/services/ipmfs/backend.rs +++ b/src/services/ipmfs/backend.rs @@ -66,8 +66,8 @@ impl Accessor for IpmfsBackend { type Pager = IpmfsPager; type BlockingPager = (); - fn metadata(&self) -> AccessorMetadata { - let mut am = AccessorMetadata::default(); + fn info(&self) -> AccessorInfo { + let mut am = AccessorInfo::default(); am.set_scheme(Scheme::Ipmfs) .set_root(&self.root) .set_capabilities( diff --git a/src/services/ipmfs/builder.rs b/src/services/ipmfs/builder.rs index d8cc64caf90..5c22d03e9c1 100644 --- a/src/services/ipmfs/builder.rs +++ b/src/services/ipmfs/builder.rs @@ -47,7 +47,6 @@ use crate::*; /// ```no_run /// use anyhow::Result; /// use opendal::services::Ipmfs; -/// use opendal::Object; /// use opendal::Operator; /// /// #[tokio::main] @@ -58,10 +57,7 @@ use crate::*; /// // set the storage bucket for OpenDAL /// builder.endpoint("http://127.0.0.1:5001"); /// -/// let op: Operator = Operator::create(builder)?.finish(); -/// -/// // Create an object handle to start operation on object. 
-/// let _: Object = op.object("test_file"); +/// let op: Operator = Operator::new(builder)?.finish(); /// /// Ok(()) /// } diff --git a/src/services/ipmfs/error.rs b/src/services/ipmfs/error.rs index 9cea226d1a6..764660282d2 100644 --- a/src/services/ipmfs/error.rs +++ b/src/services/ipmfs/error.rs @@ -52,7 +52,7 @@ pub async fn parse_error(resp: Response) -> Result { StatusCode::INTERNAL_SERVER_ERROR => { if let Some(ie) = &ipfs_error { match ie.message.as_str() { - "file does not exist" => (ErrorKind::ObjectNotFound, false), + "file does not exist" => (ErrorKind::NotFound, false), _ => (ErrorKind::Unexpected, false), } } else { diff --git a/src/services/memcached/backend.rs b/src/services/memcached/backend.rs index 2a5afb30c6f..408c4e8d156 100644 --- a/src/services/memcached/backend.rs +++ b/src/services/memcached/backend.rs @@ -62,7 +62,7 @@ use crate::*; /// /// builder.endpoint("tcp://127.0.0.1:11211"); /// -/// let op: Operator = Operator::create(builder)?.finish(); +/// let op: Operator = Operator::new(builder)?.finish(); /// let _: Object = op.object("test_file"); /// Ok(()) /// } @@ -124,11 +124,11 @@ impl Builder for MemcachedBuilder { fn build(&mut self) -> Result { let endpoint = self.endpoint.clone().ok_or_else(|| { - Error::new(ErrorKind::BackendConfigInvalid, "endpoint is empty") + Error::new(ErrorKind::ConfigInvalid, "endpoint is empty") .with_context("service", Scheme::Memcached) })?; let uri = http::Uri::try_from(&endpoint).map_err(|err| { - Error::new(ErrorKind::BackendConfigInvalid, "endpoint is invalid") + Error::new(ErrorKind::ConfigInvalid, "endpoint is invalid") .with_context("service", Scheme::Memcached) .with_context("endpoint", &endpoint) .set_source(err) @@ -141,7 +141,7 @@ impl Builder for MemcachedBuilder { // We only support tcp by now. 
if scheme != "tcp" { return Err(Error::new( - ErrorKind::BackendConfigInvalid, + ErrorKind::ConfigInvalid, "endpoint is using invalid scheme", ) .with_context("service", Scheme::Memcached) @@ -154,22 +154,20 @@ impl Builder for MemcachedBuilder { let host = if let Some(host) = uri.host() { host.to_string() } else { - return Err(Error::new( - ErrorKind::BackendConfigInvalid, - "endpoint doesn't have host", - ) - .with_context("service", Scheme::Memcached) - .with_context("endpoint", &endpoint)); + return Err( + Error::new(ErrorKind::ConfigInvalid, "endpoint doesn't have host") + .with_context("service", Scheme::Memcached) + .with_context("endpoint", &endpoint), + ); }; let port = if let Some(port) = uri.port_u16() { port } else { - return Err(Error::new( - ErrorKind::BackendConfigInvalid, - "endpoint doesn't have port", - ) - .with_context("service", Scheme::Memcached) - .with_context("endpoint", &endpoint)); + return Err( + Error::new(ErrorKind::ConfigInvalid, "endpoint doesn't have port") + .with_context("service", Scheme::Memcached) + .with_context("endpoint", &endpoint), + ); }; let endpoint = format!("{host}:{port}",); @@ -208,11 +206,8 @@ impl Adapter { let mgr = MemcacheConnectionManager::new(&self.endpoint); bb8::Pool::builder().build(mgr).await.map_err(|err| { - Error::new( - ErrorKind::BackendConfigInvalid, - "connect to memecached failed", - ) - .set_source(err) + Error::new(ErrorKind::ConfigInvalid, "connect to memecached failed") + .set_source(err) }) }) .await?; @@ -278,9 +273,9 @@ fn parse_io_error(err: std::io::Error) -> Error { use std::io::ErrorKind::*; let (kind, retryable) = match err.kind() { - NotFound => (ErrorKind::ObjectNotFound, false), - AlreadyExists => (ErrorKind::ObjectNotFound, false), - PermissionDenied => (ErrorKind::ObjectPermissionDenied, false), + NotFound => (ErrorKind::NotFound, false), + AlreadyExists => (ErrorKind::NotFound, false), + PermissionDenied => (ErrorKind::PermissionDenied, false), Interrupted | UnexpectedEof | 
TimedOut | WouldBlock => (ErrorKind::Unexpected, true), _ => (ErrorKind::Unexpected, true), }; diff --git a/src/services/memory/backend.rs b/src/services/memory/backend.rs index 9857008ca59..02f9de1a4a4 100644 --- a/src/services/memory/backend.rs +++ b/src/services/memory/backend.rs @@ -131,9 +131,9 @@ mod tests { #[test] fn test_accessor_metadata_name() { let b1 = MemoryBuilder::default().build().unwrap(); - assert_eq!(b1.metadata().name(), b1.metadata().name()); + assert_eq!(b1.info().name(), b1.info().name()); let b2 = MemoryBuilder::default().build().unwrap(); - assert_ne!(b1.metadata().name(), b2.metadata().name()) + assert_ne!(b1.info().name(), b2.info().name()) } } diff --git a/src/services/obs/backend.rs b/src/services/obs/backend.rs index aba0e39de0b..9cd3777664b 100644 --- a/src/services/obs/backend.rs +++ b/src/services/obs/backend.rs @@ -63,7 +63,6 @@ use crate::*; /// ```no_run /// use anyhow::Result; /// use opendal::services::Obs; -/// use opendal::Object; /// use opendal::Operator; /// /// #[tokio::main] @@ -81,10 +80,7 @@ use crate::*; /// builder.access_key_id("access_key_id"); /// builder.secret_access_key("secret_access_key"); /// -/// let op: Operator = Operator::create(builder)?.finish(); -/// -/// // Create an object handle to start operation on object. 
-/// let _: Object = op.object("test_file"); +/// let op: Operator = Operator::new(builder)?.finish(); /// /// Ok(()) /// } @@ -208,23 +204,19 @@ impl Builder for ObsBuilder { let bucket = match &self.bucket { Some(bucket) => Ok(bucket.to_string()), - None => Err( - Error::new(ErrorKind::BackendConfigInvalid, "bucket is empty") - .with_context("service", Scheme::Obs), - ), + None => Err(Error::new(ErrorKind::ConfigInvalid, "bucket is empty") + .with_context("service", Scheme::Obs)), }?; debug!("backend use bucket {}", &bucket); let uri = match &self.endpoint { Some(endpoint) => endpoint.parse::().map_err(|err| { - Error::new(ErrorKind::BackendConfigInvalid, "endpoint is invalid") + Error::new(ErrorKind::ConfigInvalid, "endpoint is invalid") .with_context("service", Scheme::Obs) .set_source(err) }), - None => Err( - Error::new(ErrorKind::BackendConfigInvalid, "endpoint is empty") - .with_context("service", Scheme::Obs), - ), + None => Err(Error::new(ErrorKind::ConfigInvalid, "endpoint is empty") + .with_context("service", Scheme::Obs)), }?; let scheme = match uri.scheme_str() { @@ -309,11 +301,11 @@ impl Accessor for ObsBackend { type Pager = ObsPager; type BlockingPager = (); - fn metadata(&self) -> AccessorMetadata { + fn info(&self) -> AccessorInfo { use AccessorCapability::*; use AccessorHint::*; - let mut am = AccessorMetadata::default(); + let mut am = AccessorInfo::default(); am.set_scheme(Scheme::Obs) .set_root(&self.root) .set_name(&self.bucket) diff --git a/src/services/obs/error.rs b/src/services/obs/error.rs index 699e67bff4c..ad80105b361 100644 --- a/src/services/obs/error.rs +++ b/src/services/obs/error.rs @@ -40,8 +40,8 @@ pub async fn parse_error(resp: Response) -> Result { let bs = body.bytes().await?; let (kind, retryable) = match parts.status { - StatusCode::NOT_FOUND => (ErrorKind::ObjectNotFound, false), - StatusCode::FORBIDDEN => (ErrorKind::ObjectPermissionDenied, false), + StatusCode::NOT_FOUND => (ErrorKind::NotFound, false), + 
StatusCode::FORBIDDEN => (ErrorKind::PermissionDenied, false), StatusCode::INTERNAL_SERVER_ERROR | StatusCode::BAD_GATEWAY | StatusCode::SERVICE_UNAVAILABLE diff --git a/src/services/oss/backend.rs b/src/services/oss/backend.rs index aec40d9f0b7..228bd4d1f9f 100644 --- a/src/services/oss/backend.rs +++ b/src/services/oss/backend.rs @@ -78,7 +78,6 @@ use crate::*; /// /// use anyhow::Result; /// use opendal::services::Oss; -/// use opendal::Object; /// use opendal::Operator; /// /// #[tokio::main] @@ -105,10 +104,7 @@ use crate::*; /// builder.access_key_id("access_key_id"); /// builder.access_key_secret("access_key_secret"); /// -/// let op: Operator = Operator::create(builder)?.finish(); -/// -/// // Create an object handle to start operation on object. -/// let _: Object = op.object("test_file"); +/// let op: Operator = Operator::new(builder)?.finish(); /// /// Ok(()) /// } @@ -245,13 +241,13 @@ impl OssBuilder { let (endpoint, host) = match endpoint.clone() { Some(ep) => { let uri = ep.parse::().map_err(|err| { - Error::new(ErrorKind::BackendConfigInvalid, "endpoint is invalid") + Error::new(ErrorKind::ConfigInvalid, "endpoint is invalid") .with_context("service", Scheme::Oss) .with_context("endpoint", &ep) .set_source(err) })?; let host = uri.host().ok_or_else(|| { - Error::new(ErrorKind::BackendConfigInvalid, "endpoint host is empty") + Error::new(ErrorKind::ConfigInvalid, "endpoint host is empty") .with_context("service", Scheme::Oss) .with_context("endpoint", &ep) })?; @@ -261,7 +257,7 @@ impl OssBuilder { "http" | "https" => format!("{scheme_str}://{full_host}"), _ => { return Err(Error::new( - ErrorKind::BackendConfigInvalid, + ErrorKind::ConfigInvalid, "endpoint protocol is invalid", ) .with_context("service", Scheme::Oss)); @@ -272,10 +268,8 @@ impl OssBuilder { (endpoint, full_host) } None => { - return Err( - Error::new(ErrorKind::BackendConfigInvalid, "endpoint is empty") - .with_context("service", Scheme::Oss), - ); + return 
Err(Error::new(ErrorKind::ConfigInvalid, "endpoint is empty") + .with_context("service", Scheme::Oss)); } }; Ok((endpoint, host)) @@ -313,10 +307,8 @@ impl Builder for OssBuilder { // Handle endpoint, region and bucket name. let bucket = match self.bucket.is_empty() { false => Ok(&self.bucket), - true => Err( - Error::new(ErrorKind::BackendConfigInvalid, "bucket is empty") - .with_context("service", Scheme::Oss), - ), + true => Err(Error::new(ErrorKind::ConfigInvalid, "bucket is empty") + .with_context("service", Scheme::Oss)), }?; let client = if let Some(client) = self.http_client.take() { @@ -354,7 +346,7 @@ impl Builder for OssBuilder { } let signer = signer_builder.build().map_err(|e| { - Error::new(ErrorKind::BackendConfigInvalid, "build AliyunOssSigner") + Error::new(ErrorKind::ConfigInvalid, "build AliyunOssSigner") .with_context("service", Scheme::Oss) .with_context("endpoint", &endpoint) .with_context("bucket", bucket) @@ -411,11 +403,11 @@ impl Accessor for OssBackend { type Pager = OssPager; type BlockingPager = (); - fn metadata(&self) -> AccessorMetadata { + fn info(&self) -> AccessorInfo { use AccessorCapability::*; use AccessorHint::*; - let mut am = AccessorMetadata::default(); + let mut am = AccessorInfo::default(); am.set_scheme(Scheme::Oss) .set_root(&self.root) .set_name(&self.bucket) diff --git a/src/services/oss/error.rs b/src/services/oss/error.rs index fa43efb009e..1f98cb297d8 100644 --- a/src/services/oss/error.rs +++ b/src/services/oss/error.rs @@ -39,8 +39,8 @@ pub async fn parse_error(resp: Response) -> Result { let bs = body.bytes().await?; let (kind, retryable) = match parts.status { - StatusCode::NOT_FOUND => (ErrorKind::ObjectNotFound, false), - StatusCode::FORBIDDEN => (ErrorKind::ObjectPermissionDenied, false), + StatusCode::NOT_FOUND => (ErrorKind::NotFound, false), + StatusCode::FORBIDDEN => (ErrorKind::PermissionDenied, false), StatusCode::INTERNAL_SERVER_ERROR | StatusCode::BAD_GATEWAY | StatusCode::SERVICE_UNAVAILABLE diff 
--git a/src/services/redis/backend.rs b/src/services/redis/backend.rs index 975a1921167..ce9381cb395 100644 --- a/src/services/redis/backend.rs +++ b/src/services/redis/backend.rs @@ -74,7 +74,7 @@ const DEFAULT_REDIS_PORT: u16 = 6379; /// let mut builder = Redis::default(); /// /// // this will build a Operator accessing Redis which runs on tcp://localhost:6379 -/// let op: Operator = Operator::create(builder)?.finish(); +/// let op: Operator = Operator::new(builder)?.finish(); /// let _: Object = op.object("test_file"); /// Ok(()) /// } @@ -208,7 +208,7 @@ impl Builder for RedisBuilder { .unwrap_or_else(|| DEFAULT_REDIS_ENDPOINT.to_string()); let ep_url = endpoint.parse::().map_err(|e| { - Error::new(ErrorKind::BackendConfigInvalid, "endpoint is invalid") + Error::new(ErrorKind::ConfigInvalid, "endpoint is invalid") .with_context("service", Scheme::Redis) .with_context("endpoint", endpoint) .set_source(e) @@ -229,12 +229,11 @@ impl Builder for RedisBuilder { ConnectionAddr::Unix(path) } Some(s) => { - return Err(Error::new( - ErrorKind::BackendConfigInvalid, - "invalid or unsupported scheme", + return Err( + Error::new(ErrorKind::ConfigInvalid, "invalid or unsupported scheme") + .with_context("service", Scheme::Redis) + .with_context("scheme", s), ) - .with_context("service", Scheme::Redis) - .with_context("scheme", s)) } }; @@ -250,14 +249,11 @@ impl Builder for RedisBuilder { }; let client = Client::open(con_info).map_err(|e| { - Error::new( - ErrorKind::BackendConfigInvalid, - "invalid or unsupported scheme", - ) - .with_context("service", Scheme::Redis) - .with_context("endpoint", self.endpoint.as_ref().unwrap()) - .with_context("db", self.db.to_string()) - .set_source(e) + Error::new(ErrorKind::ConfigInvalid, "invalid or unsupported scheme") + .with_context("service", Scheme::Redis) + .with_context("endpoint", self.endpoint.as_ref().unwrap()) + .with_context("db", self.db.to_string()) + .set_source(e) })?; let root = normalize_root( diff --git 
a/src/services/rocksdb/backend.rs b/src/services/rocksdb/backend.rs index 856c20097db..7f41aeee30f 100644 --- a/src/services/rocksdb/backend.rs +++ b/src/services/rocksdb/backend.rs @@ -66,7 +66,7 @@ use crate::*; /// let mut builder = Rocksdb::default(); /// builder.datadir("/tmp/opendal/rocksdb"); /// -/// let op: Operator = Operator::create(builder)?.finish(); +/// let op: Operator = Operator::new(builder)?.finish(); /// let _: Object = op.object("test_file"); /// Ok(()) /// } @@ -113,20 +113,14 @@ impl Builder for RocksdbBuilder { fn build(&mut self) -> Result { let path = self.datadir.take().ok_or_else(|| { - Error::new( - ErrorKind::BackendConfigInvalid, - "datadir is required but not set", - ) - .with_context("service", Scheme::Rocksdb) + Error::new(ErrorKind::ConfigInvalid, "datadir is required but not set") + .with_context("service", Scheme::Rocksdb) })?; let db = TransactionDB::open_default(&path).map_err(|e| { - Error::new( - ErrorKind::BackendConfigInvalid, - "open default transaction db", - ) - .with_context("service", Scheme::Rocksdb) - .with_context("datadir", path) - .set_source(e) + Error::new(ErrorKind::ConfigInvalid, "open default transaction db") + .with_context("service", Scheme::Rocksdb) + .with_context("datadir", path) + .set_source(e) })?; Ok(RocksdbBackend::new(Adapter { db: Arc::new(db) })) diff --git a/src/services/s3/backend.rs b/src/services/s3/backend.rs index 52ccb75cc0f..aff7ae67d5f 100644 --- a/src/services/s3/backend.rs +++ b/src/services/s3/backend.rs @@ -153,7 +153,6 @@ mod constants { /// /// use anyhow::Result; /// use opendal::services::S3; -/// use opendal::Object; /// use opendal::Operator; /// /// #[tokio::main] @@ -184,10 +183,7 @@ mod constants { /// builder.access_key_id("access_key_id"); /// builder.secret_access_key("secret_access_key"); /// -/// let op: Operator = Operator::create(builder)?.finish(); -/// -/// // Create an object handle to start operation on object. 
-/// let _: Object = op.object("test_file"); +/// let op: Operator = Operator::new(builder)?.finish(); /// /// Ok(()) /// } @@ -210,7 +206,7 @@ mod constants { /// // Enable SSE-C /// builder.server_side_encryption_with_customer_key("AES256", "customer_key".as_bytes()); /// -/// let op = Operator::create(builder)?.finish(); +/// let op = Operator::new(builder)?.finish(); /// info!("operator: {:?}", op); /// /// // Writing your testing code here. @@ -236,7 +232,7 @@ mod constants { /// // Enable SSE-KMS with aws managed kms key /// builder.server_side_encryption_with_aws_managed_kms_key(); /// -/// let op = Operator::create(builder)?.finish(); +/// let op = Operator::new(builder)?.finish(); /// info!("operator: {:?}", op); /// /// // Writing your testing code here. @@ -262,7 +258,7 @@ mod constants { /// // Enable SSE-KMS with customer managed kms key /// builder.server_side_encryption_with_customer_managed_kms_key("aws_kms_key_id"); /// -/// let op = Operator::create(builder)?.finish(); +/// let op = Operator::new(builder)?.finish(); /// info!("operator: {:?}", op); /// /// // Writing your testing code here. @@ -288,7 +284,7 @@ mod constants { /// // Enable SSE-S3 /// builder.server_side_encryption_with_s3_key(); /// -/// let op = Operator::create(builder)?.finish(); +/// let op = Operator::new(builder)?.finish(); /// info!("operator: {:?}", op); /// /// // Writing your testing code here. @@ -850,10 +846,7 @@ impl Builder for S3Builder { // Handle bucket name. 
let bucket = match self.bucket.is_empty() { false => Ok(&self.bucket), - true => Err(Error::new( - ErrorKind::BackendConfigInvalid, - "bucket is empty", - )), + true => Err(Error::new(ErrorKind::ConfigInvalid, "bucket is empty")), }?; debug!("backend use bucket {}", &bucket); @@ -861,7 +854,7 @@ impl Builder for S3Builder { None => None, Some(v) => Some(v.parse().map_err(|e| { Error::new( - ErrorKind::BackendConfigInvalid, + ErrorKind::ConfigInvalid, "server_side_encryption value is invalid", ) .with_context("value", v) @@ -874,7 +867,7 @@ impl Builder for S3Builder { None => None, Some(v) => Some(v.parse().map_err(|e| { Error::new( - ErrorKind::BackendConfigInvalid, + ErrorKind::ConfigInvalid, "server_side_encryption_aws_kms_key_id value is invalid", ) .with_context("value", v) @@ -887,7 +880,7 @@ impl Builder for S3Builder { None => None, Some(v) => Some(v.parse().map_err(|e| { Error::new( - ErrorKind::BackendConfigInvalid, + ErrorKind::ConfigInvalid, "server_side_encryption_customer_algorithm value is invalid", ) .with_context("value", v) @@ -899,7 +892,7 @@ impl Builder for S3Builder { None => None, Some(v) => Some(v.parse().map_err(|e| { Error::new( - ErrorKind::BackendConfigInvalid, + ErrorKind::ConfigInvalid, "server_side_encryption_customer_key value is invalid", ) .with_context("value", v) @@ -911,7 +904,7 @@ impl Builder for S3Builder { None => None, Some(v) => Some(v.parse().map_err(|e| { Error::new( - ErrorKind::BackendConfigInvalid, + ErrorKind::ConfigInvalid, "server_side_encryption_customer_key_md5 value is invalid", ) .with_context("value", v) @@ -1113,11 +1106,11 @@ impl Accessor for S3Backend { type Pager = S3Pager; type BlockingPager = (); - fn metadata(&self) -> AccessorMetadata { + fn info(&self) -> AccessorInfo { use AccessorCapability::*; use AccessorHint::*; - let mut am = AccessorMetadata::default(); + let mut am = AccessorInfo::default(); am.set_scheme(Scheme::S3) .set_root(&self.root) .set_name(&self.bucket) diff --git 
a/src/services/s3/error.rs b/src/services/s3/error.rs index a469f6a1b42..a40c35d6e5a 100644 --- a/src/services/s3/error.rs +++ b/src/services/s3/error.rs @@ -39,8 +39,8 @@ pub async fn parse_error(resp: Response) -> Result { let bs = body.bytes().await?; let (kind, retryable) = match parts.status { - StatusCode::NOT_FOUND => (ErrorKind::ObjectNotFound, false), - StatusCode::FORBIDDEN => (ErrorKind::ObjectPermissionDenied, false), + StatusCode::NOT_FOUND => (ErrorKind::NotFound, false), + StatusCode::FORBIDDEN => (ErrorKind::PermissionDenied, false), StatusCode::INTERNAL_SERVER_ERROR | StatusCode::BAD_GATEWAY | StatusCode::SERVICE_UNAVAILABLE diff --git a/src/services/sled/backend.rs b/src/services/sled/backend.rs index 238698462af..3bfc3503b67 100644 --- a/src/services/sled/backend.rs +++ b/src/services/sled/backend.rs @@ -66,7 +66,7 @@ use crate::*; /// let mut builder = Sled::default(); /// builder.datadir("/tmp/opendal/sled"); /// -/// let op: Operator = Operator::create(builder)?.finish(); +/// let op: Operator = Operator::new(builder)?.finish(); /// let _: Object = op.object("test_file"); /// Ok(()) /// } @@ -99,15 +99,12 @@ impl Builder for SledBuilder { fn build(&mut self) -> Result { let datadir_path = self.datadir.take().ok_or_else(|| { - Error::new( - ErrorKind::BackendConfigInvalid, - "datadir is required but not set", - ) - .with_context("service", Scheme::Sled) + Error::new(ErrorKind::ConfigInvalid, "datadir is required but not set") + .with_context("service", Scheme::Sled) })?; let db = sled::open(&datadir_path).map_err(|e| { - Error::new(ErrorKind::BackendConfigInvalid, "open db") + Error::new(ErrorKind::ConfigInvalid, "open db") .with_context("service", Scheme::Sled) .with_context("datadir", datadir_path.clone()) .set_source(e) diff --git a/src/services/webdav/backend.rs b/src/services/webdav/backend.rs index 4debe479e70..befe133397f 100644 --- a/src/services/webdav/backend.rs +++ b/src/services/webdav/backend.rs @@ -64,7 +64,6 @@ use crate::*; /// 
```no_run /// use anyhow::Result; /// use opendal::services::Webdav; -/// use opendal::Object; /// use opendal::Operator; /// /// #[tokio::main] @@ -77,8 +76,7 @@ use crate::*; /// .username("xxx") /// .password("xxx"); /// -/// let op: Operator = Operator::create(builder)?.finish(); -/// let _obj: Object = op.object("test_file"); +/// let op: Operator = Operator::new(builder)?.finish(); /// Ok(()) /// } /// ``` @@ -191,10 +189,8 @@ impl Builder for WebdavBuilder { let endpoint = match &self.endpoint { Some(v) => v, None => { - return Err( - Error::new(ErrorKind::BackendConfigInvalid, "endpoint is empty") - .with_context("service", Scheme::Webdav), - ) + return Err(Error::new(ErrorKind::ConfigInvalid, "endpoint is empty") + .with_context("service", Scheme::Webdav)) } }; @@ -259,8 +255,8 @@ impl Accessor for WebdavBackend { type Pager = WebdavPager; type BlockingPager = (); - fn metadata(&self) -> AccessorMetadata { - let mut ma = AccessorMetadata::default(); + fn info(&self) -> AccessorInfo { + let mut ma = AccessorInfo::default(); ma.set_scheme(Scheme::Webdav) .set_root(&self.root) .set_capabilities( diff --git a/src/services/webdav/error.rs b/src/services/webdav/error.rs index 42805ed3408..672b504d11d 100644 --- a/src/services/webdav/error.rs +++ b/src/services/webdav/error.rs @@ -26,8 +26,8 @@ pub async fn parse_error(resp: Response) -> Result { let bs = body.bytes().await?; let (kind, retryable) = match parts.status { - StatusCode::NOT_FOUND => (ErrorKind::ObjectNotFound, false), - StatusCode::FORBIDDEN => (ErrorKind::ObjectPermissionDenied, false), + StatusCode::NOT_FOUND => (ErrorKind::NotFound, false), + StatusCode::FORBIDDEN => (ErrorKind::PermissionDenied, false), StatusCode::INTERNAL_SERVER_ERROR | StatusCode::BAD_GATEWAY | StatusCode::SERVICE_UNAVAILABLE diff --git a/src/services/webhdfs/backend.rs b/src/services/webhdfs/backend.rs index 45e82ba647d..1a2e9d3f86b 100644 --- a/src/services/webhdfs/backend.rs +++ b/src/services/webhdfs/backend.rs @@ -78,7 
+78,6 @@ const WEBHDFS_DEFAULT_ENDPOINT: &str = "http://127.0.0.1:9870"; /// /// use anyhow::Result; /// use opendal::services::Webhdfs; -/// use opendal::Object; /// use opendal::Operator; /// /// #[tokio::main] @@ -98,10 +97,7 @@ const WEBHDFS_DEFAULT_ENDPOINT: &str = "http://127.0.0.1:9870"; /// // set the delegation_token for builder /// builder.delegation("delegation_token"); /// -/// let op: Operator = Operator::create(builder)?.finish(); -/// -/// // create an object handler to start operation on object. -/// let _: Object = op.object("test_file"); +/// let op: Operator = Operator::new(builder)?.finish(); /// /// Ok(()) /// } @@ -508,11 +504,8 @@ impl WebhdfsBackend { match file_status.ty { FileStatusType::File => { error!("working directory is occupied!"); - return Err(Error::new( - ErrorKind::BackendConfigInvalid, - "root is occupied!", - ) - .with_context("service", Scheme::Webhdfs)); + return Err(Error::new(ErrorKind::ConfigInvalid, "root is occupied!") + .with_context("service", Scheme::Webhdfs)); } FileStatusType::Directory => { debug!("working directory exists, do nothing"); @@ -541,8 +534,8 @@ impl Accessor for WebhdfsBackend { type Pager = WebhdfsPager; type BlockingPager = (); - fn metadata(&self) -> AccessorMetadata { - let mut am = AccessorMetadata::default(); + fn info(&self) -> AccessorInfo { + let mut am = AccessorInfo::default(); am.set_scheme(Scheme::Webhdfs) .set_root(&self.root) .set_capabilities( @@ -600,7 +593,7 @@ impl Accessor for WebhdfsBackend { let meta = parse_into_object_metadata(path, resp.headers())?; Ok((RpRead::with_metadata(meta), resp.into_body())) } - StatusCode::NOT_FOUND => Err(Error::new(ErrorKind::ObjectNotFound, "object not found") + StatusCode::NOT_FOUND => Err(Error::new(ErrorKind::NotFound, "object not found") .with_context("service", Scheme::Webhdfs)), _ => Err(parse_error(resp).await?), } diff --git a/src/services/webhdfs/error.rs b/src/services/webhdfs/error.rs index d2b16736592..f2f9d1398e7 100644 --- 
a/src/services/webhdfs/error.rs +++ b/src/services/webhdfs/error.rs @@ -45,10 +45,8 @@ pub(super) async fn parse_error(resp: Response) -> Result Result { let (kind, retryable) = match parts.status { - StatusCode::NOT_FOUND => (ErrorKind::ObjectNotFound, false), - StatusCode::UNAUTHORIZED | StatusCode::FORBIDDEN => { - (ErrorKind::ObjectPermissionDenied, false) - } + StatusCode::NOT_FOUND => (ErrorKind::NotFound, false), + StatusCode::UNAUTHORIZED | StatusCode::FORBIDDEN => (ErrorKind::PermissionDenied, false), // passing invalid arguments will return BAD_REQUEST // should be unretriable StatusCode::BAD_REQUEST => (ErrorKind::Unexpected, false), diff --git a/src/types/entry.rs b/src/types/entry.rs new file mode 100644 index 00000000000..b5351319d15 --- /dev/null +++ b/src/types/entry.rs @@ -0,0 +1,61 @@ +// Copyright 2022 Datafuse Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::raw::*; +use crate::*; + +/// Entry is the file/dir entry returned by `Lister`. +#[derive(Clone, Debug)] +pub struct Entry { + path: String, + meta: Metadata, +} + +impl Entry { + /// Create an entry. + /// + /// # Notes + /// + /// This function is crate internal only. Users don't have public + /// methods to construct an entry. The only way to get an entry + /// is `Operator::list` or `Operator::scan`. + pub(crate) fn new(path: String, meta: Metadata) -> Self { + Self { path, meta } + } + + /// Path of entry. Path is relative to operator's root. 
+ /// Only valid in current operator. + pub fn path(&self) -> &str { + &self.path + } + + /// Name of entry. Name is the last segment of path. + /// + /// If this object is a dir, `Name` MUST endswith `/` + /// Otherwise, `Name` MUST NOT endswith `/`. + pub fn name(&self) -> &str { + get_basename(&self.path) + } + + /// Get the metadata of entry. + /// + /// # Notes + /// + /// This function is crate internal only. Because the returning + /// metadata could be incomplete. Users must use `Operator::metadata` + /// to query the cached metadata instead. + pub(crate) fn metadata(&self) -> &Metadata { + &self.meta + } +} diff --git a/src/types/error.rs b/src/types/error.rs index 2cfa743a0c9..0060ba86fdf 100644 --- a/src/types/error.rs +++ b/src/types/error.rs @@ -23,8 +23,8 @@ //! use opendal::ErrorKind; //! # #[tokio::main] //! # async fn test(op: Operator) -> Result<()> { -//! if let Err(e) = op.object("test_file").stat().await { -//! if e.kind() == ErrorKind::ObjectNotFound { +//! if let Err(e) = op.stat("test_file").await { +//! if e.kind() == ErrorKind::NotFound { //! println!("object not exist") //! } //! } @@ -52,20 +52,19 @@ pub enum ErrorKind { Unsupported, /// The config for backend is invalid. - BackendConfigInvalid, - - /// Object is not found. - ObjectNotFound, + ConfigInvalid, + /// The given path is not found. + NotFound, /// Object doesn't have enough permission for this operation - ObjectPermissionDenied, + PermissionDenied, /// Object is a directory. - ObjectIsADirectory, + IsADirectory, /// Object is not a directory. - ObjectNotADirectory, + NotADirectory, /// Object already exists thus we failed to the specified operation on it. - ObjectAlreadyExists, + AlreadyExists, /// Requests that sent to this object is over the limit, please slow down. 
- ObjectRateLimited, + RateLimited, } impl ErrorKind { @@ -86,13 +85,13 @@ impl From for &'static str { match v { ErrorKind::Unexpected => "Unexpected", ErrorKind::Unsupported => "Unsupported", - ErrorKind::BackendConfigInvalid => "BackendConfigInvalid", - ErrorKind::ObjectNotFound => "ObjectNotFound", - ErrorKind::ObjectPermissionDenied => "ObjectPermissionDenied", - ErrorKind::ObjectIsADirectory => "ObjectIsADirectory", - ErrorKind::ObjectNotADirectory => "ObjectNotADirectory", - ErrorKind::ObjectAlreadyExists => "ObjectAlreadyExists", - ErrorKind::ObjectRateLimited => "ObjectRateLimited", + ErrorKind::ConfigInvalid => "ConfigInvalid", + ErrorKind::NotFound => "NotFound", + ErrorKind::PermissionDenied => "PermissionDenied", + ErrorKind::IsADirectory => "IsADirectory", + ErrorKind::NotADirectory => "NotADirectory", + ErrorKind::AlreadyExists => "AlreadyExists", + ErrorKind::RateLimited => "RateLimited", } } } @@ -303,8 +302,8 @@ impl Error { impl From for io::Error { fn from(err: Error) -> Self { let kind = match err.kind() { - ErrorKind::ObjectNotFound => io::ErrorKind::NotFound, - ErrorKind::ObjectPermissionDenied => io::ErrorKind::PermissionDenied, + ErrorKind::NotFound => io::ErrorKind::NotFound, + ErrorKind::PermissionDenied => io::ErrorKind::PermissionDenied, _ => io::ErrorKind::Other, }; diff --git a/src/types/list.rs b/src/types/list.rs index e81abf7c7b3..38efe63d96c 100644 --- a/src/types/list.rs +++ b/src/types/list.rs @@ -33,7 +33,6 @@ use crate::*; /// User can use object lister as `Stream>` or /// call `next_page` directly. pub struct Lister { - acc: FusedAccessor, pager: Option, buf: VecDeque, @@ -46,9 +45,8 @@ pub struct Lister { impl Lister { /// Create a new object lister. 
- pub(crate) fn new(acc: FusedAccessor, pager: oio::Pager) -> Self { + pub(crate) fn new(pager: oio::Pager) -> Self { Self { - acc, pager: Some(pager), buf: VecDeque::default(), fut: None, @@ -61,7 +59,7 @@ impl Lister { /// /// Don't mix the usage of `next_page` and `Stream>`. /// Always using the same calling style. - pub async fn next_page(&mut self) -> Result>> { + pub async fn next_page(&mut self) -> Result>> { debug_assert!( self.fut.is_none(), "there are ongoing futures for next page" @@ -85,21 +83,16 @@ impl Lister { } }; - Ok(Some( - entries - .into_iter() - .map(|v| v.into_object(self.acc.clone())) - .collect(), - )) + Ok(Some(entries.into_iter().map(|v| v.into_entry()).collect())) } } impl Stream for Lister { - type Item = Result; + type Item = Result; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { if let Some(oe) = self.buf.pop_front() { - return Poll::Ready(Some(Ok(oe.into_object(self.acc.clone())))); + return Poll::Ready(Some(Ok(oe.into_entry()))); } if let Some(fut) = self.fut.as_mut() { @@ -134,23 +127,21 @@ impl Stream for Lister { /// /// Users can construct Lister by `blocking_list` or `blocking_scan`. pub struct BlockingLister { - acc: FusedAccessor, pager: oio::BlockingPager, buf: VecDeque, } impl BlockingLister { /// Create a new object lister. - pub(crate) fn new(acc: FusedAccessor, pager: oio::BlockingPager) -> Self { + pub(crate) fn new(pager: oio::BlockingPager) -> Self { Self { - acc, pager, buf: VecDeque::default(), } } /// next_page can be used to fetch a new object page. - pub fn next_page(&mut self) -> Result>> { + pub fn next_page(&mut self) -> Result>> { let entries = if !self.buf.is_empty() { mem::take(&mut self.buf) } else { @@ -163,22 +154,17 @@ impl BlockingLister { } }; - Ok(Some( - entries - .into_iter() - .map(|v| v.into_object(self.acc.clone())) - .collect(), - )) + Ok(Some(entries.into_iter().map(|v| v.into_entry()).collect())) } } /// TODO: we can implement next_chunk. 
impl Iterator for BlockingLister { - type Item = Result; + type Item = Result; fn next(&mut self) -> Option { if let Some(oe) = self.buf.pop_front() { - return Some(Ok(oe.into_object(self.acc.clone()))); + return Some(Ok(oe.into_entry())); } self.buf = match self.pager.next() { diff --git a/src/types/mod.rs b/src/types/mod.rs index 4d2958e5875..8d23a5253a1 100644 --- a/src/types/mod.rs +++ b/src/types/mod.rs @@ -15,14 +15,13 @@ mod mode; pub use mode::EntryMode; +mod entry; +pub use entry::Entry; + mod metadata; pub use metadata::Metadata; pub use metadata::Metakey; -#[allow(clippy::module_inception)] -mod object; -pub use object::Object; - mod reader; pub use reader::BlockingReader; pub use reader::Reader; @@ -36,10 +35,10 @@ pub use list::BlockingLister; pub use list::Lister; mod operator; -pub use operator::BatchOperator; +pub use operator::BlockingOperator; pub use operator::Operator; pub use operator::OperatorBuilder; -pub use operator::OperatorMetadata; +pub use operator::OperatorInfo; mod builder; pub use builder::Builder; diff --git a/src/types/object.rs b/src/types/object.rs deleted file mode 100644 index 0aded73a39b..00000000000 --- a/src/types/object.rs +++ /dev/null @@ -1,1362 +0,0 @@ -// Copyright 2022 Datafuse Labs -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -use std::fmt::Debug; -use std::io::Read; -use std::ops::RangeBounds; -use std::sync::Arc; - -use bytes::Bytes; -use flagset::FlagSet; -use time::Duration; - -use super::BlockingLister; -use super::BlockingReader; -use super::Lister; -use crate::ops::*; -use crate::raw::*; -use crate::*; - -/// Object is the handler for all object related operations. -/// -/// # Notes -/// -/// Object will cache part of object metadata that pre-fetch by list or stat -/// operations. It's better to reuse the same object whenever possible. -#[derive(Clone, Debug)] -pub struct Object { - acc: FusedAccessor, - path: Arc, - - meta: Option>, -} - -impl Object { - /// Creates a new Object with normalized path. - /// - /// - All path will be converted into relative path (without any leading `/`) - /// - Path endswith `/` means it's a dir path. - /// - Otherwise, it's a file path. - pub(crate) fn new(acc: FusedAccessor, path: &str) -> Self { - Self::with(acc, path, None) - } - - pub(crate) fn with(acc: FusedAccessor, path: &str, meta: Option) -> Self { - Self { - acc, - path: Arc::new(normalize_path(path)), - meta: meta.map(Arc::new), - } - } - - pub(crate) fn accessor(&self) -> FusedAccessor { - self.acc.clone() - } - - /// ID of object. - /// - /// ID is the unique id of object in the underlying backend. In different backend, - /// the id could have different meaning. - /// - /// For example: - /// - /// - In `fs`: id is the absolute path of file, like `/path/to/dir/test_object`. - /// - In `s3`: id is the full object key, like `path/to/dir/test_object` - /// - /// # Example - /// - /// ``` - /// use anyhow::Result; - /// use futures::io; - /// use opendal::Operator; - /// - /// #[tokio::main] - /// async fn test(op: Operator) -> Result<()> { - /// let id = op.object("test").id(); - /// - /// Ok(()) - /// } - /// ``` - pub fn id(&self) -> String { - format!("{}{}", self.acc.metadata().root(), self.path) - } - - /// Path of object. Path is relative to operator's root. 
- /// Only valid in current operator. - /// - /// The value is the same with `Metadata::path()`. - /// - /// # Example - /// - /// ``` - /// use anyhow::Result; - /// use futures::io; - /// use opendal::Operator; - /// - /// #[tokio::main] - /// async fn test(op: Operator) -> Result<()> { - /// let path = op.object("test").path(); - /// - /// Ok(()) - /// } - /// ``` - pub fn path(&self) -> &str { - &self.path - } - - /// Name of object. Name is the last segment of path. - /// - /// If this object is a dir, `Name` MUST endswith `/` - /// Otherwise, `Name` MUST NOT endswith `/`. - /// - /// The value is the same with `Metadata::name()`. - /// - /// # Example - /// - /// ``` - /// use anyhow::Result; - /// use futures::io; - /// use opendal::Operator; - /// - /// #[tokio::main] - /// async fn test(op: Operator) -> Result<()> { - /// let name = op.object("test").name(); - /// - /// Ok(()) - /// } - /// ``` - pub fn name(&self) -> &str { - get_basename(&self.path) - } - - /// Create an empty object, like using the following linux commands: - /// - /// - `touch path/to/file` - /// - `mkdir path/to/dir/` - /// - /// # Behavior - /// - /// - Create on existing dir will succeed. - /// - Create on existing file will overwrite and truncate it. 
- /// - /// # Examples - /// - /// ## Create an empty file - /// - /// ``` - /// # use std::io::Result; - /// # use opendal::Operator; - /// # use futures::TryStreamExt; - /// # #[tokio::main] - /// # async fn test(op: Operator) -> Result<()> { - /// let mut o = op.object("path/to/file"); - /// let _ = o.create().await?; - /// # Ok(()) - /// # } - /// ``` - /// - /// ## Create a dir - /// - /// ``` - /// # use std::io::Result; - /// # use opendal::Operator; - /// # use futures::TryStreamExt; - /// # #[tokio::main] - /// # async fn test(op: Operator) -> Result<()> { - /// let mut o = op.object("path/to/dir/"); - /// let _ = o.create().await?; - /// # Ok(()) - /// # } - /// ``` - pub async fn create(&self) -> Result<()> { - let _ = if self.path.ends_with('/') { - self.acc - .create(self.path(), OpCreate::new(EntryMode::DIR)) - .await? - } else { - self.acc - .create(self.path(), OpCreate::new(EntryMode::FILE)) - .await? - }; - - Ok(()) - } - - /// Create an empty object, like using the following linux commands: - /// - /// - `touch path/to/file` - /// - `mkdir path/to/dir/` - /// - /// # Behavior - /// - /// - Create on existing dir will succeed. - /// - Create on existing file will overwrite and truncate it. 
- /// - /// # Examples - /// - /// ## Create an empty file - /// - /// ```no_run - /// # use std::io::Result; - /// # use opendal::Operator; - /// # use futures::TryStreamExt; - /// # fn test(op: Operator) -> Result<()> { - /// let mut o = op.object("path/to/file"); - /// let _ = o.blocking_create()?; - /// # Ok(()) - /// # } - /// ``` - /// - /// ## Create a dir - /// - /// ```no_run - /// # use std::io::Result; - /// # use opendal::Operator; - /// # use futures::TryStreamExt; - /// # async fn test(op: Operator) -> Result<()> { - /// let mut o = op.object("path/to/dir/"); - /// let _ = o.blocking_create()?; - /// # Ok(()) - /// # } - /// ``` - pub fn blocking_create(&self) -> Result<()> { - if self.path.ends_with('/') { - self.acc - .blocking_create(self.path(), OpCreate::new(EntryMode::DIR))?; - } else { - self.acc - .blocking_create(self.path(), OpCreate::new(EntryMode::FILE))?; - }; - - Ok(()) - } - - /// Read the whole object into a bytes. - /// - /// This function will allocate a new bytes internally. For more precise memory control or - /// reading data lazily, please use [`Object::reader`] - /// - /// # Examples - /// - /// ``` - /// # use std::io::Result; - /// # use opendal::Operator; - /// # use futures::TryStreamExt; - /// # #[tokio::main] - /// # async fn test(op: Operator) -> Result<()> { - /// let mut o = op.object("path/to/file"); - /// # o.write(vec![0; 4096]).await?; - /// let bs = o.read().await?; - /// # Ok(()) - /// # } - /// ``` - pub async fn read(&self) -> Result> { - Operator::from_inner(self.acc.clone()) - .read(self.path()) - .await - } - - /// Read the whole object into a bytes. - /// - /// This function will allocate a new bytes internally. 
For more precise memory control or - /// reading data lazily, please use [`Object::blocking_reader`] - /// - /// # Examples - /// - /// ```no_run - /// # use std::io::Result; - /// # use opendal::Operator; - /// # - /// # fn test(op: Operator) -> Result<()> { - /// # let mut o = op.object("path/to/file"); - /// # o.blocking_write(vec![0; 4096])?; - /// let bs = o.blocking_read()?; - /// # Ok(()) - /// # } - /// ``` - pub fn blocking_read(&self) -> Result> { - self.blocking_range_read(..) - } - - /// Read the specified range of object into a bytes. - /// - /// This function will allocate a new bytes internally. For more precise memory control or - /// reading data lazily, please use [`Object::range_reader`] - /// - /// # Notes - /// - /// - The returning content's length may be smaller than the range specified. - /// - /// # Examples - /// - /// ``` - /// # use std::io::Result; - /// # use opendal::Operator; - /// # use futures::TryStreamExt; - /// # #[tokio::main] - /// # async fn test(op: Operator) -> Result<()> { - /// let mut o = op.object("path/to/file"); - /// # o.write(vec![0; 4096]).await?; - /// let bs = o.range_read(1024..2048).await?; - /// # Ok(()) - /// # } - /// ``` - pub async fn range_read(&self, range: impl RangeBounds) -> Result> { - Operator::from_inner(self.acc.clone()) - .range_read(self.path(), range) - .await - } - - /// Read the specified range of object into a bytes. - /// - /// This function will allocate a new bytes internally. 
For more precise memory control or - /// reading data lazily, please use [`Object::blocking_range_reader`] - /// - /// # Examples - /// - /// ```no_run - /// # use std::io::Result; - /// # use opendal::Operator; - /// # use futures::TryStreamExt; - /// # use opendal::Scheme; - /// # async fn test(op: Operator) -> Result<()> { - /// # let mut o = op.object("path/to/file"); - /// # o.blocking_write(vec![0; 4096])?; - /// let bs = o.blocking_range_read(1024..2048)?; - /// # Ok(()) - /// # } - /// ``` - pub fn blocking_range_read(&self, range: impl RangeBounds) -> Result> { - if !validate_path(self.path(), EntryMode::FILE) { - return Err( - Error::new(ErrorKind::ObjectIsADirectory, "read path is a directory") - .with_operation("Object::blocking_range_read") - .with_context("service", self.accessor().metadata().scheme().into_static()) - .with_context("path", self.path()), - ); - } - - let br = BytesRange::from(range); - let (rp, mut s) = self - .acc - .blocking_read(self.path(), OpRead::new().with_range(br))?; - - let mut buffer = Vec::with_capacity(rp.into_metadata().content_length() as usize); - s.read_to_end(&mut buffer).map_err(|err| { - Error::new(ErrorKind::Unexpected, "blocking range read failed") - .with_operation("Object::blocking_range_read") - .with_context("service", self.accessor().metadata().scheme().into_static()) - .with_context("path", self.path()) - .with_context("range", br.to_string()) - .set_source(err) - })?; - - Ok(buffer) - } - - /// Create a new reader which can read the whole object. 
- /// - /// # Examples - /// - /// ```no_run - /// # use std::io::Result; - /// # use opendal::Operator; - /// # use futures::TryStreamExt; - /// # use opendal::Scheme; - /// # #[tokio::main] - /// # async fn test(op: Operator) -> Result<()> { - /// let o = op.object("path/to/file"); - /// let r = o.reader().await?; - /// # Ok(()) - /// # } - /// ``` - pub async fn reader(&self) -> Result { - self.range_reader(..).await - } - - /// Create a new reader which can read the whole object. - /// - /// # Examples - /// - /// ```no_run - /// # use std::io::Result; - /// # use opendal::Operator; - /// # use futures::TryStreamExt; - /// # fn test(op: Operator) -> Result<()> { - /// let o = op.object("path/to/file"); - /// let r = o.blocking_reader()?; - /// # Ok(()) - /// # } - /// ``` - pub fn blocking_reader(&self) -> Result { - self.blocking_range_reader(..) - } - - /// Create a new reader which can read the specified range. - /// - /// # Notes - /// - /// - The returning content's length may be smaller than the range specified. - /// - /// # Examples - /// - /// ```no_run - /// # use std::io::Result; - /// # use opendal::Operator; - /// # use futures::TryStreamExt; - /// # #[tokio::main] - /// # async fn test(op: Operator) -> Result<()> { - /// let o = op.object("path/to/file"); - /// let r = o.range_reader(1024..2048).await?; - /// # Ok(()) - /// # } - /// ``` - pub async fn range_reader(&self, range: impl RangeBounds) -> Result { - if !validate_path(self.path(), EntryMode::FILE) { - return Err( - Error::new(ErrorKind::ObjectIsADirectory, "read path is a directory") - .with_operation("Object::range_reader") - .with_context("service", self.accessor().metadata().scheme().into_static()) - .with_context("path", self.path()), - ); - } - - let op = OpRead::new().with_range(range.into()); - - Reader::create(self.accessor(), self.path(), op).await - } - - /// Create a new reader which can read the specified range. 
- /// - /// # Examples - /// - /// ```no_run - /// # use std::io::Result; - /// # use opendal::Operator; - /// # use futures::TryStreamExt; - /// # fn test(op: Operator) -> Result<()> { - /// let o = op.object("path/to/file"); - /// let r = o.blocking_range_reader(1024..2048)?; - /// # Ok(()) - /// # } - /// ``` - pub fn blocking_range_reader(&self, range: impl RangeBounds) -> Result { - if !validate_path(self.path(), EntryMode::FILE) { - return Err( - Error::new(ErrorKind::ObjectIsADirectory, "read path is a directory") - .with_operation("Object::blocking_range_reader") - .with_context("service", self.accessor().metadata().scheme().into_static()) - .with_context("path", self.path()), - ); - } - - let op = OpRead::new().with_range(range.into()); - - BlockingReader::create(self.accessor(), self.path(), op) - } - - /// Write bytes into object. - /// - /// # Notes - /// - /// - Write will make sure all bytes has been written, or an error will be returned. - /// - /// # Examples - /// - /// ``` - /// # use std::io::Result; - /// # use opendal::Operator; - /// # use futures::StreamExt; - /// # use futures::SinkExt; - /// use bytes::Bytes; - /// - /// # #[tokio::main] - /// # async fn test(op: Operator) -> Result<()> { - /// let mut o = op.object("path/to/file"); - /// let _ = o.write(vec![0; 4096]).await?; - /// # Ok(()) - /// # } - /// ``` - pub async fn write(&self, bs: impl Into) -> Result<()> { - self.write_with(OpWrite::new(), bs).await - } - - /// Write multiple bytes into object. - /// - /// # Notes - /// - /// - Write will make sure all bytes has been written, or an error will be returned. 
- /// - /// # Examples - /// - /// ``` - /// # use std::io::Result; - /// # use opendal::Operator; - /// # use futures::StreamExt; - /// # use futures::SinkExt; - /// use bytes::Bytes; - /// - /// # #[tokio::main] - /// # async fn test(op: Operator) -> Result<()> { - /// let mut w = op.object("path/to/file").writer().await?; - /// w.append(vec![0; 4096]).await?; - /// w.append(vec![1; 4096]).await?; - /// w.close().await?; - /// # Ok(()) - /// # } - /// ``` - pub async fn writer(&self) -> Result { - if !validate_path(self.path(), EntryMode::FILE) { - return Err( - Error::new(ErrorKind::ObjectIsADirectory, "write path is a directory") - .with_operation("Object::write_with") - .with_context("service", self.accessor().metadata().scheme().into_static()) - .with_context("path", self.path()), - ); - } - - let op = OpWrite::default().with_append(); - Writer::create(self.accessor(), self.path(), op).await - } - - /// Write data with option described in OpenDAL [rfc-0661](../../docs/rfcs/0661-path-in-accessor.md) - /// - /// # Notes - /// - /// - Write will make sure all bytes has been written, or an error will be returned. 
- /// - /// # Examples - /// - /// ```no_run - /// # use std::io::Result; - /// # use opendal::Operator; - /// use bytes::Bytes; - /// use opendal::ops::OpWrite; - /// - /// # #[tokio::main] - /// # async fn test(op: Operator) -> Result<()> { - /// let mut o = op.object("path/to/file"); - /// let bs = b"hello, world!".to_vec(); - /// let args = OpWrite::new().with_content_type("text/plain"); - /// let _ = o.write_with(args, bs).await?; - /// # Ok(()) - /// # } - /// ``` - pub async fn write_with(&self, args: OpWrite, bs: impl Into) -> Result<()> { - if !validate_path(self.path(), EntryMode::FILE) { - return Err( - Error::new(ErrorKind::ObjectIsADirectory, "write path is a directory") - .with_operation("Object::write_with") - .with_context("service", self.accessor().metadata().scheme().into_static()) - .with_context("path", self.path()), - ); - } - - let (_, mut w) = self.acc.write(self.path(), args).await?; - w.write(bs.into()).await?; - w.close().await?; - - Ok(()) - } - - /// Write bytes into object. - /// - /// # Notes - /// - /// - Write will make sure all bytes has been written, or an error will be returned. - /// - /// # Examples - /// - /// ```no_run - /// # use std::io::Result; - /// # use opendal::Operator; - /// # use futures::StreamExt; - /// # use futures::SinkExt; - /// use bytes::Bytes; - /// - /// # fn test(op: Operator) -> Result<()> { - /// let mut o = op.object("path/to/file"); - /// let _ = o.blocking_write(vec![0; 4096])?; - /// # Ok(()) - /// # } - /// ``` - pub fn blocking_write(&self, bs: impl Into) -> Result<()> { - self.blocking_write_with(OpWrite::new(), bs) - } - - /// Write multiple bytes into object. - /// - /// # Notes - /// - /// - Write will make sure all bytes has been written, or an error will be returned. 
- /// - /// # Examples - /// - /// ```no_run - /// # use std::io::Result; - /// # use opendal::Operator; - /// # use futures::StreamExt; - /// # use futures::SinkExt; - /// use bytes::Bytes; - /// - /// # fn test(op: Operator) -> Result<()> { - /// let mut w = op.object("path/to/file").blocking_writer()?; - /// w.append(vec![0; 4096])?; - /// w.append(vec![1; 4096])?; - /// w.close()?; - /// # Ok(()) - /// # } - /// ``` - pub fn blocking_writer(&self) -> Result { - if !validate_path(self.path(), EntryMode::FILE) { - return Err( - Error::new(ErrorKind::ObjectIsADirectory, "write path is a directory") - .with_operation("Object::write_with") - .with_context("service", self.accessor().metadata().scheme().into_static()) - .with_context("path", self.path()), - ); - } - - let op = OpWrite::default().with_append(); - BlockingWriter::create(self.accessor(), self.path(), op) - } - - /// Write data with option described in OpenDAL [rfc-0661](../../docs/rfcs/0661-path-in-accessor.md) - /// - /// # Notes - /// - /// - Write will make sure all bytes has been written, or an error will be returned. 
- /// - /// # Examples - /// - /// ```no_run - /// # use opendal::Result; - /// # use opendal::Operator; - /// use bytes::Bytes; - /// use opendal::ops::OpWrite; - /// - /// # async fn test(op: Operator) -> Result<()> { - /// let mut o = op.object("hello.txt"); - /// let bs = b"hello, world!".to_vec(); - /// let ow = OpWrite::new().with_content_type("text/plain"); - /// let _ = o.blocking_write_with(ow, bs)?; - /// # Ok(()) - /// # } - /// ``` - pub fn blocking_write_with(&self, args: OpWrite, bs: impl Into) -> Result<()> { - if !validate_path(self.path(), EntryMode::FILE) { - return Err( - Error::new(ErrorKind::ObjectIsADirectory, "write path is a directory") - .with_operation("Object::blocking_write_with") - .with_context("service", self.accessor().metadata().scheme().into_static()) - .with_context("path", self.path()), - ); - } - - let (_, mut w) = self.acc.blocking_write(self.path(), args)?; - w.write(bs.into())?; - w.close()?; - - Ok(()) - } - - /// Delete object. - /// - /// # Notes - /// - /// - Delete not existing error won't return errors. - /// - /// # Examples - /// - /// ``` - /// # use anyhow::Result; - /// # use futures::io; - /// # use opendal::Operator; - /// # #[tokio::main] - /// # async fn test(op: Operator) -> Result<()> { - /// op.object("test").delete().await?; - /// # Ok(()) - /// # } - /// ``` - pub async fn delete(&self) -> Result<()> { - let _ = self.acc.delete(self.path(), OpDelete::new()).await?; - - Ok(()) - } - - /// Delete object. - /// - /// # Notes - /// - /// - Delete not existing error won't return errors. - /// - /// # Examples - /// - /// ```no_run - /// # use anyhow::Result; - /// # use futures::io; - /// # use opendal::Operator; - /// # fn test(op: Operator) -> Result<()> { - /// op.object("test").blocking_delete()?; - /// # Ok(()) - /// # } - /// ``` - pub fn blocking_delete(&self) -> Result<()> { - let _ = self.acc.blocking_delete(self.path(), OpDelete::new())?; - - Ok(()) - } - - /// List current dir object. 
- /// - /// This function will create a new handle to list objects. - /// - /// An error will be returned if object path doesn't end with `/`. - /// - /// # Examples - /// - /// ```no_run - /// # use anyhow::Result; - /// # use futures::io; - /// # use opendal::Operator; - /// # use opendal::EntryMode; - /// # use futures::TryStreamExt; - /// # #[tokio::main] - /// # async fn test(op: Operator) -> Result<()> { - /// let o = op.object("path/to/dir/"); - /// let mut ds = o.list().await?; - /// while let Some(mut de) = ds.try_next().await? { - /// let meta = de - /// .metadata({ - /// use opendal::Metakey::*; - /// Mode - /// }) - /// .await?; - /// match meta.mode() { - /// EntryMode::FILE => { - /// println!("Handling file") - /// } - /// EntryMode::DIR => { - /// println!("Handling dir like start a new list via meta.path()") - /// } - /// EntryMode::Unknown => continue, - /// } - /// } - /// # Ok(()) - /// # } - /// ``` - pub async fn list(&self) -> Result { - if !validate_path(self.path(), EntryMode::DIR) { - return Err(Error::new( - ErrorKind::ObjectNotADirectory, - "the path trying to list is not a directory", - ) - .with_operation("Object::list") - .with_context("service", self.accessor().metadata().scheme().into_static()) - .with_context("path", self.path())); - } - - let (_, pager) = self.acc.list(self.path(), OpList::new()).await?; - - Ok(Lister::new(self.acc.clone(), pager)) - } - - /// List current dir object. - /// - /// This function will create a new handle to list objects. - /// - /// An error will be returned if object path doesn't end with `/`. 
- /// - /// # Examples - /// - /// ```no_run - /// # use opendal::Result; - /// # use futures::io; - /// # use opendal::Operator; - /// # use opendal::EntryMode; - /// # fn test(op: Operator) -> Result<()> { - /// let o = op.object("path/to/dir/"); - /// let mut ds = o.blocking_list()?; - /// while let Some(mut de) = ds.next() { - /// let meta = de?.blocking_metadata({ - /// use opendal::Metakey::*; - /// Mode - /// })?; - /// match meta.mode() { - /// EntryMode::FILE => { - /// println!("Handling file") - /// } - /// EntryMode::DIR => { - /// println!("Handling dir like start a new list via meta.path()") - /// } - /// EntryMode::Unknown => continue, - /// } - /// } - /// # Ok(()) - /// # } - /// ``` - pub fn blocking_list(&self) -> Result { - if !validate_path(self.path(), EntryMode::DIR) { - return Err(Error::new( - ErrorKind::ObjectNotADirectory, - "the path trying to list is not a directory", - ) - .with_operation("Object::blocking_list") - .with_context("service", self.accessor().metadata().scheme().into_static()) - .with_context("path", self.path())); - } - - let (_, pager) = self.acc.blocking_list(self.path(), OpList::new())?; - Ok(BlockingLister::new(self.acc.clone(), pager)) - } - - /// List dir in flat way. - /// - /// This function will create a new handle to list objects. - /// - /// An error will be returned if object path doesn't end with `/`. - /// - /// # Examples - /// - /// ```no_run - /// # use anyhow::Result; - /// # use futures::io; - /// # use opendal::Operator; - /// # use opendal::EntryMode; - /// # use futures::TryStreamExt; - /// # - /// # #[tokio::main] - /// # async fn test(op: Operator) -> Result<()> { - /// let o = op.object("path/to/dir/"); - /// let mut ds = o.scan().await?; - /// while let Some(mut de) = ds.try_next().await? 
{ - /// let meta = de - /// .metadata({ - /// use opendal::Metakey::*; - /// Mode - /// }) - /// .await?; - /// match meta.mode() { - /// EntryMode::FILE => { - /// println!("Handling file") - /// } - /// EntryMode::DIR => { - /// println!("Handling dir like start a new list via meta.path()") - /// } - /// EntryMode::Unknown => continue, - /// } - /// } - /// # Ok(()) - /// # } - /// ``` - pub async fn scan(&self) -> Result { - if !validate_path(self.path(), EntryMode::DIR) { - return Err(Error::new( - ErrorKind::ObjectNotADirectory, - "the path trying to list is not a directory", - ) - .with_operation("Object::list") - .with_context("service", self.accessor().metadata().scheme().into_static()) - .with_context("path", self.path())); - } - - let (_, pager) = self.acc.scan(self.path(), OpScan::new()).await?; - - Ok(Lister::new(self.acc.clone(), pager)) - } - - /// List dir in flat way. - /// - /// This function will create a new handle to list objects. - /// - /// An error will be returned if object path doesn't end with `/`. 
- /// - /// # Examples - /// - /// ```no_run - /// # use opendal::Result; - /// # use futures::io; - /// # use opendal::Operator; - /// # use opendal::EntryMode; - /// # fn test(op: Operator) -> Result<()> { - /// let o = op.object("path/to/dir/"); - /// let mut ds = o.blocking_list()?; - /// while let Some(mut de) = ds.next() { - /// let meta = de?.blocking_metadata({ - /// use opendal::Metakey::*; - /// Mode - /// })?; - /// match meta.mode() { - /// EntryMode::FILE => { - /// println!("Handling file") - /// } - /// EntryMode::DIR => { - /// println!("Handling dir like start a new list via meta.path()") - /// } - /// EntryMode::Unknown => continue, - /// } - /// } - /// # Ok(()) - /// # } - /// ``` - pub fn blocking_scan(&self) -> Result { - if !validate_path(self.path(), EntryMode::DIR) { - return Err(Error::new( - ErrorKind::ObjectNotADirectory, - "the path trying to list is not a directory", - ) - .with_operation("Object::blocking_scan") - .with_context("service", self.accessor().metadata().scheme().into_static()) - .with_context("path", self.path())); - } - - let (_, pager) = self.acc.blocking_scan(self.path(), OpScan::new())?; - Ok(BlockingLister::new(self.acc.clone(), pager)) - } - - /// Get current object's metadata **without cache** directly. - /// - /// # Notes - /// - /// Use `stat` if you: - /// - /// - Want detect the outside changes of object. - /// - Don't want to read from cached object metadata. - /// - /// You may want to use `metadata` if you are working with objects - /// returned by [`Lister`]. It's highly possible that metadata - /// you want has already been cached. 
- /// - /// # Examples - /// - /// ``` - /// # use anyhow::Result; - /// # use futures::io; - /// # use opendal::Operator; - /// use opendal::ErrorKind; - /// # - /// # #[tokio::main] - /// # async fn test(op: Operator) -> Result<()> { - /// if let Err(e) = op.object("test").stat().await { - /// if e.kind() == ErrorKind::ObjectNotFound { - /// println!("object not exist") - /// } - /// } - /// # Ok(()) - /// # } - /// ``` - pub async fn stat(&self) -> Result { - let rp = self.acc.stat(self.path(), OpStat::new()).await?; - let meta = rp.into_metadata(); - - Ok(meta) - } - - /// Get current object's metadata **without cache** directly. - /// - /// # Notes - /// - /// Use `stat` if you: - /// - /// - Want detect the outside changes of object. - /// - Don't want to read from cached object metadata. - /// - /// You may want to use `metadata` if you are working with objects - /// returned by [`Lister`]. It's highly possible that metadata - /// you want has already been cached. - /// - /// # Examples - /// - /// ``` - /// # use anyhow::Result; - /// # use futures::io; - /// # use opendal::Operator; - /// use opendal::ErrorKind; - /// # - /// # fn test(op: Operator) -> Result<()> { - /// if let Err(e) = op.object("test").blocking_stat() { - /// if e.kind() == ErrorKind::ObjectNotFound { - /// println!("object not exist") - /// } - /// } - /// # Ok(()) - /// # } - /// ``` - pub fn blocking_stat(&self) -> Result { - let rp = self.acc.blocking_stat(self.path(), OpStat::new())?; - let meta = rp.into_metadata(); - - Ok(meta) - } - - /// Get current object's metadata with cache. - /// - /// `metadata` will check the given query with already cached metadata - /// first. And query from storage if not found. - /// - /// # Notes - /// - /// Use `metadata` if you are working with objects returned by - /// [`Lister`]. It's highly possible that metadata you want - /// has already been cached. 
- /// - /// You may want to use `stat`, if you: - /// - /// - Want detect the outside changes of object. - /// - Don't want to read from cached object metadata. - /// - /// # Behavior - /// - /// Visiting not fetched metadata will lead to panic in debug build. - /// It must be a bug, please fix it instead. - /// - /// # Examples - /// - /// ## Query already cached metadata - /// - /// By query metadata with `None`, we can only query in-memory metadata - /// cache. In this way, we can make sure that no API call will send. - /// - /// ``` - /// # use anyhow::Result; - /// # use opendal::Operator; - /// # #[tokio::main] - /// # async fn test(op: Operator) -> Result<()> { - /// let meta = op.object("test").metadata(None).await?; - /// // content length COULD be correct. - /// let _ = meta.content_length(); - /// // etag COULD be correct. - /// let _ = meta.etag(); - /// # Ok(()) - /// # } - /// ``` - /// - /// ## Query content length and content type - /// - /// ``` - /// # use anyhow::Result; - /// # use opendal::Operator; - /// use opendal::Metakey; - /// - /// # #[tokio::main] - /// # async fn test(op: Operator) -> Result<()> { - /// let meta = op - /// .object("test") - /// .metadata({ - /// use Metakey::*; - /// ContentLength | ContentType - /// }) - /// .await?; - /// // content length MUST be correct. - /// let _ = meta.content_length(); - /// // etag COULD be correct. - /// let _ = meta.etag(); - /// # Ok(()) - /// # } - /// ``` - /// - /// ## Query all metadata - /// - /// By query metadata with `Complete`, we can make sure that we have fetched all metadata of this object. - /// - /// ``` - /// # use anyhow::Result; - /// # use opendal::Operator; - /// use opendal::Metakey; - /// - /// # #[tokio::main] - /// # async fn test(op: Operator) -> Result<()> { - /// let meta = op.object("test").metadata({ Metakey::Complete }).await?; - /// // content length MUST be correct. - /// let _ = meta.content_length(); - /// // etag MUST be correct. 
- /// let _ = meta.etag(); - /// # Ok(()) - /// # } - /// ``` - pub async fn metadata(&self, flags: impl Into>) -> Result> { - if let Some(meta) = &self.meta { - if meta.bit().contains(flags) || meta.bit().contains(Metakey::Complete) { - return Ok(meta.clone()); - } - } - - let meta = Arc::new(self.stat().await?); - Ok(meta) - } - - /// Get current object's metadata with cache in blocking way. - /// - /// `metadata` will check the given query with already cached metadata - /// first. And query from storage if not found. - /// - /// # Notes - /// - /// Use `metadata` if you are working with objects returned by - /// [`Lister`]. It's highly possible that metadata you want - /// has already been cached. - /// - /// You may want to use `stat`, if you: - /// - /// - Want detect the outside changes of object. - /// - Don't want to read from cached object metadata. - /// - /// # Behavior - /// - /// Visiting not fetched metadata will lead to panic in debug build. - /// It must be a bug, please fix it instead. - /// - /// # Examples - /// - /// ## Query already cached metadata - /// - /// By query metadata with `None`, we can only query in-memory metadata - /// cache. In this way, we can make sure that no API call will send. - /// - /// ``` - /// # use anyhow::Result; - /// # use opendal::Operator; - /// # fn test(op: Operator) -> Result<()> { - /// let meta = op.object("test").blocking_metadata(None)?; - /// // content length COULD be correct. - /// let _ = meta.content_length(); - /// // etag COULD be correct. - /// let _ = meta.etag(); - /// # Ok(()) - /// # } - /// ``` - /// - /// ## Query content length and content type - /// - /// ``` - /// # use anyhow::Result; - /// # use opendal::Operator; - /// use opendal::Metakey; - /// - /// # fn test(op: Operator) -> Result<()> { - /// let meta = op.object("test").blocking_metadata({ - /// use Metakey::*; - /// ContentLength | ContentType - /// })?; - /// // content length MUST be correct. 
- /// let _ = meta.content_length(); - /// // etag COULD be correct. - /// let _ = meta.etag(); - /// # Ok(()) - /// # } - /// ``` - /// - /// ## Query all metadata - /// - /// By query metadata with `Complete`, we can make sure that we have fetched all metadata of this object. - /// - /// ``` - /// # use anyhow::Result; - /// # use opendal::Operator; - /// use opendal::Metakey; - /// - /// # fn test(op: Operator) -> Result<()> { - /// let meta = op.object("test").blocking_metadata({ Metakey::Complete })?; - /// // content length MUST be correct. - /// let _ = meta.content_length(); - /// // etag MUST be correct. - /// let _ = meta.etag(); - /// # Ok(()) - /// # } - /// ``` - pub fn blocking_metadata(&self, flags: impl Into>) -> Result> { - if let Some(meta) = &self.meta { - if meta.bit().contains(flags) || meta.bit().contains(Metakey::Complete) { - return Ok(meta.clone()); - } - } - - let meta = Arc::new(self.blocking_stat()?); - Ok(meta) - } - - /// Check if this object exists or not. - /// - /// # Example - /// - /// ``` - /// use anyhow::Result; - /// use futures::io; - /// use opendal::Operator; - /// - /// #[tokio::main] - /// async fn test(op: Operator) -> Result<()> { - /// let _ = op.object("test").is_exist().await?; - /// - /// Ok(()) - /// } - /// ``` - pub async fn is_exist(&self) -> Result { - let r = self.stat().await; - match r { - Ok(_) => Ok(true), - Err(err) => match err.kind() { - ErrorKind::ObjectNotFound => Ok(false), - _ => Err(err), - }, - } - } - - /// Check if this object exists or not. 
- /// - /// # Example - /// - /// ```no_run - /// use anyhow::Result; - /// use opendal::Operator; - /// fn test(op: Operator) -> Result<()> { - /// let _ = op.object("test").blocking_is_exist()?; - /// - /// Ok(()) - /// } - /// ``` - pub fn blocking_is_exist(&self) -> Result { - let r = self.blocking_stat(); - match r { - Ok(_) => Ok(true), - Err(err) => match err.kind() { - ErrorKind::ObjectNotFound => Ok(false), - _ => Err(err), - }, - } - } - - /// Presign an operation for stat(head). - /// - /// # Example - /// - /// ```no_run - /// use anyhow::Result; - /// use futures::io; - /// use opendal::Operator; - /// use time::Duration; - /// - /// #[tokio::main] - /// async fn test(op: Operator) -> Result<()> { - /// let signed_req = op.object("test").presign_stat(Duration::hours(1))?; - /// let req = http::Request::builder() - /// .method(signed_req.method()) - /// .uri(signed_req.uri()) - /// .body(())?; - /// - /// # Ok(()) - /// # } - /// ``` - pub fn presign_stat(&self, expire: Duration) -> Result { - let op = OpPresign::new(OpStat::new(), expire); - - let rp = self.acc.presign(self.path(), op)?; - Ok(rp.into_presigned_request()) - } - - /// Presign an operation for read. 
- /// - /// # Example - /// - /// ```no_run - /// use anyhow::Result; - /// use futures::io; - /// use opendal::Operator; - /// use time::Duration; - /// - /// #[tokio::main] - /// async fn test(op: Operator) -> Result<()> { - /// let signed_req = op.object("test.txt").presign_read(Duration::hours(1))?; - /// # Ok(()) - /// # } - /// ``` - /// - /// - `signed_req.method()`: `GET` - /// - `signed_req.uri()`: `https://s3.amazonaws.com/examplebucket/test.txt?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=access_key_id/20130721/us-east-1/s3/aws4_request&X-Amz-Date=20130721T201207Z&X-Amz-Expires=86400&X-Amz-SignedHeaders=host&X-Amz-Signature=` - /// - `signed_req.headers()`: `{ "host": "s3.amazonaws.com" }` - /// - /// We can download this object via `curl` or other tools without credentials: - /// - /// ```shell - /// curl "https://s3.amazonaws.com/examplebucket/test.txt?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=access_key_id/20130721/us-east-1/s3/aws4_request&X-Amz-Date=20130721T201207Z&X-Amz-Expires=86400&X-Amz-SignedHeaders=host&X-Amz-Signature=" -O /tmp/test.txt - /// ``` - pub fn presign_read(&self, expire: Duration) -> Result { - let op = OpPresign::new(OpRead::new(), expire); - - let rp = self.acc.presign(self.path(), op)?; - Ok(rp.into_presigned_request()) - } - - /// Presign an operation for write. 
- /// - /// # Example - /// - /// ```no_run - /// use anyhow::Result; - /// use futures::io; - /// use opendal::Operator; - /// use time::Duration; - /// - /// #[tokio::main] - /// async fn test(op: Operator) -> Result<()> { - /// let signed_req = op.object("test.txt").presign_write(Duration::hours(1))?; - /// # Ok(()) - /// # } - /// ``` - /// - /// - `signed_req.method()`: `PUT` - /// - `signed_req.uri()`: `https://s3.amazonaws.com/examplebucket/test.txt?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=access_key_id/20130721/us-east-1/s3/aws4_request&X-Amz-Date=20130721T201207Z&X-Amz-Expires=86400&X-Amz-SignedHeaders=host&X-Amz-Signature=` - /// - `signed_req.headers()`: `{ "host": "s3.amazonaws.com" }` - /// - /// We can upload file as this object via `curl` or other tools without credential: - /// - /// ```shell - /// curl -X PUT "https://s3.amazonaws.com/examplebucket/test.txt?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=access_key_id/20130721/us-east-1/s3/aws4_request&X-Amz-Date=20130721T201207Z&X-Amz-Expires=86400&X-Amz-SignedHeaders=host&X-Amz-Signature=" -d "Hello, World!" - /// ``` - pub fn presign_write(&self, expire: Duration) -> Result { - self.presign_write_with(OpWrite::new(), expire) - } - - /// Presign an operation for write with option described in OpenDAL [rfc-0661](../../docs/rfcs/0661-path-in-accessor.md) - /// - /// You can pass `OpWrite` to this method to specify the content length and content type. 
- /// - /// # Example - /// - /// ```no_run - /// use anyhow::Result; - /// use futures::io; - /// use opendal::ops::OpWrite; - /// use opendal::Operator; - /// use time::Duration; - /// - /// #[tokio::main] - /// async fn test(op: Operator) -> Result<()> { - /// let args = OpWrite::new().with_content_type("text/csv"); - /// let signed_req = op.object("test").presign_write_with(args, Duration::hours(1))?; - /// let req = http::Request::builder() - /// .method(signed_req.method()) - /// .uri(signed_req.uri()) - /// .body(())?; - /// - /// # Ok(()) - /// # } - /// ``` - pub fn presign_write_with(&self, op: OpWrite, expire: Duration) -> Result { - let op = OpPresign::new(op, expire); - - let rp = self.acc.presign(self.path(), op)?; - Ok(rp.into_presigned_request()) - } -} diff --git a/src/types/operator/api_async.rs b/src/types/operator/api_async.rs deleted file mode 100644 index 874fbbad7be..00000000000 --- a/src/types/operator/api_async.rs +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright 2022 Datafuse Labs -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use std::ops::RangeBounds; - -use futures::AsyncReadExt; -use futures::StreamExt; -use tokio::io::ReadBuf; - -use crate::ops::*; -use crate::raw::*; -use crate::*; - -/// Operato async API. -impl Operator { - /// Check if this operator can work correctly. - /// - /// We will send a `list` request to path and return any errors we met. 
- /// - /// ``` - /// # use std::sync::Arc; - /// # use anyhow::Result; - /// use opendal::Operator; - /// - /// # #[tokio::main] - /// # async fn test(op: Operator) -> Result<()> { - /// op.check().await?; - /// # Ok(()) - /// # } - /// ``` - pub async fn check(&self) -> Result<()> { - let mut ds = self.object("/").list().await?; - - match ds.next().await { - Some(Err(e)) if e.kind() != ErrorKind::ObjectNotFound => Err(e), - _ => Ok(()), - } - } - - /// Read the whole object into a bytes. - /// - /// This function will allocate a new bytes internally. For more precise memory control or - /// reading data lazily, please use [`Object::reader`] - /// - /// # Examples - /// - /// ``` - /// # use std::io::Result; - /// # use opendal::Operator; - /// # use futures::TryStreamExt; - /// # #[tokio::main] - /// # async fn test(op: Operator) -> Result<()> { - /// let mut o = op.object("path/to/file"); - /// # o.write(vec![0; 4096]).await?; - /// let bs = o.read().await?; - /// # Ok(()) - /// # } - /// ``` - pub async fn read(&self, path: &str) -> Result> { - self.range_read(path, ..).await - } - - /// Read the specified range of object into a bytes. - /// - /// This function will allocate a new bytes internally. For more precise memory control or - /// reading data lazily, please use [`Object::range_reader`] - /// - /// # Notes - /// - /// - The returning content's length may be smaller than the range specified. 
- /// - /// # Examples - /// - /// ``` - /// # use std::io::Result; - /// # use opendal::Operator; - /// # use futures::TryStreamExt; - /// # #[tokio::main] - /// # async fn test(op: Operator) -> Result<()> { - /// let mut o = op.object("path/to/file"); - /// # o.write(vec![0; 4096]).await?; - /// let bs = o.range_read(1024..2048).await?; - /// # Ok(()) - /// # } - /// ``` - pub async fn range_read(&self, path: &str, range: impl RangeBounds) -> Result> { - let path = normalize_path(path); - - if !validate_path(&path, EntryMode::FILE) { - return Err( - Error::new(ErrorKind::ObjectIsADirectory, "read path is a directory") - .with_operation("range_read") - .with_context("service", self.inner().metadata().scheme()) - .with_context("path", &path), - ); - } - - let br = BytesRange::from(range); - - let op = OpRead::new().with_range(br); - - let (rp, mut s) = self.inner().read(&path, op).await?; - - let length = rp.into_metadata().content_length() as usize; - let mut buffer = Vec::with_capacity(length); - - let dst = buffer.spare_capacity_mut(); - let mut buf = ReadBuf::uninit(dst); - - // Safety: the input buffer is created with_capacity(length). - unsafe { buf.assume_init(length) }; - - // TODO: use native read api - s.read_exact(buf.initialized_mut()).await.map_err(|err| { - Error::new(ErrorKind::Unexpected, "read from storage") - .with_operation("range_read") - .with_context("service", self.inner().metadata().scheme().into_static()) - .with_context("path", &path) - .with_context("range", br.to_string()) - .set_source(err) - })?; - - // Safety: read_exact makes sure this buffer has been filled. 
- unsafe { buffer.set_len(length) } - - Ok(buffer) - } -} diff --git a/src/types/operator/batch.rs b/src/types/operator/batch.rs deleted file mode 100644 index cd4a2421034..00000000000 --- a/src/types/operator/batch.rs +++ /dev/null @@ -1,206 +0,0 @@ -// Copyright 2022 Datafuse Labs -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use futures::stream; -use futures::Stream; -use futures::StreamExt; -use futures::TryStreamExt; - -use crate::ops::*; -use crate::raw::*; -use crate::*; - -/// BatchOperator is used to take batch operations like `remove_all`. -/// -/// # Examples -/// -/// ``` -/// # use anyhow::Result; -/// # use futures::io; -/// # use opendal::Operator; -/// # -/// # #[tokio::main] -/// # async fn test(op: Operator) -> Result<()> { -/// op.batch() -/// .with_limit(1000) -/// .remove_all("dir/to/delete") -/// .await?; -/// # Ok(()) -/// # } -/// ``` -#[derive(Clone, Debug)] -pub struct BatchOperator { - src: Operator, - meta: OperatorMetadata, - - limit: usize, -} - -impl BatchOperator { - pub(crate) fn new(op: Operator) -> Self { - let meta = op.metadata(); - - BatchOperator { - src: op, - meta, - limit: 1000, - } - } - - /// Specify the batch limit. - /// - /// Default: 1000 - pub fn with_limit(mut self, limit: usize) -> Self { - self.limit = limit; - self - } - - /// remove will given paths. - /// - /// # Notes - /// - /// If underlying services support delete in batch, we will use batch - /// delete instead. 
- /// - /// # Examples - /// - /// ``` - /// # use anyhow::Result; - /// # use futures::io; - /// # use opendal::Operator; - /// # - /// # #[tokio::main] - /// # async fn test(op: Operator) -> Result<()> { - /// op.batch() - /// .remove(vec!["abc".to_string(), "def".to_string()]) - /// .await?; - /// # Ok(()) - /// # } - /// ``` - pub async fn remove(&self, paths: Vec) -> Result<()> { - self.remove_via(stream::iter(paths)).await - } - - /// remove_via will remove objects via given stream. - /// - /// We will delete by chunks with given batch limit on the stream. - /// - /// # Notes - /// - /// If underlying services support delete in batch, we will use batch - /// delete instead. - /// - /// # Examples - /// - /// ``` - /// # use anyhow::Result; - /// # use futures::io; - /// # use opendal::Operator; - /// use futures::stream; - /// # - /// # #[tokio::main] - /// # async fn test(op: Operator) -> Result<()> { - /// let stream = stream::iter(vec!["abc".to_string(), "def".to_string()]); - /// op.batch().remove_via(stream).await?; - /// # Ok(()) - /// # } - /// ``` - pub async fn remove_via(&self, mut input: impl Stream + Unpin) -> Result<()> { - if self.meta.can_batch() { - let mut input = input.map(|v| (v, OpDelete::default())).chunks(self.limit); - - while let Some(batches) = input.next().await { - let results = self - .src - .inner() - .batch(OpBatch::new(BatchOperations::Delete(batches))) - .await?; - - let BatchedResults::Delete(results) = results.into_results(); - - // TODO: return error here directly seems not a good idea? - for (_, result) in results { - let _ = result?; - } - } - } else { - while let Some(path) = input.next().await { - self.src.inner().delete(&path, OpDelete::default()).await?; - } - } - - Ok(()) - } - - /// Remove the path and all nested dirs and files recursively. - /// - /// # Notes - /// - /// If underlying services support delete in batch, we will use batch - /// delete instead. 
- /// - /// # Examples - /// - /// ``` - /// # use anyhow::Result; - /// # use futures::io; - /// # use opendal::Operator; - /// # - /// # #[tokio::main] - /// # async fn test(op: Operator) -> Result<()> { - /// op.batch().remove_all("path/to/dir").await?; - /// # Ok(()) - /// # } - /// ``` - pub async fn remove_all(&self, path: &str) -> Result<()> { - let parent = self.src.object(path); - let meta = parent.stat().await?; - - if meta.mode() != EntryMode::DIR { - return parent.delete().await; - } - - let obs = parent.scan().await?; - - if self.meta.can_batch() { - let mut obs = obs.try_chunks(self.limit); - - while let Some(batches) = obs.next().await { - let batches = batches - .map_err(|err| err.1)? - .into_iter() - .map(|v| (v.path().to_string(), OpDelete::default())) - .collect(); - - let results = self - .src - .inner() - .batch(OpBatch::new(BatchOperations::Delete(batches))) - .await?; - - let BatchedResults::Delete(results) = results.into_results(); - - // TODO: return error here directly seems not a good idea? - for (_, result) in results { - let _ = result?; - } - } - } else { - obs.try_for_each(|v| async move { v.delete().await }) - .await?; - } - - Ok(()) - } -} diff --git a/src/types/operator/blocking_operator.rs b/src/types/operator/blocking_operator.rs new file mode 100644 index 00000000000..2d8246499ee --- /dev/null +++ b/src/types/operator/blocking_operator.rs @@ -0,0 +1,651 @@ +// Copyright 2022 Datafuse Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +use std::io::Read; +use std::ops::RangeBounds; + +use bytes::Bytes; +use flagset::FlagSet; + +use crate::ops::*; +use crate::raw::*; +use crate::*; + +/// BlockingOperator is the entry for all public blocking APIs. +/// +/// Read [`concepts`][docs::concepts] for know more about [`Operator`]. +/// +/// # Examples +/// +/// Read more backend init examples in [`services`] +/// +/// ``` +/// # use anyhow::Result; +/// use opendal::services::Fs; +/// use opendal::BlockingOperator; +/// use opendal::Operator; +/// #[tokio::main] +/// async fn main() -> Result<()> { +/// // Create fs backend builder. +/// let mut builder = Fs::default(); +/// // Set the root for fs, all operations will happen under this root. +/// // +/// // NOTE: the root must be absolute path. +/// builder.root("/tmp"); +/// +/// // Build an `BlockingOperator` to start operating the storage. +/// let _: BlockingOperator = Operator::new(builder)?.finish().blocking(); +/// +/// Ok(()) +/// } +/// ``` +#[derive(Clone, Debug)] +pub struct BlockingOperator { + accessor: FusedAccessor, + + limit: usize, +} + +impl BlockingOperator { + pub(super) fn inner(&self) -> &FusedAccessor { + &self.accessor + } + + pub(crate) fn from_inner(accessor: FusedAccessor) -> Self { + Self { + accessor, + limit: 1000, + } + } + + /// Get current operator's limit + pub fn limit(&self) -> usize { + self.limit + } + + /// Specify the batch limit. + /// + /// Default: 1000 + pub fn with_limit(&self, limit: usize) -> Self { + let mut op = self.clone(); + op.limit = limit; + op + } + + /// Get information of underlying accessor. 
+ /// + /// # Examples + /// + /// ``` + /// # use std::sync::Arc; + /// # use anyhow::Result; + /// use opendal::BlockingOperator; + /// + /// # #[tokio::main] + /// # async fn test(op: BlockingOperator) -> Result<()> { + /// let info = op.info(); + /// # Ok(()) + /// # } + /// ``` + pub fn info(&self) -> OperatorInfo { + OperatorInfo::new(self.accessor.info()) + } +} + +/// # Operator blocking API. +impl BlockingOperator { + /// Get current object's metadata **without cache** directly. + /// + /// # Notes + /// + /// Use `stat` if you: + /// + /// - Want detect the outside changes of object. + /// - Don't want to read from cached object metadata. + /// + /// You may want to use `metadata` if you are working with objects + /// returned by [`Lister`]. It's highly possible that metadata + /// you want has already been cached. + /// + /// # Examples + /// + /// ``` + /// # use anyhow::Result; + /// # use futures::io; + /// # use opendal::BlockingOperator; + /// use opendal::ErrorKind; + /// # + /// # fn test(op: BlockingOperator) -> Result<()> { + /// if let Err(e) = op.stat("test") { + /// if e.kind() == ErrorKind::NotFound { + /// println!("object not exist") + /// } + /// } + /// # Ok(()) + /// # } + /// ``` + pub fn stat(&self, path: &str) -> Result { + let path = normalize_path(path); + + let rp = self.inner().blocking_stat(&path, OpStat::new())?; + let meta = rp.into_metadata(); + + Ok(meta) + } + + /// Get current object's metadata with cache in blocking way. + /// + /// `metadata` will check the given query with already cached metadata + /// first. And query from storage if not found. + /// + /// # Notes + /// + /// Use `metadata` if you are working with objects returned by + /// [`Lister`]. It's highly possible that metadata you want + /// has already been cached. + /// + /// You may want to use `stat`, if you: + /// + /// - Want detect the outside changes of object. + /// - Don't want to read from cached object metadata. 
+ /// + /// # Behavior + /// + /// Visiting not fetched metadata will lead to panic in debug build. + /// It must be a bug, please fix it instead. + /// + /// # Examples + /// + /// ## Query already cached metadata + /// + /// By query metadata with `None`, we can only query in-memory metadata + /// cache. In this way, we can make sure that no API call will send. + /// + /// ``` + /// # use anyhow::Result; + /// # use opendal::BlockingOperator; + /// use opendal::Entry; + /// + /// # fn test(op: BlockingOperator, entry: Entry) -> Result<()> { + /// let meta = op.metadata(&entry, None)?; + /// // content length COULD be correct. + /// let _ = meta.content_length(); + /// // etag COULD be correct. + /// let _ = meta.etag(); + /// # Ok(()) + /// # } + /// ``` + /// + /// ## Query content length and content type + /// + /// ``` + /// # use anyhow::Result; + /// # use opendal::BlockingOperator; + /// use opendal::Entry; + /// use opendal::Metakey; + /// + /// # fn test(op: BlockingOperator, entry: Entry) -> Result<()> { + /// let meta = op.metadata(&entry, { Metakey::ContentLength | Metakey::ContentType })?; + /// // content length MUST be correct. + /// let _ = meta.content_length(); + /// // etag COULD be correct. + /// let _ = meta.etag(); + /// # Ok(()) + /// # } + /// ``` + /// + /// ## Query all metadata + /// + /// By query metadata with `Complete`, we can make sure that we have fetched all metadata of this object. + /// + /// ``` + /// # use anyhow::Result; + /// # use opendal::BlockingOperator; + /// use opendal::Entry; + /// use opendal::Metakey; + /// + /// # fn test(op: BlockingOperator, entry: Entry) -> Result<()> { + /// let meta = op.metadata(&entry, { Metakey::Complete })?; + /// // content length MUST be correct. + /// let _ = meta.content_length(); + /// // etag MUST be correct. 
+ /// let _ = meta.etag(); + /// # Ok(()) + /// # } + /// ``` + pub fn metadata(&self, entry: &Entry, flags: impl Into>) -> Result { + let meta = entry.metadata(); + if meta.bit().contains(flags) || meta.bit().contains(Metakey::Complete) { + return Ok(meta.clone()); + } + + let meta = self.stat(entry.path())?; + Ok(meta) + } + + /// Check if this object exists or not. + /// + /// # Example + /// + /// ```no_run + /// use anyhow::Result; + /// use opendal::BlockingOperator; + /// fn test(op: BlockingOperator) -> Result<()> { + /// let _ = op.is_exist("test")?; + /// + /// Ok(()) + /// } + /// ``` + pub fn is_exist(&self, path: &str) -> Result { + let r = self.stat(path); + match r { + Ok(_) => Ok(true), + Err(err) => match err.kind() { + ErrorKind::NotFound => Ok(false), + _ => Err(err), + }, + } + } + + /// Create an empty object, like using the following linux commands: + /// + /// - `touch path/to/file` + /// - `mkdir path/to/dir/` + /// + /// # Behavior + /// + /// - Create on existing dir will succeed. + /// - Create on existing file will overwrite and truncate it. 
+ /// + /// # Examples + /// + /// ## Create an empty file + /// + /// ```no_run + /// # use std::io::Result; + /// # use opendal::BlockingOperator; + /// # use futures::TryStreamExt; + /// # fn test(op: BlockingOperator) -> Result<()> { + /// op.create("path/to/file")?; + /// # Ok(()) + /// # } + /// ``` + /// + /// ## Create a dir + /// + /// ```no_run + /// # use std::io::Result; + /// # use opendal::BlockingOperator; + /// # use futures::TryStreamExt; + /// # async fn test(op: BlockingOperator) -> Result<()> { + /// op.create("path/to/dir/")?; + /// # Ok(()) + /// # } + /// ``` + pub fn create(&self, path: &str) -> Result<()> { + let path = normalize_path(path); + + if path.ends_with('/') { + self.inner() + .blocking_create(&path, OpCreate::new(EntryMode::DIR))?; + } else { + self.inner() + .blocking_create(&path, OpCreate::new(EntryMode::FILE))?; + }; + + Ok(()) + } + + /// Read the whole object into a bytes. + /// + /// This function will allocate a new bytes internally. For more precise memory control or + /// reading data lazily, please use [`BlockingOperator::reader`] + /// + /// # Examples + /// + /// ```no_run + /// # use std::io::Result; + /// # use opendal::BlockingOperator; + /// # + /// # fn test(op: BlockingOperator) -> Result<()> { + /// let bs = op.read("path/to/file")?; + /// # Ok(()) + /// # } + /// ``` + pub fn read(&self, path: &str) -> Result> { + self.range_read(path, ..) + } + + /// Read the specified range of object into a bytes. + /// + /// This function will allocate a new bytes internally. 
For more precise memory control or + /// reading data lazily, please use [`BlockingOperator::range_reader`] + /// + /// # Examples + /// + /// ```no_run + /// # use std::io::Result; + /// # use opendal::BlockingOperator; + /// # use futures::TryStreamExt; + /// # use opendal::Scheme; + /// # async fn test(op: BlockingOperator) -> Result<()> { + /// let bs = op.range_read("path/to/file", 1024..2048)?; + /// # Ok(()) + /// # } + /// ``` + pub fn range_read(&self, path: &str, range: impl RangeBounds) -> Result> { + let path = normalize_path(path); + + if !validate_path(&path, EntryMode::FILE) { + return Err( + Error::new(ErrorKind::IsADirectory, "read path is a directory") + .with_operation("Object::blocking_range_read") + .with_context("service", self.info().scheme().into_static()) + .with_context("path", &path), + ); + } + + let br = BytesRange::from(range); + let (rp, mut s) = self + .inner() + .blocking_read(&path, OpRead::new().with_range(br))?; + + let mut buffer = Vec::with_capacity(rp.into_metadata().content_length() as usize); + s.read_to_end(&mut buffer).map_err(|err| { + Error::new(ErrorKind::Unexpected, "blocking range read failed") + .with_operation("Object::blocking_range_read") + .with_context("service", self.info().scheme().into_static()) + .with_context("path", path) + .with_context("range", br.to_string()) + .set_source(err) + })?; + + Ok(buffer) + } + + /// Create a new reader which can read the whole object. + /// + /// # Examples + /// + /// ```no_run + /// # use std::io::Result; + /// # use opendal::BlockingOperator; + /// # use futures::TryStreamExt; + /// # fn test(op: BlockingOperator) -> Result<()> { + /// let r = op.reader("path/to/file")?; + /// # Ok(()) + /// # } + /// ``` + pub fn reader(&self, path: &str) -> Result { + self.range_reader(path, ..) + } + + /// Create a new reader which can read the specified range. 
+ /// + /// # Examples + /// + /// ```no_run + /// # use std::io::Result; + /// # use opendal::BlockingOperator; + /// # use futures::TryStreamExt; + /// # fn test(op: BlockingOperator) -> Result<()> { + /// let r = op.range_reader("path/to/file", 1024..2048)?; + /// # Ok(()) + /// # } + /// ``` + pub fn range_reader(&self, path: &str, range: impl RangeBounds) -> Result { + let path = normalize_path(path); + + if !validate_path(&path, EntryMode::FILE) { + return Err( + Error::new(ErrorKind::IsADirectory, "read path is a directory") + .with_operation("Object::blocking_range_reader") + .with_context("service", self.info().scheme().into_static()) + .with_context("path", &path), + ); + } + + let op = OpRead::new().with_range(range.into()); + + BlockingReader::create(self.inner().clone(), &path, op) + } + + /// Write bytes into object. + /// + /// # Notes + /// + /// - Write will make sure all bytes has been written, or an error will be returned. + /// + /// # Examples + /// + /// ```no_run + /// # use std::io::Result; + /// # use opendal::BlockingOperator; + /// # use futures::StreamExt; + /// # use futures::SinkExt; + /// use bytes::Bytes; + /// + /// # fn test(op: BlockingOperator) -> Result<()> { + /// op.write("path/to/file", vec![0; 4096])?; + /// # Ok(()) + /// # } + /// ``` + pub fn write(&self, path: &str, bs: impl Into) -> Result<()> { + self.write_with(path, OpWrite::new(), bs) + } + + /// Write data with option described in OpenDAL [rfc-0661](../../docs/rfcs/0661-path-in-accessor.md) + /// + /// # Notes + /// + /// - Write will make sure all bytes has been written, or an error will be returned. 
+ /// + /// # Examples + /// + /// ```no_run + /// # use opendal::Result; + /// # use opendal::BlockingOperator; + /// use bytes::Bytes; + /// use opendal::ops::OpWrite; + /// + /// # async fn test(op: BlockingOperator) -> Result<()> { + /// let bs = b"hello, world!".to_vec(); + /// let ow = OpWrite::new().with_content_type("text/plain"); + /// let _ = op.write_with("hello.txt", ow, bs)?; + /// # Ok(()) + /// # } + /// ``` + pub fn write_with(&self, path: &str, args: OpWrite, bs: impl Into) -> Result<()> { + let path = normalize_path(path); + + if !validate_path(&path, EntryMode::FILE) { + return Err( + Error::new(ErrorKind::IsADirectory, "write path is a directory") + .with_operation("Object::blocking_write_with") + .with_context("service", self.info().scheme().into_static()) + .with_context("path", &path), + ); + } + + let (_, mut w) = self.inner().blocking_write(&path, args)?; + w.write(bs.into())?; + w.close()?; + + Ok(()) + } + + /// Write multiple bytes into object. + /// + /// # Notes + /// + /// - Write will make sure all bytes has been written, or an error will be returned. 
+ /// + /// # Examples + /// + /// ```no_run + /// # use std::io::Result; + /// # use opendal::BlockingOperator; + /// # use futures::StreamExt; + /// # use futures::SinkExt; + /// use bytes::Bytes; + /// + /// # fn test(op: BlockingOperator) -> Result<()> { + /// let mut w = op.writer("path/to/file")?; + /// w.append(vec![0; 4096])?; + /// w.append(vec![1; 4096])?; + /// w.close()?; + /// # Ok(()) + /// # } + /// ``` + pub fn writer(&self, path: &str) -> Result { + let path = normalize_path(path); + + if !validate_path(&path, EntryMode::FILE) { + return Err( + Error::new(ErrorKind::IsADirectory, "write path is a directory") + .with_operation("Object::write_with") + .with_context("service", self.info().scheme().into_static()) + .with_context("path", &path), + ); + } + + let op = OpWrite::default().with_append(); + BlockingWriter::create(self.inner().clone(), &path, op) + } + + /// Delete object. + /// + /// # Notes + /// + /// - Delete not existing error won't return errors. + /// + /// # Examples + /// + /// ```no_run + /// # use anyhow::Result; + /// # use futures::io; + /// # use opendal::BlockingOperator; + /// # fn test(op: BlockingOperator) -> Result<()> { + /// op.delete("path/to/file")?; + /// # Ok(()) + /// # } + /// ``` + pub fn delete(&self, path: &str) -> Result<()> { + let path = normalize_path(path); + + let _ = self.inner().blocking_delete(&path, OpDelete::new())?; + + Ok(()) + } + + /// List current dir object. + /// + /// This function will create a new handle to list objects. + /// + /// An error will be returned if object path doesn't end with `/`. 
+ /// + /// # Examples + /// + /// ```no_run + /// # use opendal::Result; + /// # use futures::io; + /// # use opendal::BlockingOperator; + /// # use opendal::EntryMode; + /// # fn test(op: BlockingOperator) -> Result<()> { + /// let mut ds = op.list("path/to/dir/")?; + /// while let Some(mut de) = ds.next() { + /// let meta = op.metadata(&de?, { + /// use opendal::Metakey::*; + /// Mode + /// })?; + /// match meta.mode() { + /// EntryMode::FILE => { + /// println!("Handling file") + /// } + /// EntryMode::DIR => { + /// println!("Handling dir like start a new list via de.path()") + /// } + /// EntryMode::Unknown => continue, + /// } + /// } + /// # Ok(()) + /// # } + /// ``` + pub fn list(&self, path: &str) -> Result { + let path = normalize_path(path); + + if !validate_path(&path, EntryMode::DIR) { + return Err(Error::new( + ErrorKind::NotADirectory, + "the path trying to list is not a directory", + ) + .with_operation("Object::blocking_list") + .with_context("service", self.info().scheme().into_static()) + .with_context("path", &path)); + } + + let (_, pager) = self.inner().blocking_list(&path, OpList::new())?; + Ok(BlockingLister::new(pager)) + } + + /// List dir in flat way. + /// + /// This function will create a new handle to list objects. + /// + /// An error will be returned if object path doesn't end with `/`. 
+ /// + /// # Examples + /// + /// ```no_run + /// # use opendal::Result; + /// # use futures::io; + /// # use opendal::BlockingOperator; + /// # use opendal::EntryMode; + /// # fn test(op: BlockingOperator) -> Result<()> { + /// let mut ds = op.list("path/to/dir/")?; + /// while let Some(mut de) = ds.next() { + /// let meta = op.metadata(&de?, { + /// use opendal::Metakey::*; + /// Mode + /// })?; + /// match meta.mode() { + /// EntryMode::FILE => { + /// println!("Handling file") + /// } + /// EntryMode::DIR => { + /// println!("Handling dir like start a new list via meta.path()") + /// } + /// EntryMode::Unknown => continue, + /// } + /// } + /// # Ok(()) + /// # } + /// ``` + pub fn scan(&self, path: &str) -> Result { + let path = normalize_path(path); + + if !validate_path(&path, EntryMode::DIR) { + return Err(Error::new( + ErrorKind::NotADirectory, + "the path trying to list is not a directory", + ) + .with_operation("Object::blocking_scan") + .with_context("service", self.info().scheme().into_static()) + .with_context("path", path)); + } + + let (_, pager) = self.inner().blocking_scan(&path, OpScan::new())?; + Ok(BlockingLister::new(pager)) + } +} diff --git a/src/types/operator/builder.rs b/src/types/operator/builder.rs index 4179a874020..3d6060c65ca 100644 --- a/src/types/operator/builder.rs +++ b/src/types/operator/builder.rs @@ -37,49 +37,12 @@ use crate::*; /// builder.root("/tmp"); /// /// // Build an `Operator` to start operating the storage. -/// let op: Operator = Operator::create(builder)?.finish(); -/// -/// // Create an object handle to start operation on object. -/// let _ = op.object("test_file"); +/// let op: Operator = Operator::new(builder)?.finish(); /// /// Ok(()) /// } /// ``` impl Operator { - /// Create a new operator. - /// - /// # Examples - /// - /// Read more backend init examples in [examples](https://github.com/datafuselabs/opendal/tree/main/examples). 
- /// - /// ``` - /// # use anyhow::Result; - /// use opendal::services::Fs; - /// use opendal::Builder; - /// use opendal::Operator; - /// #[tokio::main] - /// async fn main() -> Result<()> { - /// // Create fs backend builder. - /// let mut builder = Fs::default(); - /// // Set the root for fs, all operations will happen under this root. - /// // - /// // NOTE: the root must be absolute path. - /// builder.root("/tmp"); - /// - /// // Build an `Operator` to start operating the storage. - /// let op: Operator = Operator::new(builder.build()?).finish(); - /// - /// // Create an object handle to start operation on object. - /// let _ = op.object("test_file"); - /// - /// Ok(()) - /// } - /// ``` - #[allow(clippy::new_ret_no_self)] - pub fn new(acc: A) -> OperatorBuilder { - OperatorBuilder::new(acc) - } - /// Create a new operator with input builder. /// /// OpenDAL will call `builder.build()` internally, so we don't need @@ -103,15 +66,13 @@ impl Operator { /// builder.root("/tmp"); /// /// // Build an `Operator` to start operating the storage. - /// let op: Operator = Operator::create(builder)?.finish(); - /// - /// // Create an object handle to start operation on object. - /// let _ = op.object("test_file"); + /// let op: Operator = Operator::new(builder)?.finish(); /// /// Ok(()) /// } /// ``` - pub fn create(mut ab: B) -> Result> { + #[allow(clippy::new_ret_no_self)] + pub fn new(mut ab: B) -> Result> { let acc = ab.build()?; Ok(OperatorBuilder::new(acc)) } @@ -136,9 +97,6 @@ impl Operator { /// // Build an `Operator` to start operating the storage. /// let op: Operator = Operator::from_map::(map)?.finish(); /// - /// // Create an object handle to start operation on object. 
- /// let _ = op.object("test_file"); - /// /// Ok(()) /// } /// ``` @@ -196,10 +154,10 @@ impl Operator { /// /// # #[tokio::main] /// # async fn main() -> Result<()> { - /// let op = Operator::create(Fs::default())?.finish(); + /// let op = Operator::new(Fs::default())?.finish(); /// let op = op.layer(LoggingLayer::default()); /// // All operations will go through the new_layer - /// let _ = op.object("test_file").read().await?; + /// let _ = op.read("test_file").await?; /// # Ok(()) /// # } /// ``` @@ -291,11 +249,11 @@ impl OperatorBuilder { /// /// # #[tokio::main] /// # async fn main() -> Result<()> { - /// let op = Operator::create(Fs::default())? + /// let op = Operator::new(Fs::default())? /// .layer(LoggingLayer::default()) /// .finish(); /// // All operations will go through the new_layer - /// let _ = op.object("test_file").read().await?; + /// let _ = op.read("test_file").await?; /// # Ok(()) /// # } /// ``` diff --git a/src/types/operator/metadata.rs b/src/types/operator/metadata.rs index 400d88c2d69..848f57f3231 100644 --- a/src/types/operator/metadata.rs +++ b/src/types/operator/metadata.rs @@ -17,11 +17,11 @@ use crate::*; /// Metadata for operator, users can use this metadata to get information of operator. #[derive(Clone, Debug, Default)] -pub struct OperatorMetadata(AccessorMetadata); +pub struct OperatorInfo(AccessorInfo); -impl OperatorMetadata { - pub(super) fn new(acc: AccessorMetadata) -> Self { - OperatorMetadata(acc) +impl OperatorInfo { + pub(super) fn new(acc: AccessorInfo) -> Self { + OperatorInfo(acc) } /// [`Scheme`] of operator. 
diff --git a/src/types/operator/mod.rs b/src/types/operator/mod.rs index b30ae49bccd..2abffd688e1 100644 --- a/src/types/operator/mod.rs +++ b/src/types/operator/mod.rs @@ -18,13 +18,11 @@ mod operator; pub use operator::Operator; +mod blocking_operator; +pub use blocking_operator::BlockingOperator; + mod builder; pub use builder::OperatorBuilder; mod metadata; -pub use metadata::OperatorMetadata; - -mod batch; -pub use batch::BatchOperator; - -mod api_async; +pub use metadata::OperatorInfo; diff --git a/src/types/operator/operator.rs b/src/types/operator/operator.rs index 01da2716031..9b700dd8bba 100644 --- a/src/types/operator/operator.rs +++ b/src/types/operator/operator.rs @@ -12,6 +12,20 @@ // See the License for the specific language governing permissions and // limitations under the License. +use std::ops::RangeBounds; + +use bytes::Bytes; +use flagset::FlagSet; +use futures::stream; +use futures::AsyncReadExt; +use futures::Stream; +use futures::StreamExt; +use futures::TryStreamExt; +use time::Duration; +use tokio::io::ReadBuf; + +use super::BlockingOperator; +use crate::ops::*; use crate::raw::*; use crate::*; @@ -37,7 +51,7 @@ use crate::*; /// builder.root("/tmp"); /// /// // Build an `Operator` to start operating the storage. -/// let _: Operator = Operator::create(builder)?.finish(); +/// let _: Operator = Operator::new(builder)?.finish(); /// /// Ok(()) /// } @@ -45,6 +59,8 @@ use crate::*; #[derive(Clone, Debug)] pub struct Operator { accessor: FusedAccessor, + + limit: usize, } /// # Operator basic API. @@ -54,14 +70,31 @@ impl Operator { } pub(crate) fn from_inner(accessor: FusedAccessor) -> Self { - Self { accessor } + Self { + accessor, + limit: 1000, + } } pub(super) fn into_innter(self) -> FusedAccessor { self.accessor } - /// Get metadata of underlying accessor. + /// Get current operator's limit + pub fn limit(&self) -> usize { + self.limit + } + + /// Specify the batch limit. 
+ /// + /// Default: 1000 + pub fn with_limit(&self, limit: usize) -> Self { + let mut op = self.clone(); + op.limit = limit; + op + } + + /// Get information of underlying accessor. /// /// # Examples /// @@ -72,22 +105,909 @@ impl Operator { /// /// # #[tokio::main] /// # async fn test(op: Operator) -> Result<()> { - /// let meta = op.metadata(); + /// let info = op.info(); + /// # Ok(()) + /// # } + /// ``` + pub fn info(&self) -> OperatorInfo { + OperatorInfo::new(self.accessor.info()) + } + + /// Create a new blocking operator. + /// + /// This operation is nearly no cost. + pub fn blocking(&self) -> BlockingOperator { + BlockingOperator::from_inner(self.accessor.clone()).with_limit(self.limit) + } +} + +/// Operato async API. +impl Operator { + /// Check if this operator can work correctly. + /// + /// We will send a `list` request to path and return any errors we met. + /// + /// ``` + /// # use std::sync::Arc; + /// # use anyhow::Result; + /// use opendal::Operator; + /// + /// # #[tokio::main] + /// # async fn test(op: Operator) -> Result<()> { + /// op.check().await?; + /// # Ok(()) + /// # } + /// ``` + pub async fn check(&self) -> Result<()> { + let mut ds = self.list("/").await?; + + match ds.next().await { + Some(Err(e)) if e.kind() != ErrorKind::NotFound => Err(e), + _ => Ok(()), + } + } + + /// Get current object's metadata **without cache** directly. + /// + /// # Notes + /// + /// Use `stat` if you: + /// + /// - Want detect the outside changes of object. + /// - Don't want to read from cached object metadata. + /// + /// You may want to use `metadata` if you are working with objects + /// returned by [`Lister`]. It's highly possible that metadata + /// you want has already been cached. 
+ /// + /// # Examples + /// + /// ``` + /// # use anyhow::Result; + /// # use futures::io; + /// # use opendal::Operator; + /// use opendal::ErrorKind; + /// # + /// # #[tokio::main] + /// # async fn test(op: Operator) -> Result<()> { + /// if let Err(e) = op.stat("test").await { + /// if e.kind() == ErrorKind::NotFound { + /// println!("file not exist") + /// } + /// } + /// # Ok(()) + /// # } + /// ``` + pub async fn stat(&self, path: &str) -> Result { + let path = normalize_path(path); + + let rp = self.inner().stat(&path, OpStat::new()).await?; + let meta = rp.into_metadata(); + + Ok(meta) + } + + /// Get current object's metadata with cache. + /// + /// `metadata` will check the given query with already cached metadata + /// first. And query from storage if not found. + /// + /// # Notes + /// + /// Use `metadata` if you are working with objects returned by + /// [`Lister`]. It's highly possible that metadata you want + /// has already been cached. + /// + /// You may want to use `stat`, if you: + /// + /// - Want detect the outside changes of object. + /// - Don't want to read from cached object metadata. + /// + /// # Behavior + /// + /// Visiting not fetched metadata will lead to panic in debug build. + /// It must be a bug, please fix it instead. + /// + /// # Examples + /// + /// ## Query already cached metadata + /// + /// By query metadata with `None`, we can only query in-memory metadata + /// cache. In this way, we can make sure that no API call will send. + /// + /// ``` + /// # use anyhow::Result; + /// # use opendal::Operator; + /// use opendal::Entry; + /// # #[tokio::main] + /// # async fn test(op: Operator, entry: Entry) -> Result<()> { + /// let meta = op.metadata(&entry, None).await?; + /// // content length COULD be correct. + /// let _ = meta.content_length(); + /// // etag COULD be correct. 
+ /// let _ = meta.etag(); + /// # Ok(()) + /// # } + /// ``` + /// + /// ## Query content length and content type + /// + /// ``` + /// # use anyhow::Result; + /// # use opendal::Operator; + /// use opendal::Entry; + /// use opendal::Metakey; + /// + /// # #[tokio::main] + /// # async fn test(op: Operator, entry: Entry) -> Result<()> { + /// let meta = op + /// .metadata(&entry, Metakey::ContentLength | Metakey::ContentType) + /// .await?; + /// // content length MUST be correct. + /// let _ = meta.content_length(); + /// // etag COULD be correct. + /// let _ = meta.etag(); + /// # Ok(()) + /// # } + /// ``` + /// + /// ## Query all metadata + /// + /// By query metadata with `Complete`, we can make sure that we have fetched all metadata of this object. + /// + /// ``` + /// # use anyhow::Result; + /// # use opendal::Operator; + /// use opendal::Entry; + /// use opendal::Metakey; + /// + /// # #[tokio::main] + /// # async fn test(op: Operator, entry: Entry) -> Result<()> { + /// let meta = op.metadata(&entry, Metakey::Complete).await?; + /// // content length MUST be correct. + /// let _ = meta.content_length(); + /// // etag MUST be correct. + /// let _ = meta.etag(); + /// # Ok(()) + /// # } + /// ``` + pub async fn metadata( + &self, + entry: &Entry, + flags: impl Into>, + ) -> Result { + let meta = entry.metadata(); + if meta.bit().contains(flags) || meta.bit().contains(Metakey::Complete) { + return Ok(meta.clone()); + } + + let meta = self.stat(entry.path()).await?; + Ok(meta) + } + + /// Check if this path exists or not. 
+ /// + /// # Example + /// + /// ``` + /// use anyhow::Result; + /// use futures::io; + /// use opendal::Operator; + /// + /// #[tokio::main] + /// async fn test(op: Operator) -> Result<()> { + /// let _ = op.is_exist("test").await?; + /// + /// Ok(()) + /// } + /// ``` + pub async fn is_exist(&self, path: &str) -> Result { + let r = self.stat(path).await; + match r { + Ok(_) => Ok(true), + Err(err) => match err.kind() { + ErrorKind::NotFound => Ok(false), + _ => Err(err), + }, + } + } + + /// Create an empty object, like using the following linux commands: + /// + /// - `touch path/to/file` + /// - `mkdir path/to/dir/` + /// + /// # Behavior + /// + /// - Create on existing dir will succeed. + /// - Create on existing file will overwrite and truncate it. + /// + /// # Examples + /// + /// ## Create an empty file + /// + /// ``` + /// # use std::io::Result; + /// # use opendal::Operator; + /// # use futures::TryStreamExt; + /// # #[tokio::main] + /// # async fn test(op: Operator) -> Result<()> { + /// op.create("path/to/file").await?; + /// # Ok(()) + /// # } + /// ``` + /// + /// ## Create a dir + /// + /// ``` + /// # use std::io::Result; + /// # use opendal::Operator; + /// # use futures::TryStreamExt; + /// # #[tokio::main] + /// # async fn test(op: Operator) -> Result<()> { + /// op.create("path/to/dir/").await?; + /// # Ok(()) + /// # } + /// ``` + pub async fn create(&self, path: &str) -> Result<()> { + let path = normalize_path(path); + + let _ = if path.ends_with('/') { + self.inner() + .create(&path, OpCreate::new(EntryMode::DIR)) + .await? + } else { + self.inner() + .create(&path, OpCreate::new(EntryMode::FILE)) + .await? + }; + + Ok(()) + } + + /// Read the whole path into a bytes. + /// + /// This function will allocate a new bytes internally. 
For more precise memory control or + /// reading data lazily, please use [`Operator::reader`] + /// + /// # Examples + /// + /// ``` + /// # use std::io::Result; + /// # use opendal::Operator; + /// # use futures::TryStreamExt; + /// # #[tokio::main] + /// # async fn test(op: Operator) -> Result<()> { + /// let bs = op.read("path/to/file").await?; + /// # Ok(()) + /// # } + /// ``` + pub async fn read(&self, path: &str) -> Result> { + self.range_read(path, ..).await + } + + /// Read the specified range of object into a bytes. + /// + /// This function will allocate a new bytes internally. For more precise memory control or + /// reading data lazily, please use [`Operator::range_reader`] + /// + /// # Notes + /// + /// - The returning content's length may be smaller than the range specified. + /// + /// # Examples + /// + /// ``` + /// # use std::io::Result; + /// # use opendal::Operator; + /// # use futures::TryStreamExt; + /// # #[tokio::main] + /// # async fn test(op: Operator) -> Result<()> { + /// let bs = op.range_read("path/to/file", 1024..2048).await?; + /// # Ok(()) + /// # } + /// ``` + pub async fn range_read(&self, path: &str, range: impl RangeBounds) -> Result> { + let path = normalize_path(path); + + if !validate_path(&path, EntryMode::FILE) { + return Err( + Error::new(ErrorKind::IsADirectory, "read path is a directory") + .with_operation("range_read") + .with_context("service", self.inner().info().scheme()) + .with_context("path", &path), + ); + } + + let br = BytesRange::from(range); + + let op = OpRead::new().with_range(br); + + let (rp, mut s) = self.inner().read(&path, op).await?; + + let length = rp.into_metadata().content_length() as usize; + let mut buffer = Vec::with_capacity(length); + + let dst = buffer.spare_capacity_mut(); + let mut buf = ReadBuf::uninit(dst); + + // Safety: the input buffer is created with_capacity(length). 
+ unsafe { buf.assume_init(length) }; + + // TODO: use native read api + s.read_exact(buf.initialized_mut()).await.map_err(|err| { + Error::new(ErrorKind::Unexpected, "read from storage") + .with_operation("range_read") + .with_context("service", self.inner().info().scheme().into_static()) + .with_context("path", &path) + .with_context("range", br.to_string()) + .set_source(err) + })?; + + // Safety: read_exact makes sure this buffer has been filled. + unsafe { buffer.set_len(length) } + + Ok(buffer) + } + + /// Create a new reader which can read the whole path. + /// + /// # Examples + /// + /// ```no_run + /// # use std::io::Result; + /// # use opendal::Operator; + /// # use futures::TryStreamExt; + /// # use opendal::Scheme; + /// # #[tokio::main] + /// # async fn test(op: Operator) -> Result<()> { + /// let r = op.reader("path/to/file").await?; + /// # Ok(()) + /// # } + /// ``` + pub async fn reader(&self, path: &str) -> Result { + self.range_reader(path, ..).await + } + + /// Create a new reader which can read the specified range. + /// + /// # Notes + /// + /// - The returning content's length may be smaller than the range specified. 
+ /// + /// # Examples + /// + /// ```no_run + /// # use std::io::Result; + /// # use opendal::Operator; + /// # use futures::TryStreamExt; + /// # #[tokio::main] + /// # async fn test(op: Operator) -> Result<()> { + /// let r = op.range_reader("path/to/file", 1024..2048).await?; + /// # Ok(()) + /// # } + /// ``` + pub async fn range_reader(&self, path: &str, range: impl RangeBounds) -> Result { + let path = normalize_path(path); + + if !validate_path(&path, EntryMode::FILE) { + return Err( + Error::new(ErrorKind::IsADirectory, "read path is a directory") + .with_operation("Object::range_reader") + .with_context("service", self.info().scheme()) + .with_context("path", path), + ); + } + + let op = OpRead::new().with_range(range.into()); + + Reader::create(self.inner().clone(), &path, op).await + } + + /// Write bytes into object. + /// + /// # Notes + /// + /// - Write will make sure all bytes has been written, or an error will be returned. + /// + /// # Examples + /// + /// ``` + /// # use std::io::Result; + /// # use opendal::Operator; + /// # use futures::StreamExt; + /// # use futures::SinkExt; + /// use bytes::Bytes; + /// + /// # #[tokio::main] + /// # async fn test(op: Operator) -> Result<()> { + /// op.write("path/to/file", vec![0; 4096]).await?; + /// # Ok(()) + /// # } + /// ``` + pub async fn write(&self, path: &str, bs: impl Into) -> Result<()> { + self.write_with(path, OpWrite::new(), bs).await + } + + /// Write multiple bytes into object. + /// + /// # Notes + /// + /// - Write will make sure all bytes has been written, or an error will be returned. 
+ /// + /// # Examples + /// + /// ``` + /// # use std::io::Result; + /// # use opendal::Operator; + /// # use futures::StreamExt; + /// # use futures::SinkExt; + /// use bytes::Bytes; + /// + /// # #[tokio::main] + /// # async fn test(op: Operator) -> Result<()> { + /// let mut w = op.writer("path/to/file").await?; + /// w.append(vec![0; 4096]).await?; + /// w.append(vec![1; 4096]).await?; + /// w.close().await?; /// # Ok(()) /// # } /// ``` - pub fn metadata(&self) -> OperatorMetadata { - OperatorMetadata::new(self.accessor.metadata()) + pub async fn writer(&self, path: &str) -> Result { + let path = normalize_path(path); + + if !validate_path(&path, EntryMode::FILE) { + return Err( + Error::new(ErrorKind::IsADirectory, "write path is a directory") + .with_operation("Object::write_with") + .with_context("service", self.inner().info().scheme().into_static()) + .with_context("path", &path), + ); + } + + let op = OpWrite::default().with_append(); + Writer::create(self.inner().clone(), &path, op).await } - /// Create a new batch operator handle to take batch operations - /// like `walk` and `remove`. - pub fn batch(&self) -> BatchOperator { - BatchOperator::new(self.clone()) + /// Write data with extra options. + /// + /// # Notes + /// + /// - Write will make sure all bytes has been written, or an error will be returned. 
+ /// + /// # Examples + /// + /// ```no_run + /// # use std::io::Result; + /// # use opendal::Operator; + /// use bytes::Bytes; + /// use opendal::ops::OpWrite; + /// + /// # #[tokio::main] + /// # async fn test(op: Operator) -> Result<()> { + /// let bs = b"hello, world!".to_vec(); + /// let args = OpWrite::new().with_content_type("text/plain"); + /// let _ = op.write_with("path/to/file", args, bs).await?; + /// # Ok(()) + /// # } + /// ``` + pub async fn write_with(&self, path: &str, args: OpWrite, bs: impl Into) -> Result<()> { + let path = normalize_path(path); + + if !validate_path(&path, EntryMode::FILE) { + return Err( + Error::new(ErrorKind::IsADirectory, "write path is a directory") + .with_operation("Object::write_with") + .with_context("service", self.info().scheme().into_static()) + .with_context("path", &path), + ); + } + + let (_, mut w) = self.inner().write(&path, args).await?; + w.write(bs.into()).await?; + w.close().await?; + + Ok(()) } - /// Create a new [`Object`][crate::Object] handle to take operations. - pub fn object(&self, path: &str) -> Object { - Object::new(self.accessor.clone(), path) + /// Delete object. + /// + /// # Notes + /// + /// - Delete not existing error won't return errors. + /// + /// # Examples + /// + /// ``` + /// # use anyhow::Result; + /// # use futures::io; + /// # use opendal::Operator; + /// # #[tokio::main] + /// # async fn test(op: Operator) -> Result<()> { + /// op.delete("test").await?; + /// # Ok(()) + /// # } + /// ``` + pub async fn delete(&self, path: &str) -> Result<()> { + let path = normalize_path(path); + + let _ = self.inner().delete(&path, OpDelete::new()).await?; + + Ok(()) + } + + /// + /// # Notes + /// + /// If underlying services support delete in batch, we will use batch + /// delete instead. 
+ /// + /// # Examples + /// + /// ``` + /// # use anyhow::Result; + /// # use futures::io; + /// # use opendal::Operator; + /// # + /// # #[tokio::main] + /// # async fn test(op: Operator) -> Result<()> { + /// op.remove(vec!["abc".to_string(), "def".to_string()]) + /// .await?; + /// # Ok(()) + /// # } + /// ``` + pub async fn remove(&self, paths: Vec) -> Result<()> { + self.remove_via(stream::iter(paths)).await + } + + /// remove will given paths. + + /// remove_via will remove objects via given stream. + /// + /// We will delete by chunks with given batch limit on the stream. + /// + /// # Notes + /// + /// If underlying services support delete in batch, we will use batch + /// delete instead. + /// + /// # Examples + /// + /// ``` + /// # use anyhow::Result; + /// # use futures::io; + /// # use opendal::Operator; + /// use futures::stream; + /// # + /// # #[tokio::main] + /// # async fn test(op: Operator) -> Result<()> { + /// let stream = stream::iter(vec!["abc".to_string(), "def".to_string()]); + /// op.remove_via(stream).await?; + /// # Ok(()) + /// # } + /// ``` + pub async fn remove_via(&self, mut input: impl Stream + Unpin) -> Result<()> { + if self.info().can_batch() { + let mut input = input.map(|v| (v, OpDelete::default())).chunks(self.limit()); + + while let Some(batches) = input.next().await { + let results = self + .inner() + .batch(OpBatch::new(BatchOperations::Delete(batches))) + .await?; + + let BatchedResults::Delete(results) = results.into_results(); + + // TODO: return error here directly seems not a good idea? + for (_, result) in results { + let _ = result?; + } + } + } else { + while let Some(path) = input.next().await { + self.inner().delete(&path, OpDelete::default()).await?; + } + } + + Ok(()) + } + + /// Remove the path and all nested dirs and files recursively. + /// + /// # Notes + /// + /// If underlying services support delete in batch, we will use batch + /// delete instead. 
+ /// + /// # Examples + /// + /// ``` + /// # use anyhow::Result; + /// # use futures::io; + /// # use opendal::Operator; + /// # + /// # #[tokio::main] + /// # async fn test(op: Operator) -> Result<()> { + /// op.remove_all("path/to/dir").await?; + /// # Ok(()) + /// # } + /// ``` + pub async fn remove_all(&self, path: &str) -> Result<()> { + let meta = self.stat(path).await?; + + if meta.mode() != EntryMode::DIR { + return self.delete(path).await; + } + + let obs = self.scan(path).await?; + + if self.info().can_batch() { + let mut obs = obs.try_chunks(self.limit()); + + while let Some(batches) = obs.next().await { + let batches = batches + .map_err(|err| err.1)? + .into_iter() + .map(|v| (v.path().to_string(), OpDelete::default())) + .collect(); + + let results = self + .inner() + .batch(OpBatch::new(BatchOperations::Delete(batches))) + .await?; + + let BatchedResults::Delete(results) = results.into_results(); + + // TODO: return error here directly seems not a good idea? + for (_, result) in results { + let _ = result?; + } + } + } else { + obs.try_for_each(|v| async move { self.delete(v.path()).await }) + .await?; + } + + Ok(()) + } + + /// List given path. + /// + /// This function will create a new handle to list objects. + /// + /// An error will be returned if object path doesn't end with `/`. + /// + /// # Examples + /// + /// ```no_run + /// # use anyhow::Result; + /// # use futures::io; + /// # use opendal::Operator; + /// # use opendal::EntryMode; + /// # use futures::TryStreamExt; + /// use opendal::Metakey; + /// # #[tokio::main] + /// # async fn test(op: Operator) -> Result<()> { + /// let mut ds = op.list("path/to/dir/").await?; + /// while let Some(mut de) = ds.try_next().await? 
{ + /// let meta = op.metadata(&de, Metakey::Mode).await?; + /// match meta.mode() { + /// EntryMode::FILE => { + /// println!("Handling file") + /// } + /// EntryMode::DIR => { + /// println!("Handling dir like start a new list via meta.path()") + /// } + /// EntryMode::Unknown => continue, + /// } + /// } + /// # Ok(()) + /// # } + /// ``` + pub async fn list(&self, path: &str) -> Result { + let path = normalize_path(path); + + if !validate_path(&path, EntryMode::DIR) { + return Err(Error::new( + ErrorKind::NotADirectory, + "the path trying to list is not a directory", + ) + .with_operation("Object::list") + .with_context("service", self.info().scheme().into_static()) + .with_context("path", &path)); + } + + let (_, pager) = self.inner().list(&path, OpList::new()).await?; + + Ok(Lister::new(pager)) + } + + /// List dir in flat way. + /// + /// This function will create a new handle to list objects. + /// + /// An error will be returned if object path doesn't end with `/`. + /// + /// # Examples + /// + /// ```no_run + /// # use anyhow::Result; + /// # use futures::io; + /// # use opendal::Operator; + /// # use opendal::EntryMode; + /// # use futures::TryStreamExt; + /// use opendal::Metakey; + /// # + /// # #[tokio::main] + /// # async fn test(op: Operator) -> Result<()> { + /// let mut ds = op.scan("/path/to/dir/").await?; + /// while let Some(mut de) = ds.try_next().await? 
{ + /// let meta = op.metadata(&de, Metakey::Mode).await?; + /// match meta.mode() { + /// EntryMode::FILE => { + /// println!("Handling file") + /// } + /// EntryMode::DIR => { + /// println!("Handling dir like start a new list via meta.path()") + /// } + /// EntryMode::Unknown => continue, + /// } + /// } + /// # Ok(()) + /// # } + /// ``` + pub async fn scan(&self, path: &str) -> Result { + let path = normalize_path(path); + + if !validate_path(&path, EntryMode::DIR) { + return Err(Error::new( + ErrorKind::NotADirectory, + "the path trying to list is not a directory", + ) + .with_operation("scan") + .with_context("service", self.info().scheme().into_static()) + .with_context("path", &path)); + } + + let (_, pager) = self.inner().scan(&path, OpScan::new()).await?; + + Ok(Lister::new(pager)) + } +} + +/// Operato presign API. +impl Operator { + /// Presign an operation for stat(head). + /// + /// # Example + /// + /// ```no_run + /// use anyhow::Result; + /// use futures::io; + /// use opendal::Operator; + /// use time::Duration; + /// + /// #[tokio::main] + /// async fn test(op: Operator) -> Result<()> { + /// let signed_req = op.presign_stat("test",Duration::hours(1))?; + /// let req = http::Request::builder() + /// .method(signed_req.method()) + /// .uri(signed_req.uri()) + /// .body(())?; + /// + /// # Ok(()) + /// # } + /// ``` + pub fn presign_stat(&self, path: &str, expire: Duration) -> Result { + let path = normalize_path(path); + + let op = OpPresign::new(OpStat::new(), expire); + + let rp = self.inner().presign(&path, op)?; + Ok(rp.into_presigned_request()) + } + + /// Presign an operation for read. 
+ /// + /// # Example + /// + /// ```no_run + /// use anyhow::Result; + /// use futures::io; + /// use opendal::Operator; + /// use time::Duration; + /// + /// #[tokio::main] + /// async fn test(op: Operator) -> Result<()> { + /// let signed_req = op.presign_read("test.txt", Duration::hours(1))?; + /// # Ok(()) + /// # } + /// ``` + /// + /// - `signed_req.method()`: `GET` + /// - `signed_req.uri()`: `https://s3.amazonaws.com/examplebucket/test.txt?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=access_key_id/20130721/us-east-1/s3/aws4_request&X-Amz-Date=20130721T201207Z&X-Amz-Expires=86400&X-Amz-SignedHeaders=host&X-Amz-Signature=` + /// - `signed_req.headers()`: `{ "host": "s3.amazonaws.com" }` + /// + /// We can download this object via `curl` or other tools without credentials: + /// + /// ```shell + /// curl "https://s3.amazonaws.com/examplebucket/test.txt?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=access_key_id/20130721/us-east-1/s3/aws4_request&X-Amz-Date=20130721T201207Z&X-Amz-Expires=86400&X-Amz-SignedHeaders=host&X-Amz-Signature=" -O /tmp/test.txt + /// ``` + pub fn presign_read(&self, path: &str, expire: Duration) -> Result { + let path = normalize_path(path); + + let op = OpPresign::new(OpRead::new(), expire); + + let rp = self.inner().presign(&path, op)?; + Ok(rp.into_presigned_request()) + } + + /// Presign an operation for write. 
+ /// + /// # Example + /// + /// ```no_run + /// use anyhow::Result; + /// use futures::io; + /// use opendal::Operator; + /// use time::Duration; + /// + /// #[tokio::main] + /// async fn test(op: Operator) -> Result<()> { + /// let signed_req = op.presign_write("test.txt", Duration::hours(1))?; + /// # Ok(()) + /// # } + /// ``` + /// + /// - `signed_req.method()`: `PUT` + /// - `signed_req.uri()`: `https://s3.amazonaws.com/examplebucket/test.txt?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=access_key_id/20130721/us-east-1/s3/aws4_request&X-Amz-Date=20130721T201207Z&X-Amz-Expires=86400&X-Amz-SignedHeaders=host&X-Amz-Signature=` + /// - `signed_req.headers()`: `{ "host": "s3.amazonaws.com" }` + /// + /// We can upload file as this object via `curl` or other tools without credential: + /// + /// ```shell + /// curl -X PUT "https://s3.amazonaws.com/examplebucket/test.txt?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=access_key_id/20130721/us-east-1/s3/aws4_request&X-Amz-Date=20130721T201207Z&X-Amz-Expires=86400&X-Amz-SignedHeaders=host&X-Amz-Signature=" -d "Hello, World!" + /// ``` + pub fn presign_write(&self, path: &str, expire: Duration) -> Result { + self.presign_write_with(path, OpWrite::new(), expire) + } + + /// Presign an operation for write with option described in OpenDAL [rfc-0661](../../docs/rfcs/0661-path-in-accessor.md) + /// + /// You can pass `OpWrite` to this method to specify the content length and content type. 
+ /// + /// # Example + /// + /// ```no_run + /// use anyhow::Result; + /// use futures::io; + /// use opendal::ops::OpWrite; + /// use opendal::Operator; + /// use time::Duration; + /// + /// #[tokio::main] + /// async fn test(op: Operator) -> Result<()> { + /// let args = OpWrite::new().with_content_type("text/csv"); + /// let signed_req = op.presign_write_with("test", args, Duration::hours(1))?; + /// let req = http::Request::builder() + /// .method(signed_req.method()) + /// .uri(signed_req.uri()) + /// .body(())?; + /// + /// # Ok(()) + /// # } + /// ``` + pub fn presign_write_with( + &self, + path: &str, + op: OpWrite, + expire: Duration, + ) -> Result { + let path = normalize_path(path); + + let op = OpPresign::new(op, expire); + + let rp = self.inner().presign(&path, op)?; + Ok(rp.into_presigned_request()) } } diff --git a/src/types/reader.rs b/src/types/reader.rs index 7efb1b1fb35..b00463ff47c 100644 --- a/src/types/reader.rs +++ b/src/types/reader.rs @@ -187,7 +187,7 @@ impl BlockingReader { /// We don't want to expose those details to users so keep this function /// in crate only. 
pub(crate) fn create(acc: FusedAccessor, path: &str, op: OpRead) -> Result { - let acc_meta = acc.metadata(); + let acc_meta = acc.info(); let r = if acc_meta.hints().contains(AccessorHint::ReadSeekable) { let (_, r) = acc.blocking_read(path, op)?; @@ -274,17 +274,15 @@ mod tests { #[tokio::test] async fn test_reader_async_read() { - let op = Operator::create(services::Memory::default()) - .unwrap() - .finish(); - let obj = op.object("test_file"); + let op = Operator::new(services::Memory::default()).unwrap().finish(); + let path = "test_file"; let content = gen_random_bytes(); - obj.write(content.clone()) + op.write(path, content.clone()) .await .expect("writ to object must succeed"); - let mut reader = obj.reader().await.unwrap(); + let mut reader = op.reader(path).await.unwrap(); let mut buf = Vec::new(); reader .read_to_end(&mut buf) @@ -296,17 +294,15 @@ mod tests { #[tokio::test] async fn test_reader_async_seek() { - let op = Operator::create(services::Memory::default()) - .unwrap() - .finish(); - let obj = op.object("test_file"); + let op = Operator::new(services::Memory::default()).unwrap().finish(); + let path = "test_file"; let content = gen_random_bytes(); - obj.write(content.clone()) + op.write(path, content.clone()) .await .expect("writ to object must succeed"); - let mut reader = obj.reader().await.unwrap(); + let mut reader = op.reader(path).await.unwrap(); let mut buf = Vec::new(); reader .read_to_end(&mut buf) diff --git a/src/types/writer.rs b/src/types/writer.rs index c2d2d683c6a..7ded78e6829 100644 --- a/src/types/writer.rs +++ b/src/types/writer.rs @@ -34,13 +34,13 @@ use crate::*; /// /// Writer is designed for appending multiple blocks which could /// lead to much requests. If only want to send all data in single chunk, -/// please use [`Object::write`] instead. +/// please use [`Operator::write`] instead. pub struct Writer { state: State, } impl Writer { - /// Create a new object writer. + /// Create a new writer. 
/// /// Create will use internal information to decide the most suitable /// implementation for users. diff --git a/tests/behavior/base.rs b/tests/behavior/base.rs deleted file mode 100644 index 918a365c385..00000000000 --- a/tests/behavior/base.rs +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2022 Datafuse Labs -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use anyhow::Result; -use opendal::Operator; - -/// All services should pass this test. -macro_rules! behavior_base_test { - ($service:ident, $($(#[$meta:meta])* $test:ident),*,) => { - paste::item! { - mod [] { - $( - #[test] - $( - #[$meta] - )* - fn [< $test >]() -> anyhow::Result<()> { - let op = $crate::utils::init_service::(true); - match op { - Some(op) => $crate::base::$test(op), - None => { - log::warn!("service {} not initiated, ignored", opendal::Scheme::$service); - Ok(()) - } - } - } - )* - } - } - }; -} - -#[macro_export] -macro_rules! behavior_base_tests { - ($($service:ident),*) => { - $( - behavior_base_test!( - $service, - - test_metadata, - test_object_id, - test_object_path, - test_object_name, - - ); - )* - }; -} - -/// Create file with file path should succeed. -pub fn test_metadata(op: Operator) -> Result<()> { - let _ = op.metadata(); - - Ok(()) -} - -/// Test object id. 
-pub fn test_object_id(op: Operator) -> Result<()> { - let path = uuid::Uuid::new_v4().to_string(); - - let o = op.object(&path); - - assert_eq!(o.id(), format!("{}{}", op.metadata().root(), path)); - - Ok(()) -} - -/// Test object path. -pub fn test_object_path(op: Operator) -> Result<()> { - let path = uuid::Uuid::new_v4().to_string(); - - let o = op.object(&path); - - assert_eq!(o.path(), path); - - Ok(()) -} - -/// Test object name. -pub fn test_object_name(op: Operator) -> Result<()> { - // Normal - let path = uuid::Uuid::new_v4().to_string(); - - let o = op.object(&path); - assert_eq!(o.name(), path); - - // File in subdir - let name = uuid::Uuid::new_v4().to_string(); - let path = format!("{}/{}", uuid::Uuid::new_v4(), name); - - let o = op.object(&path); - assert_eq!(o.name(), name); - - // Dir in subdir - let name = uuid::Uuid::new_v4().to_string(); - let path = format!("{}/{}/", uuid::Uuid::new_v4(), name); - - let o = op.object(&path); - assert_eq!(o.name(), format!("{name}/")); - - Ok(()) -} diff --git a/tests/behavior/blocking_list.rs b/tests/behavior/blocking_list.rs index 9171cd51ca1..0fc086da269 100644 --- a/tests/behavior/blocking_list.rs +++ b/tests/behavior/blocking_list.rs @@ -17,8 +17,8 @@ use std::collections::HashSet; use anyhow::Result; use log::debug; +use opendal::BlockingOperator; use opendal::EntryMode; -use opendal::Operator; use super::utils::*; @@ -40,9 +40,9 @@ macro_rules! 
behavior_blocking_list_test { fn [< $test >]() -> anyhow::Result<()> { let op = $crate::utils::init_service::(true); match op { - Some(op) if op.metadata().can_read() - && op.metadata().can_write() - && op.metadata().can_blocking() && (op.metadata().can_list()||op.metadata().can_scan()) => $crate::blocking_list::$test(op), + Some(op) if op.info().can_read() + && op.info().can_write() + && op.info().can_blocking() && (op.info().can_list()||op.info().can_scan()) => $crate::blocking_list::$test(op.blocking()), Some(_) => { log::warn!("service {} doesn't support read, ignored", opendal::Scheme::$service); Ok(()) @@ -75,20 +75,18 @@ macro_rules! behavior_blocking_list_tests { } /// List dir should return newly created file. -pub fn test_list_dir(op: Operator) -> Result<()> { +pub fn test_list_dir(op: BlockingOperator) -> Result<()> { let path = uuid::Uuid::new_v4().to_string(); debug!("Generate a random file: {}", &path); let (content, size) = gen_bytes(); - op.object(&path) - .blocking_write(content) - .expect("write must succeed"); + op.write(&path, content).expect("write must succeed"); - let obs = op.object("/").blocking_list()?; + let obs = op.list("/")?; let mut found = false; for de in obs { let de = de?; - let meta = de.blocking_stat()?; + let meta = op.stat(de.path())?; if de.path() == path { assert_eq!(meta.mode(), EntryMode::FILE); assert_eq!(meta.content_length(), size as u64); @@ -98,17 +96,15 @@ pub fn test_list_dir(op: Operator) -> Result<()> { } assert!(found, "file should be found in list"); - op.object(&path) - .blocking_delete() - .expect("delete must succeed"); + op.delete(&path).expect("delete must succeed"); Ok(()) } /// List non exist dir should return nothing. 
-pub fn test_list_non_exist_dir(op: Operator) -> Result<()> { +pub fn test_list_non_exist_dir(op: BlockingOperator) -> Result<()> { let dir = format!("{}/", uuid::Uuid::new_v4()); - let obs = op.object(&dir).blocking_list()?; + let obs = op.list(&dir)?; let mut objects = HashMap::new(); for de in obs { let de = de?; @@ -121,15 +117,15 @@ pub fn test_list_non_exist_dir(op: Operator) -> Result<()> { } // Walk top down should output as expected -pub fn test_scan(op: Operator) -> Result<()> { +pub fn test_scan(op: BlockingOperator) -> Result<()> { let expected = vec![ "x/", "x/y", "x/x/", "x/x/y", "x/x/x/", "x/x/x/y", "x/x/x/x/", ]; for path in expected.iter() { - op.object(path).blocking_create()?; + op.create(path)?; } - let w = op.object("x/").blocking_scan()?; + let w = op.scan("x/")?; let actual = w .collect::>() .into_iter() diff --git a/tests/behavior/blocking_read.rs b/tests/behavior/blocking_read.rs index 450f6e166f5..50e21d2f7a0 100644 --- a/tests/behavior/blocking_read.rs +++ b/tests/behavior/blocking_read.rs @@ -13,9 +13,9 @@ // limitations under the License. use anyhow::Result; +use opendal::BlockingOperator; use opendal::EntryMode; use opendal::ErrorKind; -use opendal::Operator; use sha2::Digest; use sha2::Sha256; @@ -36,9 +36,9 @@ macro_rules! behavior_blocking_read_test { fn [< $test >]() -> anyhow::Result<()> { let op = $crate::utils::init_service::(true); match op { - Some(op) if op.metadata().can_read() - && !op.metadata().can_write() - && op.metadata().can_blocking() => $crate::blocking_read::$test(op), + Some(op) if op.info().can_read() + && !op.info().can_write() + && op.info().can_blocking() => $crate::blocking_read::$test(op.blocking()), Some(_) => { log::warn!("service {} doesn't support read, ignored", opendal::Scheme::$service); Ok(()) @@ -74,47 +74,43 @@ macro_rules! 
behavior_blocking_read_tests { } /// Stat normal file and dir should return metadata -pub fn test_stat(op: Operator) -> Result<()> { - let meta = op.object("normal_file").blocking_stat()?; +pub fn test_stat(op: BlockingOperator) -> Result<()> { + let meta = op.stat("normal_file")?; assert_eq!(meta.mode(), EntryMode::FILE); assert_eq!(meta.content_length(), 262144); - let meta = op.object("normal_dir/").blocking_stat()?; + let meta = op.stat("normal_dir/")?; assert_eq!(meta.mode(), EntryMode::DIR); Ok(()) } /// Stat special file and dir should return metadata -pub fn test_stat_special_chars(op: Operator) -> Result<()> { - let meta = op - .object("special_file !@#$%^&()_+-=;',") - .blocking_stat()?; +pub fn test_stat_special_chars(op: BlockingOperator) -> Result<()> { + let meta = op.stat("special_file !@#$%^&()_+-=;',")?; assert_eq!(meta.mode(), EntryMode::FILE); assert_eq!(meta.content_length(), 262144); - let meta = op - .object("special_dir !@#$%^&()_+-=;',/") - .blocking_stat()?; + let meta = op.stat("special_dir !@#$%^&()_+-=;',/")?; assert_eq!(meta.mode(), EntryMode::DIR); Ok(()) } /// Stat not exist file should return NotFound -pub fn test_stat_not_exist(op: Operator) -> Result<()> { +pub fn test_stat_not_exist(op: BlockingOperator) -> Result<()> { let path = uuid::Uuid::new_v4().to_string(); - let meta = op.object(&path).blocking_stat(); + let meta = op.stat(&path); assert!(meta.is_err()); - assert_eq!(meta.unwrap_err().kind(), ErrorKind::ObjectNotFound); + assert_eq!(meta.unwrap_err().kind(), ErrorKind::NotFound); Ok(()) } /// Read full content should match. -pub fn test_read_full(op: Operator) -> Result<()> { - let bs = op.object("normal_file").blocking_read()?; +pub fn test_read_full(op: BlockingOperator) -> Result<()> { + let bs = op.read("normal_file")?; assert_eq!(bs.len(), 262144, "read size"); assert_eq!( format!("{:x}", Sha256::digest(&bs)), @@ -126,8 +122,8 @@ pub fn test_read_full(op: Operator) -> Result<()> { } /// Read full content should match. 
-pub fn test_read_range(op: Operator) -> Result<()> { - let bs = op.object("normal_file").blocking_range_read(1024..2048)?; +pub fn test_read_range(op: BlockingOperator) -> Result<()> { + let bs = op.range_read("normal_file", 1024..2048)?; assert_eq!(bs.len(), 1024, "read size"); assert_eq!( format!("{:x}", Sha256::digest(&bs)), @@ -139,12 +135,12 @@ pub fn test_read_range(op: Operator) -> Result<()> { } /// Read not exist file should return NotFound -pub fn test_read_not_exist(op: Operator) -> Result<()> { +pub fn test_read_not_exist(op: BlockingOperator) -> Result<()> { let path = uuid::Uuid::new_v4().to_string(); - let bs = op.object(&path).blocking_read(); + let bs = op.read(&path); assert!(bs.is_err()); - assert_eq!(bs.unwrap_err().kind(), ErrorKind::ObjectNotFound); + assert_eq!(bs.unwrap_err().kind(), ErrorKind::NotFound); Ok(()) } diff --git a/tests/behavior/blocking_write.rs b/tests/behavior/blocking_write.rs index 8307417f7e9..33ea3894c3f 100644 --- a/tests/behavior/blocking_write.rs +++ b/tests/behavior/blocking_write.rs @@ -17,9 +17,9 @@ use std::io::Seek; use anyhow::Result; use log::debug; +use opendal::BlockingOperator; use opendal::EntryMode; use opendal::ErrorKind; -use opendal::Operator; use sha2::Digest; use sha2::Sha256; @@ -42,9 +42,9 @@ macro_rules! behavior_blocking_write_test { fn [< $test >]() -> anyhow::Result<()> { let op = $crate::utils::init_service::(true); match op { - Some(op) if op.metadata().can_read() - && op.metadata().can_write() - && op.metadata().can_blocking() => $crate::blocking_write::$test(op), + Some(op) if op.info().can_read() + && op.info().can_write() + && op.info().can_blocking() => $crate::blocking_write::$test(op.blocking()), Some(_) => { log::warn!("service {} doesn't support read, ignored", opendal::Scheme::$service); Ok(()) @@ -94,223 +94,185 @@ macro_rules! behavior_blocking_write_tests { } /// Create file with file path should succeed. 
-pub fn test_create_file(op: Operator) -> Result<()> { +pub fn test_create_file(op: BlockingOperator) -> Result<()> { let path = uuid::Uuid::new_v4().to_string(); - let o = op.object(&path); + op.create(&path)?; - o.blocking_create()?; - - let meta = o.blocking_stat()?; + let meta = op.stat(&path)?; assert_eq!(meta.mode(), EntryMode::FILE); assert_eq!(meta.content_length(), 0); - op.object(&path) - .blocking_delete() - .expect("delete must succeed"); + op.delete(&path).expect("delete must succeed"); Ok(()) } /// Create file on existing file path should succeed. -pub fn test_create_file_existing(op: Operator) -> Result<()> { +pub fn test_create_file_existing(op: BlockingOperator) -> Result<()> { let path = uuid::Uuid::new_v4().to_string(); - let o = op.object(&path); - - o.blocking_create()?; + op.create(&path)?; - o.blocking_create()?; + op.create(&path)?; - let meta = o.blocking_stat()?; + let meta = op.stat(&path)?; assert_eq!(meta.mode(), EntryMode::FILE); assert_eq!(meta.content_length(), 0); - op.object(&path) - .blocking_delete() - .expect("delete must succeed"); + op.delete(&path).expect("delete must succeed"); Ok(()) } /// Create file with special chars should succeed. -pub fn test_create_file_with_special_chars(op: Operator) -> Result<()> { +pub fn test_create_file_with_special_chars(op: BlockingOperator) -> Result<()> { let path = format!("{} !@#$%^&()_+-=;',.txt", uuid::Uuid::new_v4()); - let o = op.object(&path); - debug!("{o:?}"); - - o.blocking_create()?; + op.create(&path)?; - let meta = o.blocking_stat()?; + let meta = op.stat(&path)?; assert_eq!(meta.mode(), EntryMode::FILE); assert_eq!(meta.content_length(), 0); - op.object(&path) - .blocking_delete() - .expect("delete must succeed"); + op.delete(&path).expect("delete must succeed"); Ok(()) } /// Create dir with dir path should succeed. 
-pub fn test_create_dir(op: Operator) -> Result<()> { +pub fn test_create_dir(op: BlockingOperator) -> Result<()> { let path = format!("{}/", uuid::Uuid::new_v4()); - let o = op.object(&path); + op.create(&path)?; - o.blocking_create()?; - - let meta = o.blocking_stat()?; + let meta = op.stat(&path)?; assert_eq!(meta.mode(), EntryMode::DIR); - op.object(&path) - .blocking_delete() - .expect("delete must succeed"); + op.delete(&path).expect("delete must succeed"); Ok(()) } /// Create dir on existing dir should succeed. -pub fn test_create_dir_existing(op: Operator) -> Result<()> { +pub fn test_create_dir_existing(op: BlockingOperator) -> Result<()> { let path = format!("{}/", uuid::Uuid::new_v4()); - let o = op.object(&path); - - o.blocking_create()?; + op.create(&path)?; - o.blocking_create()?; + op.create(&path)?; - let meta = o.blocking_stat()?; + let meta = op.stat(&path)?; assert_eq!(meta.mode(), EntryMode::DIR); - op.object(&path) - .blocking_delete() - .expect("delete must succeed"); + op.delete(&path).expect("delete must succeed"); Ok(()) } /// Write a single file and test with stat. 
-pub fn test_write(op: Operator) -> Result<()> { +pub fn test_write(op: BlockingOperator) -> Result<()> { let path = uuid::Uuid::new_v4().to_string(); debug!("Generate a random file: {}", &path); let (content, size) = gen_bytes(); - op.object(&path).blocking_write(content)?; + op.write(&path, content)?; - let meta = op.object(&path).blocking_stat().expect("stat must succeed"); + let meta = op.stat(&path).expect("stat must succeed"); assert_eq!(meta.content_length(), size as u64); - op.object(&path) - .blocking_delete() - .expect("delete must succeed"); + op.delete(&path).expect("delete must succeed"); Ok(()) } /// Write file with dir path should return an error -pub fn test_write_with_dir_path(op: Operator) -> Result<()> { +pub fn test_write_with_dir_path(op: BlockingOperator) -> Result<()> { let path = format!("{}/", uuid::Uuid::new_v4()); let (content, _) = gen_bytes(); - let result = op.object(&path).blocking_write(content); + let result = op.write(&path, content); assert!(result.is_err()); - assert_eq!(result.unwrap_err().kind(), ErrorKind::ObjectIsADirectory); + assert_eq!(result.unwrap_err().kind(), ErrorKind::IsADirectory); Ok(()) } /// Write a single file with special chars should succeed. 
-pub fn test_write_with_special_chars(op: Operator) -> Result<()> { +pub fn test_write_with_special_chars(op: BlockingOperator) -> Result<()> { let path = format!("{} !@#$%^&()_+-=;',.txt", uuid::Uuid::new_v4()); debug!("Generate a random file: {}", &path); let (content, size) = gen_bytes(); - op.object(&path).blocking_write(content)?; + op.write(&path, content)?; - let meta = op.object(&path).blocking_stat().expect("stat must succeed"); + let meta = op.stat(&path).expect("stat must succeed"); assert_eq!(meta.content_length(), size as u64); - op.object(&path) - .blocking_delete() - .expect("delete must succeed"); + op.delete(&path).expect("delete must succeed"); Ok(()) } /// Stat existing file should return metadata -pub fn test_stat(op: Operator) -> Result<()> { +pub fn test_stat(op: BlockingOperator) -> Result<()> { let path = uuid::Uuid::new_v4().to_string(); debug!("Generate a random file: {}", &path); let (content, size) = gen_bytes(); - op.object(&path) - .blocking_write(content) - .expect("write must succeed"); + op.write(&path, content).expect("write must succeed"); - let meta = op.object(&path).blocking_stat()?; + let meta = op.stat(&path)?; assert_eq!(meta.mode(), EntryMode::FILE); assert_eq!(meta.content_length(), size as u64); - op.object(&path) - .blocking_delete() - .expect("delete must succeed"); + op.delete(&path).expect("delete must succeed"); Ok(()) } /// Stat existing file should return metadata -pub fn test_stat_dir(op: Operator) -> Result<()> { +pub fn test_stat_dir(op: BlockingOperator) -> Result<()> { let path = format!("{}/", uuid::Uuid::new_v4()); - op.object(&path) - .blocking_create() - .expect("write must succeed"); + op.create(&path).expect("write must succeed"); - let meta = op.object(&path).blocking_stat()?; + let meta = op.stat(&path)?; assert_eq!(meta.mode(), EntryMode::DIR); - op.object(&path) - .blocking_delete() - .expect("delete must succeed"); + op.delete(&path).expect("delete must succeed"); Ok(()) } /// Stat existing file 
with special chars should return metadata -pub fn test_stat_with_special_chars(op: Operator) -> Result<()> { +pub fn test_stat_with_special_chars(op: BlockingOperator) -> Result<()> { let path = format!("{} !@#$%^&()_+-=;',.txt", uuid::Uuid::new_v4()); debug!("Generate a random file: {}", &path); let (content, size) = gen_bytes(); - op.object(&path) - .blocking_write(content) - .expect("write must succeed"); + op.write(&path, content).expect("write must succeed"); - let meta = op.object(&path).blocking_stat()?; + let meta = op.stat(&path)?; assert_eq!(meta.mode(), EntryMode::FILE); assert_eq!(meta.content_length(), size as u64); - op.object(&path) - .blocking_delete() - .expect("delete must succeed"); + op.delete(&path).expect("delete must succeed"); Ok(()) } /// Stat not exist file should return NotFound -pub fn test_stat_not_exist(op: Operator) -> Result<()> { +pub fn test_stat_not_exist(op: BlockingOperator) -> Result<()> { let path = uuid::Uuid::new_v4().to_string(); - let meta = op.object(&path).blocking_stat(); + let meta = op.stat(&path); assert!(meta.is_err()); - assert_eq!(meta.unwrap_err().kind(), ErrorKind::ObjectNotFound); + assert_eq!(meta.unwrap_err().kind(), ErrorKind::NotFound); Ok(()) } /// Read full content should match. 
-pub fn test_read_full(op: Operator) -> Result<()> { +pub fn test_read_full(op: BlockingOperator) -> Result<()> { let path = uuid::Uuid::new_v4().to_string(); debug!("Generate a random file: {}", &path); let (content, size) = gen_bytes(); - op.object(&path) - .blocking_write(content.clone()) + op.write(&path, content.clone()) .expect("write must succeed"); - let bs = op.object(&path).blocking_read()?; + let bs = op.read(&path)?; assert_eq!(size, bs.len(), "read size"); assert_eq!( format!("{:x}", Sha256::digest(&bs)), @@ -318,26 +280,21 @@ pub fn test_read_full(op: Operator) -> Result<()> { "read content" ); - op.object(&path) - .blocking_delete() - .expect("delete must succeed"); + op.delete(&path).expect("delete must succeed"); Ok(()) } /// Read range content should match. -pub fn test_read_range(op: Operator) -> Result<()> { +pub fn test_read_range(op: BlockingOperator) -> Result<()> { let path = uuid::Uuid::new_v4().to_string(); debug!("Generate a random file: {}", &path); let (content, size) = gen_bytes(); let (offset, length) = gen_offset_length(size); - op.object(&path) - .blocking_write(content.clone()) + op.write(&path, content.clone()) .expect("write must succeed"); - let bs = op - .object(&path) - .blocking_range_read(offset..offset + length)?; + let bs = op.range_read(&path, offset..offset + length)?; assert_eq!(bs.len() as u64, length, "read size"); assert_eq!( format!("{:x}", Sha256::digest(&bs)), @@ -348,26 +305,21 @@ pub fn test_read_range(op: Operator) -> Result<()> { "read content" ); - op.object(&path) - .blocking_delete() - .expect("delete must succeed"); + op.delete(&path).expect("delete must succeed"); Ok(()) } /// Read large range content should match. 
-pub fn test_read_large_range(op: Operator) -> Result<()> { +pub fn test_read_large_range(op: BlockingOperator) -> Result<()> { let path = uuid::Uuid::new_v4().to_string(); debug!("Generate a random file: {}", &path); let (content, size) = gen_bytes(); let (offset, _) = gen_offset_length(size); - op.object(&path) - .blocking_write(content.clone()) + op.write(&path, content.clone()) .expect("write must succeed"); - let bs = op - .object(&path) - .blocking_range_read(offset..u32::MAX as u64)?; + let bs = op.range_read(&path, offset..u32::MAX as u64)?; assert_eq!( bs.len() as u64, size as u64 - offset, @@ -379,36 +331,31 @@ pub fn test_read_large_range(op: Operator) -> Result<()> { "read content with large range" ); - op.object(&path) - .blocking_delete() - .expect("delete must succeed"); + op.delete(&path).expect("delete must succeed"); Ok(()) } /// Read not exist file should return NotFound -pub fn test_read_not_exist(op: Operator) -> Result<()> { +pub fn test_read_not_exist(op: BlockingOperator) -> Result<()> { let path = uuid::Uuid::new_v4().to_string(); - let bs = op.object(&path).blocking_read(); + let bs = op.read(&path); assert!(bs.is_err()); - assert_eq!(bs.unwrap_err().kind(), ErrorKind::ObjectNotFound); + assert_eq!(bs.unwrap_err().kind(), ErrorKind::NotFound); Ok(()) } -pub fn test_fuzz_range_reader(op: Operator) -> Result<()> { +pub fn test_fuzz_range_reader(op: BlockingOperator) -> Result<()> { let path = uuid::Uuid::new_v4().to_string(); debug!("Generate a random file: {}", &path); let (content, _) = gen_bytes(); - op.object(&path) - .blocking_write(content.clone()) + op.write(&path, content.clone()) .expect("write must succeed"); let mut fuzzer = ObjectReaderFuzzer::new(&path, content.clone(), 0, content.len()); - let mut o = op - .object(&path) - .blocking_range_reader(0..content.len() as u64)?; + let mut o = op.range_reader(&path, 0..content.len() as u64)?; for _ in 0..100 { match fuzzer.fuzz() { @@ -428,23 +375,20 @@ pub fn 
test_fuzz_range_reader(op: Operator) -> Result<()> { } } - op.object(&path) - .blocking_delete() - .expect("delete must succeed"); + op.delete(&path).expect("delete must succeed"); Ok(()) } -pub fn test_fuzz_offset_reader(op: Operator) -> Result<()> { +pub fn test_fuzz_offset_reader(op: BlockingOperator) -> Result<()> { let path = uuid::Uuid::new_v4().to_string(); debug!("Generate a random file: {}", &path); let (content, _) = gen_bytes(); - op.object(&path) - .blocking_write(content.clone()) + op.write(&path, content.clone()) .expect("write must succeed"); let mut fuzzer = ObjectReaderFuzzer::new(&path, content.clone(), 0, content.len()); - let mut o = op.object(&path).blocking_range_reader(0..)?; + let mut o = op.range_reader(&path, 0..)?; for _ in 0..100 { match fuzzer.fuzz() { @@ -464,26 +408,21 @@ pub fn test_fuzz_offset_reader(op: Operator) -> Result<()> { } } - op.object(&path) - .blocking_delete() - .expect("delete must succeed"); + op.delete(&path).expect("delete must succeed"); Ok(()) } -pub fn test_fuzz_part_reader(op: Operator) -> Result<()> { +pub fn test_fuzz_part_reader(op: BlockingOperator) -> Result<()> { let path = uuid::Uuid::new_v4().to_string(); debug!("Generate a random file: {}", &path); let (content, size) = gen_bytes(); let (offset, length) = gen_offset_length(size); - op.object(&path) - .blocking_write(content.clone()) + op.write(&path, content.clone()) .expect("write must succeed"); let mut fuzzer = ObjectReaderFuzzer::new(&path, content, offset as usize, length as usize); - let mut o = op - .object(&path) - .blocking_range_reader(offset..offset + length)?; + let mut o = op.range_reader(&path, offset..offset + length)?; for _ in 0..100 { match fuzzer.fuzz() { @@ -503,26 +442,22 @@ pub fn test_fuzz_part_reader(op: Operator) -> Result<()> { } } - op.object(&path) - .blocking_delete() - .expect("delete must succeed"); + op.delete(&path).expect("delete must succeed"); Ok(()) } // Delete existing file should succeed. 
-pub fn test_delete(op: Operator) -> Result<()> { +pub fn test_delete(op: BlockingOperator) -> Result<()> { let path = uuid::Uuid::new_v4().to_string(); debug!("Generate a random file: {}", &path); let (content, _) = gen_bytes(); - op.object(&path) - .blocking_write(content) - .expect("write must succeed"); + op.write(&path, content).expect("write must succeed"); - op.object(&path).blocking_delete()?; + op.delete(&path)?; // Stat it again to check. - assert!(!op.object(&path).blocking_is_exist()?); + assert!(!op.is_exist(&path)?); Ok(()) } diff --git a/tests/behavior/list.rs b/tests/behavior/list.rs index 8482802d705..1f92ef7a8cb 100644 --- a/tests/behavior/list.rs +++ b/tests/behavior/list.rs @@ -43,10 +43,10 @@ macro_rules! behavior_list_test { async fn [< $test >]() -> anyhow::Result<()> { let op = $crate::utils::init_service::(true); match op { - Some(op) if op.metadata().can_read() - && op.metadata().can_write() - && (op.metadata().can_list() - || op.metadata().can_scan()) => $crate::list::$test(op).await, + Some(op) if op.info().can_read() + && op.info().can_write() + && (op.info().can_list() + || op.info().can_scan()) => $crate::list::$test(op).await, Some(_) => { log::warn!("service {} doesn't support write, ignored", opendal::Scheme::$service); Ok(()) @@ -98,15 +98,12 @@ pub async fn test_list_dir(op: Operator) -> Result<()> { debug!("Generate a random file: {}", &path); let (content, size) = gen_bytes(); - op.object(&path) - .write(content) - .await - .expect("write must succeed"); + op.write(&path, content).await.expect("write must succeed"); - let mut obs = op.object("/").list().await?; + let mut obs = op.list("/").await?; let mut found = false; while let Some(de) = obs.try_next().await? 
{ - let meta = de.stat().await?; + let meta = op.stat(de.path()).await?; if de.path() == path { assert_eq!(meta.mode(), EntryMode::FILE); assert_eq!(meta.content_length(), size as u64); @@ -116,16 +113,13 @@ pub async fn test_list_dir(op: Operator) -> Result<()> { } assert!(found, "file should be found in list"); - op.object(&path) - .delete() - .await - .expect("delete must succeed"); + op.delete(&path).await.expect("delete must succeed"); Ok(()) } /// listing a directory, which contains more objects than a single page can take. pub async fn test_list_rich_dir(op: Operator) -> Result<()> { - op.object("test_list_rich_dir/").create().await?; + op.create("test_list_rich_dir/").await?; let mut expected: Vec = (0..=1000) .map(|num| format!("test_list_rich_dir/file-{num}")) @@ -134,8 +128,7 @@ pub async fn test_list_rich_dir(op: Operator) -> Result<()> { expected .iter() .map(|v| async { - let o = op.object(v); - o.create().await.expect("create must succeed"); + op.create(v).await.expect("create must succeed"); }) // Collect into a FuturesUnordered. .collect::>() @@ -143,7 +136,7 @@ pub async fn test_list_rich_dir(op: Operator) -> Result<()> { .collect::>() .await; - let mut objects = op.object("test_list_rich_dir/").list().await?; + let mut objects = op.list("test_list_rich_dir/").await?; let mut actual = vec![]; while let Some(o) = objects.try_next().await? 
{ let path = o.path().to_string(); @@ -154,7 +147,7 @@ pub async fn test_list_rich_dir(op: Operator) -> Result<()> { assert_eq!(actual, expected); - op.batch().remove_all("test_list_rich_dir/").await?; + op.remove_all("test_list_rich_dir/").await?; Ok(()) } @@ -162,9 +155,9 @@ pub async fn test_list_rich_dir(op: Operator) -> Result<()> { pub async fn test_list_empty_dir(op: Operator) -> Result<()> { let dir = format!("{}/", uuid::Uuid::new_v4()); - op.object(&dir).create().await.expect("write must succeed"); + op.create(&dir).await.expect("write must succeed"); - let mut obs = op.object(&dir).list().await?; + let mut obs = op.list(&dir).await?; let mut objects = HashMap::new(); while let Some(de) = obs.try_next().await? { objects.insert(de.path().to_string(), de); @@ -173,7 +166,7 @@ pub async fn test_list_empty_dir(op: Operator) -> Result<()> { assert_eq!(objects.len(), 0, "dir should only return empty"); - op.object(&dir).delete().await.expect("delete must succeed"); + op.delete(&dir).await.expect("delete must succeed"); Ok(()) } @@ -181,7 +174,7 @@ pub async fn test_list_empty_dir(op: Operator) -> Result<()> { pub async fn test_list_non_exist_dir(op: Operator) -> Result<()> { let dir = format!("{}/", uuid::Uuid::new_v4()); - let mut obs = op.object(&dir).list().await?; + let mut obs = op.list(&dir).await?; let mut objects = HashMap::new(); while let Some(de) = obs.try_next().await? { objects.insert(de.path().to_string(), de); @@ -196,13 +189,13 @@ pub async fn test_list_non_exist_dir(op: Operator) -> Result<()> { pub async fn test_list_sub_dir(op: Operator) -> Result<()> { let path = format!("{}/", uuid::Uuid::new_v4()); - op.object(&path).create().await.expect("creat must succeed"); + op.create(&path).await.expect("creat must succeed"); - let mut obs = op.object("/").list().await?; + let mut obs = op.list("/").await?; let mut found = false; while let Some(de) = obs.try_next().await? 
{ if de.path() == path { - assert_eq!(de.stat().await?.mode(), EntryMode::DIR); + assert_eq!(op.stat(&path).await?.mode(), EntryMode::DIR); assert_eq!(de.name(), path); found = true @@ -210,10 +203,7 @@ pub async fn test_list_sub_dir(op: Operator) -> Result<()> { } assert!(found, "dir should be found in list"); - op.object(&path) - .delete() - .await - .expect("delete must succeed"); + op.delete(&path).await.expect("delete must succeed"); Ok(()) } @@ -226,17 +216,11 @@ pub async fn test_list_nested_dir(op: Operator) -> Result<()> { let dir_name = format!("{}/", uuid::Uuid::new_v4()); let dir_path = format!("{dir}{dir_name}"); - op.object(&dir).create().await.expect("creat must succeed"); - op.object(&file_path) - .create() - .await - .expect("creat must succeed"); - op.object(&dir_path) - .create() - .await - .expect("creat must succeed"); - - let mut obs = op.object(&dir).list().await?; + op.create(&dir).await.expect("creat must succeed"); + op.create(&file_path).await.expect("creat must succeed"); + op.create(&dir_path).await.expect("creat must succeed"); + + let mut obs = op.list(&dir).await?; let mut objects = HashMap::new(); while let Some(de) = obs.try_next().await? 
{ @@ -247,31 +231,31 @@ pub async fn test_list_nested_dir(op: Operator) -> Result<()> { assert_eq!(objects.len(), 2, "dir should only got 2 objects"); // Check file - let meta = objects - .get(&file_path) - .expect("file should be found in list") - .stat() + let meta = op + .stat( + objects + .get(&file_path) + .expect("file should be found in list") + .path(), + ) .await?; assert_eq!(meta.mode(), EntryMode::FILE); assert_eq!(meta.content_length(), 0); // Check dir - let meta = objects - .get(&dir_path) - .expect("file should be found in list") - .stat() + let meta = op + .stat( + objects + .get(&dir_path) + .expect("file should be found in list") + .path(), + ) .await?; assert_eq!(meta.mode(), EntryMode::DIR); - op.object(&file_path) - .delete() - .await - .expect("delete must succeed"); - op.object(&dir_path) - .delete() - .await - .expect("delete must succeed"); - op.object(&dir).delete().await.expect("delete must succeed"); + op.delete(&file_path).await.expect("delete must succeed"); + op.delete(&dir_path).await.expect("delete must succeed"); + op.delete(&dir).await.expect("delete must succeed"); Ok(()) } @@ -279,9 +263,9 @@ pub async fn test_list_nested_dir(op: Operator) -> Result<()> { pub async fn test_list_dir_with_file_path(op: Operator) -> Result<()> { let parent = uuid::Uuid::new_v4().to_string(); - let obs = op.object(&parent).list().await.map(|_| ()); + let obs = op.list(&parent).await.map(|_| ()); assert!(obs.is_err()); - assert_eq!(obs.unwrap_err().kind(), ErrorKind::ObjectNotADirectory); + assert_eq!(obs.unwrap_err().kind(), ErrorKind::NotADirectory); Ok(()) } @@ -292,10 +276,10 @@ pub async fn test_scan(op: Operator) -> Result<()> { "x/", "x/y", "x/x/", "x/x/y", "x/x/x/", "x/x/x/y", "x/x/x/x/", ]; for path in expected.iter() { - op.object(path).create().await?; + op.create(path).await?; } - let w = op.object("x/").scan().await?; + let w = op.scan("x/").await?; let actual = w .try_collect::>() .await? 
@@ -317,19 +301,16 @@ pub async fn test_remove_all(op: Operator) -> Result<()> { "x/", "x/y", "x/x/", "x/x/y", "x/x/x/", "x/x/x/y", "x/x/x/x/", ]; for path in expected.iter() { - op.object(path).create().await?; + op.create(path).await?; } - op.batch().remove_all("x/").await?; + op.remove_all("x/").await?; for path in expected.iter() { if path.ends_with('/') { continue; } - assert!( - !op.object(path).is_exist().await?, - "{path} should be removed" - ) + assert!(!op.is_exist(path).await?, "{path} should be removed") } Ok(()) } diff --git a/tests/behavior/list_only.rs b/tests/behavior/list_only.rs index 855055923be..56344a9930a 100644 --- a/tests/behavior/list_only.rs +++ b/tests/behavior/list_only.rs @@ -35,7 +35,7 @@ macro_rules! behavior_list_only_test { async fn [< $test >]() -> anyhow::Result<()> { let op = $crate::utils::init_service::(false); match op { - Some(op) if op.metadata().can_list() && !op.metadata().can_write() => $crate::list_only::$test(op).await, + Some(op) if op.info().can_list() && !op.info().can_write() => $crate::list_only::$test(op).await, Some(_) => { log::warn!("service {} doesn't support list, ignored", opendal::Scheme::$service); Ok(()) @@ -69,9 +69,9 @@ macro_rules! behavior_list_only_tests { pub async fn test_list(op: Operator) -> Result<()> { let mut entries = HashMap::new(); - let mut ds = op.object("/").list().await?; + let mut ds = op.list("/").await?; while let Some(de) = ds.try_next().await? { - entries.insert(de.path().to_string(), de.stat().await?.mode()); + entries.insert(de.path().to_string(), op.stat(de.path()).await?.mode()); } assert_eq!(entries["normal_file"], EntryMode::FILE); diff --git a/tests/behavior/main.rs b/tests/behavior/main.rs index 1207659e646..9d7dcb9cd38 100644 --- a/tests/behavior/main.rs +++ b/tests/behavior/main.rs @@ -12,8 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#[macro_use] -mod base; #[macro_use] mod blocking_list; #[macro_use] @@ -38,7 +36,6 @@ mod utils; macro_rules! behavior_tests { ($($service:ident),*) => { $( - behavior_base_tests!($service); // can_read && !can_write behavior_read_tests!($service); // can_read && !can_write && can_blocking diff --git a/tests/behavior/presign.rs b/tests/behavior/presign.rs index 6d231df97ab..5841612f0fc 100644 --- a/tests/behavior/presign.rs +++ b/tests/behavior/presign.rs @@ -43,7 +43,7 @@ macro_rules! behavior_presign_test { async fn [< $test >]() -> anyhow::Result<()> { let op = $crate::utils::init_service::(true); match op { - Some(op) if op.metadata().can_read() && op.metadata().can_write() && op.metadata().can_presign() => $crate::presign::$test(op).await, + Some(op) if op.info().can_read() && op.info().can_write() && op.info().can_presign() => $crate::presign::$test(op).await, Some(_) => { log::warn!("service {} doesn't support write, ignored", opendal::Scheme::$service); Ok(()) @@ -81,7 +81,7 @@ pub async fn test_presign_write(op: Operator) -> Result<()> { debug!("Generate a random file: {}", &path); let (content, size) = gen_bytes(); - let signed_req = op.object(&path).presign_write(Duration::hours(1))?; + let signed_req = op.presign_write(&path, Duration::hours(1))?; debug!("Generated request: {signed_req:?}"); let client = reqwest::Client::new(); @@ -101,13 +101,10 @@ pub async fn test_presign_write(op: Operator) -> Result<()> { resp.text().await.expect("read response must succeed") ); - let meta = op.object(&path).stat().await.expect("stat must succeed"); + let meta = op.stat(&path).await.expect("stat must succeed"); assert_eq!(meta.content_length(), size as u64); - op.object(&path) - .delete() - .await - .expect("delete must succeed"); + op.delete(&path).await.expect("delete must succeed"); Ok(()) } @@ -115,11 +112,10 @@ pub async fn test_presign_stat(op: Operator) -> Result<()> { let path = uuid::Uuid::new_v4().to_string(); debug!("Generate a random file: {}", 
&path); let (content, size) = gen_bytes(); - op.object(&path) - .write(content.clone()) + op.write(&path, content.clone()) .await .expect("write must succeed"); - let signed_req = op.object(&path).presign_stat(Duration::hours(1))?; + let signed_req = op.presign_stat(&path, Duration::hours(1))?; debug!("Generated request: {signed_req:?}"); let client = reqwest::Client::new(); let mut req = client.request( @@ -137,10 +133,7 @@ pub async fn test_presign_stat(op: Operator) -> Result<()> { .expect("content length must be present"); assert_eq!(content_length, size as u64); - op.object(&path) - .delete() - .await - .expect("delete must succeed"); + op.delete(&path).await.expect("delete must succeed"); Ok(()) } @@ -150,12 +143,11 @@ pub async fn test_presign_read(op: Operator) -> Result<()> { debug!("Generate a random file: {}", &path); let (content, size) = gen_bytes(); - op.object(&path) - .write(content.clone()) + op.write(&path, content.clone()) .await .expect("write must succeed"); - let signed_req = op.object(&path).presign_read(Duration::hours(1))?; + let signed_req = op.presign_read(&path, Duration::hours(1))?; debug!("Generated request: {signed_req:?}"); let client = reqwest::Client::new(); @@ -177,9 +169,6 @@ pub async fn test_presign_read(op: Operator) -> Result<()> { "read content" ); - op.object(&path) - .delete() - .await - .expect("delete must succeed"); + op.delete(&path).await.expect("delete must succeed"); Ok(()) } diff --git a/tests/behavior/read_only.rs b/tests/behavior/read_only.rs index 2d769891086..9b244c07dce 100644 --- a/tests/behavior/read_only.rs +++ b/tests/behavior/read_only.rs @@ -36,7 +36,7 @@ macro_rules! 
behavior_read_test { async fn [< $test >]() -> anyhow::Result<()> { let op = $crate::utils::init_service::(false); match op { - Some(op) if op.metadata().can_read() && !op.metadata().can_write() => $crate::read_only::$test(op).await, + Some(op) if op.info().can_read() && !op.info().can_write() => $crate::read_only::$test(op).await, Some(_) => { log::warn!("service {} doesn't support read, ignored", opendal::Scheme::$service); Ok(()) @@ -80,11 +80,11 @@ macro_rules! behavior_read_tests { /// Stat normal file and dir should return metadata pub async fn test_stat(op: Operator) -> Result<()> { - let meta = op.object("normal_file").stat().await?; + let meta = op.stat("normal_file").await?; assert_eq!(meta.mode(), EntryMode::FILE); assert_eq!(meta.content_length(), 262144); - let meta = op.object("normal_dir/").stat().await?; + let meta = op.stat("normal_dir/").await?; assert_eq!(meta.mode(), EntryMode::DIR); Ok(()) @@ -92,11 +92,11 @@ pub async fn test_stat(op: Operator) -> Result<()> { /// Stat special file and dir should return metadata pub async fn test_stat_special_chars(op: Operator) -> Result<()> { - let meta = op.object("special_file !@#$%^&()_+-=;',").stat().await?; + let meta = op.stat("special_file !@#$%^&()_+-=;',").await?; assert_eq!(meta.mode(), EntryMode::FILE); assert_eq!(meta.content_length(), 262144); - let meta = op.object("special_dir !@#$%^&()_+-=;',/").stat().await?; + let meta = op.stat("special_dir !@#$%^&()_+-=;',/").await?; assert_eq!(meta.mode(), EntryMode::DIR); Ok(()) @@ -104,7 +104,7 @@ pub async fn test_stat_special_chars(op: Operator) -> Result<()> { /// Stat not cleaned path should also succeed. 
pub async fn test_stat_not_cleaned_path(op: Operator) -> Result<()> { - let meta = op.object("//normal_file").stat().await?; + let meta = op.stat("//normal_file").await?; assert_eq!(meta.mode(), EntryMode::FILE); assert_eq!(meta.content_length(), 262144); @@ -115,19 +115,19 @@ pub async fn test_stat_not_cleaned_path(op: Operator) -> Result<()> { pub async fn test_stat_not_exist(op: Operator) -> Result<()> { let path = uuid::Uuid::new_v4().to_string(); - let meta = op.object(&path).stat().await; + let meta = op.stat(&path).await; assert!(meta.is_err()); - assert_eq!(meta.unwrap_err().kind(), ErrorKind::ObjectNotFound); + assert_eq!(meta.unwrap_err().kind(), ErrorKind::NotFound); Ok(()) } /// Root should be able to stat and returns DIR. pub async fn test_stat_root(op: Operator) -> Result<()> { - let meta = op.object("").stat().await?; + let meta = op.stat("").await?; assert_eq!(meta.mode(), EntryMode::DIR); - let meta = op.object("/").stat().await?; + let meta = op.stat("/").await?; assert_eq!(meta.mode(), EntryMode::DIR); Ok(()) @@ -135,7 +135,7 @@ pub async fn test_stat_root(op: Operator) -> Result<()> { /// Read full content should match. pub async fn test_read_full(op: Operator) -> Result<()> { - let bs = op.object("normal_file").read().await?; + let bs = op.read("normal_file").await?; assert_eq!(bs.len(), 262144, "read size"); assert_eq!( format!("{:x}", Sha256::digest(&bs)), @@ -148,7 +148,7 @@ pub async fn test_read_full(op: Operator) -> Result<()> { /// Read full content should match. pub async fn test_read_full_with_special_chars(op: Operator) -> Result<()> { - let bs = op.object("special_file !@#$%^&()_+-=;',").read().await?; + let bs = op.read("special_file !@#$%^&()_+-=;',").await?; assert_eq!(bs.len(), 262144, "read size"); assert_eq!( format!("{:x}", Sha256::digest(&bs)), @@ -161,7 +161,7 @@ pub async fn test_read_full_with_special_chars(op: Operator) -> Result<()> { /// Read full content should match. 
pub async fn test_read_range(op: Operator) -> Result<()> { - let bs = op.object("normal_file").range_read(1024..2048).await?; + let bs = op.range_read("normal_file", 1024..2048).await?; assert_eq!(bs.len(), 1024, "read size"); assert_eq!( format!("{:x}", Sha256::digest(&bs)), @@ -174,7 +174,7 @@ pub async fn test_read_range(op: Operator) -> Result<()> { /// Read range should match. pub async fn test_reader_range(op: Operator) -> Result<()> { - let mut r = op.object("normal_file").range_reader(1024..2048).await?; + let mut r = op.range_reader("normal_file", 1024..2048).await?; let mut bs = Vec::new(); r.read_to_end(&mut bs).await?; @@ -191,7 +191,7 @@ pub async fn test_reader_range(op: Operator) -> Result<()> { /// Read from should match. pub async fn test_reader_from(op: Operator) -> Result<()> { - let mut r = op.object("normal_file").range_reader(261120..).await?; + let mut r = op.range_reader("normal_file", 261120..).await?; let mut bs = Vec::new(); r.read_to_end(&mut bs).await?; @@ -208,7 +208,7 @@ pub async fn test_reader_from(op: Operator) -> Result<()> { /// Read tail should match. 
pub async fn test_reader_tail(op: Operator) -> Result<()> { - let mut r = op.object("normal_file").range_reader(..1024).await?; + let mut r = op.range_reader("normal_file", ..1024).await?; let mut bs = Vec::new(); r.read_to_end(&mut bs).await?; @@ -227,9 +227,9 @@ pub async fn test_reader_tail(op: Operator) -> Result<()> { pub async fn test_read_not_exist(op: Operator) -> Result<()> { let path = uuid::Uuid::new_v4().to_string(); - let bs = op.object(&path).read().await; + let bs = op.read(&path).await; assert!(bs.is_err()); - assert_eq!(bs.unwrap_err().kind(), ErrorKind::ObjectNotFound); + assert_eq!(bs.unwrap_err().kind(), ErrorKind::NotFound); Ok(()) } @@ -238,9 +238,9 @@ pub async fn test_read_not_exist(op: Operator) -> Result<()> { pub async fn test_read_with_dir_path(op: Operator) -> Result<()> { let path = format!("{}/", uuid::Uuid::new_v4()); - let result = op.object(&path).read().await; + let result = op.read(&path).await; assert!(result.is_err()); - assert_eq!(result.unwrap_err().kind(), ErrorKind::ObjectIsADirectory); + assert_eq!(result.unwrap_err().kind(), ErrorKind::IsADirectory); Ok(()) } diff --git a/tests/behavior/write.rs b/tests/behavior/write.rs index 4086c51c382..df7790e34c3 100644 --- a/tests/behavior/write.rs +++ b/tests/behavior/write.rs @@ -42,7 +42,7 @@ macro_rules! behavior_write_test { async fn [< $test >]() -> anyhow::Result<()> { let op = $crate::utils::init_service::(true); match op { - Some(op) if op.metadata().can_read() && op.metadata().can_write() => $crate::write::$test(op).await, + Some(op) if op.info().can_read() && op.info().can_write() => $crate::write::$test(op).await, Some(_) => { log::warn!("service {} doesn't support write, ignored", opendal::Scheme::$service); Ok(()) @@ -107,18 +107,13 @@ macro_rules! 
behavior_write_tests { pub async fn test_create_file(op: Operator) -> Result<()> { let path = uuid::Uuid::new_v4().to_string(); - let o = op.object(&path); + op.create(&path).await?; - o.create().await?; - - let meta = o.stat().await?; + let meta = op.stat(&path).await?; assert_eq!(meta.mode(), EntryMode::FILE); assert_eq!(meta.content_length(), 0); - op.object(&path) - .delete() - .await - .expect("delete must succeed"); + op.delete(&path).await.expect("delete must succeed"); Ok(()) } @@ -126,20 +121,15 @@ pub async fn test_create_file(op: Operator) -> Result<()> { pub async fn test_create_file_existing(op: Operator) -> Result<()> { let path = uuid::Uuid::new_v4().to_string(); - let o = op.object(&path); - - o.create().await?; + op.create(&path).await?; - o.create().await?; + op.create(&path).await?; - let meta = o.stat().await?; + let meta = op.stat(&path).await?; assert_eq!(meta.mode(), EntryMode::FILE); assert_eq!(meta.content_length(), 0); - op.object(&path) - .delete() - .await - .expect("delete must succeed"); + op.delete(&path).await.expect("delete must succeed"); Ok(()) } @@ -147,18 +137,13 @@ pub async fn test_create_file_existing(op: Operator) -> Result<()> { pub async fn test_create_file_with_special_chars(op: Operator) -> Result<()> { let path = format!("{} !@#$%^&()_+-=;',.txt", uuid::Uuid::new_v4()); - let o = op.object(&path); - - o.create().await?; + op.create(&path).await?; - let meta = o.stat().await?; + let meta = op.stat(&path).await?; assert_eq!(meta.mode(), EntryMode::FILE); assert_eq!(meta.content_length(), 0); - op.object(&path) - .delete() - .await - .expect("delete must succeed"); + op.delete(&path).await.expect("delete must succeed"); Ok(()) } @@ -166,17 +151,12 @@ pub async fn test_create_file_with_special_chars(op: Operator) -> Result<()> { pub async fn test_create_dir(op: Operator) -> Result<()> { let path = format!("{}/", uuid::Uuid::new_v4()); - let o = op.object(&path); + op.create(&path).await?; - o.create().await?; - - let meta = 
o.stat().await?; + let meta = op.stat(&path).await?; assert_eq!(meta.mode(), EntryMode::DIR); - op.object(&path) - .delete() - .await - .expect("delete must succeed"); + op.delete(&path).await.expect("delete must succeed"); Ok(()) } @@ -184,19 +164,14 @@ pub async fn test_create_dir(op: Operator) -> Result<()> { pub async fn test_create_dir_existing(op: Operator) -> Result<()> { let path = format!("{}/", uuid::Uuid::new_v4()); - let o = op.object(&path); - - o.create().await?; + op.create(&path).await?; - o.create().await?; + op.create(&path).await?; - let meta = o.stat().await?; + let meta = op.stat(&path).await?; assert_eq!(meta.mode(), EntryMode::DIR); - op.object(&path) - .delete() - .await - .expect("delete must succeed"); + op.delete(&path).await.expect("delete must succeed"); Ok(()) } @@ -205,15 +180,12 @@ pub async fn test_write(op: Operator) -> Result<()> { let path = uuid::Uuid::new_v4().to_string(); let (content, size) = gen_bytes(); - op.object(&path).write(content).await?; + op.write(&path, content).await?; - let meta = op.object(&path).stat().await.expect("stat must succeed"); + let meta = op.stat(&path).await.expect("stat must succeed"); assert_eq!(meta.content_length(), size as u64); - op.object(&path) - .delete() - .await - .expect("delete must succeed"); + op.delete(&path).await.expect("delete must succeed"); Ok(()) } @@ -222,9 +194,9 @@ pub async fn test_write_with_dir_path(op: Operator) -> Result<()> { let path = format!("{}/", uuid::Uuid::new_v4()); let (content, _) = gen_bytes(); - let result = op.object(&path).write(content).await; + let result = op.write(&path, content).await; assert!(result.is_err()); - assert_eq!(result.unwrap_err().kind(), ErrorKind::ObjectIsADirectory); + assert_eq!(result.unwrap_err().kind(), ErrorKind::IsADirectory); Ok(()) } @@ -234,15 +206,12 @@ pub async fn test_write_with_special_chars(op: Operator) -> Result<()> { let path = format!("{} !@#$%^&()_+-=;',.txt", uuid::Uuid::new_v4()); let (content, size) = 
gen_bytes(); - op.object(&path).write(content).await?; + op.write(&path, content).await?; - let meta = op.object(&path).stat().await.expect("stat must succeed"); + let meta = op.stat(&path).await.expect("stat must succeed"); assert_eq!(meta.content_length(), size as u64); - op.object(&path) - .delete() - .await - .expect("delete must succeed"); + op.delete(&path).await.expect("delete must succeed"); Ok(()) } @@ -251,19 +220,13 @@ pub async fn test_stat(op: Operator) -> Result<()> { let path = uuid::Uuid::new_v4().to_string(); let (content, size) = gen_bytes(); - op.object(&path) - .write(content) - .await - .expect("write must succeed"); + op.write(&path, content).await.expect("write must succeed"); - let meta = op.object(&path).stat().await?; + let meta = op.stat(&path).await?; assert_eq!(meta.mode(), EntryMode::FILE); assert_eq!(meta.content_length(), size as u64); - op.object(&path) - .delete() - .await - .expect("delete must succeed"); + op.delete(&path).await.expect("delete must succeed"); Ok(()) } @@ -271,15 +234,12 @@ pub async fn test_stat(op: Operator) -> Result<()> { pub async fn test_stat_dir(op: Operator) -> Result<()> { let path = format!("{}/", uuid::Uuid::new_v4()); - op.object(&path).create().await.expect("write must succeed"); + op.create(&path).await.expect("write must succeed"); - let meta = op.object(&path).stat().await?; + let meta = op.stat(&path).await?; assert_eq!(meta.mode(), EntryMode::DIR); - op.object(&path) - .delete() - .await - .expect("delete must succeed"); + op.delete(&path).await.expect("delete must succeed"); Ok(()) } @@ -288,19 +248,13 @@ pub async fn test_stat_with_special_chars(op: Operator) -> Result<()> { let path = format!("{} !@#$%^&()_+-=;',.txt", uuid::Uuid::new_v4()); let (content, size) = gen_bytes(); - op.object(&path) - .write(content) - .await - .expect("write must succeed"); + op.write(&path, content).await.expect("write must succeed"); - let meta = op.object(&path).stat().await?; + let meta = 
op.stat(&path).await?; assert_eq!(meta.mode(), EntryMode::FILE); assert_eq!(meta.content_length(), size as u64); - op.object(&path) - .delete() - .await - .expect("delete must succeed"); + op.delete(&path).await.expect("delete must succeed"); Ok(()) } @@ -310,19 +264,13 @@ pub async fn test_stat_not_cleaned_path(op: Operator) -> Result<()> { debug!("Generate a random file: {}", &path); let (content, size) = gen_bytes(); - op.object(&path) - .write(content) - .await - .expect("write must succeed"); + op.write(&path, content).await.expect("write must succeed"); - let meta = op.object(&format!("//{}", &path)).stat().await?; + let meta = op.stat(&format!("//{}", &path)).await?; assert_eq!(meta.mode(), EntryMode::FILE); assert_eq!(meta.content_length(), size as u64); - op.object(&path) - .delete() - .await - .expect("delete must succeed"); + op.delete(&path).await.expect("delete must succeed"); Ok(()) } @@ -330,19 +278,19 @@ pub async fn test_stat_not_cleaned_path(op: Operator) -> Result<()> { pub async fn test_stat_not_exist(op: Operator) -> Result<()> { let path = uuid::Uuid::new_v4().to_string(); - let meta = op.object(&path).stat().await; + let meta = op.stat(&path).await; assert!(meta.is_err()); - assert_eq!(meta.unwrap_err().kind(), ErrorKind::ObjectNotFound); + assert_eq!(meta.unwrap_err().kind(), ErrorKind::NotFound); Ok(()) } /// Root should be able to stat and returns DIR. 
pub async fn test_stat_root(op: Operator) -> Result<()> { - let meta = op.object("").stat().await?; + let meta = op.stat("").await?; assert_eq!(meta.mode(), EntryMode::DIR); - let meta = op.object("/").stat().await?; + let meta = op.stat("/").await?; assert_eq!(meta.mode(), EntryMode::DIR); Ok(()) @@ -354,12 +302,11 @@ pub async fn test_read_full(op: Operator) -> Result<()> { debug!("Generate a random file: {}", &path); let (content, size) = gen_bytes(); - op.object(&path) - .write(content.clone()) + op.write(&path, content.clone()) .await .expect("write must succeed"); - let bs = op.object(&path).read().await?; + let bs = op.read(&path).await?; assert_eq!(size, bs.len(), "read size"); assert_eq!( format!("{:x}", Sha256::digest(&bs)), @@ -367,10 +314,7 @@ pub async fn test_read_full(op: Operator) -> Result<()> { "read content" ); - op.object(&path) - .delete() - .await - .expect("delete must succeed"); + op.delete(&path).await.expect("delete must succeed"); Ok(()) } @@ -381,12 +325,11 @@ pub async fn test_read_range(op: Operator) -> Result<()> { let (content, size) = gen_bytes(); let (offset, length) = gen_offset_length(size); - op.object(&path) - .write(content.clone()) + op.write(&path, content.clone()) .await .expect("write must succeed"); - let bs = op.object(&path).range_read(offset..offset + length).await?; + let bs = op.range_read(&path, offset..offset + length).await?; assert_eq!(bs.len() as u64, length, "read size"); assert_eq!( format!("{:x}", Sha256::digest(&bs)), @@ -397,10 +340,7 @@ pub async fn test_read_range(op: Operator) -> Result<()> { "read content" ); - op.object(&path) - .delete() - .await - .expect("delete must succeed"); + op.delete(&path).await.expect("delete must succeed"); Ok(()) } @@ -411,12 +351,11 @@ pub async fn test_read_large_range(op: Operator) -> Result<()> { let (content, size) = gen_bytes(); let (offset, _) = gen_offset_length(size); - op.object(&path) - .write(content.clone()) + op.write(&path, content.clone()) .await 
.expect("write must succeed"); - let bs = op.object(&path).range_read(offset..u32::MAX as u64).await?; + let bs = op.range_read(&path, offset..u32::MAX as u64).await?; assert_eq!( bs.len() as u64, size as u64 - offset, @@ -428,10 +367,7 @@ pub async fn test_read_large_range(op: Operator) -> Result<()> { "read content with large range" ); - op.object(&path) - .delete() - .await - .expect("delete must succeed"); + op.delete(&path).await.expect("delete must succeed"); Ok(()) } @@ -442,15 +378,11 @@ pub async fn test_reader_range(op: Operator) -> Result<()> { let (content, size) = gen_bytes(); let (offset, length) = gen_offset_length(size); - op.object(&path) - .write(content.clone()) + op.write(&path, content.clone()) .await .expect("write must succeed"); - let mut r = op - .object(&path) - .range_reader(offset..offset + length) - .await?; + let mut r = op.range_reader(&path, offset..offset + length).await?; let mut bs = Vec::new(); r.read_to_end(&mut bs).await?; @@ -464,10 +396,7 @@ pub async fn test_reader_range(op: Operator) -> Result<()> { "read content" ); - op.object(&path) - .delete() - .await - .expect("delete must succeed"); + op.delete(&path).await.expect("delete must succeed"); Ok(()) } @@ -478,12 +407,11 @@ pub async fn test_reader_from(op: Operator) -> Result<()> { let (content, size) = gen_bytes(); let (offset, _) = gen_offset_length(size); - op.object(&path) - .write(content.clone()) + op.write(&path, content.clone()) .await .expect("write must succeed"); - let mut r = op.object(&path).range_reader(offset..).await?; + let mut r = op.range_reader(&path, offset..).await?; let mut bs = Vec::new(); r.read_to_end(&mut bs).await?; @@ -495,10 +423,7 @@ pub async fn test_reader_from(op: Operator) -> Result<()> { "read content" ); - op.object(&path) - .delete() - .await - .expect("delete must succeed"); + op.delete(&path).await.expect("delete must succeed"); Ok(()) } @@ -509,12 +434,11 @@ pub async fn test_reader_tail(op: Operator) -> Result<()> { let (content, 
size) = gen_bytes(); let (_, length) = gen_offset_length(size); - op.object(&path) - .write(content.clone()) + op.write(&path, content.clone()) .await .expect("write must succeed"); - let mut r = match op.object(&path).range_reader(..length).await { + let mut r = match op.range_reader(&path, ..length).await { Ok(r) => r, // Not all services support range with tail range, let's tolerate this. Err(err) if err.kind() == ErrorKind::Unsupported => { @@ -534,10 +458,7 @@ pub async fn test_reader_tail(op: Operator) -> Result<()> { "read content" ); - op.object(&path) - .delete() - .await - .expect("delete must succeed"); + op.delete(&path).await.expect("delete must succeed"); Ok(()) } @@ -545,9 +466,9 @@ pub async fn test_reader_tail(op: Operator) -> Result<()> { pub async fn test_read_not_exist(op: Operator) -> Result<()> { let path = uuid::Uuid::new_v4().to_string(); - let bs = op.object(&path).read().await; + let bs = op.read(&path).await; assert!(bs.is_err()); - assert_eq!(bs.unwrap_err().kind(), ErrorKind::ObjectNotFound); + assert_eq!(bs.unwrap_err().kind(), ErrorKind::NotFound); Ok(()) } @@ -557,16 +478,12 @@ pub async fn test_fuzz_range_reader(op: Operator) -> Result<()> { debug!("Generate a random file: {}", &path); let (content, _) = gen_bytes(); - op.object(&path) - .write(content.clone()) + op.write(&path, content.clone()) .await .expect("write must succeed"); let mut fuzzer = ObjectReaderFuzzer::new(&path, content.clone(), 0, content.len()); - let mut o = op - .object(&path) - .range_reader(0..content.len() as u64) - .await?; + let mut o = op.range_reader(&path, 0..content.len() as u64).await?; for _ in 0..100 { match fuzzer.fuzz() { @@ -589,10 +506,7 @@ pub async fn test_fuzz_range_reader(op: Operator) -> Result<()> { } } - op.object(&path) - .delete() - .await - .expect("delete must succeed"); + op.delete(&path).await.expect("delete must succeed"); Ok(()) } @@ -601,13 +515,12 @@ pub async fn test_fuzz_offset_reader(op: Operator) -> Result<()> { 
debug!("Generate a random file: {}", &path); let (content, _) = gen_bytes(); - op.object(&path) - .write(content.clone()) + op.write(&path, content.clone()) .await .expect("write must succeed"); let mut fuzzer = ObjectReaderFuzzer::new(&path, content.clone(), 0, content.len()); - let mut o = op.object(&path).range_reader(0..).await?; + let mut o = op.range_reader(&path, 0..).await?; for _ in 0..100 { match fuzzer.fuzz() { @@ -630,10 +543,7 @@ pub async fn test_fuzz_offset_reader(op: Operator) -> Result<()> { } } - op.object(&path) - .delete() - .await - .expect("delete must succeed"); + op.delete(&path).await.expect("delete must succeed"); Ok(()) } @@ -643,17 +553,13 @@ pub async fn test_fuzz_part_reader(op: Operator) -> Result<()> { let (content, size) = gen_bytes(); let (offset, length) = gen_offset_length(size); - op.object(&path) - .write(content.clone()) + op.write(&path, content.clone()) .await .expect("write must succeed"); let mut fuzzer = ObjectReaderFuzzer::new(&path, content.clone(), offset as usize, length as usize); - let mut o = op - .object(&path) - .range_reader(offset..offset + length) - .await?; + let mut o = op.range_reader(&path, offset..offset + length).await?; for _ in 0..100 { match fuzzer.fuzz() { @@ -676,10 +582,7 @@ pub async fn test_fuzz_part_reader(op: Operator) -> Result<()> { } } - op.object(&path) - .delete() - .await - .expect("delete must succeed"); + op.delete(&path).await.expect("delete must succeed"); Ok(()) } @@ -687,16 +590,13 @@ pub async fn test_fuzz_part_reader(op: Operator) -> Result<()> { pub async fn test_read_with_dir_path(op: Operator) -> Result<()> { let path = format!("{}/", uuid::Uuid::new_v4()); - op.object(&path).create().await.expect("write must succeed"); + op.create(&path).await.expect("write must succeed"); - let result = op.object(&path).read().await; + let result = op.read(&path).await; assert!(result.is_err()); - assert_eq!(result.unwrap_err().kind(), ErrorKind::ObjectIsADirectory); + 
assert_eq!(result.unwrap_err().kind(), ErrorKind::IsADirectory); - op.object(&path) - .delete() - .await - .expect("delete must succeed"); + op.delete(&path).await.expect("delete must succeed"); Ok(()) } @@ -706,12 +606,11 @@ pub async fn test_read_with_special_chars(op: Operator) -> Result<()> { debug!("Generate a random file: {}", &path); let (content, size) = gen_bytes(); - op.object(&path) - .write(content.clone()) + op.write(&path, content.clone()) .await .expect("write must succeed"); - let bs = op.object(&path).read().await?; + let bs = op.read(&path).await?; assert_eq!(size, bs.len(), "read size"); assert_eq!( format!("{:x}", Sha256::digest(&bs)), @@ -719,10 +618,7 @@ pub async fn test_read_with_special_chars(op: Operator) -> Result<()> { "read content" ); - op.object(&path) - .delete() - .await - .expect("delete must succeed"); + op.delete(&path).await.expect("delete must succeed"); Ok(()) } @@ -731,15 +627,12 @@ pub async fn test_delete(op: Operator) -> Result<()> { let path = uuid::Uuid::new_v4().to_string(); let (content, _) = gen_bytes(); - op.object(&path) - .write(content) - .await - .expect("write must succeed"); + op.write(&path, content).await.expect("write must succeed"); - op.object(&path).delete().await?; + op.delete(&path).await?; // Stat it again to check. 
- assert!(!op.object(&path).is_exist().await?); + assert!(!op.is_exist(&path).await?); Ok(()) } @@ -748,12 +641,9 @@ pub async fn test_delete(op: Operator) -> Result<()> { pub async fn test_delete_empty_dir(op: Operator) -> Result<()> { let path = format!("{}/", uuid::Uuid::new_v4()); - op.object(&path) - .create() - .await - .expect("create must succeed"); + op.create(&path).await.expect("create must succeed"); - op.object(&path).delete().await?; + op.delete(&path).await?; Ok(()) } @@ -764,15 +654,12 @@ pub async fn test_delete_with_special_chars(op: Operator) -> Result<()> { debug!("Generate a random file: {}", &path); let (content, _) = gen_bytes(); - op.object(&path) - .write(content) - .await - .expect("write must succeed"); + op.write(&path, content).await.expect("write must succeed"); - op.object(&path).delete().await?; + op.delete(&path).await?; // Stat it again to check. - assert!(!op.object(&path).is_exist().await?); + assert!(!op.is_exist(&path).await?); Ok(()) } @@ -781,7 +668,7 @@ pub async fn test_delete_with_special_chars(op: Operator) -> Result<()> { pub async fn test_delete_not_existing(op: Operator) -> Result<()> { let path = uuid::Uuid::new_v4().to_string(); - op.object(&path).delete().await?; + op.delete(&path).await?; Ok(()) } @@ -789,25 +676,23 @@ pub async fn test_delete_not_existing(op: Operator) -> Result<()> { // Delete via stream. pub async fn test_delete_stream(op: Operator) -> Result<()> { let dir = uuid::Uuid::new_v4().to_string(); - op.object(&format!("{dir}/")) - .create() + op.create(&format!("{dir}/")) .await .expect("creat must succeed"); let expected: Vec<_> = (0..100).collect(); for path in expected.iter() { - op.object(&format!("{dir}/{path}")).create().await?; + op.create(&format!("{dir}/{path}")).await?; } - op.batch() - .with_limit(30) + op.with_limit(30) .remove_via(futures::stream::iter(expected.clone()).map(|v| format!("{dir}/{v}"))) .await?; // Stat it again to check. 
for path in expected.iter() { assert!( - !op.object(&format!("{dir}/{path}")).is_exist().await?, + !op.is_exist(&format!("{dir}/{path}")).await?, "{path} should be removed" ) } @@ -822,7 +707,7 @@ pub async fn test_append(op: Operator) -> Result<()> { let content_a = gen_fixed_bytes(size); let content_b = gen_fixed_bytes(size); - let mut w = match op.object(&path).writer().await { + let mut w = match op.writer(&path).await { Ok(w) => w, Err(err) if err.kind() == ErrorKind::Unsupported => { warn!("service doesn't support write with append"); @@ -834,10 +719,10 @@ pub async fn test_append(op: Operator) -> Result<()> { w.append(content_b.clone()).await?; w.close().await?; - let meta = op.object(&path).stat().await.expect("stat must succeed"); + let meta = op.stat(&path).await.expect("stat must succeed"); assert_eq!(meta.content_length(), (size * 2) as u64); - let bs = op.object(&path).read().await?; + let bs = op.read(&path).await?; assert_eq!(bs.len(), size * 2, "read size"); assert_eq!( format!("{:x}", Sha256::digest(&bs[..size])), @@ -850,9 +735,6 @@ pub async fn test_append(op: Operator) -> Result<()> { "read content b" ); - op.object(&path) - .delete() - .await - .expect("delete must succeed"); + op.delete(&path).await.expect("delete must succeed"); Ok(()) }