From f4417cce8e4e458e07d5b713e970036446be945e Mon Sep 17 00:00:00 2001 From: Xuanwo Date: Thu, 4 Jan 2024 23:09:16 +0800 Subject: [PATCH 1/2] chore: Format code to make readers happy Signed-off-by: Xuanwo --- Cargo.toml | 10 +- bin/ofs/src/lib.rs | 10 +- bindings/java/Cargo.toml | 4 +- bindings/nodejs/Cargo.toml | 4 +- bindings/php/Cargo.toml | 4 +- bindings/python/Cargo.toml | 4 +- core/Cargo.toml | 18 +- .../Cargo.toml | 2 +- .../src/main.rs | 3 +- core/edge/s3_read_on_wasm/Cargo.toml | 4 +- core/edge/s3_read_on_wasm/src/lib.rs | 3 +- core/src/layers/complete.rs | 28 +- core/src/layers/retry.rs | 4 +- core/src/lib.rs | 4 +- core/src/raw/accessor.rs | 230 ++++++++-------- core/src/raw/enum_utils.rs | 6 +- core/src/raw/futures_util.rs | 17 +- core/src/raw/http_util/header.rs | 6 +- core/src/raw/oio/read/buffer_reader.rs | 18 +- core/src/raw/oio/read/range_read.rs | 4 +- core/src/raw/oio/stream/into_stream.rs | 14 +- core/src/services/alluxio/backend.rs | 28 +- core/src/services/atomicserver/backend.rs | 3 +- core/src/services/azblob/backend.rs | 54 ++-- core/src/services/azdls/backend.rs | 116 ++++---- core/src/services/azfile/backend.rs | 62 ++--- core/src/services/b2/backend.rs | 118 ++++----- core/src/services/chainsafe/backend.rs | 43 ++- core/src/services/chainsafe/core.rs | 1 - core/src/services/chainsafe/lister.rs | 4 +- core/src/services/chainsafe/writer.rs | 5 +- core/src/services/cos/backend.rs | 50 ++-- core/src/services/dbfs/backend.rs | 62 ++--- core/src/services/dropbox/backend.rs | 80 +++--- core/src/services/fs/backend.rs | 200 +++++++------- core/src/services/ftp/backend.rs | 42 ++- core/src/services/gcs/backend.rs | 112 ++++---- core/src/services/gdrive/backend.rs | 70 ++--- core/src/services/ghac/backend.rs | 60 ++--- core/src/services/hdfs/backend.rs | 247 +++++++++--------- core/src/services/hdfs/writer.rs | 9 +- core/src/services/http/backend.rs | 46 ++-- core/src/services/huggingface/backend.rs | 48 ++-- core/src/services/ipfs/backend.rs | 26 +- core/src/services/ipmfs/backend.rs | 40 +-- core/src/services/memcached/backend.rs | 9 +- core/src/services/obs/backend.rs | 88 +++---- core/src/services/onedrive/backend.rs | 94 +++---- core/src/services/oss/backend.rs | 58 ++-- core/src/services/pcloud/backend.rs | 154 ++++++----- core/src/services/pcloud/core.rs | 14 +- core/src/services/pcloud/writer.rs | 6 +- core/src/services/rocksdb/backend.rs | 7 +- core/src/services/s3/backend.rs | 60 ++--- core/src/services/seafile/backend.rs | 52 ++-- core/src/services/seafile/core.rs | 11 +- core/src/services/seafile/lister.rs | 5 +- core/src/services/seafile/writer.rs | 4 +- core/src/services/sftp/backend.rs | 92 +++---- core/src/services/supabase/backend.rs | 40 +-- core/src/services/swift/backend.rs | 64 ++--- core/src/services/upyun/backend.rs | 84 +++--- core/src/services/upyun/core.rs | 14 +- core/src/services/upyun/lister.rs | 3 +- core/src/services/upyun/writer.rs | 5 +- core/src/services/vercel_artifacts/backend.rs | 34 +-- core/src/services/webdav/backend.rs | 140 +++++----- core/src/services/webhdfs/backend.rs | 68 ++--- core/tests/behavior/blocking_write.rs | 4 +- core/tests/behavior/main.rs | 3 +- core/tests/behavior/utils.rs | 3 +- deny.toml | 12 +- 72 files changed, 1538 insertions(+), 1513 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 7bc750cf5af..267a1858dc1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,12 +18,12 @@ [workspace] default-members = ["core"] exclude = [ - "examples", + "examples", - "bindings/c", - "bindings/ocaml", - "bindings/php", - 
"bindings/ruby" + "bindings/c", + "bindings/ocaml", + "bindings/php", + "bindings/ruby", ] members = [ "core", diff --git a/bin/ofs/src/lib.rs b/bin/ofs/src/lib.rs index bff825fb5d3..3fc0cadae6b 100644 --- a/bin/ofs/src/lib.rs +++ b/bin/ofs/src/lib.rs @@ -15,14 +15,14 @@ // specific language governing permissions and limitations // under the License. -use fuse3::path::prelude::*; -use fuse3::Result; - -use async_trait::async_trait; -use futures_util::stream::{Empty, Iter}; use std::ffi::OsStr; use std::vec::IntoIter; +use async_trait::async_trait; +use fuse3::path::prelude::*; +use fuse3::Result; +use futures_util::stream::Empty; +use futures_util::stream::Iter; use opendal::Operator; pub struct Ofs { diff --git a/bindings/java/Cargo.toml b/bindings/java/Cargo.toml index f9c3dc5e045..9787a7325f9 100644 --- a/bindings/java/Cargo.toml +++ b/bindings/java/Cargo.toml @@ -114,7 +114,6 @@ services-webhdfs = ["opendal/services-webhdfs"] services-alluxio = ["opendal/services-alluxio"] services-azfile = ["opendal/services-azfile"] services-b2 = ["opendal/services-b2"] -services-upyun = ["opendal/services-upyun"] services-cacache = ["opendal/services-cacache"] services-dashmap = ["opendal/services-dashmap"] services-dropbox = ["opendal/services-dropbox"] @@ -138,13 +137,14 @@ services-postgresql = ["opendal/services-postgresql"] services-redb = ["opendal/services-redb"] services-redis = ["opendal/services-redis"] services-rocksdb = ["opendal/services-rocksdb"] +services-seafile = ["opendal/services-seafile"] services-sftp = ["opendal/services-sftp"] services-sled = ["opendal/services-sled"] -services-seafile = ["opendal/services-seafile"] services-sqlite = ["opendal/services-sqlite"] services-supabase = ["opendal/services-supabase"] services-swift = ["opendal/services-swift"] services-tikv = ["opendal/services-tikv"] +services-upyun = ["opendal/services-upyun"] services-vercel-artifacts = ["opendal/services-vercel-artifacts"] services-wasabi = ["opendal/services-wasabi"] diff --git a/bindings/nodejs/Cargo.toml b/bindings/nodejs/Cargo.toml index 90c04dd4fb0..51da25bd668 100644 --- a/bindings/nodejs/Cargo.toml +++ b/bindings/nodejs/Cargo.toml @@ -108,7 +108,6 @@ services-webhdfs = ["opendal/services-webhdfs"] # Optional services provided by opendal. services-alluxio = ["opendal/services-alluxio"] services-azfile = ["opendal/services-azfile"] -services-upyun = ["opendal/services-upyun"] services-b2 = ["opendal/services-b2"] services-cacache = ["opendal/services-cacache"] services-dashmap = ["opendal/services-dashmap"] @@ -133,13 +132,14 @@ services-postgresql = ["opendal/services-postgresql"] services-redb = ["opendal/services-redb"] services-redis = ["opendal/services-redis"] services-rocksdb = ["opendal/services-rocksdb"] +services-seafile = ["opendal/services-seafile"] services-sftp = ["opendal/services-sftp"] services-sled = ["opendal/services-sled"] services-sqlite = ["opendal/services-sqlite"] -services-seafile = ["opendal/services-seafile"] services-supabase = ["opendal/services-supabase"] services-swift = ["opendal/services-swift"] services-tikv = ["opendal/services-tikv"] +services-upyun = ["opendal/services-upyun"] services-vercel-artifacts = ["opendal/services-vercel-artifacts"] services-wasabi = ["opendal/services-wasabi"] diff --git a/bindings/php/Cargo.toml b/bindings/php/Cargo.toml index cddc68cb17b..2cd7ff8248a 100644 --- a/bindings/php/Cargo.toml +++ b/bindings/php/Cargo.toml @@ -16,13 +16,13 @@ # under the License. 
[package] -name = "opendal-php" -version = "0.1.0" edition = "2021" homepage = "https://opendal.apache.org/" license = "Apache-2.0" +name = "opendal-php" repository = "https://github.com/apache/incubator-opendal" rust-version = "1.67" +version = "0.1.0" [lib] crate-type = ["cdylib"] diff --git a/bindings/python/Cargo.toml b/bindings/python/Cargo.toml index 2dc3d394c32..711a11a98e9 100644 --- a/bindings/python/Cargo.toml +++ b/bindings/python/Cargo.toml @@ -111,7 +111,6 @@ services-webhdfs = ["opendal/services-webhdfs"] services-alluxio = ["opendal/services-alluxio"] services-azfile = ["opendal/services-azfile"] services-b2 = ["opendal/services-b2"] -services-upyun = ["opendal/services-upyun"] services-cacache = ["opendal/services-cacache"] services-dashmap = ["opendal/services-dashmap"] services-dropbox = ["opendal/services-dropbox"] @@ -135,13 +134,14 @@ services-postgresql = ["opendal/services-postgresql"] services-redb = ["opendal/services-redb"] services-redis = ["opendal/services-redis"] services-rocksdb = ["opendal/services-rocksdb"] +services-seafile = ["opendal/services-seafile"] services-sftp = ["opendal/services-sftp"] services-sled = ["opendal/services-sled"] services-sqlite = ["opendal/services-sqlite"] -services-seafile = ["opendal/services-seafile"] services-supabase = ["opendal/services-supabase"] services-swift = ["opendal/services-swift"] services-tikv = ["opendal/services-tikv"] +services-upyun = ["opendal/services-upyun"] services-vercel-artifacts = ["opendal/services-vercel-artifacts"] services-wasabi = ["opendal/services-wasabi"] diff --git a/core/Cargo.toml b/core/Cargo.toml index 0128db37244..11c92212a06 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -121,11 +121,8 @@ services-azdls = [ ] services-azfile = [] services-b2 = [] -services-seafile = [] -services-upyun = ["dep:hmac", "dep:sha1"] -services-chainsafe = [] -services-pcloud = [] services-cacache = ["dep:cacache"] +services-chainsafe = [] services-cloudflare-kv = [] services-cos = [ "dep:reqsign", @@ -139,7 +136,7 @@ services-dropbox = [] services-etcd = ["dep:etcd-client", "dep:bb8"] services-foundationdb = ["dep:foundationdb"] services-fs = ["tokio/fs"] -services-ftp = ["dep:suppaftp", "dep:bb8", "dep:async-tls" ] +services-ftp = ["dep:suppaftp", "dep:bb8", "dep:async-tls"] services-gcs = [ "dep:reqsign", "reqsign?/services-google", @@ -171,6 +168,7 @@ services-oss = [ "reqsign?/services-aliyun", "reqsign?/reqwest_request", ] +services-pcloud = [] services-persy = ["dep:persy"] services-postgresql = ["dep:tokio-postgres", "dep:bb8", "dep:bb8-postgres"] services-redb = ["dep:redb"] @@ -182,12 +180,14 @@ services-s3 = [ "reqsign?/services-aws", "reqsign?/reqwest_request", ] +services-seafile = [] services-sftp = ["dep:openssh", "dep:openssh-sftp-client", "dep:dirs"] services-sled = ["dep:sled"] services-sqlite = ["dep:rusqlite", "dep:r2d2"] services-supabase = [] services-swift = [] services-tikv = ["tikv-client"] +services-upyun = ["dep:hmac", "dep:sha1"] services-vercel-artifacts = [] # Deprecated # wasabi services support has been removed. 
@@ -219,6 +219,7 @@ required-features = ["tests"] anyhow = { version = "1.0.30", features = ["std"] } async-backtrace = { version = "0.2.6", optional = true } async-compat = "0.2" +async-tls = { version = "0.12.0", optional = true } async-trait = "0.1.68" atomic_lib = { version = "0.34.5", optional = true } await-tree = { version = "0.1.1", optional = true } @@ -246,6 +247,7 @@ foundationdb = { version = "0.8.0", features = [ futures = { version = "0.3", default-features = false, features = ["std"] } governor = { version = "0.6.0", optional = true, features = ["std"] } hdrs = { version = "0.3.0", optional = true, features = ["async_file"] } +hmac = { version = "0.12.1", optional = true } hrana-client-proto = { version = "0.2.1", optional = true } http = "0.2.9" log = "0.4" @@ -290,9 +292,8 @@ rocksdb = { version = "0.21.0", default-features = false, optional = true } rusqlite = { version = "0.29.0", optional = true, features = ["bundled"] } serde = { version = "1", features = ["derive"] } serde_json = "1" -sha2 = { version = "0.10", optional = true } -hmac = { version = "0.12.1", optional = true } sha1 = { version = "0.10.6", optional = true } +sha2 = { version = "0.10", optional = true } sled = { version = "0.34.7", optional = true } suppaftp = { version = "5.2", default-features = false, features = [ "async-secure", @@ -300,11 +301,10 @@ suppaftp = { version = "5.2", default-features = false, features = [ "async-rustls", ], optional = true } tikv-client = { version = "0.3.0", optional = true, default-features = false } -tokio = { version= "1.27",features = ["sync"] } +tokio = { version = "1.27", features = ["sync"] } tokio-postgres = { version = "0.7.8", optional = true } tracing = { version = "0.1", optional = true } uuid = { version = "1", features = ["serde", "v4"] } -async-tls = { version = "0.12.0", optional = true } [target.'cfg(target_arch = "wasm32")'.dependencies] getrandom = { version = "0.2", features = ["js"] } diff --git a/core/edge/s3_aws_assume_role_with_web_identity/Cargo.toml b/core/edge/s3_aws_assume_role_with_web_identity/Cargo.toml index e7cf869a772..c4842534ead 100644 --- a/core/edge/s3_aws_assume_role_with_web_identity/Cargo.toml +++ b/core/edge/s3_aws_assume_role_with_web_identity/Cargo.toml @@ -25,5 +25,5 @@ license.workspace = true [dependencies] opendal = { workspace = true, features = ["tests"] } -uuid = { version = "1", features = ["serde", "v4"] } tokio = { version = "1", features = ["full"] } +uuid = { version = "1", features = ["serde", "v4"] } diff --git a/core/edge/s3_aws_assume_role_with_web_identity/src/main.rs b/core/edge/s3_aws_assume_role_with_web_identity/src/main.rs index 19446d2d741..b5a5bc64883 100644 --- a/core/edge/s3_aws_assume_role_with_web_identity/src/main.rs +++ b/core/edge/s3_aws_assume_role_with_web_identity/src/main.rs @@ -16,7 +16,8 @@ // under the License. 
use opendal::raw::tests::init_test_service; -use opendal::{Result, Scheme}; +use opendal::Result; +use opendal::Scheme; #[tokio::main] async fn main() -> Result<()> { diff --git a/core/edge/s3_read_on_wasm/Cargo.toml b/core/edge/s3_read_on_wasm/Cargo.toml index 5a37dc13caa..e479a0eee64 100644 --- a/core/edge/s3_read_on_wasm/Cargo.toml +++ b/core/edge/s3_read_on_wasm/Cargo.toml @@ -27,7 +27,9 @@ license.workspace = true crate-type = ["cdylib"] [dependencies] -opendal = { path = "../../", default-features = false, features = ["services-s3"] } +opendal = { path = "../../", default-features = false, features = [ + "services-s3", +] } wasm-bindgen = "0.2.89" wasm-bindgen-futures = "0.4.39" diff --git a/core/edge/s3_read_on_wasm/src/lib.rs b/core/edge/s3_read_on_wasm/src/lib.rs index 4e8ea7f36bc..d0fadff2a54 100644 --- a/core/edge/s3_read_on_wasm/src/lib.rs +++ b/core/edge/s3_read_on_wasm/src/lib.rs @@ -43,10 +43,11 @@ pub async fn hello_world() -> String { #[cfg(test)] mod tests { - use super::*; use wasm_bindgen_test::wasm_bindgen_test; use wasm_bindgen_test::wasm_bindgen_test_configure; + use super::*; + wasm_bindgen_test_configure!(run_in_browser); #[wasm_bindgen_test] diff --git a/core/src/layers/complete.rs b/core/src/layers/complete.rs index 69e1e7b83d9..60dd2d26738 100644 --- a/core/src/layers/complete.rs +++ b/core/src/layers/complete.rs @@ -786,10 +786,10 @@ mod tests { #[cfg_attr(target_arch = "wasm32", async_trait(?Send))] impl Accessor for MockService { type Reader = oio::Reader; - type BlockingReader = oio::BlockingReader; type Writer = oio::Writer; - type BlockingWriter = oio::BlockingWriter; type Lister = oio::Lister; + type BlockingReader = oio::BlockingReader; + type BlockingWriter = oio::BlockingWriter; type BlockingLister = oio::BlockingLister; fn info(&self) -> AccessorInfo { @@ -803,6 +803,10 @@ mod tests { Ok(RpCreateDir {}) } + async fn stat(&self, _: &str, _: OpStat) -> Result { + Ok(RpStat::new(Metadata::new(EntryMode::Unknown))) + } + async fn read(&self, _: &str, _: OpRead) -> Result<(RpRead, Self::Reader)> { Ok((RpRead::new(), Box::new(oio::Cursor::new()))) } @@ -811,18 +815,6 @@ mod tests { Ok((RpWrite::new(), Box::new(()))) } - async fn copy(&self, _: &str, _: &str, _: OpCopy) -> Result { - Ok(RpCopy {}) - } - - async fn rename(&self, _: &str, _: &str, _: OpRename) -> Result { - Ok(RpRename {}) - } - - async fn stat(&self, _: &str, _: OpStat) -> Result { - Ok(RpStat::new(Metadata::new(EntryMode::Unknown))) - } - async fn delete(&self, _: &str, _: OpDelete) -> Result { Ok(RpDelete {}) } @@ -831,6 +823,14 @@ mod tests { Ok((RpList {}, Box::new(()))) } + async fn copy(&self, _: &str, _: &str, _: OpCopy) -> Result { + Ok(RpCopy {}) + } + + async fn rename(&self, _: &str, _: &str, _: OpRename) -> Result { + Ok(RpRename {}) + } + async fn presign(&self, _: &str, _: OpPresign) -> Result { Ok(RpPresign::new(PresignedRequest::new( HttpMethod::POST, diff --git a/core/src/layers/retry.rs b/core/src/layers/retry.rs index 4219f445a19..1093646bcd0 100644 --- a/core/src/layers/retry.rs +++ b/core/src/layers/retry.rs @@ -1164,10 +1164,10 @@ mod tests { #[cfg_attr(target_arch = "wasm32", async_trait(?Send))] impl Accessor for MockService { type Reader = MockReader; - type BlockingReader = (); type Writer = (); - type BlockingWriter = (); type Lister = MockLister; + type BlockingReader = (); + type BlockingWriter = (); type BlockingLister = (); fn info(&self) -> AccessorInfo { diff --git a/core/src/lib.rs b/core/src/lib.rs index 26dfdcd0396..1e658c2ad75 100644 --- a/core/src/lib.rs 
+++ b/core/src/lib.rs @@ -62,10 +62,10 @@ //! every operation that OpenDAL performs. //! //! ```no_run +//! use opendal::layers::LoggingLayer; //! use opendal::services; //! use opendal::Operator; //! use opendal::Result; -//! use opendal::layers::LoggingLayer; //! //! #[tokio::main] //! async fn main() -> Result<()> { @@ -96,10 +96,10 @@ //! - `reader_with`: Create a reader with advanced options. //! //! ```no_run +//! use opendal::layers::LoggingLayer; //! use opendal::services; //! use opendal::Operator; //! use opendal::Result; -//! use opendal::layers::LoggingLayer; //! //! #[tokio::main] //! async fn main() -> Result<()> { diff --git a/core/src/raw/accessor.rs b/core/src/raw/accessor.rs index 4d78b153a56..f74f3ac2ae7 100644 --- a/core/src/raw/accessor.rs +++ b/core/src/raw/accessor.rs @@ -56,20 +56,18 @@ use crate::*; #[cfg_attr(not(target_arch = "wasm32"), async_trait)] #[cfg_attr(target_arch = "wasm32", async_trait(?Send))] pub trait Accessor: Send + Sync + Debug + Unpin + 'static { - /// Reader is the associated reader the could return in `read` operation. + /// Reader is the associated reader returned in `read` operation. type Reader: oio::Read; - /// BlockingReader is the associated reader that could return in - /// `blocking_read` operation. - type BlockingReader: oio::BlockingRead; - /// Writer is the associated writer the could return in `write` operation. + /// Writer is the associated writer returned in `write` operation. type Writer: oio::Write; - /// BlockingWriter is the associated writer the could return in - /// `blocking_write` operation. - type BlockingWriter: oio::BlockingWrite; - /// Lister is the associated lister that return in `list` operation. + /// Lister is the associated lister returned in `list` operation. type Lister: oio::List; + + /// BlockingReader is the associated reader returned in `blocking_read` operation. + type BlockingReader: oio::BlockingRead; + /// BlockingWriter is the associated writer returned in `blocking_write` operation. + type BlockingWriter: oio::BlockingWrite; + /// BlockingLister is the associated lister returned in `blocking_list` operation. type BlockingLister: oio::BlockingList; /// Invoke the `info` operation to get metadata of accessor. @@ -102,6 +100,24 @@ pub trait Accessor: Send + Sync + Debug + Unpin + 'static { )) } + /// Invoke the `stat` operation on the specified path. + /// + /// Require [`Capability::stat`] + /// + /// # Behavior + /// + /// - `stat` empty path means stat backend's root path. + /// - `stat` a path ending with "/" means stating a dir. + /// - `mode` and `content_length` must be set. + async fn stat(&self, path: &str, args: OpStat) -> Result<RpStat> { + let (_, _) = (path, args); + + Err(Error::new( + ErrorKind::Unsupported, + "operation is not supported", + )) + } + /// Invoke the `read` operation on the specified path, returns a /// [`Reader`][crate::Reader] if operate successful. /// @@ -137,29 +153,16 @@ pub trait Accessor: Send + Sync + Debug + Unpin + 'static { )) } - /// Invoke the `copy` operation on the specified `from` path and `to` path. - /// - /// Require [Capability::copy] + /// Invoke the `delete` operation on the specified path. /// - /// # Behaviour + /// Require [`Capability::delete`] /// - /// - `from` and `to` MUST be file path, DON'T NEED to check mode. - /// - Copy on existing file SHOULD succeed. - /// - Copy on existing file SHOULD overwrite and truncate.
- async fn copy(&self, from: &str, to: &str, args: OpCopy) -> Result<RpCopy> { - let (_, _, _) = (from, to, args); - - Err(Error::new( - ErrorKind::Unsupported, - "operation is not supported", - )) - } - - /// Invoke the `rename` operation on the specified `from` path and `to` path. + /// # Behavior /// - /// Require [Capability::rename] - async fn rename(&self, from: &str, to: &str, args: OpRename) -> Result<RpRename> { - let (_, _, _) = (from, to, args); + /// - `delete` is an idempotent operation, it's safe to call `Delete` on the same path multiple times. + /// - `delete` SHOULD return `Ok(())` if the path is deleted successfully or does not exist. + async fn delete(&self, path: &str, args: OpDelete) -> Result<RpDelete> { + let (_, _) = (path, args); Err(Error::new( ErrorKind::Unsupported, "operation is not supported", )) } - /// Invoke the `stat` operation on the specified path. + /// Invoke the `list` operation on the specified path. /// - /// Require [`Capability::stat`] + /// Require [`Capability::list`] /// /// # Behavior /// - /// - `stat` empty path means stat backend's root path. - /// - `stat` a path endswith "/" means stating a dir. - /// - `mode` and `content_length` must be set. - async fn stat(&self, path: &str, args: OpStat) -> Result<RpStat> { + /// - Input path MUST be dir path, DON'T NEED to check mode. + /// - List non-exist dir should return Empty. + async fn list(&self, path: &str, args: OpList) -> Result<(RpList, Self::Lister)> { let (_, _) = (path, args); Err(Error::new( ErrorKind::Unsupported, "operation is not supported", )) } - /// Invoke the `delete` operation on the specified path. + /// Invoke the `copy` operation on the specified `from` path and `to` path. /// - /// Require [`Capability::delete`] + /// Require [`Capability::copy`] /// - /// # Behavior + /// # Behaviour /// - /// - `delete` is an idempotent operation, it's safe to call `Delete` on the same path multiple times. - /// - `delete` SHOULD return `Ok(())` if the path is deleted successfully or not exist. - async fn delete(&self, path: &str, args: OpDelete) -> Result<RpDelete> { - let (_, _) = (path, args); + /// - `from` and `to` MUST be file path, DON'T NEED to check mode. + /// - Copy on existing file SHOULD succeed. + /// - Copy on existing file SHOULD overwrite and truncate. + async fn copy(&self, from: &str, to: &str, args: OpCopy) -> Result<RpCopy> { + let (_, _, _) = (from, to, args); Err(Error::new( ErrorKind::Unsupported, "operation is not supported", )) } - /// Invoke the `list` operation on the specified path. - /// - /// Require [`Capability::list`] - /// - /// # Behavior + /// Invoke the `rename` operation on the specified `from` path and `to` path. /// - /// - Input path MUST be dir path, DON'T NEED to check mode. - /// - List non-exist dir should return Empty. - async fn list(&self, path: &str, args: OpList) -> Result<(RpList, Self::Lister)> { - let (_, _) = (path, args); + /// Require [`Capability::rename`] + async fn rename(&self, from: &str, to: &str, args: OpRename) -> Result<RpRename> { + let (_, _, _) = (from, to, args); Err(Error::new( ErrorKind::Unsupported, "operation is not supported", )) } @@ -261,6 +259,20 @@ pub trait Accessor: Send + Sync + Debug + Unpin + 'static { )) } + /// Invoke the `blocking_stat` operation on the specified path.
+ /// + /// This operation is the blocking version of [`Accessor::stat`] + /// + /// Require [`Capability::stat`] and [`Capability::blocking`] + fn blocking_stat(&self, path: &str, args: OpStat) -> Result<RpStat> { + let (_, _) = (path, args); + + Err(Error::new( + ErrorKind::Unsupported, + "operation is not supported", + )) + } + /// Invoke the `blocking_read` operation on the specified path. /// /// This operation is the blocking version of [`Accessor::read`] /// @@ -289,13 +301,13 @@ pub trait Accessor: Send + Sync + Debug + Unpin + 'static { )) } - /// Invoke the `blocking_copy` operation on the specified `from` path and `to` path. + /// Invoke the `blocking_delete` operation on the specified path. /// - /// This operation is the blocking version of [`Accessor::copy`] + /// This operation is the blocking version of [`Accessor::delete`] /// - /// Require [`Capability::copy`] and [`Capability::blocking`] - fn blocking_copy(&self, from: &str, to: &str, args: OpCopy) -> Result<RpCopy> { - let (_, _, _) = (from, to, args); + /// Require [`Capability::delete`] and [`Capability::blocking`] + fn blocking_delete(&self, path: &str, args: OpDelete) -> Result<RpDelete> { + let (_, _) = (path, args); Err(Error::new( ErrorKind::Unsupported, "operation is not supported", )) } - /// Invoke the `blocking_rename` operation on the specified `from` path and `to` path. + /// Invoke the `blocking_list` operation on the specified path. /// - /// This operation is the blocking version of [`Accessor::rename`] + /// This operation is the blocking version of [`Accessor::list`] /// - /// Require [`Capability::rename`] and [`Capability::blocking`] - fn blocking_rename(&self, from: &str, to: &str, args: OpRename) -> Result<RpRename> { - let (_, _, _) = (from, to, args); - - Err(Error::new( - ErrorKind::Unsupported, - "operation is not supported", - )) - } - - /// Invoke the `blocking_stat` operation on the specified path. + /// Require [`Capability::list`] and [`Capability::blocking`] /// - /// This operation is the blocking version of [`Accessor::stat`] + /// # Behavior /// - /// Require [`Capability::stat`] and [`Capability::blocking`] - fn blocking_stat(&self, path: &str, args: OpStat) -> Result<RpStat> { + /// - List non-exist dir should return Empty. + fn blocking_list(&self, path: &str, args: OpList) -> Result<(RpList, Self::BlockingLister)> { let (_, _) = (path, args); Err(Error::new( ErrorKind::Unsupported, "operation is not supported", )) } - /// Invoke the `blocking_delete` operation on the specified path. + /// Invoke the `blocking_copy` operation on the specified `from` path and `to` path. /// - /// This operation is the blocking version of [`Accessor::delete`] + /// This operation is the blocking version of [`Accessor::copy`] /// - /// Require [`Capability::write`] and [`Capability::blocking`] - fn blocking_delete(&self, path: &str, args: OpDelete) -> Result<RpDelete> { - let (_, _) = (path, args); + /// Require [`Capability::copy`] and [`Capability::blocking`] + fn blocking_copy(&self, from: &str, to: &str, args: OpCopy) -> Result<RpCopy> { + let (_, _, _) = (from, to, args); Err(Error::new( ErrorKind::Unsupported, "operation is not supported", )) } - /// Invoke the `blocking_list` operation on the specified path.
- /// - /// This operation is the blocking version of [`Accessor::list`] - /// - /// Require [`Capability::list`] and [`Capability::blocking`] + /// Invoke the `blocking_rename` operation on the specified `from` path and `to` path. /// - /// # Behavior + /// This operation is the blocking version of [`Accessor::rename`] /// - /// - List non-exist dir should return Empty. - fn blocking_list(&self, path: &str, args: OpList) -> Result<(RpList, Self::BlockingLister)> { - let (_, _) = (path, args); + /// Require [`Capability::rename`] and [`Capability::blocking`] + fn blocking_rename(&self, from: &str, to: &str, args: OpRename) -> Result { + let (_, _, _) = (from, to, args); Err(Error::new( ErrorKind::Unsupported, @@ -369,10 +367,10 @@ pub trait Accessor: Send + Sync + Debug + Unpin + 'static { #[cfg_attr(target_arch = "wasm32", async_trait(?Send))] impl Accessor for () { type Reader = (); - type BlockingReader = (); type Writer = (); - type BlockingWriter = (); type Lister = (); + type BlockingReader = (); + type BlockingWriter = (); type BlockingLister = (); fn info(&self) -> AccessorInfo { @@ -392,10 +390,10 @@ impl Accessor for () { #[cfg_attr(target_arch = "wasm32", async_trait(?Send))] impl Accessor for Arc { type Reader = T::Reader; - type BlockingReader = T::BlockingReader; type Writer = T::Writer; - type BlockingWriter = T::BlockingWriter; type Lister = T::Lister; + type BlockingReader = T::BlockingReader; + type BlockingWriter = T::BlockingWriter; type BlockingLister = T::BlockingLister; fn info(&self) -> AccessorInfo { @@ -406,66 +404,66 @@ impl Accessor for Arc { self.as_ref().create_dir(path, args).await } + async fn stat(&self, path: &str, args: OpStat) -> Result { + self.as_ref().stat(path, args).await + } async fn read(&self, path: &str, args: OpRead) -> Result<(RpRead, Self::Reader)> { self.as_ref().read(path, args).await } + async fn write(&self, path: &str, args: OpWrite) -> Result<(RpWrite, Self::Writer)> { self.as_ref().write(path, args).await } + async fn delete(&self, path: &str, args: OpDelete) -> Result { + self.as_ref().delete(path, args).await + } + + async fn list(&self, path: &str, args: OpList) -> Result<(RpList, Self::Lister)> { + self.as_ref().list(path, args).await + } async fn copy(&self, from: &str, to: &str, args: OpCopy) -> Result { self.as_ref().copy(from, to, args).await } - async fn rename(&self, from: &str, to: &str, args: OpRename) -> Result { self.as_ref().rename(from, to, args).await } - async fn stat(&self, path: &str, args: OpStat) -> Result { - self.as_ref().stat(path, args).await - } - async fn delete(&self, path: &str, args: OpDelete) -> Result { - self.as_ref().delete(path, args).await - } - async fn list(&self, path: &str, args: OpList) -> Result<(RpList, Self::Lister)> { - self.as_ref().list(path, args).await + async fn presign(&self, path: &str, args: OpPresign) -> Result { + self.as_ref().presign(path, args).await } async fn batch(&self, args: OpBatch) -> Result { self.as_ref().batch(args).await } - async fn presign(&self, path: &str, args: OpPresign) -> Result { - self.as_ref().presign(path, args).await - } - fn blocking_create_dir(&self, path: &str, args: OpCreateDir) -> Result { self.as_ref().blocking_create_dir(path, args) } + fn blocking_stat(&self, path: &str, args: OpStat) -> Result { + self.as_ref().blocking_stat(path, args) + } fn blocking_read(&self, path: &str, args: OpRead) -> Result<(RpRead, Self::BlockingReader)> { self.as_ref().blocking_read(path, args) } + fn blocking_write(&self, path: &str, args: OpWrite) -> 
Result<(RpWrite, Self::BlockingWriter)> { self.as_ref().blocking_write(path, args) } - fn blocking_copy(&self, from: &str, to: &str, args: OpCopy) -> Result { - self.as_ref().blocking_copy(from, to, args) - } - - fn blocking_rename(&self, from: &str, to: &str, args: OpRename) -> Result { - self.as_ref().blocking_rename(from, to, args) - } - - fn blocking_stat(&self, path: &str, args: OpStat) -> Result { - self.as_ref().blocking_stat(path, args) - } fn blocking_delete(&self, path: &str, args: OpDelete) -> Result { self.as_ref().blocking_delete(path, args) } + fn blocking_list(&self, path: &str, args: OpList) -> Result<(RpList, Self::BlockingLister)> { self.as_ref().blocking_list(path, args) } + fn blocking_copy(&self, from: &str, to: &str, args: OpCopy) -> Result { + self.as_ref().blocking_copy(from, to, args) + } + fn blocking_rename(&self, from: &str, to: &str, args: OpRename) -> Result { + self.as_ref().blocking_rename(from, to, args) + } } /// FusedAccessor is the type erased accessor with `Arc`. diff --git a/core/src/raw/enum_utils.rs b/core/src/raw/enum_utils.rs index f2b2625b003..56b32a9de07 100644 --- a/core/src/raw/enum_utils.rs +++ b/core/src/raw/enum_utils.rs @@ -38,9 +38,11 @@ //! This module is used to provide some enums for the above code. We should remove this module once //! type_alias_impl_trait has been stabilized. -use bytes::Bytes; use std::io::SeekFrom; -use std::task::{Context, Poll}; +use std::task::Context; +use std::task::Poll; + +use bytes::Bytes; use crate::raw::*; use crate::*; diff --git a/core/src/raw/futures_util.rs b/core/src/raw/futures_util.rs index 318abe4ec7d..08583fe6d8e 100644 --- a/core/src/raw/futures_util.rs +++ b/core/src/raw/futures_util.rs @@ -15,12 +15,15 @@ // specific language governing permissions and limitations // under the License. -use futures::stream::FuturesOrdered; -use futures::{FutureExt, StreamExt}; use std::collections::VecDeque; use std::future::Future; use std::pin::Pin; -use std::task::{Context, Poll}; +use std::task::Context; +use std::task::Poll; + +use futures::stream::FuturesOrdered; +use futures::FutureExt; +use futures::StreamExt; /// BoxedFuture is the type alias of [`futures::future::BoxFuture`]. 
/// @@ -189,12 +192,14 @@ where #[cfg(test)] mod tests { - use super::*; + use std::task::ready; + use std::time::Duration; + use futures::future::BoxFuture; use futures::Stream; use rand::Rng; - use std::task::ready; - use std::time::Duration; + + use super::*; struct Lister { size: usize, diff --git a/core/src/raw/http_util/header.rs b/core/src/raw/http_util/header.rs index ec28113a70d..87748c96f4d 100644 --- a/core/src/raw/http_util/header.rs +++ b/core/src/raw/http_util/header.rs @@ -19,16 +19,18 @@ use base64::engine::general_purpose; use base64::Engine; use chrono::DateTime; use chrono::Utc; +use http::header::CACHE_CONTROL; use http::header::CONTENT_DISPOSITION; +use http::header::CONTENT_ENCODING; use http::header::CONTENT_LENGTH; use http::header::CONTENT_RANGE; use http::header::CONTENT_TYPE; use http::header::ETAG; use http::header::LAST_MODIFIED; use http::header::LOCATION; -use http::header::{CACHE_CONTROL, CONTENT_ENCODING}; +use http::HeaderMap; +use http::HeaderName; use http::HeaderValue; -use http::{HeaderMap, HeaderName}; use md5::Digest; use crate::raw::*; diff --git a/core/src/raw/oio/read/buffer_reader.rs b/core/src/raw/oio/read/buffer_reader.rs index 449f93e0102..3d96b93c127 100644 --- a/core/src/raw/oio/read/buffer_reader.rs +++ b/core/src/raw/oio/read/buffer_reader.rs @@ -15,21 +15,19 @@ // specific language governing permissions and limitations // under the License. -use bytes::BufMut; -use bytes::Bytes; -use tokio::io::ReadBuf; - use std::cmp::min; use std::io::SeekFrom; - use std::task::ready; use std::task::Context; use std::task::Poll; -use crate::raw::*; -use crate::*; +use bytes::BufMut; +use bytes::Bytes; +use tokio::io::ReadBuf; use super::BlockingRead; +use crate::raw::*; +use crate::*; /// [BufferReader] allows the underlying reader to fetch data at the buffer's size /// and is used to amortize the IO's overhead. 
@@ -316,7 +314,6 @@ mod tests { use std::io::SeekFrom; use std::sync::Arc; - use crate::raw::oio::RangeReader; use async_trait::async_trait; use bytes::Bytes; use futures::AsyncReadExt; @@ -326,6 +323,7 @@ mod tests { use sha2::Sha256; use super::*; + use crate::raw::oio::RangeReader; // Generate bytes between [4MiB, 16MiB) fn gen_bytes() -> (Bytes, usize) { @@ -352,10 +350,10 @@ mod tests { #[async_trait] impl Accessor for MockReadService { type Reader = MockReader; - type BlockingReader = MockReader; type Writer = (); - type BlockingWriter = (); type Lister = (); + type BlockingReader = MockReader; + type BlockingWriter = (); type BlockingLister = (); fn info(&self) -> AccessorInfo { diff --git a/core/src/raw/oio/read/range_read.rs b/core/src/raw/oio/read/range_read.rs index 4b46f47ac68..5ffacf2ec8a 100644 --- a/core/src/raw/oio/read/range_read.rs +++ b/core/src/raw/oio/read/range_read.rs @@ -672,10 +672,10 @@ mod tests { #[async_trait] impl Accessor for MockReadService { type Reader = MockReader; - type BlockingReader = (); type Writer = (); - type BlockingWriter = (); type Lister = (); + type BlockingReader = (); + type BlockingWriter = (); type BlockingLister = (); fn info(&self) -> AccessorInfo { diff --git a/core/src/raw/oio/stream/into_stream.rs b/core/src/raw/oio/stream/into_stream.rs index d5a57970c72..3e34bbf4bab 100644 --- a/core/src/raw/oio/stream/into_stream.rs +++ b/core/src/raw/oio/stream/into_stream.rs @@ -20,10 +20,13 @@ pub use non_wasm32_impl::*; #[cfg(not(target_arch = "wasm32"))] mod non_wasm32_impl { - use crate::raw::oio; + use std::task::Context; + use std::task::Poll; + use bytes::Bytes; use futures::TryStreamExt; - use std::task::{Context, Poll}; + + use crate::raw::oio; /// Convert given futures stream into [`oio::Stream`]. pub fn into_stream(stream: S) -> IntoStream @@ -51,10 +54,13 @@ mod non_wasm32_impl { pub use wasm32_impl::*; #[cfg(target_arch = "wasm32")] mod wasm32_impl { - use crate::raw::oio; + use std::task::Context; + use std::task::Poll; + use bytes::Bytes; use futures::TryStreamExt; - use std::task::{Context, Poll}; + + use crate::raw::oio; /// Convert given futures stream into [`oio::Stream`]. 
pub fn into_stream(stream: S) -> IntoStream diff --git a/core/src/services/alluxio/backend.rs b/core/src/services/alluxio/backend.rs index 078a871da59..d14d82f0856 100644 --- a/core/src/services/alluxio/backend.rs +++ b/core/src/services/alluxio/backend.rs @@ -182,10 +182,10 @@ pub struct AlluxioBackend { #[async_trait] impl Accessor for AlluxioBackend { type Reader = IncomingAsyncBody; - type BlockingReader = (); type Writer = AlluxioWriters; - type BlockingWriter = (); type Lister = oio::PageLister; + type BlockingReader = (); + type BlockingWriter = (); type BlockingLister = (); fn info(&self) -> AccessorInfo { @@ -216,6 +216,12 @@ impl Accessor for AlluxioBackend { Ok(RpCreateDir::default()) } + async fn stat(&self, path: &str, _: OpStat) -> Result { + let file_info = self.core.get_status(path).await?; + + Ok(RpStat::new(file_info.try_into()?)) + } + async fn read(&self, path: &str, args: OpRead) -> Result<(RpRead, Self::Reader)> { let stream_id = self.core.open_file(path).await?; @@ -231,18 +237,6 @@ impl Accessor for AlluxioBackend { Ok((RpWrite::default(), w)) } - async fn stat(&self, path: &str, _: OpStat) -> Result { - let file_info = self.core.get_status(path).await?; - - Ok(RpStat::new(file_info.try_into()?)) - } - - async fn rename(&self, from: &str, to: &str, _: OpRename) -> Result { - self.core.rename(from, to).await?; - - Ok(RpRename::default()) - } - async fn delete(&self, path: &str, _: OpDelete) -> Result { self.core.delete(path).await?; @@ -253,6 +247,12 @@ impl Accessor for AlluxioBackend { let l = AlluxioLister::new(self.core.clone(), path); Ok((RpList::default(), oio::PageLister::new(l))) } + + async fn rename(&self, from: &str, to: &str, _: OpRename) -> Result { + self.core.rename(from, to).await?; + + Ok(RpRename::default()) + } } #[cfg(test)] diff --git a/core/src/services/atomicserver/backend.rs b/core/src/services/atomicserver/backend.rs index 2ad7aa42a28..0742cd9301f 100644 --- a/core/src/services/atomicserver/backend.rs +++ b/core/src/services/atomicserver/backend.rs @@ -31,16 +31,17 @@ use serde::Deserialize; use serde::Serialize; use crate::raw::adapters::kv; +use crate::raw::new_json_deserialize_error; use crate::raw::new_json_serialize_error; use crate::raw::new_request_build_error; use crate::raw::normalize_path; use crate::raw::normalize_root; use crate::raw::percent_encode_path; use crate::raw::AsyncBody; +use crate::raw::ConfigDeserializer; use crate::raw::FormDataPart; use crate::raw::HttpClient; use crate::raw::Multipart; -use crate::raw::{new_json_deserialize_error, ConfigDeserializer}; use crate::Builder; use crate::Scheme; use crate::*; diff --git a/core/src/services/azblob/backend.rs b/core/src/services/azblob/backend.rs index d7ae49344c7..3a38c9977ca 100644 --- a/core/src/services/azblob/backend.rs +++ b/core/src/services/azblob/backend.rs @@ -543,10 +543,10 @@ pub struct AzblobBackend { #[cfg_attr(target_arch = "wasm32", async_trait(?Send))] impl Accessor for AzblobBackend { type Reader = IncomingAsyncBody; - type BlockingReader = (); type Writer = AzblobWriters; - type BlockingWriter = (); type Lister = oio::PageLister; + type BlockingReader = (); + type BlockingWriter = (); type BlockingLister = (); fn info(&self) -> AccessorInfo { @@ -593,6 +593,17 @@ impl Accessor for AzblobBackend { am } + async fn stat(&self, path: &str, args: OpStat) -> Result { + let resp = self.core.azblob_get_blob_properties(path, &args).await?; + + let status = resp.status(); + + match status { + StatusCode::OK => parse_into_metadata(path, 
resp.headers()).map(RpStat::new), + _ => Err(parse_error(resp).await?), + } + } + async fn read(&self, path: &str, args: OpRead) -> Result<(RpRead, Self::Reader)> { let resp = self.core.azblob_get_blob(path, &args).await?; @@ -626,31 +637,6 @@ impl Accessor for AzblobBackend { Ok((RpWrite::default(), w)) } - async fn copy(&self, from: &str, to: &str, _args: OpCopy) -> Result { - let resp = self.core.azblob_copy_blob(from, to).await?; - - let status = resp.status(); - - match status { - StatusCode::ACCEPTED => { - resp.into_body().consume().await?; - Ok(RpCopy::default()) - } - _ => Err(parse_error(resp).await?), - } - } - - async fn stat(&self, path: &str, args: OpStat) -> Result { - let resp = self.core.azblob_get_blob_properties(path, &args).await?; - - let status = resp.status(); - - match status { - StatusCode::OK => parse_into_metadata(path, resp.headers()).map(RpStat::new), - _ => Err(parse_error(resp).await?), - } - } - async fn delete(&self, path: &str, _: OpDelete) -> Result { let resp = self.core.azblob_delete_blob(path).await?; @@ -673,6 +659,20 @@ impl Accessor for AzblobBackend { Ok((RpList::default(), oio::PageLister::new(l))) } + async fn copy(&self, from: &str, to: &str, _args: OpCopy) -> Result { + let resp = self.core.azblob_copy_blob(from, to).await?; + + let status = resp.status(); + + match status { + StatusCode::ACCEPTED => { + resp.into_body().consume().await?; + Ok(RpCopy::default()) + } + _ => Err(parse_error(resp).await?), + } + } + async fn presign(&self, path: &str, args: OpPresign) -> Result { let mut req = match args.operation() { PresignOperation::Stat(v) => self.core.azblob_head_blob_request(path, v)?, diff --git a/core/src/services/azdls/backend.rs b/core/src/services/azdls/backend.rs index 77240e72e7a..53fa53d496e 100644 --- a/core/src/services/azdls/backend.rs +++ b/core/src/services/azdls/backend.rs @@ -230,10 +230,10 @@ pub struct AzdlsBackend { #[async_trait] impl Accessor for AzdlsBackend { type Reader = IncomingAsyncBody; - type BlockingReader = (); type Writer = AzdlsWriters; - type BlockingWriter = (); type Lister = oio::PageLister; + type BlockingReader = (); + type BlockingWriter = (); type BlockingLister = (); fn info(&self) -> AccessorInfo { @@ -285,62 +285,6 @@ impl Accessor for AzdlsBackend { } } - async fn read(&self, path: &str, args: OpRead) -> Result<(RpRead, Self::Reader)> { - let resp = self.core.azdls_read(path, args.range()).await?; - - let status = resp.status(); - - match status { - StatusCode::OK | StatusCode::PARTIAL_CONTENT => { - let size = parse_content_length(resp.headers())?; - let range = parse_content_range(resp.headers())?; - Ok(( - RpRead::new().with_size(size).with_range(range), - resp.into_body(), - )) - } - StatusCode::RANGE_NOT_SATISFIABLE => { - resp.into_body().consume().await?; - Ok((RpRead::new().with_size(Some(0)), IncomingAsyncBody::empty())) - } - _ => Err(parse_error(resp).await?), - } - } - - async fn write(&self, path: &str, args: OpWrite) -> Result<(RpWrite, Self::Writer)> { - let w = AzdlsWriter::new(self.core.clone(), args.clone(), path.to_string()); - let w = if args.append() { - AzdlsWriters::Two(oio::AppendObjectWriter::new(w)) - } else { - AzdlsWriters::One(oio::OneShotWriter::new(w)) - }; - Ok((RpWrite::default(), w)) - } - - async fn rename(&self, from: &str, to: &str, _args: OpRename) -> Result { - if let Some(resp) = self.core.azdls_ensure_parent_path(to).await? 
{ - let status = resp.status(); - match status { - StatusCode::CREATED | StatusCode::CONFLICT => { - resp.into_body().consume().await?; - } - _ => return Err(parse_error(resp).await?), - } - } - - let resp = self.core.azdls_rename(from, to).await?; - - let status = resp.status(); - - match status { - StatusCode::CREATED => { - resp.into_body().consume().await?; - Ok(RpRename::default()) - } - _ => Err(parse_error(resp).await?), - } - } - async fn stat(&self, path: &str, _: OpStat) -> Result { // Stat root always returns a DIR. if path == "/" { @@ -387,6 +331,38 @@ impl Accessor for AzdlsBackend { Ok(RpStat::new(meta)) } + async fn read(&self, path: &str, args: OpRead) -> Result<(RpRead, Self::Reader)> { + let resp = self.core.azdls_read(path, args.range()).await?; + + let status = resp.status(); + + match status { + StatusCode::OK | StatusCode::PARTIAL_CONTENT => { + let size = parse_content_length(resp.headers())?; + let range = parse_content_range(resp.headers())?; + Ok(( + RpRead::new().with_size(size).with_range(range), + resp.into_body(), + )) + } + StatusCode::RANGE_NOT_SATISFIABLE => { + resp.into_body().consume().await?; + Ok((RpRead::new().with_size(Some(0)), IncomingAsyncBody::empty())) + } + _ => Err(parse_error(resp).await?), + } + } + + async fn write(&self, path: &str, args: OpWrite) -> Result<(RpWrite, Self::Writer)> { + let w = AzdlsWriter::new(self.core.clone(), args.clone(), path.to_string()); + let w = if args.append() { + AzdlsWriters::Two(oio::AppendObjectWriter::new(w)) + } else { + AzdlsWriters::One(oio::OneShotWriter::new(w)) + }; + Ok((RpWrite::default(), w)) + } + async fn delete(&self, path: &str, _: OpDelete) -> Result { let resp = self.core.azdls_delete(path).await?; @@ -403,6 +379,30 @@ impl Accessor for AzdlsBackend { Ok((RpList::default(), oio::PageLister::new(l))) } + + async fn rename(&self, from: &str, to: &str, _args: OpRename) -> Result { + if let Some(resp) = self.core.azdls_ensure_parent_path(to).await? { + let status = resp.status(); + match status { + StatusCode::CREATED | StatusCode::CONFLICT => { + resp.into_body().consume().await?; + } + _ => return Err(parse_error(resp).await?), + } + } + + let resp = self.core.azdls_rename(from, to).await?; + + let status = resp.status(); + + match status { + StatusCode::CREATED => { + resp.into_body().consume().await?; + Ok(RpRename::default()) + } + _ => Err(parse_error(resp).await?), + } + } } fn infer_storage_name_from_endpoint(endpoint: &str) -> Option { diff --git a/core/src/services/azfile/backend.rs b/core/src/services/azfile/backend.rs index 375b52af6c3..dea84c73aaa 100644 --- a/core/src/services/azfile/backend.rs +++ b/core/src/services/azfile/backend.rs @@ -247,10 +247,10 @@ pub struct AzfileBackend { #[async_trait] impl Accessor for AzfileBackend { type Reader = IncomingAsyncBody; - type BlockingReader = (); type Writer = AzfileWriters; - type BlockingWriter = (); type Lister = oio::PageLister; + type BlockingReader = (); + type BlockingWriter = (); type BlockingLister = (); fn info(&self) -> AccessorInfo { @@ -308,6 +308,23 @@ impl Accessor for AzfileBackend { } } + async fn stat(&self, path: &str, _: OpStat) -> Result { + let resp = if path.ends_with('/') { + self.core.azfile_get_directory_properties(path).await? + } else { + self.core.azfile_get_file_properties(path).await? 
+ }; + + let status = resp.status(); + match status { + StatusCode::OK => { + let meta = parse_into_metadata(path, resp.headers())?; + Ok(RpStat::new(meta)) + } + _ => Err(parse_error(resp).await?), + } + } + async fn read(&self, path: &str, args: OpRead) -> Result<(RpRead, Self::Reader)> { let resp = self.core.azfile_read(path, args.range()).await?; @@ -341,23 +358,29 @@ impl Accessor for AzfileBackend { return Ok((RpWrite::default(), w)); } - async fn stat(&self, path: &str, _: OpStat) -> Result { + async fn delete(&self, path: &str, _: OpDelete) -> Result { let resp = if path.ends_with('/') { - self.core.azfile_get_directory_properties(path).await? + self.core.azfile_delete_dir(path).await? } else { - self.core.azfile_get_file_properties(path).await? + self.core.azfile_delete_file(path).await? }; let status = resp.status(); match status { - StatusCode::OK => { - let meta = parse_into_metadata(path, resp.headers())?; - Ok(RpStat::new(meta)) + StatusCode::ACCEPTED | StatusCode::NOT_FOUND => { + resp.into_body().consume().await?; + Ok(RpDelete::default()) } _ => Err(parse_error(resp).await?), } } + async fn list(&self, path: &str, args: OpList) -> Result<(RpList, Self::Lister)> { + let l = AzfileLister::new(self.core.clone(), path.to_string(), args.limit()); + + Ok((RpList::default(), oio::PageLister::new(l))) + } + async fn rename(&self, from: &str, to: &str, _: OpRename) -> Result { self.core.ensure_parent_dir_exists(to).await?; let resp = self.core.azfile_rename(from, to).await?; @@ -370,29 +393,6 @@ impl Accessor for AzfileBackend { _ => Err(parse_error(resp).await?), } } - - async fn delete(&self, path: &str, _: OpDelete) -> Result { - let resp = if path.ends_with('/') { - self.core.azfile_delete_dir(path).await? - } else { - self.core.azfile_delete_file(path).await? - }; - - let status = resp.status(); - match status { - StatusCode::ACCEPTED | StatusCode::NOT_FOUND => { - resp.into_body().consume().await?; - Ok(RpDelete::default()) - } - _ => Err(parse_error(resp).await?), - } - } - - async fn list(&self, path: &str, args: OpList) -> Result<(RpList, Self::Lister)> { - let l = AzfileLister::new(self.core.clone(), path.to_string(), args.limit()); - - Ok((RpList::default(), oio::PageLister::new(l))) - } } #[cfg(test)] diff --git a/core/src/services/b2/backend.rs b/core/src/services/b2/backend.rs index 1502afe0a6b..a488d60bebd 100644 --- a/core/src/services/b2/backend.rs +++ b/core/src/services/b2/backend.rs @@ -269,14 +269,14 @@ pub struct B2Backend { impl Accessor for B2Backend { type Reader = IncomingAsyncBody; - type BlockingReader = (); - type Writer = B2Writers; - type BlockingWriter = (); - type Lister = oio::PageLister; + type BlockingReader = (); + + type BlockingWriter = (); + type BlockingLister = (); fn info(&self) -> AccessorInfo { @@ -326,6 +326,38 @@ impl Accessor for B2Backend { am } + /// B2 have a get_file_info api required a file_id field, but field_id need call list api, list api also return file info + /// So we call list api to get file info + async fn stat(&self, path: &str, _args: OpStat) -> Result { + // Stat root always returns a DIR. 
+ if path == "/" { + return Ok(RpStat::new(Metadata::new(EntryMode::DIR))); + } + + let delimiter = if path.ends_with('/') { Some("/") } else { None }; + let resp = self + .core + .list_file_names(Some(path), delimiter, None, None) + .await?; + + let status = resp.status(); + + match status { + StatusCode::OK => { + let bs = resp.into_body().bytes().await?; + + let resp: ListFileNamesResponse = + serde_json::from_slice(&bs).map_err(new_json_deserialize_error)?; + if resp.files.is_empty() { + return Err(Error::new(ErrorKind::NotFound, "no such file or directory")); + } + let meta = parse_file_info(&resp.files[0]); + Ok(RpStat::new(meta)) + } + _ => Err(parse_error(resp).await?), + } + } + async fn read(&self, path: &str, args: OpRead) -> Result<(RpRead, Self::Reader)> { let resp = self.core.download_file_by_name(path, &args).await?; @@ -356,38 +388,38 @@ impl Accessor for B2Backend { Ok((RpWrite::default(), w)) } - /// B2 have a get_file_info api required a file_id field, but field_id need call list api, list api also return file info - /// So we call list api to get file info - async fn stat(&self, path: &str, _args: OpStat) -> Result { - // Stat root always returns a DIR. - if path == "/" { - return Ok(RpStat::new(Metadata::new(EntryMode::DIR))); - } - - let delimiter = if path.ends_with('/') { Some("/") } else { None }; - let resp = self - .core - .list_file_names(Some(path), delimiter, None, None) - .await?; + async fn delete(&self, path: &str, _: OpDelete) -> Result { + let resp = self.core.hide_file(path).await?; let status = resp.status(); match status { - StatusCode::OK => { - let bs = resp.into_body().bytes().await?; - - let resp: ListFileNamesResponse = - serde_json::from_slice(&bs).map_err(new_json_deserialize_error)?; - if resp.files.is_empty() { - return Err(Error::new(ErrorKind::NotFound, "no such file or directory")); + StatusCode::OK => Ok(RpDelete::default()), + _ => { + let err = parse_error(resp).await?; + match err.kind() { + ErrorKind::NotFound => Ok(RpDelete::default()), + // Representative deleted + ErrorKind::AlreadyExists => Ok(RpDelete::default()), + _ => Err(err), } - let meta = parse_file_info(&resp.files[0]); - Ok(RpStat::new(meta)) } - _ => Err(parse_error(resp).await?), } } + async fn list(&self, path: &str, args: OpList) -> Result<(RpList, Self::Lister)> { + Ok(( + RpList::default(), + oio::PageLister::new(B2Lister::new( + self.core.clone(), + path, + args.recursive(), + args.limit(), + args.start_after(), + )), + )) + } + async fn copy(&self, from: &str, to: &str, _args: OpCopy) -> Result { let resp = self .core @@ -426,38 +458,6 @@ impl Accessor for B2Backend { } } - async fn delete(&self, path: &str, _: OpDelete) -> Result { - let resp = self.core.hide_file(path).await?; - - let status = resp.status(); - - match status { - StatusCode::OK => Ok(RpDelete::default()), - _ => { - let err = parse_error(resp).await?; - match err.kind() { - ErrorKind::NotFound => Ok(RpDelete::default()), - // Representative deleted - ErrorKind::AlreadyExists => Ok(RpDelete::default()), - _ => Err(err), - } - } - } - } - - async fn list(&self, path: &str, args: OpList) -> Result<(RpList, Self::Lister)> { - Ok(( - RpList::default(), - oio::PageLister::new(B2Lister::new( - self.core.clone(), - path, - args.recursive(), - args.limit(), - args.start_after(), - )), - )) - } - async fn presign(&self, path: &str, args: OpPresign) -> Result { match args.operation() { PresignOperation::Stat(_) => { diff --git a/core/src/services/chainsafe/backend.rs b/core/src/services/chainsafe/backend.rs 
index 17a156a688b..3957f4f900d 100644 --- a/core/src/services/chainsafe/backend.rs +++ b/core/src/services/chainsafe/backend.rs @@ -25,9 +25,6 @@ use http::StatusCode; use log::debug; use serde::Deserialize; -use crate::raw::*; -use crate::*; - use super::core::parse_info; use super::core::ChainsafeCore; use super::core::ObjectInfoResponse; @@ -35,6 +32,8 @@ use super::error::parse_error; use super::lister::ChainsafeLister; use super::writer::ChainsafeWriter; use super::writer::ChainsafeWriters; +use crate::raw::*; +use crate::*; /// Config for backblaze Chainsafe services support. #[derive(Default, Deserialize)] @@ -206,14 +205,14 @@ pub struct ChainsafeBackend { impl Accessor for ChainsafeBackend { type Reader = IncomingAsyncBody; - type BlockingReader = (); - type Writer = ChainsafeWriters; - type BlockingWriter = (); - type Lister = oio::PageLister; + type BlockingReader = (); + + type BlockingWriter = (); + type BlockingLister = (); fn info(&self) -> AccessorInfo { @@ -252,36 +251,36 @@ impl Accessor for ChainsafeBackend { } } - async fn read(&self, path: &str, _args: OpRead) -> Result<(RpRead, Self::Reader)> { - let resp = self.core.download_object(path).await?; + async fn stat(&self, path: &str, _args: OpStat) -> Result { + let resp = self.core.object_info(path).await?; let status = resp.status(); match status { StatusCode::OK => { - let size = parse_content_length(resp.headers())?; - let range = parse_content_range(resp.headers())?; - Ok(( - RpRead::new().with_size(size).with_range(range), - resp.into_body(), - )) + let bs = resp.into_body().bytes().await?; + + let output: ObjectInfoResponse = + serde_json::from_slice(&bs).map_err(new_json_deserialize_error)?; + Ok(RpStat::new(parse_info(output.content))) } _ => Err(parse_error(resp).await?), } } - async fn stat(&self, path: &str, _args: OpStat) -> Result { - let resp = self.core.object_info(path).await?; + async fn read(&self, path: &str, _args: OpRead) -> Result<(RpRead, Self::Reader)> { + let resp = self.core.download_object(path).await?; let status = resp.status(); match status { StatusCode::OK => { - let bs = resp.into_body().bytes().await?; - - let output: ObjectInfoResponse = - serde_json::from_slice(&bs).map_err(new_json_deserialize_error)?; - Ok(RpStat::new(parse_info(output.content))) + let size = parse_content_length(resp.headers())?; + let range = parse_content_range(resp.headers())?; + Ok(( + RpRead::new().with_size(size).with_range(range), + resp.into_body(), + )) } _ => Err(parse_error(resp).await?), } diff --git a/core/src/services/chainsafe/core.rs b/core/src/services/chainsafe/core.rs index 792cc22d0d2..50e3e370698 100644 --- a/core/src/services/chainsafe/core.rs +++ b/core/src/services/chainsafe/core.rs @@ -22,7 +22,6 @@ use bytes::Bytes; use http::header; use http::Request; use http::Response; - use serde::Deserialize; use serde_json::json; diff --git a/core/src/services/chainsafe/lister.rs b/core/src/services/chainsafe/lister.rs index 997fce21f89..69cdc552008 100644 --- a/core/src/services/chainsafe/lister.rs +++ b/core/src/services/chainsafe/lister.rs @@ -20,7 +20,9 @@ use std::sync::Arc; use async_trait::async_trait; use http::StatusCode; -use super::core::{parse_info, ChainsafeCore, Info}; +use super::core::parse_info; +use super::core::ChainsafeCore; +use super::core::Info; use super::error::parse_error; use crate::raw::oio::Entry; use crate::raw::*; diff --git a/core/src/services/chainsafe/writer.rs b/core/src/services/chainsafe/writer.rs index 27158378e0c..456e6ba71c9 100644 --- 
a/core/src/services/chainsafe/writer.rs +++ b/core/src/services/chainsafe/writer.rs @@ -20,11 +20,10 @@ use std::sync::Arc; use async_trait::async_trait; use http::StatusCode; -use crate::raw::*; -use crate::*; - use super::core::ChainsafeCore; use super::error::parse_error; +use crate::raw::*; +use crate::*; pub type ChainsafeWriters = oio::OneShotWriter; diff --git a/core/src/services/cos/backend.rs b/core/src/services/cos/backend.rs index a31e9818d27..5c6d4588586 100644 --- a/core/src/services/cos/backend.rs +++ b/core/src/services/cos/backend.rs @@ -242,10 +242,10 @@ pub struct CosBackend { #[async_trait] impl Accessor for CosBackend { type Reader = IncomingAsyncBody; - type BlockingReader = (); type Writer = CosWriters; - type BlockingWriter = (); type Lister = oio::PageLister; + type BlockingReader = (); + type BlockingWriter = (); type BlockingLister = (); fn info(&self) -> AccessorInfo { @@ -301,6 +301,17 @@ impl Accessor for CosBackend { am } + async fn stat(&self, path: &str, args: OpStat) -> Result { + let resp = self.core.cos_head_object(path, &args).await?; + + let status = resp.status(); + + match status { + StatusCode::OK => parse_into_metadata(path, resp.headers()).map(RpStat::new), + _ => Err(parse_error(resp).await?), + } + } + async fn read(&self, path: &str, args: OpRead) -> Result<(RpRead, Self::Reader)> { let resp = self.core.cos_get_object(path, &args).await?; @@ -335,39 +346,33 @@ impl Accessor for CosBackend { Ok((RpWrite::default(), w)) } - async fn copy(&self, from: &str, to: &str, _args: OpCopy) -> Result { - let resp = self.core.cos_copy_object(from, to).await?; + async fn delete(&self, path: &str, _: OpDelete) -> Result { + let resp = self.core.cos_delete_object(path).await?; let status = resp.status(); match status { - StatusCode::OK => { - resp.into_body().consume().await?; - Ok(RpCopy::default()) + StatusCode::NO_CONTENT | StatusCode::ACCEPTED | StatusCode::NOT_FOUND => { + Ok(RpDelete::default()) } _ => Err(parse_error(resp).await?), } } - async fn stat(&self, path: &str, args: OpStat) -> Result { - let resp = self.core.cos_head_object(path, &args).await?; - - let status = resp.status(); - - match status { - StatusCode::OK => parse_into_metadata(path, resp.headers()).map(RpStat::new), - _ => Err(parse_error(resp).await?), - } + async fn list(&self, path: &str, args: OpList) -> Result<(RpList, Self::Lister)> { + let l = CosLister::new(self.core.clone(), path, args.recursive(), args.limit()); + Ok((RpList::default(), oio::PageLister::new(l))) } - async fn delete(&self, path: &str, _: OpDelete) -> Result { - let resp = self.core.cos_delete_object(path).await?; + async fn copy(&self, from: &str, to: &str, _args: OpCopy) -> Result { + let resp = self.core.cos_copy_object(from, to).await?; let status = resp.status(); match status { - StatusCode::NO_CONTENT | StatusCode::ACCEPTED | StatusCode::NOT_FOUND => { - Ok(RpDelete::default()) + StatusCode::OK => { + resp.into_body().consume().await?; + Ok(RpCopy::default()) } _ => Err(parse_error(resp).await?), } @@ -393,9 +398,4 @@ impl Accessor for CosBackend { parts.headers, ))) } - - async fn list(&self, path: &str, args: OpList) -> Result<(RpList, Self::Lister)> { - let l = CosLister::new(self.core.clone(), path, args.recursive(), args.limit()); - Ok((RpList::default(), oio::PageLister::new(l))) - } } diff --git a/core/src/services/dbfs/backend.rs b/core/src/services/dbfs/backend.rs index 6a5fcb602d6..260a5abec36 100644 --- a/core/src/services/dbfs/backend.rs +++ b/core/src/services/dbfs/backend.rs @@ -154,10 +154,10 
@@ pub struct DbfsBackend { #[async_trait] impl Accessor for DbfsBackend { type Reader = DbfsReader; - type BlockingReader = (); type Writer = oio::OneShotWriter; - type BlockingWriter = (); type Lister = oio::PageLister; + type BlockingReader = (); + type BlockingWriter = (); type BlockingLister = (); fn info(&self) -> AccessorInfo { @@ -197,35 +197,6 @@ impl Accessor for DbfsBackend { } } - async fn read(&self, path: &str, args: OpRead) -> Result<(RpRead, Self::Reader)> { - let op = DbfsReader::new(self.core.clone(), args, path.to_string()); - - Ok((RpRead::new(), op)) - } - - async fn write(&self, path: &str, args: OpWrite) -> Result<(RpWrite, Self::Writer)> { - Ok(( - RpWrite::default(), - oio::OneShotWriter::new(DbfsWriter::new(self.core.clone(), args, path.to_string())), - )) - } - - async fn rename(&self, from: &str, to: &str, _args: OpRename) -> Result { - self.core.dbfs_ensure_parent_path(to).await?; - - let resp = self.core.dbfs_rename(from, to).await?; - - let status = resp.status(); - - match status { - StatusCode::OK => { - resp.into_body().consume().await?; - Ok(RpRename::default()) - } - _ => Err(parse_error(resp).await?), - } - } - async fn stat(&self, path: &str, _: OpStat) -> Result { // Stat root always returns a DIR. if path == "/" { @@ -261,6 +232,19 @@ impl Accessor for DbfsBackend { } } + async fn read(&self, path: &str, args: OpRead) -> Result<(RpRead, Self::Reader)> { + let op = DbfsReader::new(self.core.clone(), args, path.to_string()); + + Ok((RpRead::new(), op)) + } + + async fn write(&self, path: &str, args: OpWrite) -> Result<(RpWrite, Self::Writer)> { + Ok(( + RpWrite::default(), + oio::OneShotWriter::new(DbfsWriter::new(self.core.clone(), args, path.to_string())), + )) + } + /// NOTE: Server will return 200 even if the path doesn't exist. 
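
A minimal sketch of what this NOTE means for callers (it uses the public
`Operator::delete` API; the path below is hypothetical):

    use opendal::{Operator, Result};

    async fn demo(op: Operator) -> Result<()> {
        // DBFS answers 200 even when nothing was removed, so a successful
        // delete is no proof that the file ever existed.
        op.delete("path/that/never/existed").await?;
        Ok(())
    }
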
async fn delete(&self, path: &str, _: OpDelete) -> Result { let resp = self.core.dbfs_delete(path).await?; @@ -278,6 +262,22 @@ impl Accessor for DbfsBackend { Ok((RpList::default(), oio::PageLister::new(l))) } + + async fn rename(&self, from: &str, to: &str, _args: OpRename) -> Result { + self.core.dbfs_ensure_parent_path(to).await?; + + let resp = self.core.dbfs_rename(from, to).await?; + + let status = resp.status(); + + match status { + StatusCode::OK => { + resp.into_body().consume().await?; + Ok(RpRename::default()) + } + _ => Err(parse_error(resp).await?), + } + } } #[derive(Deserialize)] diff --git a/core/src/services/dropbox/backend.rs b/core/src/services/dropbox/backend.rs index 9774deed779..5d05aaf5f2f 100644 --- a/core/src/services/dropbox/backend.rs +++ b/core/src/services/dropbox/backend.rs @@ -36,10 +36,10 @@ pub struct DropboxBackend { #[async_trait] impl Accessor for DropboxBackend { type Reader = IncomingAsyncBody; - type BlockingReader = (); type Writer = oio::OneShotWriter; - type BlockingWriter = (); type Lister = (); + type BlockingReader = (); + type BlockingWriter = (); type BlockingLister = (); fn info(&self) -> AccessorInfo { @@ -97,6 +97,44 @@ impl Accessor for DropboxBackend { Ok(res) } + async fn stat(&self, path: &str, _: OpStat) -> Result { + let resp = self.core.dropbox_get_metadata(path).await?; + let status = resp.status(); + match status { + StatusCode::OK => { + let bytes = resp.into_body().bytes().await?; + let decoded_response = serde_json::from_slice::(&bytes) + .map_err(new_json_deserialize_error)?; + let entry_mode: EntryMode = match decoded_response.tag.as_str() { + "file" => EntryMode::FILE, + "folder" => EntryMode::DIR, + _ => EntryMode::Unknown, + }; + + let mut metadata = Metadata::new(entry_mode); + // Only set last_modified and size if entry_mode is FILE, because Dropbox API + // returns last_modified and size only for files. + // FYI: https://www.dropbox.com/developers/documentation/http/documentation#files-get_metadata + if entry_mode == EntryMode::FILE { + let date_utc_last_modified = + parse_datetime_from_rfc3339(&decoded_response.client_modified)?; + metadata.set_last_modified(date_utc_last_modified); + + if let Some(size) = decoded_response.size { + metadata.set_content_length(size); + } else { + return Err(Error::new( + ErrorKind::Unexpected, + &format!("no size found for file {}", path), + )); + } + } + Ok(RpStat::new(metadata)) + } + _ => Err(parse_error(resp).await?), + } + } + async fn read(&self, path: &str, args: OpRead) -> Result<(RpRead, Self::Reader)> { let resp = self.core.dropbox_get(path, args).await?; let status = resp.status(); @@ -138,44 +176,6 @@ impl Accessor for DropboxBackend { } } - async fn stat(&self, path: &str, _: OpStat) -> Result { - let resp = self.core.dropbox_get_metadata(path).await?; - let status = resp.status(); - match status { - StatusCode::OK => { - let bytes = resp.into_body().bytes().await?; - let decoded_response = serde_json::from_slice::(&bytes) - .map_err(new_json_deserialize_error)?; - let entry_mode: EntryMode = match decoded_response.tag.as_str() { - "file" => EntryMode::FILE, - "folder" => EntryMode::DIR, - _ => EntryMode::Unknown, - }; - - let mut metadata = Metadata::new(entry_mode); - // Only set last_modified and size if entry_mode is FILE, because Dropbox API - // returns last_modified and size only for files. 
- // FYI: https://www.dropbox.com/developers/documentation/http/documentation#files-get_metadata - if entry_mode == EntryMode::FILE { - let date_utc_last_modified = - parse_datetime_from_rfc3339(&decoded_response.client_modified)?; - metadata.set_last_modified(date_utc_last_modified); - - if let Some(size) = decoded_response.size { - metadata.set_content_length(size); - } else { - return Err(Error::new( - ErrorKind::Unexpected, - &format!("no size found for file {}", path), - )); - } - } - Ok(RpStat::new(metadata)) - } - _ => Err(parse_error(resp).await?), - } - } - async fn batch(&self, args: OpBatch) -> Result { let ops = args.into_operation(); if ops.len() > 1000 { diff --git a/core/src/services/fs/backend.rs b/core/src/services/fs/backend.rs index e2205e03c0e..68ebf1b398e 100644 --- a/core/src/services/fs/backend.rs +++ b/core/src/services/fs/backend.rs @@ -241,10 +241,10 @@ impl FsBackend { #[async_trait] impl Accessor for FsBackend { type Reader = oio::TokioReader; - type BlockingReader = oio::StdReader; type Writer = FsWriter; - type BlockingWriter = FsWriter; type Lister = Option>; + type BlockingReader = oio::StdReader; + type BlockingWriter = FsWriter; type BlockingLister = Option>; fn info(&self) -> AccessorInfo { @@ -286,6 +286,29 @@ impl Accessor for FsBackend { Ok(RpCreateDir::default()) } + async fn stat(&self, path: &str, _: OpStat) -> Result { + let p = self.root.join(path.trim_end_matches('/')); + + let meta = tokio::fs::metadata(&p).await.map_err(new_std_io_error)?; + + let mode = if meta.is_dir() { + EntryMode::DIR + } else if meta.is_file() { + EntryMode::FILE + } else { + EntryMode::Unknown + }; + let m = Metadata::new(mode) + .with_content_length(meta.len()) + .with_last_modified( + meta.modified() + .map(DateTime::from) + .map_err(new_std_io_error)?, + ); + + Ok(RpStat::new(m)) + } + /// # Notes /// /// There are three ways to get the total file length: @@ -346,6 +369,45 @@ impl Accessor for FsBackend { Ok((RpWrite::new(), FsWriter::new(target_path, tmp_path, f))) } + async fn delete(&self, path: &str, _: OpDelete) -> Result { + let p = self.root.join(path.trim_end_matches('/')); + + let meta = tokio::fs::metadata(&p).await; + + match meta { + Ok(meta) => { + if meta.is_dir() { + tokio::fs::remove_dir(&p).await.map_err(new_std_io_error)?; + } else { + tokio::fs::remove_file(&p).await.map_err(new_std_io_error)?; + } + + Ok(RpDelete::default()) + } + Err(err) if err.kind() == std::io::ErrorKind::NotFound => Ok(RpDelete::default()), + Err(err) => Err(new_std_io_error(err)), + } + } + + async fn list(&self, path: &str, _: OpList) -> Result<(RpList, Self::Lister)> { + let p = self.root.join(path.trim_end_matches('/')); + + let f = match tokio::fs::read_dir(&p).await { + Ok(rd) => rd, + Err(e) => { + return if e.kind() == std::io::ErrorKind::NotFound { + Ok((RpList::default(), None)) + } else { + Err(new_std_io_error(e)) + }; + } + }; + + let rd = FsLister::new(&self.root, f); + + Ok((RpList::default(), Some(rd))) + } + async fn copy(&self, from: &str, to: &str, _args: OpCopy) -> Result { let from = self.root.join(from.trim_end_matches('/')); @@ -374,10 +436,18 @@ impl Accessor for FsBackend { Ok(RpRename::default()) } - async fn stat(&self, path: &str, _: OpStat) -> Result { + fn blocking_create_dir(&self, path: &str, _: OpCreateDir) -> Result { let p = self.root.join(path.trim_end_matches('/')); - let meta = tokio::fs::metadata(&p).await.map_err(new_std_io_error)?; + std::fs::create_dir_all(p).map_err(new_std_io_error)?; + + Ok(RpCreateDir::default()) + } + + fn 
blocking_stat(&self, path: &str, _: OpStat) -> Result { + let p = self.root.join(path.trim_end_matches('/')); + + let meta = std::fs::metadata(p).map_err(new_std_io_error)?; let mode = if meta.is_dir() { EntryMode::DIR @@ -397,53 +467,6 @@ impl Accessor for FsBackend { Ok(RpStat::new(m)) } - async fn delete(&self, path: &str, _: OpDelete) -> Result { - let p = self.root.join(path.trim_end_matches('/')); - - let meta = tokio::fs::metadata(&p).await; - - match meta { - Ok(meta) => { - if meta.is_dir() { - tokio::fs::remove_dir(&p).await.map_err(new_std_io_error)?; - } else { - tokio::fs::remove_file(&p).await.map_err(new_std_io_error)?; - } - - Ok(RpDelete::default()) - } - Err(err) if err.kind() == std::io::ErrorKind::NotFound => Ok(RpDelete::default()), - Err(err) => Err(new_std_io_error(err)), - } - } - - async fn list(&self, path: &str, _: OpList) -> Result<(RpList, Self::Lister)> { - let p = self.root.join(path.trim_end_matches('/')); - - let f = match tokio::fs::read_dir(&p).await { - Ok(rd) => rd, - Err(e) => { - return if e.kind() == std::io::ErrorKind::NotFound { - Ok((RpList::default(), None)) - } else { - Err(new_std_io_error(e)) - }; - } - }; - - let rd = FsLister::new(&self.root, f); - - Ok((RpList::default(), Some(rd))) - } - - fn blocking_create_dir(&self, path: &str, _: OpCreateDir) -> Result { - let p = self.root.join(path.trim_end_matches('/')); - - std::fs::create_dir_all(p).map_err(new_std_io_error)?; - - Ok(RpCreateDir::default()) - } - fn blocking_read(&self, path: &str, _: OpRead) -> Result<(RpRead, Self::BlockingReader)> { let p = self.root.join(path.trim_end_matches('/')); @@ -495,55 +518,6 @@ impl Accessor for FsBackend { Ok((RpWrite::new(), FsWriter::new(target_path, tmp_path, f))) } - fn blocking_copy(&self, from: &str, to: &str, _args: OpCopy) -> Result { - let from = self.root.join(from.trim_end_matches('/')); - - // try to get the metadata of the source file to ensure it exists - std::fs::metadata(&from).map_err(new_std_io_error)?; - - let to = Self::blocking_ensure_write_abs_path(&self.root, to.trim_end_matches('/'))?; - - std::fs::copy(from, to).map_err(new_std_io_error)?; - - Ok(RpCopy::default()) - } - - fn blocking_rename(&self, from: &str, to: &str, _args: OpRename) -> Result { - let from = self.root.join(from.trim_end_matches('/')); - - // try to get the metadata of the source file to ensure it exists - std::fs::metadata(&from).map_err(new_std_io_error)?; - - let to = Self::blocking_ensure_write_abs_path(&self.root, to.trim_end_matches('/'))?; - - std::fs::rename(from, to).map_err(new_std_io_error)?; - - Ok(RpRename::default()) - } - - fn blocking_stat(&self, path: &str, _: OpStat) -> Result { - let p = self.root.join(path.trim_end_matches('/')); - - let meta = std::fs::metadata(p).map_err(new_std_io_error)?; - - let mode = if meta.is_dir() { - EntryMode::DIR - } else if meta.is_file() { - EntryMode::FILE - } else { - EntryMode::Unknown - }; - let m = Metadata::new(mode) - .with_content_length(meta.len()) - .with_last_modified( - meta.modified() - .map(DateTime::from) - .map_err(new_std_io_error)?, - ); - - Ok(RpStat::new(m)) - } - fn blocking_delete(&self, path: &str, _: OpDelete) -> Result { let p = self.root.join(path.trim_end_matches('/')); @@ -582,6 +556,32 @@ impl Accessor for FsBackend { Ok((RpList::default(), Some(rd))) } + + fn blocking_copy(&self, from: &str, to: &str, _args: OpCopy) -> Result { + let from = self.root.join(from.trim_end_matches('/')); + + // try to get the metadata of the source file to ensure it exists + 
std::fs::metadata(&from).map_err(new_std_io_error)?; + + let to = Self::blocking_ensure_write_abs_path(&self.root, to.trim_end_matches('/'))?; + + std::fs::copy(from, to).map_err(new_std_io_error)?; + + Ok(RpCopy::default()) + } + + fn blocking_rename(&self, from: &str, to: &str, _args: OpRename) -> Result { + let from = self.root.join(from.trim_end_matches('/')); + + // try to get the metadata of the source file to ensure it exists + std::fs::metadata(&from).map_err(new_std_io_error)?; + + let to = Self::blocking_ensure_write_abs_path(&self.root, to.trim_end_matches('/'))?; + + std::fs::rename(from, to).map_err(new_std_io_error)?; + + Ok(RpRename::default()) + } } #[cfg(test)] diff --git a/core/src/services/ftp/backend.rs b/core/src/services/ftp/backend.rs index 9888d35727b..e51b64384ba 100644 --- a/core/src/services/ftp/backend.rs +++ b/core/src/services/ftp/backend.rs @@ -31,14 +31,12 @@ use http::Uri; use log::debug; use serde::Deserialize; use suppaftp::list::File; - use suppaftp::types::FileType; use suppaftp::types::Response; use suppaftp::AsyncRustlsConnector; use suppaftp::AsyncRustlsFtpStream; use suppaftp::FtpError; use suppaftp::ImplAsyncFtpStream; - use suppaftp::Status; use tokio::sync::OnceCell; @@ -289,10 +287,10 @@ impl Debug for FtpBackend { #[async_trait] impl Accessor for FtpBackend { type Reader = FtpReader; - type BlockingReader = (); type Writer = FtpWriters; - type BlockingWriter = (); type Lister = FtpLister; + type BlockingReader = (); + type BlockingWriter = (); type BlockingLister = (); fn info(&self) -> AccessorInfo { @@ -343,6 +341,24 @@ impl Accessor for FtpBackend { return Ok(RpCreateDir::default()); } + async fn stat(&self, path: &str, _: OpStat) -> Result { + let file = self.ftp_stat(path).await?; + + let mode = if file.is_file() { + EntryMode::FILE + } else if file.is_directory() { + EntryMode::DIR + } else { + EntryMode::Unknown + }; + + let mut meta = Metadata::new(mode); + meta.set_content_length(file.size() as u64); + meta.set_last_modified(file.modified().into()); + + Ok(RpStat::new(meta)) + } + /// TODO: migrate to FileReader maybe? 
async fn read(&self, path: &str, args: OpRead) -> Result<(RpRead, Self::Reader)> { let mut ftp_stream = self.ftp_connect(Operation::Read).await?; @@ -418,24 +434,6 @@ impl Accessor for FtpBackend { Ok((RpWrite::new(), w)) } - async fn stat(&self, path: &str, _: OpStat) -> Result { - let file = self.ftp_stat(path).await?; - - let mode = if file.is_file() { - EntryMode::FILE - } else if file.is_directory() { - EntryMode::DIR - } else { - EntryMode::Unknown - }; - - let mut meta = Metadata::new(mode); - meta.set_content_length(file.size() as u64); - meta.set_last_modified(file.modified().into()); - - Ok(RpStat::new(meta)) - } - async fn delete(&self, path: &str, _: OpDelete) -> Result { let mut ftp_stream = self.ftp_connect(Operation::Delete).await?; diff --git a/core/src/services/gcs/backend.rs b/core/src/services/gcs/backend.rs index 6a016012351..3142f8a9b86 100644 --- a/core/src/services/gcs/backend.rs +++ b/core/src/services/gcs/backend.rs @@ -322,10 +322,10 @@ pub struct GcsBackend { #[cfg_attr(target_arch = "wasm32", async_trait(?Send))] impl Accessor for GcsBackend { type Reader = IncomingAsyncBody; - type BlockingReader = (); type Writer = GcsWriters; - type BlockingWriter = (); type Lister = oio::PageLister; + type BlockingReader = (); + type BlockingWriter = (); type BlockingLister = (); fn info(&self) -> AccessorInfo { @@ -375,37 +375,6 @@ impl Accessor for GcsBackend { am } - async fn read(&self, path: &str, args: OpRead) -> Result<(RpRead, Self::Reader)> { - let resp = self.core.gcs_get_object(path, &args).await?; - - if resp.status().is_success() { - let size = parse_content_length(resp.headers())?; - Ok((RpRead::new().with_size(size), resp.into_body())) - } else if resp.status() == StatusCode::RANGE_NOT_SATISFIABLE { - Ok((RpRead::new(), IncomingAsyncBody::empty())) - } else { - Err(parse_error(resp).await?) - } - } - - async fn write(&self, path: &str, args: OpWrite) -> Result<(RpWrite, Self::Writer)> { - let w = GcsWriter::new(self.core.clone(), path, args); - let w = oio::RangeWriter::new(w); - - Ok((RpWrite::default(), w)) - } - - async fn copy(&self, from: &str, to: &str, _: OpCopy) -> Result { - let resp = self.core.gcs_copy_object(from, to).await?; - - if resp.status().is_success() { - resp.into_body().consume().await?; - Ok(RpCopy::default()) - } else { - Err(parse_error(resp).await?) - } - } - async fn stat(&self, path: &str, args: OpStat) -> Result { let resp = self.core.gcs_get_object_metadata(path, &args).await?; @@ -437,6 +406,26 @@ impl Accessor for GcsBackend { Ok(RpStat::new(m)) } + async fn read(&self, path: &str, args: OpRead) -> Result<(RpRead, Self::Reader)> { + let resp = self.core.gcs_get_object(path, &args).await?; + + if resp.status().is_success() { + let size = parse_content_length(resp.headers())?; + Ok((RpRead::new().with_size(size), resp.into_body())) + } else if resp.status() == StatusCode::RANGE_NOT_SATISFIABLE { + Ok((RpRead::new(), IncomingAsyncBody::empty())) + } else { + Err(parse_error(resp).await?) 
+ } + } + + async fn write(&self, path: &str, args: OpWrite) -> Result<(RpWrite, Self::Writer)> { + let w = GcsWriter::new(self.core.clone(), path, args); + let w = oio::RangeWriter::new(w); + + Ok((RpWrite::default(), w)) + } + async fn delete(&self, path: &str, _: OpDelete) -> Result { let resp = self.core.gcs_delete_object(path).await?; @@ -460,6 +449,40 @@ impl Accessor for GcsBackend { Ok((RpList::default(), oio::PageLister::new(l))) } + async fn copy(&self, from: &str, to: &str, _: OpCopy) -> Result { + let resp = self.core.gcs_copy_object(from, to).await?; + + if resp.status().is_success() { + resp.into_body().consume().await?; + Ok(RpCopy::default()) + } else { + Err(parse_error(resp).await?) + } + } + + async fn presign(&self, path: &str, args: OpPresign) -> Result { + // We will not send this request out, just for signing. + let mut req = match args.operation() { + PresignOperation::Stat(v) => self.core.gcs_head_object_xml_request(path, v)?, + PresignOperation::Read(v) => self.core.gcs_get_object_xml_request(path, v)?, + PresignOperation::Write(v) => { + self.core + .gcs_insert_object_xml_request(path, v, AsyncBody::Empty)? + } + }; + + self.core.sign_query(&mut req, args.expire()).await?; + + // We don't need this request anymore, consume it directly. + let (parts, _) = req.into_parts(); + + Ok(RpPresign::new(PresignedRequest::new( + parts.method, + parts.uri, + parts.headers, + ))) + } + async fn batch(&self, args: OpBatch) -> Result { let ops = args.into_operation(); if ops.len() > 100 { @@ -518,29 +541,6 @@ impl Accessor for GcsBackend { Err(parse_error(resp).await?) } } - - async fn presign(&self, path: &str, args: OpPresign) -> Result { - // We will not send this request out, just for signing. - let mut req = match args.operation() { - PresignOperation::Stat(v) => self.core.gcs_head_object_xml_request(path, v)?, - PresignOperation::Read(v) => self.core.gcs_get_object_xml_request(path, v)?, - PresignOperation::Write(v) => { - self.core - .gcs_insert_object_xml_request(path, v, AsyncBody::Empty)? - } - }; - - self.core.sign_query(&mut req, args.expire()).await?; - - // We don't need this request anymore, consume it directly. 
- let (parts, _) = req.into_parts(); - - Ok(RpPresign::new(PresignedRequest::new( - parts.method, - parts.uri, - parts.headers, - ))) - } } /// The raw json response returned by [`get`](https://cloud.google.com/storage/docs/json_api/v1/objects/get) diff --git a/core/src/services/gdrive/backend.rs b/core/src/services/gdrive/backend.rs index 3e88e912ca6..50a2531a299 100644 --- a/core/src/services/gdrive/backend.rs +++ b/core/src/services/gdrive/backend.rs @@ -43,10 +43,10 @@ pub struct GdriveBackend { #[cfg_attr(target_arch = "wasm32", async_trait(?Send))] impl Accessor for GdriveBackend { type Reader = IncomingAsyncBody; - type BlockingReader = (); type Writer = oio::OneShotWriter; - type BlockingWriter = (); type Lister = oio::PageLister; + type BlockingReader = (); + type BlockingWriter = (); type BlockingLister = (); fn info(&self) -> AccessorInfo { @@ -76,17 +76,6 @@ impl Accessor for GdriveBackend { ma } - async fn stat(&self, path: &str, _args: OpStat) -> Result { - let resp = self.core.gdrive_stat(path).await?; - - if resp.status() != StatusCode::OK { - return Err(parse_error(resp).await?); - } - - let meta = self.parse_metadata(resp.into_body().bytes().await?)?; - Ok(RpStat::new(meta)) - } - async fn create_dir(&self, path: &str, _args: OpCreateDir) -> Result { let parent = self.core.ensure_parent_path(path).await?; @@ -108,6 +97,17 @@ impl Accessor for GdriveBackend { Ok(RpCreateDir::default()) } + async fn stat(&self, path: &str, _args: OpStat) -> Result { + let resp = self.core.gdrive_stat(path).await?; + + if resp.status() != StatusCode::OK { + return Err(parse_error(resp).await?); + } + + let meta = self.parse_metadata(resp.into_body().bytes().await?)?; + Ok(RpStat::new(meta)) + } + async fn read(&self, path: &str, _args: OpRead) -> Result<(RpRead, Self::Reader)> { let resp = self.core.gdrive_get(path).await?; @@ -160,28 +160,6 @@ impl Accessor for GdriveBackend { )) } - async fn rename(&self, from: &str, to: &str, _args: OpRename) -> Result { - let resp = self.core.gdrive_patch_metadata_request(from, to).await?; - - let status = resp.status(); - - match status { - StatusCode::OK => { - let body = resp.into_body().bytes().await?; - let meta = serde_json::from_slice::(&body) - .map_err(new_json_deserialize_error)?; - - let mut cache = self.core.path_cache.lock().await; - - cache.remove(&build_abs_path(&self.core.root, from)); - cache.insert(build_abs_path(&self.core.root, to), meta.id.clone()); - - Ok(RpRename::default()) - } - _ => Err(parse_error(resp).await?), - } - } - async fn delete(&self, path: &str, _: OpDelete) -> Result { let resp = self.core.gdrive_delete(path).await; if let Ok(resp) = resp { @@ -276,6 +254,28 @@ impl Accessor for GdriveBackend { _ => Err(parse_error(resp).await?), } } + + async fn rename(&self, from: &str, to: &str, _args: OpRename) -> Result { + let resp = self.core.gdrive_patch_metadata_request(from, to).await?; + + let status = resp.status(); + + match status { + StatusCode::OK => { + let body = resp.into_body().bytes().await?; + let meta = serde_json::from_slice::(&body) + .map_err(new_json_deserialize_error)?; + + let mut cache = self.core.path_cache.lock().await; + + cache.remove(&build_abs_path(&self.core.root, from)); + cache.insert(build_abs_path(&self.core.root, to), meta.id.clone()); + + Ok(RpRename::default()) + } + _ => Err(parse_error(resp).await?), + } + } } impl GdriveBackend { diff --git a/core/src/services/ghac/backend.rs b/core/src/services/ghac/backend.rs index 546ca7ec748..596189eb264 100644 --- 
a/core/src/services/ghac/backend.rs +++ b/core/src/services/ghac/backend.rs @@ -227,10 +227,10 @@ pub struct GhacBackend { #[async_trait] impl Accessor for GhacBackend { type Reader = IncomingAsyncBody; - type BlockingReader = (); type Writer = GhacWriter; - type BlockingWriter = (); type Lister = (); + type BlockingReader = (); + type BlockingWriter = (); type BlockingLister = (); fn info(&self) -> AccessorInfo { @@ -254,6 +254,34 @@ impl Accessor for GhacBackend { am } + async fn stat(&self, path: &str, _: OpStat) -> Result { + let req = self.ghac_query(path).await?; + + let resp = self.client.send(req).await?; + + let location = if resp.status() == StatusCode::OK { + let slc = resp.into_body().bytes().await?; + let query_resp: GhacQueryResponse = + serde_json::from_slice(&slc).map_err(new_json_deserialize_error)?; + query_resp.archive_location + } else { + return Err(parse_error(resp).await?); + }; + + let req = self.ghac_head_location(&location).await?; + let resp = self.client.send(req).await?; + + let status = resp.status(); + match status { + StatusCode::OK => { + let meta = parse_into_metadata(path, resp.headers())?; + + Ok(RpStat::new(meta)) + } + _ => Err(parse_error(resp).await?), + } + } + async fn read(&self, path: &str, args: OpRead) -> Result<(RpRead, Self::Reader)> { let req = self.ghac_query(path).await?; @@ -308,34 +336,6 @@ impl Accessor for GhacBackend { Ok((RpWrite::default(), GhacWriter::new(self.clone(), cache_id))) } - async fn stat(&self, path: &str, _: OpStat) -> Result { - let req = self.ghac_query(path).await?; - - let resp = self.client.send(req).await?; - - let location = if resp.status() == StatusCode::OK { - let slc = resp.into_body().bytes().await?; - let query_resp: GhacQueryResponse = - serde_json::from_slice(&slc).map_err(new_json_deserialize_error)?; - query_resp.archive_location - } else { - return Err(parse_error(resp).await?); - }; - - let req = self.ghac_head_location(&location).await?; - let resp = self.client.send(req).await?; - - let status = resp.status(); - match status { - StatusCode::OK => { - let meta = parse_into_metadata(path, resp.headers())?; - - Ok(RpStat::new(meta)) - } - _ => Err(parse_error(resp).await?), - } - } - async fn delete(&self, path: &str, _: OpDelete) -> Result { if self.api_token.is_empty() { return Err(Error::new( diff --git a/core/src/services/hdfs/backend.rs b/core/src/services/hdfs/backend.rs index 1359477d3c5..a03aa6a3dee 100644 --- a/core/src/services/hdfs/backend.rs +++ b/core/src/services/hdfs/backend.rs @@ -16,7 +16,8 @@ // under the License. 
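
(The hunk below is representative of the whole patch: nested `use` trees are
split into one item per line and regrouped std-first, then external crates,
then `crate`/`super` imports. This matches rustfmt's `imports_granularity =
"Item"` plus `group_imports = "StdExternalCrate"` options; the exact
rustfmt.toml settings are an assumption, not stated in the patch.)

    // Before: one nested use, two names on a single line.
    use std::fmt::{Debug, Formatter};

    // After: one item per `use`; adding or dropping an import now
    // touches exactly one line, which keeps future diffs minimal.
    use std::fmt::Debug;
    use std::fmt::Formatter;
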
use std::collections::HashMap; -use std::fmt::{Debug, Formatter}; +use std::fmt::Debug; +use std::fmt::Formatter; use std::io; use std::path::PathBuf; use std::sync::Arc; @@ -245,10 +246,10 @@ unsafe impl Sync for HdfsBackend {} #[async_trait] impl Accessor for HdfsBackend { type Reader = oio::FuturesReader; - type BlockingReader = oio::StdReader; type Writer = HdfsWriter; - type BlockingWriter = HdfsWriter; type Lister = Option; + type BlockingReader = oio::StdReader; + type BlockingWriter = HdfsWriter; type BlockingLister = Option; fn info(&self) -> AccessorInfo { @@ -286,6 +287,25 @@ impl Accessor for HdfsBackend { Ok(RpCreateDir::default()) } + async fn stat(&self, path: &str, _: OpStat) -> Result { + let p = build_rooted_abs_path(&self.root, path); + + let meta = self.client.metadata(&p).map_err(new_std_io_error)?; + + let mode = if meta.is_dir() { + EntryMode::DIR + } else if meta.is_file() { + EntryMode::FILE + } else { + EntryMode::Unknown + }; + let mut m = Metadata::new(mode); + m.set_content_length(meta.len()); + m.set_last_modified(meta.modified().into()); + + Ok(RpStat::new(m)) + } + async fn read(&self, path: &str, _: OpRead) -> Result<(RpRead, Self::Reader)> { let p = build_rooted_abs_path(&self.root, path); @@ -358,6 +378,52 @@ impl Accessor for HdfsBackend { )) } + async fn delete(&self, path: &str, _: OpDelete) -> Result { + let p = build_rooted_abs_path(&self.root, path); + + let meta = self.client.metadata(&p); + + if let Err(err) = meta { + return if err.kind() == io::ErrorKind::NotFound { + Ok(RpDelete::default()) + } else { + Err(new_std_io_error(err)) + }; + } + + // Safety: Err branch has been checked, it's OK to unwrap. + let meta = meta.ok().unwrap(); + + let result = if meta.is_dir() { + self.client.remove_dir(&p) + } else { + self.client.remove_file(&p) + }; + + result.map_err(new_std_io_error)?; + + Ok(RpDelete::default()) + } + + async fn list(&self, path: &str, _: OpList) -> Result<(RpList, Self::Lister)> { + let p = build_rooted_abs_path(&self.root, path); + + let f = match self.client.read_dir(&p) { + Ok(f) => f, + Err(e) => { + return if e.kind() == io::ErrorKind::NotFound { + Ok((RpList::default(), None)) + } else { + Err(new_std_io_error(e)) + } + } + }; + + let rd = HdfsLister::new(&self.root, f); + + Ok((RpList::default(), Some(rd))) + } + async fn rename(&self, from: &str, to: &str, _args: OpRename) -> Result { let from_path = build_rooted_abs_path(&self.root, from); self.client.metadata(&from_path).map_err(new_std_io_error)?; @@ -405,7 +471,15 @@ impl Accessor for HdfsBackend { Ok(RpRename::new()) } - async fn stat(&self, path: &str, _: OpStat) -> Result { + fn blocking_create_dir(&self, path: &str, _: OpCreateDir) -> Result { + let p = build_rooted_abs_path(&self.root, path); + + self.client.create_dir(&p).map_err(new_std_io_error)?; + + Ok(RpCreateDir::default()) + } + + fn blocking_stat(&self, path: &str, _: OpStat) -> Result { let p = build_rooted_abs_path(&self.root, path); let meta = self.client.metadata(&p).map_err(new_std_io_error)?; @@ -424,60 +498,6 @@ impl Accessor for HdfsBackend { Ok(RpStat::new(m)) } - async fn delete(&self, path: &str, _: OpDelete) -> Result { - let p = build_rooted_abs_path(&self.root, path); - - let meta = self.client.metadata(&p); - - if let Err(err) = meta { - return if err.kind() == io::ErrorKind::NotFound { - Ok(RpDelete::default()) - } else { - Err(new_std_io_error(err)) - }; - } - - // Safety: Err branch has been checked, it's OK to unwrap. 
- let meta = meta.ok().unwrap(); - - let result = if meta.is_dir() { - self.client.remove_dir(&p) - } else { - self.client.remove_file(&p) - }; - - result.map_err(new_std_io_error)?; - - Ok(RpDelete::default()) - } - - async fn list(&self, path: &str, _: OpList) -> Result<(RpList, Self::Lister)> { - let p = build_rooted_abs_path(&self.root, path); - - let f = match self.client.read_dir(&p) { - Ok(f) => f, - Err(e) => { - return if e.kind() == io::ErrorKind::NotFound { - Ok((RpList::default(), None)) - } else { - Err(new_std_io_error(e)) - } - } - }; - - let rd = HdfsLister::new(&self.root, f); - - Ok((RpList::default(), Some(rd))) - } - - fn blocking_create_dir(&self, path: &str, _: OpCreateDir) -> Result { - let p = build_rooted_abs_path(&self.root, path); - - self.client.create_dir(&p).map_err(new_std_io_error)?; - - Ok(RpCreateDir::default()) - } - fn blocking_read(&self, path: &str, _: OpRead) -> Result<(RpRead, Self::BlockingReader)> { let p = build_rooted_abs_path(&self.root, path); @@ -546,6 +566,52 @@ impl Accessor for HdfsBackend { )) } + fn blocking_delete(&self, path: &str, _: OpDelete) -> Result { + let p = build_rooted_abs_path(&self.root, path); + + let meta = self.client.metadata(&p); + + if let Err(err) = meta { + return if err.kind() == io::ErrorKind::NotFound { + Ok(RpDelete::default()) + } else { + Err(new_std_io_error(err)) + }; + } + + // Safety: Err branch has been checked, it's OK to unwrap. + let meta = meta.ok().unwrap(); + + let result = if meta.is_dir() { + self.client.remove_dir(&p) + } else { + self.client.remove_file(&p) + }; + + result.map_err(new_std_io_error)?; + + Ok(RpDelete::default()) + } + + fn blocking_list(&self, path: &str, _: OpList) -> Result<(RpList, Self::BlockingLister)> { + let p = build_rooted_abs_path(&self.root, path); + + let f = match self.client.read_dir(&p) { + Ok(f) => f, + Err(e) => { + return if e.kind() == io::ErrorKind::NotFound { + Ok((RpList::default(), None)) + } else { + Err(new_std_io_error(e)) + } + } + }; + + let rd = HdfsLister::new(&self.root, f); + + Ok((RpList::default(), Some(rd))) + } + fn blocking_rename(&self, from: &str, to: &str, _args: OpRename) -> Result { let from_path = build_rooted_abs_path(&self.root, from); self.client.metadata(&from_path).map_err(new_std_io_error)?; @@ -592,69 +658,4 @@ impl Accessor for HdfsBackend { Ok(RpRename::new()) } - - fn blocking_stat(&self, path: &str, _: OpStat) -> Result { - let p = build_rooted_abs_path(&self.root, path); - - let meta = self.client.metadata(&p).map_err(new_std_io_error)?; - - let mode = if meta.is_dir() { - EntryMode::DIR - } else if meta.is_file() { - EntryMode::FILE - } else { - EntryMode::Unknown - }; - let mut m = Metadata::new(mode); - m.set_content_length(meta.len()); - m.set_last_modified(meta.modified().into()); - - Ok(RpStat::new(m)) - } - - fn blocking_delete(&self, path: &str, _: OpDelete) -> Result { - let p = build_rooted_abs_path(&self.root, path); - - let meta = self.client.metadata(&p); - - if let Err(err) = meta { - return if err.kind() == io::ErrorKind::NotFound { - Ok(RpDelete::default()) - } else { - Err(new_std_io_error(err)) - }; - } - - // Safety: Err branch has been checked, it's OK to unwrap. 
- let meta = meta.ok().unwrap(); - - let result = if meta.is_dir() { - self.client.remove_dir(&p) - } else { - self.client.remove_file(&p) - }; - - result.map_err(new_std_io_error)?; - - Ok(RpDelete::default()) - } - - fn blocking_list(&self, path: &str, _: OpList) -> Result<(RpList, Self::BlockingLister)> { - let p = build_rooted_abs_path(&self.root, path); - - let f = match self.client.read_dir(&p) { - Ok(f) => f, - Err(e) => { - return if e.kind() == io::ErrorKind::NotFound { - Ok((RpList::default(), None)) - } else { - Err(new_std_io_error(e)) - } - } - }; - - let rd = HdfsLister::new(&self.root, f); - - Ok((RpList::default(), Some(rd))) - } } diff --git a/core/src/services/hdfs/writer.rs b/core/src/services/hdfs/writer.rs index a2ddd5c0021..6c77097d842 100644 --- a/core/src/services/hdfs/writer.rs +++ b/core/src/services/hdfs/writer.rs @@ -15,15 +15,18 @@ // specific language governing permissions and limitations // under the License. -use futures::future::BoxFuture; use std::io::Write; use std::pin::Pin; use std::sync::Arc; +use std::task::ready; +use std::task::Context; use std::task::Poll; -use std::task::{ready, Context}; use async_trait::async_trait; -use futures::{AsyncWrite, AsyncWriteExt, FutureExt}; +use futures::future::BoxFuture; +use futures::AsyncWrite; +use futures::AsyncWriteExt; +use futures::FutureExt; use crate::raw::*; use crate::*; diff --git a/core/src/services/http/backend.rs b/core/src/services/http/backend.rs index f295333ba23..0a907e0ae80 100644 --- a/core/src/services/http/backend.rs +++ b/core/src/services/http/backend.rs @@ -224,10 +224,10 @@ impl Debug for HttpBackend { #[async_trait] impl Accessor for HttpBackend { type Reader = IncomingAsyncBody; - type BlockingReader = (); type Writer = (); - type BlockingWriter = (); type Lister = (); + type BlockingReader = (); + type BlockingWriter = (); type BlockingLister = (); fn info(&self) -> AccessorInfo { @@ -251,6 +251,27 @@ impl Accessor for HttpBackend { ma } + async fn stat(&self, path: &str, args: OpStat) -> Result { + // Stat root always returns a DIR. + if path == "/" { + return Ok(RpStat::new(Metadata::new(EntryMode::DIR))); + } + + let resp = self.http_head(path, &args).await?; + + let status = resp.status(); + + match status { + StatusCode::OK => parse_into_metadata(path, resp.headers()).map(RpStat::new), + // HTTP Server like nginx could return FORBIDDEN if auto-index + // is not enabled, we should ignore them. + StatusCode::NOT_FOUND | StatusCode::FORBIDDEN if path.ends_with('/') => { + Ok(RpStat::new(Metadata::new(EntryMode::DIR))) + } + _ => Err(parse_error(resp).await?), + } + } + async fn read(&self, path: &str, args: OpRead) -> Result<(RpRead, Self::Reader)> { let resp = self.http_get(path, &args).await?; @@ -272,27 +293,6 @@ impl Accessor for HttpBackend { _ => Err(parse_error(resp).await?), } } - - async fn stat(&self, path: &str, args: OpStat) -> Result { - // Stat root always returns a DIR. - if path == "/" { - return Ok(RpStat::new(Metadata::new(EntryMode::DIR))); - } - - let resp = self.http_head(path, &args).await?; - - let status = resp.status(); - - match status { - StatusCode::OK => parse_into_metadata(path, resp.headers()).map(RpStat::new), - // HTTP Server like nginx could return FORBIDDEN if auto-index - // is not enabled, we should ignore them. 
- StatusCode::NOT_FOUND | StatusCode::FORBIDDEN if path.ends_with('/') => { - Ok(RpStat::new(Metadata::new(EntryMode::DIR))) - } - _ => Err(parse_error(resp).await?), - } - } } impl HttpBackend { diff --git a/core/src/services/huggingface/backend.rs b/core/src/services/huggingface/backend.rs index 70e6d39b512..defbc7b6086 100644 --- a/core/src/services/huggingface/backend.rs +++ b/core/src/services/huggingface/backend.rs @@ -244,10 +244,10 @@ pub struct HuggingfaceBackend { #[async_trait] impl Accessor for HuggingfaceBackend { type Reader = IncomingAsyncBody; - type BlockingReader = (); type Writer = (); - type BlockingWriter = (); type Lister = oio::PageLister; + type BlockingReader = (); + type BlockingWriter = (); type BlockingLister = (); fn info(&self) -> AccessorInfo { @@ -268,28 +268,6 @@ impl Accessor for HuggingfaceBackend { am } - async fn read(&self, path: &str, args: OpRead) -> Result<(RpRead, Self::Reader)> { - let resp = self.core.hf_resolve(path, args).await?; - - let status = resp.status(); - - match status { - StatusCode::OK | StatusCode::PARTIAL_CONTENT => { - let size = parse_content_length(resp.headers())?; - let range = parse_content_range(resp.headers())?; - Ok(( - RpRead::new().with_size(size).with_range(range), - resp.into_body(), - )) - } - StatusCode::RANGE_NOT_SATISFIABLE => { - resp.into_body().consume().await?; - Ok((RpRead::new().with_size(Some(0)), IncomingAsyncBody::empty())) - } - _ => Err(parse_error(resp).await?), - } - } - async fn stat(&self, path: &str, _: OpStat) -> Result { // Stat root always returns a DIR. if path == "/" { @@ -333,6 +311,28 @@ impl Accessor for HuggingfaceBackend { } } + async fn read(&self, path: &str, args: OpRead) -> Result<(RpRead, Self::Reader)> { + let resp = self.core.hf_resolve(path, args).await?; + + let status = resp.status(); + + match status { + StatusCode::OK | StatusCode::PARTIAL_CONTENT => { + let size = parse_content_length(resp.headers())?; + let range = parse_content_range(resp.headers())?; + Ok(( + RpRead::new().with_size(size).with_range(range), + resp.into_body(), + )) + } + StatusCode::RANGE_NOT_SATISFIABLE => { + resp.into_body().consume().await?; + Ok((RpRead::new().with_size(Some(0)), IncomingAsyncBody::empty())) + } + _ => Err(parse_error(resp).await?), + } + } + async fn list(&self, path: &str, args: OpList) -> Result<(RpList, Self::Lister)> { let l = HuggingfaceLister::new(self.core.clone(), path.to_string(), args.recursive()); diff --git a/core/src/services/ipfs/backend.rs b/core/src/services/ipfs/backend.rs index 081a56e73d9..8e990e31381 100644 --- a/core/src/services/ipfs/backend.rs +++ b/core/src/services/ipfs/backend.rs @@ -162,10 +162,10 @@ impl Debug for IpfsBackend { #[async_trait] impl Accessor for IpfsBackend { type Reader = IncomingAsyncBody; - type BlockingReader = (); type Writer = (); - type BlockingWriter = (); type Lister = oio::PageLister; + type BlockingReader = (); + type BlockingWriter = (); type BlockingLister = (); fn info(&self) -> AccessorInfo { @@ -187,17 +187,6 @@ impl Accessor for IpfsBackend { ma } - async fn read(&self, path: &str, args: OpRead) -> Result<(RpRead, Self::Reader)> { - let resp = self.ipfs_get(path, args.range()).await?; - - let status = resp.status(); - - match status { - StatusCode::OK | StatusCode::PARTIAL_CONTENT => Ok((RpRead::new(), resp.into_body())), - _ => Err(parse_error(resp).await?), - } - } - /// IPFS's stat behavior highly depends on its implementation. 
/// /// Based on IPFS [Path Gateway Specification](https://github.com/ipfs/specs/blob/main/http-gateways/PATH_GATEWAY.md), @@ -349,6 +338,17 @@ impl Accessor for IpfsBackend { } } + async fn read(&self, path: &str, args: OpRead) -> Result<(RpRead, Self::Reader)> { + let resp = self.ipfs_get(path, args.range()).await?; + + let status = resp.status(); + + match status { + StatusCode::OK | StatusCode::PARTIAL_CONTENT => Ok((RpRead::new(), resp.into_body())), + _ => Err(parse_error(resp).await?), + } + } + async fn list(&self, path: &str, _: OpList) -> Result<(RpList, Self::Lister)> { let l = DirStream::new(Arc::new(self.clone()), path); Ok((RpList::default(), oio::PageLister::new(l))) diff --git a/core/src/services/ipmfs/backend.rs b/core/src/services/ipmfs/backend.rs index 0433028f0cb..a6565a63225 100644 --- a/core/src/services/ipmfs/backend.rs +++ b/core/src/services/ipmfs/backend.rs @@ -63,10 +63,10 @@ impl IpmfsBackend { #[async_trait] impl Accessor for IpmfsBackend { type Reader = IncomingAsyncBody; - type BlockingReader = (); type Writer = oio::OneShotWriter; - type BlockingWriter = (); type Lister = oio::PageLister; + type BlockingReader = (); + type BlockingWriter = (); type BlockingLister = (); fn info(&self) -> AccessorInfo { @@ -105,24 +105,6 @@ impl Accessor for IpmfsBackend { } } - async fn read(&self, path: &str, args: OpRead) -> Result<(RpRead, Self::Reader)> { - let resp = self.ipmfs_read(path, args.range()).await?; - - let status = resp.status(); - - match status { - StatusCode::OK => Ok((RpRead::new(), resp.into_body())), - _ => Err(parse_error(resp).await?), - } - } - - async fn write(&self, path: &str, _: OpWrite) -> Result<(RpWrite, Self::Writer)> { - Ok(( - RpWrite::default(), - oio::OneShotWriter::new(IpmfsWriter::new(self.clone(), path.to_string())), - )) - } - async fn stat(&self, path: &str, _: OpStat) -> Result { // Stat root always returns a DIR. 
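        // Answering "/" locally also saves the usual stat round trip to the
        // server for a path that is by definition a directory.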
if path == "/" { @@ -155,6 +137,24 @@ impl Accessor for IpmfsBackend { } } + async fn read(&self, path: &str, args: OpRead) -> Result<(RpRead, Self::Reader)> { + let resp = self.ipmfs_read(path, args.range()).await?; + + let status = resp.status(); + + match status { + StatusCode::OK => Ok((RpRead::new(), resp.into_body())), + _ => Err(parse_error(resp).await?), + } + } + + async fn write(&self, path: &str, _: OpWrite) -> Result<(RpWrite, Self::Writer)> { + Ok(( + RpWrite::default(), + oio::OneShotWriter::new(IpmfsWriter::new(self.clone(), path.to_string())), + )) + } + async fn delete(&self, path: &str, _: OpDelete) -> Result { let resp = self.ipmfs_rm(path).await?; diff --git a/core/src/services/memcached/backend.rs b/core/src/services/memcached/backend.rs index 60250f1a985..f0d48e4a917 100644 --- a/core/src/services/memcached/backend.rs +++ b/core/src/services/memcached/backend.rs @@ -18,16 +18,17 @@ use std::collections::HashMap; use std::time::Duration; -use super::ascii; -use crate::raw::adapters::kv; -use crate::raw::*; -use crate::*; use async_trait::async_trait; use bb8::RunError; use serde::Deserialize; use tokio::net::TcpStream; use tokio::sync::OnceCell; +use super::ascii; +use crate::raw::adapters::kv; +use crate::raw::*; +use crate::*; + /// Config for MemCached services support #[derive(Default, Deserialize, Clone)] #[serde(default)] diff --git a/core/src/services/obs/backend.rs b/core/src/services/obs/backend.rs index ce562dd51b1..d00fbe423a4 100644 --- a/core/src/services/obs/backend.rs +++ b/core/src/services/obs/backend.rs @@ -249,10 +249,10 @@ pub struct ObsBackend { #[async_trait] impl Accessor for ObsBackend { type Reader = IncomingAsyncBody; - type BlockingReader = (); type Writer = ObsWriters; - type BlockingWriter = (); type Lister = oio::PageLister; + type BlockingReader = (); + type BlockingWriter = (); type BlockingLister = (); fn info(&self) -> AccessorInfo { @@ -307,25 +307,19 @@ impl Accessor for ObsBackend { am } - async fn presign(&self, path: &str, args: OpPresign) -> Result { - let mut req = match args.operation() { - PresignOperation::Stat(v) => self.core.obs_head_object_request(path, v)?, - PresignOperation::Read(v) => self.core.obs_get_object_request(path, v)?, - PresignOperation::Write(v) => { - self.core - .obs_put_object_request(path, None, v, AsyncBody::Empty)? - } - }; - self.core.sign_query(&mut req, args.expire()).await?; + async fn stat(&self, path: &str, args: OpStat) -> Result { + let resp = self.core.obs_head_object(path, &args).await?; - // We don't need this request anymore, consume it directly. - let (parts, _) = req.into_parts(); + let status = resp.status(); - Ok(RpPresign::new(PresignedRequest::new( - parts.method, - parts.uri, - parts.headers, - ))) + // The response is very similar to azblob. 
+ match status { + StatusCode::OK => parse_into_metadata(path, resp.headers()).map(RpStat::new), + StatusCode::NOT_FOUND if path.ends_with('/') => { + Ok(RpStat::new(Metadata::new(EntryMode::DIR))) + } + _ => Err(parse_error(resp).await?), + } } async fn read(&self, path: &str, args: OpRead) -> Result<(RpRead, Self::Reader)> { @@ -362,50 +356,56 @@ impl Accessor for ObsBackend { Ok((RpWrite::default(), w)) } - async fn copy(&self, from: &str, to: &str, _args: OpCopy) -> Result { - let resp = self.core.obs_copy_object(from, to).await?; + async fn delete(&self, path: &str, _: OpDelete) -> Result { + let resp = self.core.obs_delete_object(path).await?; let status = resp.status(); match status { - StatusCode::OK => { - resp.into_body().consume().await?; - Ok(RpCopy::default()) + StatusCode::NO_CONTENT | StatusCode::ACCEPTED | StatusCode::NOT_FOUND => { + Ok(RpDelete::default()) } _ => Err(parse_error(resp).await?), } } - async fn stat(&self, path: &str, args: OpStat) -> Result { - let resp = self.core.obs_head_object(path, &args).await?; - - let status = resp.status(); - - // The response is very similar to azblob. - match status { - StatusCode::OK => parse_into_metadata(path, resp.headers()).map(RpStat::new), - StatusCode::NOT_FOUND if path.ends_with('/') => { - Ok(RpStat::new(Metadata::new(EntryMode::DIR))) - } - _ => Err(parse_error(resp).await?), - } + async fn list(&self, path: &str, args: OpList) -> Result<(RpList, Self::Lister)> { + let l = ObsLister::new(self.core.clone(), path, args.recursive(), args.limit()); + Ok((RpList::default(), oio::PageLister::new(l))) } - async fn delete(&self, path: &str, _: OpDelete) -> Result { - let resp = self.core.obs_delete_object(path).await?; + async fn copy(&self, from: &str, to: &str, _args: OpCopy) -> Result { + let resp = self.core.obs_copy_object(from, to).await?; let status = resp.status(); match status { - StatusCode::NO_CONTENT | StatusCode::ACCEPTED | StatusCode::NOT_FOUND => { - Ok(RpDelete::default()) + StatusCode::OK => { + resp.into_body().consume().await?; + Ok(RpCopy::default()) } _ => Err(parse_error(resp).await?), } } - async fn list(&self, path: &str, args: OpList) -> Result<(RpList, Self::Lister)> { - let l = ObsLister::new(self.core.clone(), path, args.recursive(), args.limit()); - Ok((RpList::default(), oio::PageLister::new(l))) + async fn presign(&self, path: &str, args: OpPresign) -> Result { + let mut req = match args.operation() { + PresignOperation::Stat(v) => self.core.obs_head_object_request(path, v)?, + PresignOperation::Read(v) => self.core.obs_get_object_request(path, v)?, + PresignOperation::Write(v) => { + self.core + .obs_put_object_request(path, None, v, AsyncBody::Empty)? + } + }; + self.core.sign_query(&mut req, args.expire()).await?; + + // We don't need this request anymore, consume it directly. 
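        // Only the signed method, URI and headers are kept; the caller can
        // replay them later with any HTTP client, as long as it happens
        // within the `args.expire()` window.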
+ let (parts, _) = req.into_parts(); + + Ok(RpPresign::new(PresignedRequest::new( + parts.method, + parts.uri, + parts.headers, + ))) } } diff --git a/core/src/services/onedrive/backend.rs b/core/src/services/onedrive/backend.rs index f491c98106e..b63288c38c1 100644 --- a/core/src/services/onedrive/backend.rs +++ b/core/src/services/onedrive/backend.rs @@ -63,10 +63,10 @@ impl Debug for OnedriveBackend { #[async_trait] impl Accessor for OnedriveBackend { type Reader = IncomingAsyncBody; - type BlockingReader = (); type Writer = oio::OneShotWriter; - type BlockingWriter = (); type Lister = oio::PageLister; + type BlockingReader = (); + type BlockingWriter = (); type BlockingLister = (); fn info(&self) -> AccessorInfo { @@ -86,31 +86,28 @@ impl Accessor for OnedriveBackend { ma } - async fn read(&self, path: &str, _args: OpRead) -> Result<(RpRead, Self::Reader)> { - let resp = self.onedrive_get_content(path).await?; + async fn create_dir(&self, path: &str, _: OpCreateDir) -> Result { + let path = build_rooted_abs_path(&self.root, path); + let path_before_last_slash = get_parent(&path); + let encoded_path = percent_encode_path(path_before_last_slash); - let status = resp.status(); + let uri = format!( + "https://graph.microsoft.com/v1.0/me/drive/root:{}:/children", + encoded_path + ); - match status { - StatusCode::OK | StatusCode::PARTIAL_CONTENT => { - let size = parse_content_length(resp.headers())?; - let range = parse_content_range(resp.headers())?; - Ok(( - RpRead::new().with_size(size).with_range(range), - resp.into_body(), - )) - } - _ => Err(parse_error(resp).await?), - } - } + let folder_name = get_basename(&path); + let folder_name = folder_name.strip_suffix('/').unwrap_or(folder_name); - async fn write(&self, path: &str, args: OpWrite) -> Result<(RpWrite, Self::Writer)> { - let path = build_rooted_abs_path(&self.root, path); + let body = CreateDirPayload::new(folder_name.to_string()); - Ok(( - RpWrite::default(), - oio::OneShotWriter::new(OneDriveWriter::new(self.clone(), args, path)), - )) + let response = self.onedrive_create_dir(&uri, body).await?; + + let status = response.status(); + match status { + StatusCode::CREATED | StatusCode::OK => Ok(RpCreateDir::default()), + _ => Err(parse_error(response).await?), + } } async fn stat(&self, path: &str, _: OpStat) -> Result { @@ -152,6 +149,33 @@ impl Accessor for OnedriveBackend { } } + async fn read(&self, path: &str, _args: OpRead) -> Result<(RpRead, Self::Reader)> { + let resp = self.onedrive_get_content(path).await?; + + let status = resp.status(); + + match status { + StatusCode::OK | StatusCode::PARTIAL_CONTENT => { + let size = parse_content_length(resp.headers())?; + let range = parse_content_range(resp.headers())?; + Ok(( + RpRead::new().with_size(size).with_range(range), + resp.into_body(), + )) + } + _ => Err(parse_error(resp).await?), + } + } + + async fn write(&self, path: &str, args: OpWrite) -> Result<(RpWrite, Self::Writer)> { + let path = build_rooted_abs_path(&self.root, path); + + Ok(( + RpWrite::default(), + oio::OneShotWriter::new(OneDriveWriter::new(self.clone(), args, path)), + )) + } + /// Delete operation /// Documentation: https://learn.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_delete?view=odsp-graph-online async fn delete(&self, path: &str, _: OpDelete) -> Result { @@ -170,30 +194,6 @@ impl Accessor for OnedriveBackend { Ok((RpList::default(), oio::PageLister::new(l))) } - - async fn create_dir(&self, path: &str, _: OpCreateDir) -> Result { - let path = 
build_rooted_abs_path(&self.root, path); - let path_before_last_slash = get_parent(&path); - let encoded_path = percent_encode_path(path_before_last_slash); - - let uri = format!( - "https://graph.microsoft.com/v1.0/me/drive/root:{}:/children", - encoded_path - ); - - let folder_name = get_basename(&path); - let folder_name = folder_name.strip_suffix('/').unwrap_or(folder_name); - - let body = CreateDirPayload::new(folder_name.to_string()); - - let response = self.onedrive_create_dir(&uri, body).await?; - - let status = response.status(); - match status { - StatusCode::CREATED | StatusCode::OK => Ok(RpCreateDir::default()), - _ => Err(parse_error(response).await?), - } - } } impl OnedriveBackend { diff --git a/core/src/services/oss/backend.rs b/core/src/services/oss/backend.rs index 6530453bd3c..6e109e13217 100644 --- a/core/src/services/oss/backend.rs +++ b/core/src/services/oss/backend.rs @@ -377,10 +377,10 @@ pub struct OssBackend { #[async_trait] impl Accessor for OssBackend { type Reader = IncomingAsyncBody; - type BlockingReader = (); type Writer = OssWriters; - type BlockingWriter = (); type Lister = oio::PageLister; + type BlockingReader = (); + type BlockingWriter = (); type BlockingLister = (); fn info(&self) -> AccessorInfo { @@ -441,6 +441,20 @@ impl Accessor for OssBackend { am } + async fn stat(&self, path: &str, args: OpStat) -> Result { + let resp = self + .core + .oss_head_object(path, args.if_match(), args.if_none_match()) + .await?; + + let status = resp.status(); + + match status { + StatusCode::OK => parse_into_metadata(path, resp.headers()).map(RpStat::new), + _ => Err(parse_error(resp).await?), + } + } + async fn read(&self, path: &str, args: OpRead) -> Result<(RpRead, Self::Reader)> { let resp = self .core @@ -484,33 +498,6 @@ impl Accessor for OssBackend { Ok((RpWrite::default(), w)) } - async fn copy(&self, from: &str, to: &str, _args: OpCopy) -> Result { - let resp = self.core.oss_copy_object(from, to).await?; - let status = resp.status(); - - match status { - StatusCode::OK => { - resp.into_body().consume().await?; - Ok(RpCopy::default()) - } - _ => Err(parse_error(resp).await?), - } - } - - async fn stat(&self, path: &str, args: OpStat) -> Result { - let resp = self - .core - .oss_head_object(path, args.if_match(), args.if_none_match()) - .await?; - - let status = resp.status(); - - match status { - StatusCode::OK => parse_into_metadata(path, resp.headers()).map(RpStat::new), - _ => Err(parse_error(resp).await?), - } - } - async fn delete(&self, path: &str, _: OpDelete) -> Result { let resp = self.core.oss_delete_object(path).await?; let status = resp.status(); @@ -534,6 +521,19 @@ impl Accessor for OssBackend { Ok((RpList::default(), oio::PageLister::new(l))) } + async fn copy(&self, from: &str, to: &str, _args: OpCopy) -> Result { + let resp = self.core.oss_copy_object(from, to).await?; + let status = resp.status(); + + match status { + StatusCode::OK => { + resp.into_body().consume().await?; + Ok(RpCopy::default()) + } + _ => Err(parse_error(resp).await?), + } + } + async fn presign(&self, path: &str, args: OpPresign) -> Result { // We will not send this request out, just for signing. let mut req = match args.operation() { diff --git a/core/src/services/pcloud/backend.rs b/core/src/services/pcloud/backend.rs index 9338fb4893d..8283ae88461 100644 --- a/core/src/services/pcloud/backend.rs +++ b/core/src/services/pcloud/backend.rs @@ -15,15 +15,16 @@ // specific language governing permissions and limitations // under the License. 
-use async_trait::async_trait; -use http::StatusCode; -use log::debug; -use serde::Deserialize; use std::collections::HashMap; use std::fmt::Debug; use std::fmt::Formatter; use std::sync::Arc; +use async_trait::async_trait; +use http::StatusCode; +use log::debug; +use serde::Deserialize; + use super::core::*; use super::error::parse_error; use super::error::PcloudError; @@ -228,15 +229,10 @@ pub struct PcloudBackend { #[async_trait] impl Accessor for PcloudBackend { type Reader = IncomingAsyncBody; - - type BlockingReader = (); - type Writer = PcloudWriters; - - type BlockingWriter = (); - type Lister = oio::PageLister; - + type BlockingReader = (); + type BlockingWriter = (); type BlockingLister = (); fn info(&self) -> AccessorInfo { @@ -270,61 +266,30 @@ impl Accessor for PcloudBackend { Ok(RpCreateDir::default()) } - async fn rename(&self, from: &str, to: &str, _args: OpRename) -> Result { - self.core.ensure_dir_exists(to).await?; - - let resp = if from.ends_with('/') { - self.core.rename_folder(from, to).await? - } else { - self.core.rename_file(from, to).await? - }; + async fn stat(&self, path: &str, _args: OpStat) -> Result { + let resp = self.core.stat(path).await?; let status = resp.status(); match status { StatusCode::OK => { let bs = resp.into_body().bytes().await?; - let resp: PcloudError = + let resp: StatResponse = serde_json::from_slice(&bs).map_err(new_json_deserialize_error)?; let result = resp.result; - if result == 2009 || result == 2010 || result == 2055 || result == 2002 { + if result == 2010 || result == 2055 || result == 2002 { return Err(Error::new(ErrorKind::NotFound, &format!("{resp:?}"))); } if result != 0 { return Err(Error::new(ErrorKind::Unexpected, &format!("{resp:?}"))); } - Ok(RpRename::default()) - } - _ => Err(parse_error(resp).await?), - } - } - - async fn copy(&self, from: &str, to: &str, _args: OpCopy) -> Result { - self.core.ensure_dir_exists(to).await?; - - let resp = if from.ends_with('/') { - self.core.copy_folder(from, to).await? - } else { - self.core.copy_file(from, to).await? 
- }; - - let status = resp.status(); - - match status { - StatusCode::OK => { - let bs = resp.into_body().bytes().await?; - let resp: PcloudError = - serde_json::from_slice(&bs).map_err(new_json_deserialize_error)?; - let result = resp.result; - if result == 2009 || result == 2010 || result == 2055 || result == 2002 { - return Err(Error::new(ErrorKind::NotFound, &format!("{resp:?}"))); - } - if result != 0 { - return Err(Error::new(ErrorKind::Unexpected, &format!("{resp:?}"))); + if let Some(md) = resp.metadata { + let md = parse_stat_metadata(md); + return md.map(RpStat::new); } - Ok(RpCopy::default()) + Err(Error::new(ErrorKind::Unexpected, &format!("{resp:?}"))) } _ => Err(parse_error(resp).await?), } @@ -350,35 +315,6 @@ impl Accessor for PcloudBackend { } } - async fn stat(&self, path: &str, _args: OpStat) -> Result { - let resp = self.core.stat(path).await?; - - let status = resp.status(); - - match status { - StatusCode::OK => { - let bs = resp.into_body().bytes().await?; - let resp: StatResponse = - serde_json::from_slice(&bs).map_err(new_json_deserialize_error)?; - let result = resp.result; - if result == 2010 || result == 2055 || result == 2002 { - return Err(Error::new(ErrorKind::NotFound, &format!("{resp:?}"))); - } - if result != 0 { - return Err(Error::new(ErrorKind::Unexpected, &format!("{resp:?}"))); - } - - if let Some(md) = resp.metadata { - let md = parse_stat_metadata(md); - return md.map(RpStat::new); - } - - Err(Error::new(ErrorKind::Unexpected, &format!("{resp:?}"))) - } - _ => Err(parse_error(resp).await?), - } - } - async fn write(&self, path: &str, _args: OpWrite) -> Result<(RpWrite, Self::Writer)> { let writer = PcloudWriter::new(self.core.clone(), path.to_string()); @@ -418,4 +354,64 @@ impl Accessor for PcloudBackend { let l = PcloudLister::new(self.core.clone(), path); Ok((RpList::default(), oio::PageLister::new(l))) } + + async fn copy(&self, from: &str, to: &str, _args: OpCopy) -> Result { + self.core.ensure_dir_exists(to).await?; + + let resp = if from.ends_with('/') { + self.core.copy_folder(from, to).await? + } else { + self.core.copy_file(from, to).await? + }; + + let status = resp.status(); + + match status { + StatusCode::OK => { + let bs = resp.into_body().bytes().await?; + let resp: PcloudError = + serde_json::from_slice(&bs).map_err(new_json_deserialize_error)?; + let result = resp.result; + if result == 2009 || result == 2010 || result == 2055 || result == 2002 { + return Err(Error::new(ErrorKind::NotFound, &format!("{resp:?}"))); + } + if result != 0 { + return Err(Error::new(ErrorKind::Unexpected, &format!("{resp:?}"))); + } + + Ok(RpCopy::default()) + } + _ => Err(parse_error(resp).await?), + } + } + + async fn rename(&self, from: &str, to: &str, _args: OpRename) -> Result { + self.core.ensure_dir_exists(to).await?; + + let resp = if from.ends_with('/') { + self.core.rename_folder(from, to).await? + } else { + self.core.rename_file(from, to).await? 
+ }; + + let status = resp.status(); + + match status { + StatusCode::OK => { + let bs = resp.into_body().bytes().await?; + let resp: PcloudError = + serde_json::from_slice(&bs).map_err(new_json_deserialize_error)?; + let result = resp.result; + if result == 2009 || result == 2010 || result == 2055 || result == 2002 { + return Err(Error::new(ErrorKind::NotFound, &format!("{resp:?}"))); + } + if result != 0 { + return Err(Error::new(ErrorKind::Unexpected, &format!("{resp:?}"))); + } + + Ok(RpRename::default()) + } + _ => Err(parse_error(resp).await?), + } + } } diff --git a/core/src/services/pcloud/core.rs b/core/src/services/pcloud/core.rs index cdbf46c36cd..d71d752aeee 100644 --- a/core/src/services/pcloud/core.rs +++ b/core/src/services/pcloud/core.rs @@ -15,15 +15,19 @@ // specific language governing permissions and limitations // under the License. -use std::fmt::{Debug, Formatter}; +use std::fmt::Debug; +use std::fmt::Formatter; -use crate::raw::*; -use crate::*; use bytes::Bytes; -use http::{Request, Response, StatusCode}; +use http::Request; +use http::Response; +use http::StatusCode; use serde::Deserialize; -use super::error::{parse_error, PcloudError}; +use super::error::parse_error; +use super::error::PcloudError; +use crate::raw::*; +use crate::*; #[derive(Clone)] pub struct PcloudCore { diff --git a/core/src/services/pcloud/writer.rs b/core/src/services/pcloud/writer.rs index 9bd47bb023f..108080ff172 100644 --- a/core/src/services/pcloud/writer.rs +++ b/core/src/services/pcloud/writer.rs @@ -20,12 +20,12 @@ use std::sync::Arc; use async_trait::async_trait; use http::StatusCode; +use super::core::PcloudCore; +use super::error::parse_error; +use super::error::PcloudError; use crate::raw::*; use crate::*; -use super::core::PcloudCore; -use super::error::{parse_error, PcloudError}; - pub type PcloudWriters = oio::OneShotWriter; pub struct PcloudWriter { diff --git a/core/src/services/rocksdb/backend.rs b/core/src/services/rocksdb/backend.rs index d4fec8b24e2..2787ca79e51 100644 --- a/core/src/services/rocksdb/backend.rs +++ b/core/src/services/rocksdb/backend.rs @@ -15,13 +15,14 @@ // specific language governing permissions and limitations // under the License. 
-use async_trait::async_trait; -use rocksdb::DB; -use serde::Deserialize; use std::collections::HashMap; use std::fmt::Debug; use std::fmt::Formatter; use std::sync::Arc; + +use async_trait::async_trait; +use rocksdb::DB; +use serde::Deserialize; use tokio::task; use crate::raw::adapters::kv; diff --git a/core/src/services/s3/backend.rs b/core/src/services/s3/backend.rs index 9d996ca1c21..94acf49c37d 100644 --- a/core/src/services/s3/backend.rs +++ b/core/src/services/s3/backend.rs @@ -983,10 +983,10 @@ pub struct S3Backend { #[cfg_attr(target_arch = "wasm32", async_trait(?Send))] impl Accessor for S3Backend { type Reader = IncomingAsyncBody; - type BlockingReader = (); type Writer = S3Writers; - type BlockingWriter = (); type Lister = oio::PageLister; + type BlockingReader = (); + type BlockingWriter = (); type BlockingLister = (); fn info(&self) -> AccessorInfo { @@ -1051,6 +1051,17 @@ impl Accessor for S3Backend { am } + async fn stat(&self, path: &str, args: OpStat) -> Result { + let resp = self.core.s3_head_object(path, args).await?; + + let status = resp.status(); + + match status { + StatusCode::OK => parse_into_metadata(path, resp.headers()).map(RpStat::new), + _ => Err(parse_error(resp).await?), + } + } + async fn read(&self, path: &str, args: OpRead) -> Result<(RpRead, Self::Reader)> { let resp = self.core.s3_get_object(path, args).await?; @@ -1081,34 +1092,6 @@ impl Accessor for S3Backend { Ok((RpWrite::default(), w)) } - async fn copy(&self, from: &str, to: &str, _args: OpCopy) -> Result { - let resp = self.core.s3_copy_object(from, to).await?; - - let status = resp.status(); - - match status { - StatusCode::OK => { - // According to the documentation, when using copy_object, a 200 error may occur and we need to detect it. - // https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html#API_CopyObject_RequestSyntax - resp.into_body().consume().await?; - - Ok(RpCopy::default()) - } - _ => Err(parse_error(resp).await?), - } - } - - async fn stat(&self, path: &str, args: OpStat) -> Result { - let resp = self.core.s3_head_object(path, args).await?; - - let status = resp.status(); - - match status { - StatusCode::OK => parse_into_metadata(path, resp.headers()).map(RpStat::new), - _ => Err(parse_error(resp).await?), - } - } - async fn delete(&self, path: &str, _: OpDelete) -> Result { let resp = self.core.s3_delete_object(path).await?; @@ -1135,6 +1118,23 @@ impl Accessor for S3Backend { Ok((RpList::default(), oio::PageLister::new(l))) } + async fn copy(&self, from: &str, to: &str, _args: OpCopy) -> Result { + let resp = self.core.s3_copy_object(from, to).await?; + + let status = resp.status(); + + match status { + StatusCode::OK => { + // According to the documentation, when using copy_object, a 200 error may occur and we need to detect it. + // https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html#API_CopyObject_RequestSyntax + resp.into_body().consume().await?; + + Ok(RpCopy::default()) + } + _ => Err(parse_error(resp).await?), + } + } + async fn presign(&self, path: &str, args: OpPresign) -> Result { let (expire, op) = args.into_parts(); diff --git a/core/src/services/seafile/backend.rs b/core/src/services/seafile/backend.rs index 34641c567d0..5bf5879c14e 100644 --- a/core/src/services/seafile/backend.rs +++ b/core/src/services/seafile/backend.rs @@ -15,14 +15,15 @@ // specific language governing permissions and limitations // under the License. 
-use async_trait::async_trait; -use http::StatusCode; -use log::debug; -use serde::Deserialize; use std::collections::HashMap; use std::fmt::Debug; use std::fmt::Formatter; use std::sync::Arc; + +use async_trait::async_trait; +use http::StatusCode; +use log::debug; +use serde::Deserialize; use tokio::sync::RwLock; use super::core::parse_dir_detail; @@ -256,15 +257,10 @@ pub struct SeafileBackend { #[async_trait] impl Accessor for SeafileBackend { type Reader = IncomingAsyncBody; - - type BlockingReader = (); - type Writer = SeafileWriters; - - type BlockingWriter = (); - type Lister = oio::PageLister; - + type BlockingReader = (); + type BlockingWriter = (); type BlockingLister = (); fn info(&self) -> AccessorInfo { @@ -290,6 +286,23 @@ impl Accessor for SeafileBackend { am } + async fn stat(&self, path: &str, _args: OpStat) -> Result { + if path == "/" { + return Ok(RpStat::new(Metadata::new(EntryMode::DIR))); + } + + let metadata = if path.ends_with('/') { + let dir_detail = self.core.dir_detail(path).await?; + parse_dir_detail(dir_detail) + } else { + let file_detail = self.core.file_detail(path).await?; + + parse_file_detail(file_detail) + }; + + metadata.map(RpStat::new) + } + async fn read(&self, path: &str, _args: OpRead) -> Result<(RpRead, Self::Reader)> { let resp = self.core.download_file(path).await?; @@ -308,23 +321,6 @@ impl Accessor for SeafileBackend { } } - async fn stat(&self, path: &str, _args: OpStat) -> Result { - if path == "/" { - return Ok(RpStat::new(Metadata::new(EntryMode::DIR))); - } - - let metadata = if path.ends_with('/') { - let dir_detail = self.core.dir_detail(path).await?; - parse_dir_detail(dir_detail) - } else { - let file_detail = self.core.file_detail(path).await?; - - parse_file_detail(file_detail) - }; - - metadata.map(RpStat::new) - } - async fn write(&self, path: &str, args: OpWrite) -> Result<(RpWrite, Self::Writer)> { let w = SeafileWriter::new(self.core.clone(), args, path.to_string()); let w = oio::OneShotWriter::new(w); diff --git a/core/src/services/seafile/core.rs b/core/src/services/seafile/core.rs index a7cb8af7f5b..c8af7e17014 100644 --- a/core/src/services/seafile/core.rs +++ b/core/src/services/seafile/core.rs @@ -15,23 +15,22 @@ // specific language governing permissions and limitations // under the License. +use std::fmt::Debug; +use std::fmt::Formatter; +use std::sync::Arc; + use bytes::Bytes; use http::header; use http::Request; use http::Response; use http::StatusCode; use serde::Deserialize; -use std::sync::Arc; use tokio::sync::RwLock; -use std::fmt::Debug; -use std::fmt::Formatter; - +use super::error::parse_error; use crate::raw::*; use crate::*; -use super::error::parse_error; - /// Core of [seafile](https://www.seafile.com) services support. 
#[derive(Clone)] pub struct SeafileCore { diff --git a/core/src/services/seafile/lister.rs b/core/src/services/seafile/lister.rs index c1ee6b94705..792d6280324 100644 --- a/core/src/services/seafile/lister.rs +++ b/core/src/services/seafile/lister.rs @@ -18,12 +18,13 @@ use std::sync::Arc; use async_trait::async_trait; -use http::{header, Request, StatusCode}; +use http::header; +use http::Request; +use http::StatusCode; use serde::Deserialize; use super::core::SeafileCore; use super::error::parse_error; - use crate::raw::oio::Entry; use crate::raw::*; use crate::*; diff --git a/core/src/services/seafile/writer.rs b/core/src/services/seafile/writer.rs index 8c7c0ce9db6..0ff6ae91907 100644 --- a/core/src/services/seafile/writer.rs +++ b/core/src/services/seafile/writer.rs @@ -18,7 +18,9 @@ use std::sync::Arc; use async_trait::async_trait; -use http::{header, Request, StatusCode}; +use http::header; +use http::Request; +use http::StatusCode; use super::core::SeafileCore; use super::error::parse_error; diff --git a/core/src/services/sftp/backend.rs b/core/src/services/sftp/backend.rs index 572bb4dd1e5..839c1ffd867 100644 --- a/core/src/services/sftp/backend.rs +++ b/core/src/services/sftp/backend.rs @@ -243,10 +243,10 @@ impl Debug for SftpBackend { #[async_trait] impl Accessor for SftpBackend { type Reader = oio::TokioReader>>; - type BlockingReader = (); type Writer = SftpWriter; - type BlockingWriter = (); type Lister = Option; + type BlockingReader = (); + type BlockingWriter = (); type BlockingLister = (); fn info(&self) -> AccessorInfo { @@ -300,6 +300,16 @@ impl Accessor for SftpBackend { return Ok(RpCreateDir::default()); } + async fn stat(&self, path: &str, _: OpStat) -> Result { + let client = self.connect().await?; + let mut fs = client.fs(); + fs.set_cwd(&self.root); + + let meta: Metadata = fs.metadata(path).await?.into(); + + Ok(RpStat::new(meta)) + } + async fn read(&self, path: &str, _: OpRead) -> Result<(RpRead, Self::Reader)> { let client = self.connect().await?; @@ -344,50 +354,6 @@ impl Accessor for SftpBackend { Ok((RpWrite::new(), SftpWriter::new(file))) } - async fn copy(&self, from: &str, to: &str, _: OpCopy) -> Result { - let client = self.connect().await?; - - let mut fs = client.fs(); - fs.set_cwd(&self.root); - - if let Some((dir, _)) = to.rsplit_once('/') { - self.create_dir(dir, OpCreateDir::default()).await?; - } - - let src = fs.canonicalize(from).await?; - let dst = fs.canonicalize(to).await?; - let mut src_file = client.open(&src).await?; - let mut dst_file = client.create(dst).await?; - - src_file.copy_all_to(&mut dst_file).await?; - - Ok(RpCopy::default()) - } - - async fn rename(&self, from: &str, to: &str, _: OpRename) -> Result { - let client = self.connect().await?; - - let mut fs = client.fs(); - fs.set_cwd(&self.root); - - if let Some((dir, _)) = to.rsplit_once('/') { - self.create_dir(dir, OpCreateDir::default()).await?; - } - fs.rename(from, to).await?; - - Ok(RpRename::default()) - } - - async fn stat(&self, path: &str, _: OpStat) -> Result { - let client = self.connect().await?; - let mut fs = client.fs(); - fs.set_cwd(&self.root); - - let meta: Metadata = fs.metadata(path).await?.into(); - - Ok(RpStat::new(meta)) - } - async fn delete(&self, path: &str, _: OpDelete) -> Result { let client = self.connect().await?; @@ -468,6 +434,40 @@ impl Accessor for SftpBackend { Some(SftpLister::new(dir, path.to_owned())), )) } + + async fn copy(&self, from: &str, to: &str, _: OpCopy) -> Result { + let client = self.connect().await?; + + let mut fs = 
client.fs();
+ fs.set_cwd(&self.root);
+
+ if let Some((dir, _)) = to.rsplit_once('/') {
+ self.create_dir(dir, OpCreateDir::default()).await?;
+ }
+
+ let src = fs.canonicalize(from).await?;
+ let dst = fs.canonicalize(to).await?;
+ let mut src_file = client.open(&src).await?;
+ let mut dst_file = client.create(dst).await?;
+
+ src_file.copy_all_to(&mut dst_file).await?;
+
+ Ok(RpCopy::default())
+ }
+
+ async fn rename(&self, from: &str, to: &str, _: OpRename) -> Result {
+ let client = self.connect().await?;
+
+ let mut fs = client.fs();
+ fs.set_cwd(&self.root);
+
+ if let Some((dir, _)) = to.rsplit_once('/') {
+ self.create_dir(dir, OpCreateDir::default()).await?;
+ }
+ fs.rename(from, to).await?;
+
+ Ok(RpRename::default())
+ }
 }

 impl SftpBackend {
diff --git a/core/src/services/supabase/backend.rs b/core/src/services/supabase/backend.rs
index cb7bffc7d13..99f04723ad9 100644
--- a/core/src/services/supabase/backend.rs
+++ b/core/src/services/supabase/backend.rs
@@ -157,11 +157,11 @@ pub struct SupabaseBackend {
 #[async_trait]
 impl Accessor for SupabaseBackend {
 type Reader = IncomingAsyncBody;
- type BlockingReader = ();
 type Writer = oio::OneShotWriter;
- type BlockingWriter = ();
 // todo: implement Lister to support list and scan
 type Lister = ();
+ type BlockingReader = ();
+ type BlockingWriter = ();
 type BlockingLister = ();

 fn info(&self) -> AccessorInfo {
@@ -183,24 +183,6 @@ impl Accessor for SupabaseBackend {
 am
 }

- async fn read(&self, path: &str, args: OpRead) -> Result<(RpRead, Self::Reader)> {
- let resp = self.core.supabase_get_object(path, args.range()).await?;
-
- let status = resp.status();
-
- match status {
- StatusCode::OK | StatusCode::PARTIAL_CONTENT => Ok((RpRead::new(), resp.into_body())),
- _ => Err(parse_error(resp).await?),
- }
- }
-
- async fn write(&self, path: &str, args: OpWrite) -> Result<(RpWrite, Self::Writer)> {
- Ok((
- RpWrite::default(),
- oio::OneShotWriter::new(SupabaseWriter::new(self.core.clone(), path, args)),
- ))
- }
-
 async fn stat(&self, path: &str, _args: OpStat) -> Result {
 // The get_object_info does not contain the file size.
Therefore // we first try the get the metadata through head, if we fail, @@ -221,6 +203,24 @@ impl Accessor for SupabaseBackend { } } + async fn read(&self, path: &str, args: OpRead) -> Result<(RpRead, Self::Reader)> { + let resp = self.core.supabase_get_object(path, args.range()).await?; + + let status = resp.status(); + + match status { + StatusCode::OK | StatusCode::PARTIAL_CONTENT => Ok((RpRead::new(), resp.into_body())), + _ => Err(parse_error(resp).await?), + } + } + + async fn write(&self, path: &str, args: OpWrite) -> Result<(RpWrite, Self::Writer)> { + Ok(( + RpWrite::default(), + oio::OneShotWriter::new(SupabaseWriter::new(self.core.clone(), path, args)), + )) + } + async fn delete(&self, path: &str, _: OpDelete) -> Result { let resp = self.core.supabase_delete_object(path).await?; diff --git a/core/src/services/swift/backend.rs b/core/src/services/swift/backend.rs index 01e123a4d21..8c36a8d3f96 100644 --- a/core/src/services/swift/backend.rs +++ b/core/src/services/swift/backend.rs @@ -215,10 +215,10 @@ pub struct SwiftBackend { #[async_trait] impl Accessor for SwiftBackend { type Reader = IncomingAsyncBody; - type BlockingReader = (); type Writer = oio::OneShotWriter; - type BlockingWriter = (); type Lister = oio::PageLister; + type BlockingReader = (); + type BlockingWriter = (); type BlockingLister = (); fn info(&self) -> AccessorInfo { @@ -244,6 +244,20 @@ impl Accessor for SwiftBackend { am } + async fn stat(&self, path: &str, _args: OpStat) -> Result { + let resp = self.core.swift_get_metadata(path).await?; + + let status = resp.status(); + + match status { + StatusCode::OK | StatusCode::NO_CONTENT => { + let meta = parse_into_metadata(path, resp.headers())?; + Ok(RpStat::new(meta)) + } + _ => Err(parse_error(resp).await?), + } + } + async fn read(&self, path: &str, args: OpRead) -> Result<(RpRead, Self::Reader)> { let resp = self.core.swift_read(path, args).await?; @@ -272,36 +286,6 @@ impl Accessor for SwiftBackend { return Ok((RpWrite::default(), w)); } - async fn copy(&self, from: &str, to: &str, _args: OpCopy) -> Result { - // cannot copy objects larger than 5 GB. - // Reference: https://docs.openstack.org/api-ref/object-store/#copy-object - let resp = self.core.swift_copy(from, to).await?; - - let status = resp.status(); - - match status { - StatusCode::CREATED | StatusCode::OK => { - resp.into_body().consume().await?; - Ok(RpCopy::default()) - } - _ => Err(parse_error(resp).await?), - } - } - - async fn stat(&self, path: &str, _args: OpStat) -> Result { - let resp = self.core.swift_get_metadata(path).await?; - - let status = resp.status(); - - match status { - StatusCode::OK | StatusCode::NO_CONTENT => { - let meta = parse_into_metadata(path, resp.headers())?; - Ok(RpStat::new(meta)) - } - _ => Err(parse_error(resp).await?), - } - } - async fn delete(&self, path: &str, _args: OpDelete) -> Result { let resp = self.core.swift_delete(path).await?; @@ -324,4 +308,20 @@ impl Accessor for SwiftBackend { Ok((RpList::default(), oio::PageLister::new(l))) } + + async fn copy(&self, from: &str, to: &str, _args: OpCopy) -> Result { + // cannot copy objects larger than 5 GB. 
+ // Reference: https://docs.openstack.org/api-ref/object-store/#copy-object
+ let resp = self.core.swift_copy(from, to).await?;
+
+ let status = resp.status();
+
+ match status {
+ StatusCode::CREATED | StatusCode::OK => {
+ resp.into_body().consume().await?;
+ Ok(RpCopy::default())
+ }
+ _ => Err(parse_error(resp).await?),
+ }
+ }
 }
diff --git a/core/src/services/upyun/backend.rs b/core/src/services/upyun/backend.rs
index f2cd2a8c0ca..091977f6f59 100644
--- a/core/src/services/upyun/backend.rs
+++ b/core/src/services/upyun/backend.rs
@@ -15,15 +15,16 @@
 // specific language governing permissions and limitations
 // under the License.

-use async_trait::async_trait;
-use http::StatusCode;
-use log::debug;
-use serde::Deserialize;
 use std::collections::HashMap;
 use std::fmt::Debug;
 use std::fmt::Formatter;
 use std::sync::Arc;

+use async_trait::async_trait;
+use http::StatusCode;
+use log::debug;
+use serde::Deserialize;
+
 use super::core::parse_info;
 use super::core::UpyunCore;
 use super::error::parse_error;
@@ -233,15 +234,10 @@ pub struct UpyunBackend {
 #[async_trait]
 impl Accessor for UpyunBackend {
 type Reader = IncomingAsyncBody;
-
- type BlockingReader = ();
-
 type Writer = UpyunWriters;
-
- type BlockingWriter = ();
-
 type Lister = oio::PageLister;
-
+ type BlockingReader = ();
+ type BlockingWriter = ();
 type BlockingLister = ();

 fn info(&self) -> AccessorInfo {
@@ -290,32 +286,13 @@ impl Accessor for UpyunBackend {
 }
 }

- async fn rename(&self, from: &str, to: &str, _args: OpRename) -> Result {
- let resp = self.core.move_object(from, to).await?;
-
- let status = resp.status();
-
- match status {
- StatusCode::OK => {
- resp.into_body().consume().await?;
-
- Ok(RpRename::default())
- }
- _ => Err(parse_error(resp).await?),
- }
- }
-
- async fn copy(&self, from: &str, to: &str, _args: OpCopy) -> Result {
- let resp = self.core.copy(from, to).await?;
+ async fn stat(&self, path: &str, _args: OpStat) -> Result {
+ let resp = self.core.info(path).await?;

 let status = resp.status();

 match status {
- StatusCode::OK => {
- resp.into_body().consume().await?;
-
- Ok(RpCopy::default())
- }
+ StatusCode::OK => parse_info(resp.headers()).map(RpStat::new),
 _ => Err(parse_error(resp).await?),
 }
 }
@@ -338,17 +315,6 @@ impl Accessor for UpyunBackend {
 }
 }

- async fn stat(&self, path: &str, _args: OpStat) -> Result {
- let resp = self.core.info(path).await?;
-
- let status = resp.status();
-
- match status {
- StatusCode::OK => parse_info(resp.headers()).map(RpStat::new),
- _ => Err(parse_error(resp).await?),
- }
- }
-
 async fn write(&self, path: &str, args: OpWrite) -> Result<(RpWrite, Self::Writer)> {
 let writer = UpyunWriter::new(self.core.clone(), args, path.to_string());

@@ -374,4 +340,34 @@ impl Accessor for UpyunBackend {
 let l = UpyunLister::new(self.core.clone(), path, args.limit());
 Ok((RpList::default(), oio::PageLister::new(l)))
 }
+
+ async fn copy(&self, from: &str, to: &str, _args: OpCopy) -> Result {
+ let resp = self.core.copy(from, to).await?;
+
+ let status = resp.status();
+
+ match status {
+ StatusCode::OK => {
+ resp.into_body().consume().await?;
+
+ Ok(RpCopy::default())
+ }
+ _ => Err(parse_error(resp).await?),
+ }
+ }
+
+ async fn rename(&self, from: &str, to: &str, _args: OpRename) -> Result {
+ let resp = self.core.move_object(from, to).await?;
+
+ let status = resp.status();
+
+ match status {
+ StatusCode::OK => {
+ resp.into_body().consume().await?;
+
+ Ok(RpRename::default())
+ }
+ _ => Err(parse_error(resp).await?),
+ }
+ }
 }
diff --git a/core/src/services/upyun/core.rs b/core/src/services/upyun/core.rs
index 4c02d1b338a..49b430768f7 100644
--- a/core/src/services/upyun/core.rs
+++ b/core/src/services/upyun/core.rs
@@ -15,20 +15,24 @@
 // specific language governing permissions and limitations
 // under the License.

-use std::fmt::{Debug, Formatter};
+use std::fmt::Debug;
+use std::fmt::Formatter;

 use base64::Engine;
-use hmac::{Hmac, Mac};
-use http::{header, HeaderMap, Request, Response};
+use hmac::Hmac;
+use hmac::Mac;
+use http::header;
+use http::HeaderMap;
+use http::Request;
+use http::Response;
 use md5::Digest;
 use serde::Deserialize;
 use sha1::Sha1;

+use self::constants::*;
 use crate::raw::*;
 use crate::*;

-use self::constants::*;
-
 pub(super) mod constants {
 pub const X_UPYUN_FILE_TYPE: &str = "x-upyun-file-type";
 pub const X_UPYUN_FILE_SIZE: &str = "x-upyun-file-size";
diff --git a/core/src/services/upyun/lister.rs b/core/src/services/upyun/lister.rs
index e9f17f08716..4fdd068cefb 100644
--- a/core/src/services/upyun/lister.rs
+++ b/core/src/services/upyun/lister.rs
@@ -19,7 +19,8 @@ use std::sync::Arc;

 use async_trait::async_trait;

-use super::core::{ListObjectsResponse, UpyunCore};
+use super::core::ListObjectsResponse;
+use super::core::UpyunCore;
 use super::error::parse_error;
 use crate::raw::oio::Entry;
 use crate::raw::*;
diff --git a/core/src/services/upyun/writer.rs b/core/src/services/upyun/writer.rs
index a245ae23c46..b4f50821220 100644
--- a/core/src/services/upyun/writer.rs
+++ b/core/src/services/upyun/writer.rs
@@ -20,12 +20,11 @@ use std::sync::Arc;
 use async_trait::async_trait;
 use http::StatusCode;

-use crate::raw::*;
-use crate::*;
-
 use super::core::constants::X_UPYUN_MULTI_UUID;
 use super::core::UpyunCore;
 use super::error::parse_error;
+use crate::raw::*;
+use crate::*;

 pub type UpyunWriters = oio::MultipartUploadWriter;

diff --git a/core/src/services/vercel_artifacts/backend.rs b/core/src/services/vercel_artifacts/backend.rs
index 28af379781d..27ef788e19b 100644
--- a/core/src/services/vercel_artifacts/backend.rs
+++ b/core/src/services/vercel_artifacts/backend.rs
@@ -46,10 +46,10 @@ impl Debug for VercelArtifactsBackend {
 #[async_trait]
 impl Accessor for VercelArtifactsBackend {
 type Reader = IncomingAsyncBody;
- type BlockingReader = ();
 type Writer = oio::OneShotWriter;
- type BlockingWriter = ();
 type Lister = ();
+ type BlockingReader = ();
+ type BlockingWriter = ();
 type BlockingLister = ();

 fn info(&self) -> AccessorInfo {
@@ -69,6 +69,21 @@ impl Accessor for VercelArtifactsBackend {
 ma
 }

+ async fn stat(&self, path: &str, _args: OpStat) -> Result {
+ let res = self.vercel_artifacts_stat(path).await?;
+
+ let status = res.status();
+
+ match status {
+ StatusCode::OK => {
+ let meta = parse_into_metadata(path, res.headers())?;
+ Ok(RpStat::new(meta))
+ }
+
+ _ => Err(parse_error(res).await?),
+ }
+ }
+
 async fn read(&self, path: &str, args: OpRead) -> Result<(RpRead, Self::Reader)> {
 let resp = self.vercel_artifacts_get(path, args).await?;

@@ -91,21 +106,6 @@ impl Accessor for VercelArtifactsBackend {
 )),
 ))
 }
-
- async fn stat(&self, path: &str, _args: OpStat) -> Result {
- let res = self.vercel_artifacts_stat(path).await?;
-
- let status = res.status();
-
- match status {
- StatusCode::OK => {
- let meta = parse_into_metadata(path, res.headers())?;
- Ok(RpStat::new(meta))
- }
-
- _ => Err(parse_error(res).await?),
- }
- }
 }

 impl VercelArtifactsBackend {
diff --git a/core/src/services/webdav/backend.rs b/core/src/services/webdav/backend.rs
index e1a3e895cfa..67e6a9b4ac5 100644
--- a/core/src/services/webdav/backend.rs
+++ b/core/src/services/webdav/backend.rs
@@ -246,10 +246,10 @@ impl Debug for WebdavBackend {
 #[async_trait]
 impl Accessor for WebdavBackend {
 type Reader = IncomingAsyncBody;
- type BlockingReader = ();
 type Writer = oio::OneShotWriter;
- type BlockingWriter = ();
 type Lister = Option>;
+ type BlockingReader = ();
+ type BlockingWriter = ();
 type BlockingLister = ();

 fn info(&self) -> AccessorInfo {
@@ -288,6 +288,36 @@ impl Accessor for WebdavBackend {
 Ok(RpCreateDir::default())
 }

+ async fn stat(&self, path: &str, _: OpStat) -> Result {
+ let mut header_map = HeaderMap::new();
+ // not include children
+ header_map.insert("Depth", "0".parse().unwrap());
+ header_map.insert(header::ACCEPT, "application/xml".parse().unwrap());
+
+ let resp = self.webdav_propfind(path, Some(header_map)).await?;
+
+ let status = resp.status();
+
+ if !status.is_success() {
+ return Err(parse_error(resp).await?);
+ }
+
+ let bs = resp.into_body().bytes().await?;
+ let result: Multistatus =
+ quick_xml::de::from_reader(bs.reader()).map_err(new_xml_deserialize_error)?;
+ let item = result
+ .response
+ .first()
+ .ok_or_else(|| {
+ Error::new(
+ ErrorKind::Unexpected,
+ "Failed getting item stat: bad response",
+ )
+ })?
+ .parse_into_metadata()?;
+ Ok(RpStat::new(item))
+ }
+
 async fn read(&self, path: &str, args: OpRead) -> Result<(RpRead, Self::Reader)> {
 let resp = self.webdav_get(path, args).await?;
 let status = resp.status();
@@ -319,74 +349,6 @@ impl Accessor for WebdavBackend {
 ))
 }

- /// # Notes
- ///
- /// There is a strange dead lock issues when copying a non-exist file, so we will check
- /// if the source exists first.
- ///
- /// For example:
- async fn copy(&self, from: &str, to: &str, _args: OpCopy) -> Result {
- if let Err(err) = self.stat(from, OpStat::default()).await {
- if err.kind() == ErrorKind::NotFound {
- return Err(err);
- }
- }
-
- self.ensure_parent_path(to).await?;
-
- let resp = self.webdav_copy(from, to).await?;
-
- let status = resp.status();
-
- match status {
- StatusCode::CREATED | StatusCode::NO_CONTENT => Ok(RpCopy::default()),
- _ => Err(parse_error(resp).await?),
- }
- }
-
- async fn rename(&self, from: &str, to: &str, _args: OpRename) -> Result {
- self.ensure_parent_path(to).await?;
-
- let resp = self.webdav_move(from, to).await?;
-
- let status = resp.status();
-
- match status {
- StatusCode::CREATED | StatusCode::NO_CONTENT => Ok(RpRename::default()),
- _ => Err(parse_error(resp).await?),
- }
- }
-
- async fn stat(&self, path: &str, _: OpStat) -> Result {
- let mut header_map = HeaderMap::new();
- // not include children
- header_map.insert("Depth", "0".parse().unwrap());
- header_map.insert(header::ACCEPT, "application/xml".parse().unwrap());
-
- let resp = self.webdav_propfind(path, Some(header_map)).await?;
-
- let status = resp.status();
-
- if !status.is_success() {
- return Err(parse_error(resp).await?);
- }
-
- let bs = resp.into_body().bytes().await?;
- let result: Multistatus =
- quick_xml::de::from_reader(bs.reader()).map_err(new_xml_deserialize_error)?;
- let item = result
- .response
- .first()
- .ok_or_else(|| {
- Error::new(
- ErrorKind::Unexpected,
- "Failed getting item stat: bad response",
- )
- })?
- .parse_into_metadata()?; - Ok(RpStat::new(item)) - } - async fn delete(&self, path: &str, _: OpDelete) -> Result { let resp = self.webdav_delete(path).await?; @@ -426,6 +388,44 @@ impl Accessor for WebdavBackend { _ => Err(parse_error(resp).await?), } } + + /// # Notes + /// + /// There is a strange dead lock issues when copying a non-exist file, so we will check + /// if the source exists first. + /// + /// For example: + async fn copy(&self, from: &str, to: &str, _args: OpCopy) -> Result { + if let Err(err) = self.stat(from, OpStat::default()).await { + if err.kind() == ErrorKind::NotFound { + return Err(err); + } + } + + self.ensure_parent_path(to).await?; + + let resp = self.webdav_copy(from, to).await?; + + let status = resp.status(); + + match status { + StatusCode::CREATED | StatusCode::NO_CONTENT => Ok(RpCopy::default()), + _ => Err(parse_error(resp).await?), + } + } + + async fn rename(&self, from: &str, to: &str, _args: OpRename) -> Result { + self.ensure_parent_path(to).await?; + + let resp = self.webdav_move(from, to).await?; + + let status = resp.status(); + + match status { + StatusCode::CREATED | StatusCode::NO_CONTENT => Ok(RpRename::default()), + _ => Err(parse_error(resp).await?), + } + } } impl WebdavBackend { diff --git a/core/src/services/webhdfs/backend.rs b/core/src/services/webhdfs/backend.rs index 2759c1e6a14..18f4b55ca1c 100644 --- a/core/src/services/webhdfs/backend.rs +++ b/core/src/services/webhdfs/backend.rs @@ -395,10 +395,10 @@ impl WebhdfsBackend { #[async_trait] impl Accessor for WebhdfsBackend { type Reader = IncomingAsyncBody; - type BlockingReader = (); type Writer = oio::OneShotWriter; - type BlockingWriter = (); type Lister = oio::PageLister; + type BlockingReader = (); + type BlockingWriter = (); type BlockingLister = (); fn info(&self) -> AccessorInfo { @@ -460,6 +460,38 @@ impl Accessor for WebhdfsBackend { } } + async fn stat(&self, path: &str, _: OpStat) -> Result { + // if root exists and is a directory, stat will be ok + self.root_checker + .get_or_try_init(|| async { self.check_root().await }) + .await?; + + let resp = self.webhdfs_get_file_status(path).await?; + let status = resp.status(); + match status { + StatusCode::OK => { + let bs = resp.into_body().bytes().await?; + + let file_status = serde_json::from_slice::(&bs) + .map_err(new_json_deserialize_error)? + .file_status; + + let meta = match file_status.ty { + FileStatusType::Directory => Metadata::new(EntryMode::DIR), + FileStatusType::File => Metadata::new(EntryMode::FILE) + .with_content_length(file_status.length) + .with_last_modified(parse_datetime_from_from_timestamp_millis( + file_status.modification_time, + )?), + }; + + Ok(RpStat::new(meta)) + } + + _ => Err(parse_error(resp).await?), + } + } + async fn read(&self, path: &str, args: OpRead) -> Result<(RpRead, Self::Reader)> { let range = args.range(); let resp = self.webhdfs_read_file(path, range).await?; @@ -498,38 +530,6 @@ impl Accessor for WebhdfsBackend { )) } - async fn stat(&self, path: &str, _: OpStat) -> Result { - // if root exists and is a directory, stat will be ok - self.root_checker - .get_or_try_init(|| async { self.check_root().await }) - .await?; - - let resp = self.webhdfs_get_file_status(path).await?; - let status = resp.status(); - match status { - StatusCode::OK => { - let bs = resp.into_body().bytes().await?; - - let file_status = serde_json::from_slice::(&bs) - .map_err(new_json_deserialize_error)? 
- .file_status; - - let meta = match file_status.ty { - FileStatusType::Directory => Metadata::new(EntryMode::DIR), - FileStatusType::File => Metadata::new(EntryMode::FILE) - .with_content_length(file_status.length) - .with_last_modified(parse_datetime_from_from_timestamp_millis( - file_status.modification_time, - )?), - }; - - Ok(RpStat::new(meta)) - } - - _ => Err(parse_error(resp).await?), - } - } - async fn delete(&self, path: &str, _: OpDelete) -> Result { let resp = self.webhdfs_delete(path).await?; diff --git a/core/tests/behavior/blocking_write.rs b/core/tests/behavior/blocking_write.rs index 9e29a8274c2..7f4e6dc6c86 100644 --- a/core/tests/behavior/blocking_write.rs +++ b/core/tests/behavior/blocking_write.rs @@ -15,12 +15,14 @@ // specific language governing permissions and limitations // under the License. +use std::io::BufReader; +use std::io::Cursor; + use anyhow::Result; use log::debug; use log::warn; use sha2::Digest; use sha2::Sha256; -use std::io::{BufReader, Cursor}; use crate::*; diff --git a/core/tests/behavior/main.rs b/core/tests/behavior/main.rs index c8ae40fe929..20eb832c82b 100644 --- a/core/tests/behavior/main.rs +++ b/core/tests/behavior/main.rs @@ -45,7 +45,8 @@ mod blocking_write; // External dependencies use libtest_mimic::Arguments; use libtest_mimic::Trial; -use opendal::raw::tests::{init_test_service, TEST_RUNTIME}; +use opendal::raw::tests::init_test_service; +use opendal::raw::tests::TEST_RUNTIME; use opendal::*; pub static TEST_FIXTURE: Fixture = Fixture::new(); diff --git a/core/tests/behavior/utils.rs b/core/tests/behavior/utils.rs index f8ebae75a25..63f8bce841e 100644 --- a/core/tests/behavior/utils.rs +++ b/core/tests/behavior/utils.rs @@ -15,8 +15,9 @@ // specific language governing permissions and limitations // under the License. +use std::mem; use std::sync::Mutex; -use std::{mem, usize}; +use std::usize; use futures::Future; use libtest_mimic::Failed; diff --git a/deny.toml b/deny.toml index 4d4ee1c53ad..cf3f93b25ea 100644 --- a/deny.toml +++ b/deny.toml @@ -16,8 +16,6 @@ # under the License. [licenses] -unlicensed = "deny" -copyleft = "deny" allow = [ "Apache-2.0", "Apache-2.0 WITH LLVM-exception", @@ -27,10 +25,16 @@ allow = [ "ISC", "CC0-1.0", ] +copyleft = "deny" exceptions = [ - { allow = ["OpenSSL"], name = "ring" }, - { allow = ["Unicode-DFS-2016"], name = "unicode-ident" }, + { allow = [ + "OpenSSL", + ], name = "ring" }, + { allow = [ + "Unicode-DFS-2016", + ], name = "unicode-ident" }, ] +unlicensed = "deny" [[licenses.clarify]] name = "ring" From 2c2ac9ca27819cc5663846b197c2712449594b95 Mon Sep 17 00:00:00 2001 From: Xuanwo Date: Thu, 4 Jan 2024 23:10:28 +0800 Subject: [PATCH 2/2] Remove extra empty line Signed-off-by: Xuanwo --- core/src/services/b2/backend.rs | 5 ----- 1 file changed, 5 deletions(-) diff --git a/core/src/services/b2/backend.rs b/core/src/services/b2/backend.rs index a488d60bebd..2c5128bad65 100644 --- a/core/src/services/b2/backend.rs +++ b/core/src/services/b2/backend.rs @@ -268,15 +268,10 @@ pub struct B2Backend { #[async_trait] impl Accessor for B2Backend { type Reader = IncomingAsyncBody; - type Writer = B2Writers; - type Lister = oio::PageLister; - type BlockingReader = (); - type BlockingWriter = (); - type BlockingLister = (); fn info(&self) -> AccessorInfo {