Skip to content

Commit

Permalink
api: add method to load BlobCacheConfigV2 from file
Browse files Browse the repository at this point in the history
Add method to load BlobCacheConfigV2 from configuration file.

Signed-off-by: Jiang Liu <gerry@linux.alibaba.com>
  • Loading branch information
jiangliu committed Mar 4, 2023
1 parent eceeefd commit 3b28a10
Show file tree
Hide file tree
Showing 6 changed files with 313 additions and 65 deletions.
145 changes: 144 additions & 1 deletion api/src/config.rs
Original file line number Diff line number Diff line change
Expand Up @@ -890,11 +890,54 @@ pub struct BlobCacheEntryConfigV2 {
/// Configuration information for local cache system.
#[serde(default)]
pub cache: CacheConfigV2,
/// Optional file path for metadata blobs.
/// Optional file path for metadata blob.
#[serde(default)]
pub metadata_path: Option<String>,
}

impl BlobCacheEntryConfigV2 {
/// Read configuration information from a file.
pub fn from_file<P: AsRef<Path>>(path: P) -> Result<Self> {
let md = fs::metadata(path.as_ref())?;
if md.len() > 0x100000 {
return Err(eother!("configuration file size is too big"));
}
let content = fs::read_to_string(path)?;
Self::from_str(&content)
}

/// Validate the configuration object.
pub fn validate(&self) -> bool {
if self.version != 2 {
return false;
}
let config: ConfigV2 = self.into();
config.validate()
}
}

impl FromStr for BlobCacheEntryConfigV2 {
    type Err = Error;

    // Accept configuration information in either JSON or TOML format; the
    // parsed object must also pass validation.
    fn from_str(s: &str) -> Result<BlobCacheEntryConfigV2> {
        let config = serde_json::from_str::<BlobCacheEntryConfigV2>(s)
            .ok()
            .or_else(|| toml::from_str::<BlobCacheEntryConfigV2>(s).ok())
            .ok_or_else(|| einval!("failed to parse configuration information"))?;
        if config.validate() {
            Ok(config)
        } else {
            Err(einval!("invalid configuration"))
        }
    }
}

impl From<&BlobCacheEntryConfigV2> for ConfigV2 {
fn from(c: &BlobCacheEntryConfigV2) -> Self {
ConfigV2 {
Expand Down Expand Up @@ -943,6 +986,106 @@ impl ConfigV2Internal {
}
}

/// Blob cache object type for nydus/rafs bootstrap blob.
///
/// Valid value for [`BlobCacheEntry::blob_type`].
pub const BLOB_CACHE_TYPE_META_BLOB: &str = "bootstrap";
/// Blob cache object type for nydus/rafs data blob.
///
/// Valid value for [`BlobCacheEntry::blob_type`].
pub const BLOB_CACHE_TYPE_DATA_BLOB: &str = "datablob";

/// Configuration information for a cached blob.
#[derive(Debug, Deserialize, Serialize)]
pub struct BlobCacheEntry {
    /// Type of blob object, either [`BLOB_CACHE_TYPE_META_BLOB`] ("bootstrap")
    /// or [`BLOB_CACHE_TYPE_DATA_BLOB`] ("datablob").
    #[serde(rename = "type")]
    pub blob_type: String,
    /// Blob id.
    #[serde(rename = "id")]
    pub blob_id: String,
    /// Legacy (v1) configuration information to generate blob cache object.
    ///
    /// Kept for backward compatibility; converted into `blob_config` by
    /// `prepare_configuration_info()` when no v2 configuration was provided.
    #[serde(default, rename = "config")]
    pub(crate) blob_config_legacy: Option<BlobCacheEntryConfig>,
    /// V2 configuration information to generate blob cache object.
    #[serde(default, rename = "config_v2")]
    pub blob_config: Option<BlobCacheEntryConfigV2>,
    /// Domain id for the blob, which is used to group cached blobs into management domains.
    #[serde(default)]
    pub domain_id: String,
}

impl BlobCacheEntry {
    /// Ensure a v2 configuration object is in place and check its validity.
    ///
    /// When `blob_config` is absent, try to upgrade the legacy v1
    /// configuration into v2 form. Returns `true` only when a v2
    /// configuration exists and its cache/backend sections both validate.
    pub fn prepare_configuration_info(&mut self) -> bool {
        // Convert the legacy configuration when no v2 config was supplied.
        if self.blob_config.is_none() {
            if let Some(legacy) = self.blob_config_legacy.as_ref() {
                match legacy.try_into() {
                    Ok(config) => self.blob_config = Some(config),
                    Err(_) => return false,
                }
            }
        }

        self.blob_config
            .as_ref()
            .map_or(false, |cfg| cfg.cache.validate() && cfg.backend.validate())
    }
}

impl BlobCacheEntry {
    /// Read configuration information from a file.
    ///
    /// The file content may be in either JSON or TOML format. Files larger
    /// than 1MB are rejected as a sanity check before reading.
    pub fn from_file<P: AsRef<Path>>(path: P) -> Result<Self> {
        let md = fs::metadata(path.as_ref())?;
        if md.len() > 0x100000 {
            return Err(eother!("configuration file size is too big"));
        }
        let content = fs::read_to_string(path)?;
        Self::from_str(&content)
    }

    /// Validate the configuration object.
    ///
    /// The `blob_type` field must be one of the known blob cache object types,
    /// and the embedded v2 configuration, when present, must be valid.
    pub fn validate(&self) -> bool {
        if self.blob_type != BLOB_CACHE_TYPE_META_BLOB
            && self.blob_type != BLOB_CACHE_TYPE_DATA_BLOB
        {
            warn!("invalid blob type {} for blob cache entry", self.blob_type);
            return false;
        }
        // `blob_config` is optional here; only validate it when present.
        if let Some(config) = self.blob_config.as_ref() {
            // Idiomatic negation instead of comparing against `false`.
            if !config.validate() {
                return false;
            }
        }
        true
    }
}

impl FromStr for BlobCacheEntry {
    type Err = Error;

    // Accept configuration information in either JSON or TOML format; the
    // parsed object must also pass validation.
    fn from_str(s: &str) -> Result<BlobCacheEntry> {
        let entry = serde_json::from_str::<BlobCacheEntry>(s)
            .ok()
            .or_else(|| toml::from_str::<BlobCacheEntry>(s).ok())
            .ok_or_else(|| einval!("failed to parse configuration information"))?;
        if entry.validate() {
            Ok(entry)
        } else {
            Err(einval!("invalid configuration"))
        }
    }
}

/// Configuration information for a list of cached blob objects.
///
/// Container holding multiple [`BlobCacheEntry`] objects.
#[derive(Debug, Default, Deserialize, Serialize)]
pub struct BlobCacheList {
    /// List of blob configuration information.
    pub blobs: Vec<BlobCacheEntry>,
}

// Return `true`; helper presumably used for serde `default = "default_true"`
// field attributes elsewhere in this file — confirm against field definitions.
fn default_true() -> bool {
    true
}
Expand Down
53 changes: 1 addition & 52 deletions api/src/http.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4,15 +4,14 @@
//
// SPDX-License-Identifier: Apache-2.0

use std::convert::TryInto;
use std::io;
use std::sync::mpsc::{RecvError, SendError};

use nydus_error::error::MetricsError;
use serde::Deserialize;
use serde_json::Error as SerdeError;

use crate::{BlobCacheEntryConfig, BlobCacheEntryConfigV2};
use crate::BlobCacheEntry;

/// Mount a filesystem.
#[derive(Clone, Deserialize, Debug)]
Expand Down Expand Up @@ -43,56 +42,6 @@ pub struct DaemonConf {
pub log_level: String,
}

/// Blob cache object type for nydus/rafs bootstrap blob.
pub const BLOB_CACHE_TYPE_META_BLOB: &str = "bootstrap";
/// Blob cache object type for nydus/rafs data blob.
pub const BLOB_CACHE_TYPE_DATA_BLOB: &str = "datablob";

/// Configuration information for a cached blob.
#[derive(Debug, Deserialize, Serialize)]
pub struct BlobCacheEntry {
/// Type of blob object, bootstrap or data blob.
#[serde(rename = "type")]
pub blob_type: String,
/// Blob id.
#[serde(rename = "id")]
pub blob_id: String,
/// Configuration information to generate blob cache object.
#[serde(default, rename = "config")]
pub(crate) blob_config_legacy: Option<BlobCacheEntryConfig>,
/// Configuration information to generate blob cache object.
#[serde(default, rename = "config_v2")]
pub blob_config: Option<BlobCacheEntryConfigV2>,
/// Domain id for the blob, which is used to group cached blobs into management domains.
#[serde(default)]
pub domain_id: String,
}

impl BlobCacheEntry {
pub fn prepare_configuration_info(&mut self) -> bool {
if self.blob_config.is_none() {
if let Some(legacy) = self.blob_config_legacy.as_ref() {
match legacy.try_into() {
Err(_) => return false,
Ok(v) => self.blob_config = Some(v),
}
}
}

match self.blob_config.as_ref() {
None => false,
Some(cfg) => cfg.cache.validate() && cfg.backend.validate(),
}
}
}

/// Configuration information for a list of cached blob objects.
#[derive(Debug, Default, Deserialize, Serialize)]
pub struct BlobCacheList {
/// List of blob configuration information.
pub blobs: Vec<BlobCacheEntry>,
}

/// Identifier for cached blob objects.
///
/// Domains are used to control the blob sharing scope. All blobs associated with the same domain
Expand Down
145 changes: 145 additions & 0 deletions docs/samples/blob_cache_entry.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,145 @@
# Configuration file for Nydus Image Service

type = "bootstrap"
id = "image1"
domain_id = "domain1"

# Configuration file format version number, must be 2.
[config_v2]
version = 2
# Identifier for the instance.
id = "my_id"
# Optional file path of the metadata blob, only used by BlobCacheEntry.
metadata_path = "/path/to/rafs/meta/data/blob"

[config_v2.backend]
# Type of storage backend, valid values: "localfs", "oss", "registry"
type = "localfs"

[config_v2.backend.localfs]
blob_file = "/tmp/nydus.blob.data"
dir = "/tmp"
alt_dirs = ["/var/nydus/cache"]

[config_v2.backend.oss]
# Oss http scheme, either 'http' or 'https'
scheme = "http"
# Oss endpoint
endpoint = "my_endpoint"
# Oss bucket name
bucket_name = "my_bucket_name"
# Prefix prepended to each OSS object key, e.g. to simulate a subdirectory:
object_prefix = "my_object_prefix"
# Oss access key
access_key_id = "my_access_key_id"
# Oss secret
access_key_secret = "my_access_key_secret"
# Skip SSL certificate validation for HTTPS scheme.
skip_verify = true
# Drop the read request once http request timeout, in seconds.
timeout = 10
# Drop the read request once http connection timeout, in seconds.
connect_timeout = 10
# Retry count when read request failed.
retry_limit = 5

[config_v2.backend.oss.proxy]
# Access remote storage backend via proxy, e.g. Dragonfly dfdaemon server URL.
url = "localhost:6789"
# Proxy health checking endpoint.
ping_url = "localhost:6789/ping"
# Fallback to remote storage backend if proxy ping failed.
fallback = true
# Interval for proxy health checking, in seconds.
check_interval = 5
# Replace URL to http to request source registry with proxy, and allow fallback to https if the proxy is unhealthy.
use_http = false

[[config_v2.backend.oss.mirrors]]
# Mirror server URL, for example http://127.0.0.1:65001.
host = "http://127.0.0.1:65001"
# Ping URL to check mirror server health.
ping_url = "http://127.0.0.1:65001/ping"
# HTTP request headers to be passed to mirror server.
# headers =
# Whether the authorization process is through mirror, default to false.
auth_through = true
# Interval for mirror health checking, in seconds.
health_check_interval = 5
# Maximum number of failures before marking a mirror as unusable.
failure_limit = 5

[config_v2.backend.registry]
# Registry http scheme, either 'http' or 'https'
scheme = "https"
# Registry url host
host = "my.registry.com"
# Registry image name, like 'library/ubuntu'
repo = "nydus"
# Base64-encoded "username:password" credential; it is sent to the registry auth server to obtain a bearer token.
auth = "base64_encoded"
# Skip SSL certificate validation for HTTPS scheme.
skip_verify = true
# Drop the read request once http request timeout, in seconds.
timeout = 10
# Drop the read request once http connection timeout, in seconds.
connect_timeout = 10
# Retry count when read request failed.
retry_limit = 5
# The field is a bearer token to be sent to registry to authorize registry requests.
registry_token = "bear_token"
# The http scheme to access blobs.
# It is used to workaround some P2P subsystem that requires a different scheme than the registry.
blob_url_scheme = "https"
# Redirect blob access to a different host regardless of the one specified in 'host'.
blob_redirected_host = "redirect.registry.com"

[config_v2.backend.registry.proxy]
# Access remote storage backend via proxy, e.g. Dragonfly dfdaemon server URL.
url = "localhost:6789"
# Proxy health checking endpoint.
ping_url = "localhost:6789/ping"
# Fallback to remote storage backend if proxy ping failed.
fallback = true
# Interval for proxy health checking, in seconds.
check_interval = 5
# Replace URL to http to request source registry with proxy, and allow fallback to https if the proxy is unhealthy.
use_http = false

[[config_v2.backend.registry.mirrors]]
# Mirror server URL, for example http://127.0.0.1:65001.
host = "http://127.0.0.1:65001"
# Ping URL to check mirror server health.
ping_url = "http://127.0.0.1:65001/ping"
# HTTP request headers to be passed to mirror server.
# headers =
# Whether the authorization process is through mirror, default to false.
auth_through = true
# Interval for mirror health checking, in seconds.
health_check_interval = 5
# Maximum number of failures before marking a mirror as unusable.
failure_limit = 5

[config_v2.cache]
# Type of blob cache: "blobcache", "filecache", "fscache", "dummycache" or ""
type = "filecache"
# Whether to cache compressed or uncompressed data.
compressed = true
# Whether to validate data read from the cache.
validate = true

[config_v2.cache.filecache]
work_dir = "."

[config_v2.cache.fscache]
work_dir = "."

[config_v2.cache.prefetch]
# Whether to enable blob data prefetching.
enable = true
# Number of data prefetching working threads, valid values: 1-1024.
threads = 8
# The batch size to prefetch data from backend, valid values: 0-0x10000000.
batch_size = 1000000
# Network bandwidth rate limit in unit of Bytes and Zero means no limit.
bandwidth_limit = 10000000
9 changes: 6 additions & 3 deletions service/src/blob_cache.rs
Original file line number Diff line number Diff line change
Expand Up @@ -328,9 +328,12 @@ impl BlobCacheMgr {
"blob_cache: `config.metadata_path` for meta blob is empty"
));
}
let path = Path::new(&path)
.canonicalize()
.map_err(|_e| einval!("blob_cache: `config.metadata_path` for meta blob is invalid"))?;
let path = Path::new(&path).canonicalize().map_err(|_e| {
einval!(format!(
"blob_cache: `config.metadata_path={}` for meta blob is invalid",
path
))
})?;
if !path.is_file() {
return Err(einval!(
"blob_cache: `config.metadata_path` for meta blob is not a file"
Expand Down
Loading

0 comments on commit 3b28a10

Please sign in to comment.