diff --git a/docs/CLI.md b/docs/CLI.md
deleted file mode 100644
index c07b73be7c..0000000000
--- a/docs/CLI.md
+++ /dev/null
@@ -1,447 +0,0 @@
-# Command-Line Interface #
-
-The TUF command-line interface (CLI) requires a full
-[TUF installation](INSTALLATION.rst). Be sure to include the installation of
-extra dependencies and C extensions
-(```python3 -m pip install securesystemslib[crypto,pynacl]```).
-
-The use of the CLI is documented with examples below.
-
-----
-# Basic Examples #
-
-## Create a repository ##
-
-Create a TUF repository in the current working directory. A cryptographic key
-is created and set for each top-level role. The written Targets metadata does
-not sign for any targets, nor does it delegate trust to any roles. The
-`--init` call will also set up a client directory. By default, these
-directories will be `./tufrepo` and `./tufclient`.
-
-```Bash
-$ repo.py --init
-```
-
-Optionally, the repository can be written to a specified location.
-```Bash
-$ repo.py --init --path </path/to/repo>
-```
-
-The default top-level key files created with `--init` are saved to disk
-encrypted, with a default password of 'pw'. Instead of using the default
-password, the user can enter one on the command line for each top-level role.
-These optional command-line options also work with other CLI actions (e.g.,
-repo.py --add).
-```Bash
-$ repo.py --init [--targets_pw, --root_pw, --snapshot_pw, --timestamp_pw]
-```
-
-
-Create a bare TUF repository in the current working directory. A cryptographic
-key is *not* created nor set for each top-level role.
-```Bash
-$ repo.py --init --bare
-```
-
-
-Create a TUF repository with [consistent
-snapshots](https://github.com/theupdateframework/specification/blob/master/tuf-spec.md#7-consistent-snapshots)
-enabled, where target filenames have their hash prepended (e.g.,
-`<hash>.README.txt`), and metadata filenames have their version numbers
-prepended (e.g., `<version_number>.snapshot.json`).
-```Bash
-$ repo.py --init --consistent
-```
-
-
-## Add a target file ##
-
-Copy a target file to the repo and add it to the Targets metadata (or the
-Targets role specified in --role). More than one target file, or directory,
-may be specified in --add. The --recursive option may be toggled to also
-include files in subdirectories of a specified directory. The Snapshot
-and Timestamp metadata are also updated and signed automatically, but this
-behavior can be toggled off with --no_release.
-```Bash
-$ repo.py --add <foo.tar.gz>
-$ repo.py --add <dir> [--recursive]
-```
-
-Similar to the --init case, the repository location can be chosen.
-```Bash
-$ repo.py --add <foo.tar.gz> --path </path/to/repo>
-```
-
-
-## Remove a target file ##
-
-Remove a target file from the Targets metadata (or the Targets role specified
-in --role). More than one target file or glob pattern may be specified in
---remove. The Snapshot and Timestamp metadata are also updated and signed
-automatically, but this behavior can be toggled off with --no_release.
-
-```Bash
-$ repo.py --remove <glob pattern> ...
-```
-
-Examples:
-
-Remove all target files that match `foo*.tgz` from the Targets metadata.
-```Bash
-$ repo.py --remove "foo*.tgz"
-```
-
-Remove all target files from the `my_role` metadata.
-```Bash
-$ repo.py --remove "*" --role my_role --sign tufkeystore/my_role_key
-```
-
-
-## Generate key ##
-Generate a cryptographic key. The generated key can later be used to sign
-specific metadata with `--sign`. The supported key types are: `ecdsa`,
-`ed25519`, and `rsa`. If a keytype is not given, an Ed25519 key is generated.
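-
-As an aside, the CLI's key generation is built on the same helpers that the
-[Advanced Tutorial](TUTORIAL.md) uses from `tuf.repository_tool`. A minimal
-Python sketch of the equivalent call (assuming that API; the filename and
-password here are illustrative):
-```python
->>> from tuf.repository_tool import generate_and_write_ed25519_keypair
-
-# Writes the encrypted private key to "mykey" and the public key to
-# "mykey.pub" in the current working directory, much like
-# `repo.py --key ed25519 --filename mykey --pw my_password`.
->>> generate_and_write_ed25519_keypair(password='my_password', filepath='mykey')
-```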
-
-If adding a top-level key to a bare repo (i.e., repo.py --init --bare),
-the filenames of the top-level keys must be "root_key," "targets_key,"
-"snapshot_key," and "timestamp_key." The filename can vary for any additional
-top-level key.
-```Bash
-$ repo.py --key
-$ repo.py --key <keytype>
-$ repo.py --key <keytype> [--path </path/to/repo> --pw [my_password],
-  --filename <key_filename>]
-```
-
-Instead of using a default password, the user can enter one on the command
-line or be prompted for it via password masking.
-```Bash
-$ repo.py --key ecdsa --pw my_password
-```
-
-```Bash
-$ repo.py --key rsa --pw
-Enter a password for the RSA key (...):
-Confirm:
-```
-
-
-## Sign metadata ##
-Sign, with the specified key(s), the metadata of the role indicated in --role.
-The Snapshot and Timestamp roles are also automatically signed, if possible,
-but this behavior can be disabled with --no_release.
-```Bash
-$ repo.py --sign </path/to/key> ... [--role <rolename>, --path </path/to/repo>]
-```
-
-For example, to sign the delegated `foo` metadata:
-```Bash
-$ repo.py --sign </path/to/foo_key> --role foo
-```
-
-
-## Trust keys ##
-
-The Root role specifies the trusted keys of the top-level roles, including
-itself. The --trust command-line option, in conjunction with --pubkeys and
---role, can be used to indicate the trusted keys of a role.
-
-```Bash
-$ repo.py --trust --pubkeys </path/to/key.pub> --role <rolename>
-```
-
-For example:
-```Bash
-$ repo.py --init --bare
-$ repo.py --trust --pubkeys tufkeystore/my_key.pub tufkeystore/my_key_too.pub
-  --role root
-```
-
-
-### Distrust keys ###
-
-Conversely, the Root role can discontinue trust of specified key(s).
-
-Example of how to discontinue trust of a key:
-```Bash
-$ repo.py --distrust --pubkeys tufkeystore/my_key_too.pub --role root
-```
-
-
-## Delegations ##
-
-Delegate trust of target files from the Targets role (or the one specified in
---role) to some other role (--delegatee). --delegatee is trusted to sign for
-target files that match the delegated glob pattern(s). The --delegate option
-does not create metadata for the delegated role, rather it updates the
-delegator's metadata to list the delegation to --delegatee. The Snapshot and
-Timestamp metadata are also updated and signed automatically, but this behavior
-can be toggled off with --no_release.
-
-```Bash
-$ repo.py --delegate <glob pattern> ... --delegatee <rolename> --pubkeys
-  </path/to/pubkey> ... [--role <rolename> --terminating --threshold <X>
-  --sign </path/to/role_privkey>]
-```
-
-For example, to delegate trust of `foo*.tgz` packages to the `foo` role:
-
-```Bash
-$ repo.py --delegate "foo*.tgz" --delegatee foo --pubkeys tufkeystore/foo.pub
-```
-
-
-## Revocations ##
-
-Revoke trust of target files from a delegated role (--delegatee). The
-"targets" role performs the revocation if --role is not specified. The
---revoke option does not delete the metadata belonging to --delegatee, instead
-it removes the delegation to it from the delegator's (or --role) metadata. The
-Snapshot and Timestamp metadata are also updated and signed automatically, but
-this behavior can be toggled off with --no_release.
-
-```Bash
-$ repo.py --revoke --delegatee <rolename> [--role <rolename>
-  --sign </path/to/role_privkey>]
-```
-
-
-## Verbosity ##
-
-Set the verbosity of the logger (2, by default). The lower the number, the
-greater the verbosity. Logger messages are saved to `tuf.log` in the current
-working directory.
-```Bash
-$ repo.py --verbose <0-5>
-```
-
-
-## Clean ##
-
-Delete the repo in the current working directory, or the one specified with
-`--path`. Specifically, the `tufrepo`, `tufclient`, and `tufkeystore`
-directories are deleted.
-
-```Bash
-$ repo.py --clean
-$ repo.py --clean --path </path/to/dirty/repo>
-```
-----
-
-
-# Further Examples #
-
-## Basic Update Delivery ##
-
-Steps:
-
-(1) initialize a repo.
-
-(2) delegate trust of target files to another role.
-
-(3) add a trusted file to the delegated role.
-
-(4) fetch the trusted file from the delegated role.
-
-```Bash
-Step (1)
-$ repo.py --init
-
-Step (2)
-$ repo.py --key ed25519 --filename mykey
-$ repo.py --delegate "README.*" --delegatee myrole --pubkeys tufkeystore/mykey.pub
-$ repo.py --sign tufkeystore/mykey --role myrole
-Enter a password for the encrypted key (tufkeystore/mykey):
-$ echo "my readme text" > README.txt
-
-Step (3)
-$ repo.py --add README.txt --role myrole --sign tufkeystore/mykey
-Enter a password for the encrypted key (tufkeystore/mykey):
-```
-
-Serve the repo
-```Bash
-$ python3 -m http.server 8001
-```
-
-```Bash
-Step (4)
-$ client.py --repo http://localhost:8001 README.txt
-$ tree .
-.
-├── tuf.log
-├── tufrepo
-│   └── metadata
-│       ├── current
-│       │   ├── 1.root.json
-│       │   ├── myrole.json
-│       │   ├── root.json
-│       │   ├── snapshot.json
-│       │   ├── targets.json
-│       │   └── timestamp.json
-│       └── previous
-│           ├── 1.root.json
-│           ├── root.json
-│           ├── snapshot.json
-│           ├── targets.json
-│           └── timestamp.json
-└── tuftargets
-    └── README.txt
-
-5 directories, 13 files
-```
-
-
-## Correcting a Key ##
-The filenames of the top-level keys must be "root_key," "targets_key,"
-"snapshot_key," and "timestamp_key." The filename can vary for any additional
-top-level key.
-
-Steps:
-
-(1) initialize a repo containing default keys for the top-level roles.
-(2) distrust the default key for the root role.
-(3) create a new key and trust its use with the root role.
-(4) sign the root metadata file.
-
-```Bash
-Step (1)
-$ repo.py --init
-
-Step (2)
-$ repo.py --distrust --pubkeys tufkeystore/root_key.pub --role root
-
-Step (3)
-$ repo.py --key ed25519 --filename root_key
-$ repo.py --trust --pubkeys tufkeystore/root_key.pub --role root
-
-Step (4)
-$ repo.py --sign tufkeystore/root_key --role root
-Enter a password for the encrypted key (tufkeystore/root_key):
-```
-
-
-## More Update Delivery ##
-
-Steps:
-
-(1) create a bare repo.
-
-(2) add keys to the top-level roles.
-
-(3) delegate trust of particular target files to another role X, where role X
-has a signature threshold of 2 and is marked as a terminating delegation. The
-keys for roles X and Y should be created prior to performing the delegation.
-
-(4) delegate from role X to role Y.
-
-(5) have role X sign for a file also signed by the Targets role, to demonstrate
-the expected file that should be downloaded by the client.
-
-(6) perform an update.
-
-(7) halt the server, add README.txt to the Targets role, restart the server,
-and fetch the Targets role's README.txt.
-
-(8) add LICENSE to 'role_y' and demonstrate that the client must not fetch it
-because 'role_x' is a terminating delegation (and hasn't signed for it).
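-
-Before the CLI session, note that the delegation performed in steps (3) and
-(4) corresponds roughly to the following `tuf.repository_tool` calls (a
-sketch, not part of the session below; it assumes an already-loaded
-`repository` object and imported public key objects with these illustrative
-names):
-```python
-# Step (3): delegate "README.*" and "LICENSE" from the top-level Targets role
-# to role_x. threshold=2 requires both keys to sign role_x's metadata, and
-# terminating=True stops the search for matching targets at role_x.
->>> repository.targets.delegate('role_x', [public_key_x, public_key_x2],
-...     ['README.*', 'LICENSE'], threshold=2, terminating=True)
-
-# Step (4): role_x further delegates the same patterns to role_y.
->>> repository.targets('role_x').delegate('role_y', [public_key_y],
-...     ['README.*', 'LICENSE'])
-```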
-
-```Bash
-Steps (1) and (2)
-$ repo.py --init --consistent --bare
-$ repo.py --key ed25519 --filename root_key
-$ repo.py --trust --pubkeys tufkeystore/root_key.pub --role root
-$ repo.py --key ecdsa --filename targets_key
-$ repo.py --trust --pubkeys tufkeystore/targets_key.pub --role targets
-$ repo.py --key rsa --filename snapshot_key
-$ repo.py --trust --pubkeys tufkeystore/snapshot_key.pub --role snapshot
-$ repo.py --key ecdsa --filename timestamp_key
-$ repo.py --trust --pubkeys tufkeystore/timestamp_key.pub --role timestamp
-$ repo.py --sign tufkeystore/root_key --role root
-Enter a password for the encrypted key (tufkeystore/root_key):
-$ repo.py --sign tufkeystore/targets_key --role targets
-Enter a password for the encrypted key (tufkeystore/targets_key):
-```
-
-```Bash
-Steps (3) and (4)
-$ repo.py --key ed25519 --filename key_x
-$ repo.py --key ed25519 --filename key_x2
-
-$ repo.py --delegate "README.*" "LICENSE" --delegatee role_x --pubkeys
-  tufkeystore/key_x.pub tufkeystore/key_x2.pub --threshold 2 --terminating
-$ repo.py --sign tufkeystore/key_x tufkeystore/key_x2 --role role_x
-
-$ repo.py --key ed25519 --filename key_y
-
-$ repo.py --delegate "README.*" "LICENSE" --delegatee role_y --role role_x
-  --pubkeys tufkeystore/key_y.pub --sign tufkeystore/key_x tufkeystore/key_x2
-
-$ repo.py --sign tufkeystore/key_y --role role_y
-```
-
-```Bash
-Steps (5) and (6)
-$ echo "role_x's readme" > README.txt
-$ repo.py --add README.txt --role role_x --sign tufkeystore/key_x tufkeystore/key_x2
-```
-
-Serve the repo
-```Bash
-$ python3 -m http.server 8001
-```
-
-Fetch role_x's README.txt
-```Bash
-$ client.py --repo http://localhost:8001 README.txt
-$ cat tuftargets/README.txt
-role_x's readme
-```
-
-
-```Bash
-Step (7)
-halt the server...
-
-$ echo "Target role's readme" > README.txt
-$ repo.py --add README.txt
-
-restart the server...
-```
-
-```Bash
-$ rm -rf tuftargets/ tuf.log
-$ client.py --repo http://localhost:8001 README.txt
-$ cat tuftargets/README.txt
-Target role's readme
-```
-
-```Bash
-Step (8)
-$ echo "role_y's license" > LICENSE
-$ repo.py --add LICENSE --role role_y --sign tufkeystore/key_y
-```
-
-```Bash
-$ rm -rf tuftargets/ tuf.log
-$ client.py --repo http://localhost:8001 LICENSE
-Error: 'LICENSE' not found.
-```
diff --git a/docs/GETTING_STARTED.rst b/docs/GETTING_STARTED.rst
deleted file mode 100644
index f958373975..0000000000
--- a/docs/GETTING_STARTED.rst
+++ /dev/null
@@ -1,10 +0,0 @@
-Getting Started
----------------
-
-- `Overview of TUF <OVERVIEW.rst>`_
-- `Installation <INSTALLATION.rst>`_
-- Beginner Tutorials (using the basic command-line interface):
-  - `Quickstart <QUICKSTART.md>`_
-  - `CLI Documentation and Examples <CLI.md>`_
-- `Advanced Tutorial <TUTORIAL.md>`_
-- `Guidelines for Contributors <CONTRIBUTORS.rst>`_
diff --git a/docs/INSTALLATION.rst b/docs/INSTALLATION.rst
index 6a85122e70..36fae88c7b 100644
--- a/docs/INSTALLATION.rst
+++ b/docs/INSTALLATION.rst
@@ -91,3 +91,5 @@ package manager, among other options::
 
   $ brew install python3
   $ brew install libffi
+
+.. TODO: Revise
diff --git a/docs/QUICKSTART.md b/docs/QUICKSTART.md
deleted file mode 100644
index 6d35fb1d7d..0000000000
--- a/docs/QUICKSTART.md
+++ /dev/null
@@ -1,149 +0,0 @@
-# Quickstart #
-
-In this quickstart tutorial, we'll use the basic TUF command-line interface
-(CLI), which includes the `repo.py` script and the `client.py` script, to set
-up a repository with an update and metadata about that update, then download
-and verify that update as a client.
-
-Unlike the underlying TUF modules that the CLI uses, the CLI itself is a bit
-bare-bones.
Using the CLI is the easiest way to familiarize yourself with -how TUF works, however. It will serve as a very basic update system. - ----- - -**Step (0)** - Make sure TUF is installed. - -Make sure that TUF is installed, along with some of the optional cryptographic -libraries and C extensions. Try this command to do that: -`python3 -m pip install securesystemslib[colors,crypto,pynacl] tuf` - -If you run into errors during that pip command, please consult the more -detailed [TUF Installation Instructions](INSTALLATION.rst). (There are some -system libraries that you may need to install first.) - - -**Step (1)** - Create a basic repository and client. - -The following command will set up a basic update repository and basic client -that knows about the repository. `tufrepo`, `tufkeystore`, and -`tufclient` directories will be created in the current directory. - -```Bash -$ repo.py --init -``` - -Four sets of keys are created in the `tufkeystore` directory. Initial metadata -about the repository is created in the `tufrepo` directory, and also provided -to the client in the `tufclient` directory. - - -**Step (2)** - Add an update to the repository. - -We'll create a target file that will later be delivered as an update to clients. -Metadata about that file will be created and signed, and added to the -repository's metadata. - -```Bash -$ echo 'Test file' > testfile -$ repo.py --add testfile -$ tree tufrepo/ -tufrepo/ -├── metadata -│   ├── 1.root.json -│   ├── root.json -│   ├── snapshot.json -│   ├── targets.json -│   └── timestamp.json -├── metadata.staged -│   ├── 1.root.json -│   ├── root.json -│   ├── snapshot.json -│   ├── targets.json -│   └── timestamp.json -└── targets - └── testfile - - 3 directories, 11 files -``` - -The new file `testfile` is added to the repository, and metadata is updated in -the `tufrepo` directory. The Targets metadata (`targets.json`) now includes -the file size and hashes of the `testfile` target file, and this metadata is -signed by the Targets role's key, so that clients can verify that metadata -about `testfile` and then verify `testfile` itself. - - -**Step (3)** - Serve the repo. - -We'll host a toy http server containing the `testfile` update and the -repository's metadata. - -```Bash -$ cd "tufrepo/" -$ python3 -m http.server 8001 -``` - -**Step (4)** - Obtain and verify the `testfile` update on a client. - -The client can request the package `testfile` from the repository. TUF will -download and verify metadata from the repository as necessary to determine -what the trustworthy hashes and length of `testfile` are, then download -the target `testfile` from the repository and keep it only if it matches that -trustworthy metadata. - -```Bash -$ cd "../tufclient/" -$ client.py --repo http://localhost:8001 testfile -$ tree -. -├── tufrepo -│   └── metadata -│   ├── current -│   │   ├── 1.root.json -│   │   ├── root.json -│   │   ├── snapshot.json -│   │   ├── targets.json -│   │   └── timestamp.json -│   └── previous -│   ├── 1.root.json -│   ├── root.json -│   ├── snapshot.json -│   ├── targets.json -│   └── timestamp.json -└── tuftargets - └── testfile - - 5 directories, 11 files -``` - -Now that a trustworthy update target has been obtained, an updater can proceed -however it normally would to install or use the update. - ----- - -### Next Steps - -TUF provides functionality for both ends of a software update system, the -**update provider** and the **update client**. 
-
-`repo.py` made use of `tuf.repository_tool`'s functionality for an update
-provider, helping you produce and sign metadata about your updates.
-
-`client.py` made use of `tuf.client.updater`'s client-side functionality,
-performing download and the critical verification steps for metadata and the
-update itself.
-
-You can look at [CLI.md](CLI.md) to toy with the TUF CLI a bit more.
-After that, try out using the underlying modules for a great deal more control.
-The more detailed [Advanced Tutorial](TUTORIAL.md) shows you how to use the
-underlying modules, `repository_tool` and `updater`.
-
-Ultimately, a sophisticated update client will use or re-implement those
-underlying modules. The TUF design is intended to play well with any update
-workflow.
-
-Please provide feedback or questions for this or other tutorials, or
-TUF in general, by checking out
-[our contact info](https://github.com/theupdateframework/python-tuf#contact), or
-creating [issues](https://github.com/theupdateframework/python-tuf/issues) in this
-repository!
diff --git a/docs/TUTORIAL.md b/docs/TUTORIAL.md
deleted file mode 100644
index d8659e7213..0000000000
--- a/docs/TUTORIAL.md
+++ /dev/null
@@ -1,696 +0,0 @@
-# Advanced Tutorial #
-
-## Table of Contents ##
-- [How to Create and Modify a TUF Repository](#how-to-create-and-modify-a-tuf-repository)
-  - [Overview](#overview)
-  - [Keys](#keys)
-    - [Create RSA Keys](#create-rsa-keys)
-    - [Import RSA Keys](#import-rsa-keys)
-    - [Create and Import Ed25519 Keys](#create-and-import-ed25519-keys)
-  - [Create Top-level Metadata](#create-top-level-metadata)
-    - [Create Root](#create-root)
-    - [Create Timestamp, Snapshot, Targets](#create-timestamp-snapshot-targets)
-  - [Targets](#targets)
-    - [Add Target Files](#add-target-files)
-    - [Remove Target Files](#remove-target-files)
-  - [Delegations](#delegations)
-    - [Revoke Delegated Role](#revoke-delegated-role)
-  - [Wrap-up](#wrap-up)
-- [Delegate to Hashed Bins](#delegate-to-hashed-bins)
-- [Consistent Snapshots](#consistent-snapshots)
-- [How to Perform an Update](#how-to-perform-an-update)
-
-## How to Create and Modify a TUF Repository ##
-
-### Overview ###
-A software update system must follow two steps to integrate The Update
-Framework (TUF). First, it must add the framework to the client side of the
-update system. The [tuf.client.updater](../tuf/client/README.md) module assists in
-integrating TUF on the client side. Second, the software repository on the
-server side must be modified to include a minimum of four top-level metadata
-files (root.json, targets.json, snapshot.json, and timestamp.json). No
-additional software is required to convert a software repository to a TUF one.
-The low-level repository tool that generates the required TUF metadata for a
-software repository is the focus of this tutorial. There is also a separate
-document that [demonstrates how TUF protects against malicious
-updates](../tuf/ATTACKS.md).
-
-The [repository tool](../tuf/repository_tool.py) contains functions to generate
-all of the files needed to populate and manage a TUF repository. The tool may
-either be imported into a Python module, or used with the Python interpreter in
-interactive mode.
-
-A repository object that encapsulates the metadata files of the repository can
-be created or loaded by the repository tool. Repository maintainers can modify
-the repository object to manipulate the metadata files stored on the
-repository. TUF clients use the metadata files to validate files requested and
-downloaded.
-In addition to the repository object, where the majority of changes are made,
-the repository tool provides functions to generate and persist cryptographic
-keys. The framework utilizes cryptographic keys to sign and verify metadata
-files.
-
-To begin, cryptographic keys are generated with the repository tool. However,
-before metadata files can be validated by clients and target files fetched in a
-secure manner, public keys must be pinned to particular metadata roles and
-metadata signed by the roles' private keys. After covering keys, the four
-required top-level metadata files are created next. Examples are given
-demonstrating the expected workflow, where the metadata roles are created in a
-specific order, keys imported and loaded, and metadata signed and written to
-disk. Lastly, target files are added to the repository, and a custom
-delegation performed to extend the default roles of the repository. By the
-end, a fully populated TUF repository is generated that can be used by clients
-to securely download updates.
-
-### Keys ###
-The repository tool supports multiple public-key algorithms, such as
-[RSA](https://en.wikipedia.org/wiki/RSA_%28cryptosystem%29) and
-[Ed25519](https://ed25519.cr.yp.to/), and multiple cryptography libraries.
-
-Using [RSA-PSS](https://tools.ietf.org/html/rfc8017#section-8.1) or
-[ECDSA](https://en.wikipedia.org/wiki/Elliptic_Curve_Digital_Signature_Algorithm)
-signatures requires the [cryptography](https://cryptography.io/) library. If
-generation of Ed25519 signatures is needed, the
-[PyNaCl](https://github.com/pyca/pynacl) library should be installed. This
-tutorial assumes both dependencies are installed: refer to
-[Installation Instructions](INSTALLATION.rst#install-with-more-cryptographic-flexibility)
-for details.
-
-Ed25519 and ECDSA keys are stored in JSON format, and RSA keys are stored in
-PEM format. Private keys are encrypted and passphrase-protected (strengthened
-with PBKDF2-HMAC-SHA256). Generating, importing, and loading cryptographic key
-files can be done with functions available in the repository tool.
-
-To start, a public and private RSA key pair is generated with the
-`generate_and_write_rsa_keypair()` function. The keys generated next are
-needed to sign the repository metadata files created in upcoming sub-sections.
-
-Note: In the instructions below, lines that start with `>>>` denote commands
-that should be entered by the reader, `#` begins the start of a comment, and
-text without prepended symbols is the output of a command.
-
-#### Create RSA Keys ####
-```python
->>> from tuf.repository_tool import *
-
-# Generate and write the first of two root keys for the TUF repository. The
-# following function creates an RSA key pair, where the private key is saved to
-# "root_key" and the public key to "root_key.pub" (both saved to the current
-# working directory).
->>> generate_and_write_rsa_keypair(password="password", filepath="root_key", bits=2048)
-
-# If the key length is unspecified, it defaults to 3072 bits. A length of less
-# than 2048 bits raises an exception. A similar function is available to supply
-# a password at the prompt. If an empty password is entered, the private key
-# is saved unencrypted.
->>> generate_and_write_rsa_keypair_with_prompt(filepath="root_key2")
-enter password to encrypt private key file '/path/to/root_key2'
-(leave empty if key should not be encrypted):
-Confirm:
-```
-The following four key files should now exist:
-
-1. **root_key**
-2. **root_key.pub**
-3. **root_key2**
-4. **root_key2.pub**
-
-If a filepath is not given, the KEYID of the generated key is used as the
-filename. The key files are written to the current working directory.
-```python
-# Continuing from the previous section . . .
->>> generate_and_write_rsa_keypair_with_prompt()
-enter password to encrypt private key file '/path/to/KEYID'
-(leave empty if key should not be encrypted):
-Confirm:
-```
-
-### Import RSA Keys ###
-```python
-# Continuing from the previous section . . .
-
-# Import an existing public key.
->>> public_root_key = import_rsa_publickey_from_file("root_key.pub")
-
-# Import an existing private key. Importing a private key requires a password,
-# whereas importing a public key does not.
->>> private_root_key = import_rsa_privatekey_from_file("root_key")
-enter password to decrypt private key file '/path/to/root_key'
-(leave empty if key not encrypted):
-```
-
-### Create and Import Ed25519 Keys ###
-```Python
-# Continuing from the previous section . . .
-
-# The same generation and import functions as for RSA keys exist for Ed25519.
->>> generate_and_write_ed25519_keypair_with_prompt(filepath='ed25519_key')
-enter password to encrypt private key file '/path/to/ed25519_key'
-(leave empty if key should not be encrypted):
-Confirm:
-
-# Import the ed25519 public key just created . . .
->>> public_ed25519_key = import_ed25519_publickey_from_file('ed25519_key.pub')
-
-# and its corresponding private key.
->>> private_ed25519_key = import_ed25519_privatekey_from_file('ed25519_key')
-enter password to decrypt private key file '/path/to/ed25519_key'
-(leave empty if key should not be encrypted):
-```
-
-Note: Methods are also available to generate keys in memory and to import them
-from PEM strings:
-* generate_ed25519_key()
-* generate_ecdsa_key()
-* generate_rsa_key()
-
-* import_ecdsakey_from_pem(pem)
-* import_rsakey_from_pem(pem)
-
-### Create Top-level Metadata ###
-The [metadata document](METADATA.md) outlines the JSON files that must exist
-on a TUF repository. The following sub-sections demonstrate the
-`repository_tool.py` calls repository maintainers may issue to generate the
-required roles. The top-level roles to be created are `root`, `timestamp`,
-`snapshot`, and `targets`.
-
-We begin with `root`, the locus of trust that specifies the public keys of the
-top-level roles, including itself.
-
-
-#### Create Root ####
-```python
-# Continuing from the previous section . . .
-
-# Create a new Repository object that holds the file path to the TUF repository
-# and the four top-level role objects (Root, Targets, Snapshot, Timestamp).
-# Metadata files are created when repository.writeall() or repository.write()
-# are called. The repository directory is created if it does not exist. You
-# may see log messages indicating any directories created.
->>> repository = create_new_repository("repository")
-
-# The Repository instance, 'repository', initially contains top-level Metadata
-# objects. Add one of the public keys, created in the previous section, to the
-# root role. Metadata is considered valid if it is signed by the public key's
-# corresponding private key.
->>> repository.root.add_verification_key(public_root_key)
-
-# A role's verification key(s) (to be more precise, the verification key's
-# keyid) may be queried. Other attributes include: signing_keys, version,
-# signatures, expiration, threshold, and delegations (attribute available only
-# to a Targets role).
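-# (Illustrative aside, assuming the same API: each attribute can be read the
-# same way, e.g. "repository.root.threshold" or "repository.root.expiration",
-# and returns the role's current value.)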
->>> repository.root.keys
-['b23514431a53676595922e955c2d547293da4a7917e3ca243a175e72bbf718df']
-
-# Add a second public key to the root role. Although previously generated and
-# saved to a file, the second public key must be imported before it can be
-# added to a role.
->>> public_root_key2 = import_rsa_publickey_from_file("root_key2.pub")
->>> repository.root.add_verification_key(public_root_key2)
-
-# The threshold of each role defaults to 1. Maintainers may change the
-# threshold value, but repository_tool.py validates thresholds and warns users.
-# Set the threshold of the root role to 2, which means the root metadata file
-# is considered valid if it's signed by at least two valid keys. We also
-# import the second private key.
->>> repository.root.threshold = 2
->>> private_root_key2 = import_rsa_privatekey_from_file("root_key2", password="password")
-
-# Load the root signing keys to the repository, which writeall() or write()
-# (write multiple roles, or a single role, to disk) use to sign the root
-# metadata.
->>> repository.root.load_signing_key(private_root_key)
->>> repository.root.load_signing_key(private_root_key2)
-
-# repository.status() shows missing verification and signing keys for the
-# top-level roles, and whether signatures can be created (also see #955).
-# This output shows that so far only the "root" role meets the key threshold and
-# can successfully sign its metadata.
->>> repository.status()
-'targets' role contains 0 / 1 public keys.
-'snapshot' role contains 0 / 1 public keys.
-'timestamp' role contains 0 / 1 public keys.
-'root' role contains 2 / 2 signatures.
-'targets' role contains 0 / 1 signatures.
-
-# In the next section we update the other top-level roles and create a repository
-# with valid metadata.
-```
-
-#### Create Timestamp, Snapshot, Targets
-Now that `root.json` has been set, the other top-level roles may be created.
-The signing keys added to these roles must correspond to the public keys
-specified by the Root role.
-
-On the client side, `root.json` must always exist. The other top-level roles,
-created next, are requested by repository clients in (Root -> Timestamp ->
-Snapshot -> Targets) order to ensure required metadata is downloaded in a
-secure manner.
-
-```python
-# Continuing from the previous section . . .
-
-# 'datetime' module needed to optionally set a role's expiration.
->>> import datetime
-
-# Generate keys for the remaining top-level roles. The root keys have been set above.
->>> generate_and_write_rsa_keypair(password='password', filepath='targets_key')
->>> generate_and_write_rsa_keypair(password='password', filepath='snapshot_key')
->>> generate_and_write_rsa_keypair(password='password', filepath='timestamp_key')
-
-# Add the verification keys of the remaining top-level roles.
-
->>> repository.targets.add_verification_key(import_rsa_publickey_from_file('targets_key.pub'))
->>> repository.snapshot.add_verification_key(import_rsa_publickey_from_file('snapshot_key.pub'))
->>> repository.timestamp.add_verification_key(import_rsa_publickey_from_file('timestamp_key.pub'))
-
-# Import the signing keys of the remaining top-level roles.
->>> private_targets_key = import_rsa_privatekey_from_file('targets_key', password='password') ->>> private_snapshot_key = import_rsa_privatekey_from_file('snapshot_key', password='password') ->>> private_timestamp_key = import_rsa_privatekey_from_file('timestamp_key', password='password') - -# Load the signing keys of the remaining roles so that valid signatures are -# generated when repository.writeall() is called. ->>> repository.targets.load_signing_key(private_targets_key) ->>> repository.snapshot.load_signing_key(private_snapshot_key) ->>> repository.timestamp.load_signing_key(private_timestamp_key) - -# Optionally set the expiration date of the timestamp role. By default, roles -# are set to expire as follows: root(1 year), targets(3 months), snapshot(1 -# week), timestamp(1 day). ->>> repository.timestamp.expiration = datetime.datetime(2080, 10, 28, 12, 8) - -# Mark roles for metadata update (see #964, #958) ->>> repository.mark_dirty(['root', 'snapshot', 'targets', 'timestamp']) - -# Write all metadata to "repository/metadata.staged/" ->>> repository.writeall() -``` - -### Targets ### -TUF makes it possible for clients to validate downloaded target files by -including a target file's length, hash(es), and filepath in metadata. The -filepaths are relative to a `targets/` directory on the software repository. A -TUF client can download a target file by first updating the latest copy of -metadata (and thus available targets), verifying that their length and hashes -are valid, and saving the target file(s) locally to complete the update -process. - -In this section, the target files intended for clients are added to a -repository and listed in `targets.json` metadata. - -#### Add Target Files #### - -The repository maintainer adds target files to roles (e.g., `targets` and -`unclaimed`) by specifying their filepaths. The target files must exist at the -specified filepaths before the repository tool can generate and add their -(hash(es), length, and filepath) to metadata. - -First, the actual target files are manually created and saved to the `targets/` -directory of the repository: - -```Bash -# Create and save target files to the targets directory of the software -# repository. -$ cd repository/targets/ -$ echo 'file1' > file1.txt -$ echo 'file2' > file2.txt -$ echo 'file3' > file3.txt -$ mkdir myproject; echo 'file4' > myproject/file4.txt -$ cd ../../ -``` - -With the target files available on the `targets/` directory of the software -repository, the `add_targets()` method of a Targets role can be called to add -the target filepaths to metadata. - -```python -# Continuing from the previous section . . . - -# NOTE: If you exited the Python interactive interpreter above you need to -# re-import the repository_tool-functions and re-load the repository and -# signing keys. ->>> from tuf.repository_tool import * - -# The 'os' module is needed to gather file attributes, which will be included -# in a custom field for some of the target files added to metadata. ->>> import os - -# Load the repository created in the previous section. This repository so far -# contains metadata for the top-level roles, but no target paths are yet listed -# in targets metadata. ->>> repository = load_repository('repository') - -# Create a list of all targets in the directory. ->>> list_of_targets = ['file1.txt', 'file2.txt', 'file3.txt'] - -# Add the list of target paths to the metadata of the top-level Targets role. 
-# Any target file paths that might already exist are NOT replaced, and -# add_targets() does not create or move target files on the file system. Any -# target paths added to a role must fall under the expected targets directory, -# otherwise an exception is raised. The targets added to a role should actually -# exist once writeall() or write() is called, so that the hash and size of -# these targets can be included in Targets metadata. ->>> repository.targets.add_targets(list_of_targets) - -# Individual target files may also be added to roles, including custom data -# about the target. In the example below, file permissions of the target -# (octal number specifying file access for owner, group, others e.g., 0755) is -# added alongside the default fileinfo. All target objects in metadata include -# the target's filepath, hash, and length. -# Note: target path passed to add_target() method has to be relative -# to the targets directory or an exception is raised. ->>> target4_filepath = 'myproject/file4.txt' ->>> target4_abspath = os.path.abspath(os.path.join('repository', 'targets', target4_filepath)) ->>> octal_file_permissions = oct(os.stat(target4_abspath).st_mode)[4:] ->>> custom_file_permissions = {'file_permissions': octal_file_permissions} ->>> repository.targets.add_target(target4_filepath, custom_file_permissions) -``` - -The private keys of roles affected by the changes above must now be imported and -loaded. `targets.json` must be signed because a target file was added to its -metadata. `snapshot.json` keys must be loaded and its metadata signed because -`targets.json` has changed. Similarly, since `snapshot.json` has changed, the -`timestamp.json` role must also be signed. - -```Python -# Continuing from the previous section . . . - -# The private key of the updated targets metadata must be re-loaded before it -# can be signed and written (Note the load_repository() call above). ->>> private_targets_key = import_rsa_privatekey_from_file('targets_key') -enter password to decrypt private key file '/path/to/targets_key' -(leave empty if key not encrypted): - ->>> repository.targets.load_signing_key(private_targets_key) - -# Due to the load_repository() and new versions of metadata, we must also load -# the private keys of Snapshot and Timestamp to generate a valid set of metadata. ->>> private_snapshot_key = import_rsa_privatekey_from_file('snapshot_key') -enter password to decrypt private key file '/path/to/snapshot_key' -(leave empty if key not encrypted): ->>> repository.snapshot.load_signing_key(private_snapshot_key) - ->>> private_timestamp_key = import_rsa_privatekey_from_file('timestamp_key') -enter password to decrypt private key file '/path/to/timestamp_key' -(leave empty if key not encrypted): ->>> repository.timestamp.load_signing_key(private_timestamp_key) - -# Mark roles for metadata update (see #964, #958) ->>> repository.mark_dirty(['snapshot', 'targets', 'timestamp']) - -# Generate new versions of the modified top-level metadata (targets, snapshot, -# and timestamp). ->>> repository.writeall() -``` - -#### Remove Target Files #### - -Target files previously added to roles may also be removed. Removing a target -file requires first removing the target from a role and then writing the -new metadata to disk. -```python -# Continuing from the previous section . . . - -# Remove a target file listed in the "targets" metadata. The target file is -# not actually deleted from the file system. 
->>> repository.targets.remove_target('myproject/file4.txt') - -# Mark roles for metadata update (see #964, #958) ->>> repository.mark_dirty(['snapshot', 'targets', 'timestamp']) - ->>> repository.writeall() -``` - -#### Excursion: Dump Metadata and Append Signature #### - -The following two functions are intended for those that wish to independently -sign metadata. Repository maintainers can dump the portion of metadata that is -normally signed, sign it with an external signing tool, and append the -signature to already existing metadata. - -First, the signable portion of metadata can be generated as follows: - -```Python ->>> signable_content = dump_signable_metadata('repository/metadata.staged/timestamp.json') -``` - -Then, use a tool like securesystemslib to create a signature over the signable -portion. *Note, to make the signing key count towards the role's signature -threshold, it needs to be added to `root.json`, e.g. via -`repository.timestamp.add_verification_key(key)` (not shown in below snippet).* -```python ->>> from securesystemslib.formats import encode_canonical ->>> from securesystemslib.keys import create_signature ->>> private_ed25519_key = import_ed25519_privatekey_from_file('ed25519_key') -enter password to decrypt private key file '/path/to/ed25519_key' ->>> signature = create_signature( -... private_ed25519_key, encode_canonical(signable_content).encode()) -``` - -Finally, append the signature to the metadata -```Python ->>> append_signature(signature, 'repository/metadata.staged/timestamp.json') -``` - -Note that the format of the signature is the format expected in metadata, which -is a dictionary that contains a KEYID, the signature itself, etc. See the -specification and [METADATA.md](METADATA.md) for a detailed example. - -### Delegations ### -All of the target files available on the software repository created so far -have been added to one role (the top-level Targets role). However, what if -multiple developers are responsible for the files of a project? What if -responsibility separation is desired? Performing a delegation, where one role -delegates trust of some paths to another role, is an option for integrators -that require additional roles on top of the top-level roles available by -default. - -In the next sub-section, the `unclaimed` role is delegated from the top-level -`targets` role. The `targets` role specifies the delegated role's public keys, -the paths it is trusted to provide, and its role name. - -```python -# Continuing from the previous section . . . - -# Generate a key for a new delegated role named "unclaimed". ->>> generate_and_write_rsa_keypair(password='password', filepath='unclaimed_key', bits=2048) ->>> public_unclaimed_key = import_rsa_publickey_from_file('unclaimed_key.pub') - -# Make a delegation (delegate trust of 'myproject/*.txt' files) from "targets" -# to "unclaimed", where "unclaimed" initially contains zero targets. ->>> repository.targets.delegate('unclaimed', [public_unclaimed_key], ['myproject/*.txt']) - -# Thereafter, we can access the delegated role by its name to e.g. add target -# files, just like we did with the top-level targets role. ->>> repository.targets("unclaimed").add_target("myproject/file4.txt") - -# Load the private key of "unclaimed" so that unclaimed's metadata can be -# signed, and valid metadata created. 
->>> private_unclaimed_key = import_rsa_privatekey_from_file('unclaimed_key', password='password')
-
->>> repository.targets("unclaimed").load_signing_key(private_unclaimed_key)
-
-# Mark roles for metadata update (see #964, #958)
->>> repository.mark_dirty(['snapshot', 'targets', 'timestamp', 'unclaimed'])
-
->>> repository.writeall()
-```
-
-
-#### Wrap-up ####
-
-In summary, the five steps a repository maintainer follows to create a TUF
-repository are:
-
-1. Create a directory for the software repository that holds the TUF metadata and the target files.
-2. Create top-level roles (`root.json`, `snapshot.json`, `targets.json`, and `timestamp.json`).
-3. Add target files to the `targets` role.
-4. Optionally, create delegated roles to distribute target files.
-5. Write the changes.
-
-The repository tool saves repository changes to a `metadata.staged` directory.
-Repository maintainers may push finalized changes to the "live" repository by
-copying the staged directory to its destination.
-```Bash
-# Copy the staged metadata directory changes to the live repository.
-$ cp -r "repository/metadata.staged/" "repository/metadata/"
-```
-
-## Consistent Snapshots ##
-The basic TUF repository we have generated above is adequate for repositories
-that have some way of guaranteeing consistency of repository data. A community
-software repository is one example where consistency of files and metadata can
-become an issue. Repositories of this kind are continually updated by multiple
-maintainers and software authors uploading their packages, increasing the
-likelihood that a client downloading version X of a release unexpectedly
-requests the target files of a version Y just released.
-
-To guarantee consistency of metadata and target files, a repository may
-optionally support multiple versions of `snapshot.json` simultaneously, where a
-client with version 1 of `snapshot.json` can download `target_file.zip` and
-another client with version 2 of `snapshot.json` can also download a different
-`target_file.zip` (same file name, but different file digest). If the
-`consistent_snapshot` parameter of writeall() or write() is `True`, metadata
-and target file names on the file system have their digests prepended (note:
-target file names specified in metadata do not contain digests in their names).
-
-The repository maintainer is responsible for deciding how long multiple
-versions of metadata and target files remain available on a repository.
-Generating consistent metadata and target files on the repository is enabled by
-setting the `consistent_snapshot` argument of `writeall()` or `write()`. Note
-that changing the consistent_snapshot setting involves writing a new version of
-root.
-
-
-## Delegate to Hashed Bins ##
-Why use hashed bin delegations?
-
-For software update systems with a large number of target files, delegating to
-hashed bins (a special type of delegated role) might be an easier alternative
-to manually performing the delegations. How many target files should each
-delegated role contain? How will these delegations affect the number of
-metadata files that clients must additionally download in a typical update?
-Hashed bin delegations are available to integrators that would rather not deal
-with the management of delegated roles and a great number of target files.
-
-A large number of target files may be distributed to multiple hashed bins with
-`delegate_hashed_bins()`.
The metadata files of delegated roles will be nearly -equal in size (i.e., target file paths are uniformly distributed by calculating -the target filepath's digest and determining which bin it should reside in.) -The updater client will use "lazy bin walk" (visit and download the minimum -metadata required to find a target) to find a target file's hashed bin -destination. This method is intended for repositories with a large number of -target files, a way of easily distributing and managing the metadata that lists -the targets, and minimizing the number of metadata files (and size) downloaded -by the client. - -The `delegate_hashed_bins()` method has the following form: -```Python -delegate_hashed_bins(list_of_targets, keys_of_hashed_bins, number_of_bins) -``` - -We next provide a complete example of retrieving target paths to add to hashed -bins, performing the hashed bin delegations, signing them, and delegating paths -to some role. - -```Python -# Continuing from the previous section . . . - -# Remove 'myproject/file4.txt' from unclaimed role and instead further delegate -# all targets in myproject/ to hashed bins. ->>> repository.targets('unclaimed').remove_target("myproject/file4.txt") - -# Get a list of target paths for the hashed bins. ->>> targets = ['myproject/file4.txt'] - -# Delegate trust to 32 hashed bin roles. Each role is responsible for the set -# of target files, determined by the path hash prefix. TUF evenly distributes -# hexadecimal ranges over the chosen number of bins (see output). -# To initialize the bins we use one key, which TUF warns us about (see output). -# However, we can assign separate keys to each bin, with the method used in -# previous sections, accessing a bin by its hash prefix range name, e.g.: -# "repository.targets('00-07').add_verification_key('public_00-07_key')". ->>> repository.targets('unclaimed').delegate_hashed_bins( -... targets, [public_unclaimed_key], 32) -Creating hashed bin delegations. -1 total targets. -32 hashed bins. -256 total hash prefixes. -Each bin ranges over 8 hash prefixes. -Adding a verification key that has already been used. [repeated 32x] - -# The hashed bin roles can also be accessed by iterating the "delegations" -# property of the delegating role, which we do here to load the signing key. ->>> for delegation in repository.targets('unclaimed').delegations: -... delegation.load_signing_key(private_unclaimed_key) - -# Mark roles for metadata update (see #964, #958) ->>> repository.mark_dirty(['00-07', '08-0f', '10-17', '18-1f', '20-27', '28-2f', -... '30-37', '38-3f', '40-47', '48-4f', '50-57', '58-5f', '60-67', '68-6f', -... '70-77', '78-7f', '80-87', '88-8f', '90-97', '98-9f', 'a0-a7', 'a8-af', -... 'b0-b7', 'b8-bf', 'c0-c7', 'c8-cf', 'd0-d7', 'd8-df', 'e0-e7', 'e8-ef', -... 'f0-f7', 'f8-ff', 'snapshot', 'timestamp', 'unclaimed']) - ->>> repository.writeall() - -``` - -## How to Perform an Update ## - -The following [repository tool](../tuf/repository_tool.py) function creates a directory -structure that a client downloading new software using TUF (via -[tuf/client/updater.py](../tuf/client/updater.py)) expects. The `root.json` metadata file must exist, and -also the directories that hold the metadata files downloaded from a repository. -Software updaters integrating TUF may use this directory to store TUF updates -saved on the client side. 
- -```python ->>> from tuf.repository_tool import * ->>> create_tuf_client_directory("repository/", "client/tufrepo/") -``` - -`create_tuf_client_directory()` moves metadata from `repository/metadata` to -`client/` in this example. The repository in `repository/` may be the -repository example created earlier in this document. - -## Test TUF Locally ## -Run the local TUF repository server. -```Bash -$ cd "repository/"; python3 -m http.server 8001 -``` - -We next retrieve targets from the TUF repository and save them to `client/`. -The `client.py` script is available to download metadata and files from a -specified repository. In a different command-line prompt, where `tuf` is -installed . . . -```Bash -$ cd "client/" -$ ls -tufrepo/ - -$ client.py --repo http://localhost:8001 file1.txt -$ ls . tuftargets/ -.: -tufrepo tuftargets - -tuftargets/: -file1.txt -``` diff --git a/docs/conf.py b/docs/conf.py index 4577404ba3..7f880e3dbc 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -35,11 +35,6 @@ # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -# This pattern also affects html_static_path and html_extra_path. -exclude_patterns = ['GETTING_STARTED.rst'] - # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for diff --git a/docs/images/repository_tool-diagram.png b/docs/images/repository_tool-diagram.png deleted file mode 100644 index 6bfbdeb0b7..0000000000 Binary files a/docs/images/repository_tool-diagram.png and /dev/null differ diff --git a/pylintrc b/pylintrc deleted file mode 100644 index 402406d245..0000000000 --- a/pylintrc +++ /dev/null @@ -1,426 +0,0 @@ -[MASTER] - -# A comma-separated list of package or module names from where C extensions may -# be loaded. Extensions are loading into the active Python interpreter and may -# run arbitrary code -extension-pkg-whitelist= - -# Add files or directories to the blacklist. They should be base names, not -# paths. -ignore=CVS - -# Add files or directories matching the regex patterns to the blacklist. The -# regex matches against base names, not paths. -ignore-patterns= - -# Python code to execute, usually for sys.path manipulation such as -# pygtk.require(). -#init-hook= - -# Use multiple processes to speed up Pylint. -jobs=1 - -# List of plugins (as comma separated values of python modules names) to load, -# usually to register additional checkers. -load-plugins= - -# Pickle collected data for later comparisons. -persistent=yes - -# Specify a configuration file. -#rcfile= - -# Allow loading of arbitrary C extensions. Extensions are imported into the -# active Python interpreter and may run arbitrary code. -unsafe-load-any-extension=no - - -[MESSAGES CONTROL] - -# Only show warnings with the listed confidence levels. Leave empty to show -# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED -confidence= - -# Disable the message, report, category or checker with the given id(s). You -# can either give multiple identifiers separated by comma (,) or put this -# option multiple times (only on the command line, not in the configuration -# file where it should appear only once).You can also use "--disable=all" to -# disable everything first and then reenable specific checks. 
For example, if -# you want to run only the similarities checker, you can use "--disable=all -# --enable=similarities". If you want to run only the classes checker, but have -# no Warning level messages displayed, use"--disable=all --enable=classes -# --disable=W" -disable=parameter-unpacking, unpacking-in-except, long-suffix, old-ne-operator, old-octal-literal, import-star-module-level, raw-checker-failed, bad-inline-option, locally-disabled, locally-enabled, file-ignored, suppressed-message, useless-suppression, deprecated-pragma, apply-builtin, basestring-builtin, buffer-builtin, cmp-builtin, coerce-builtin, execfile-builtin, file-builtin, long-builtin, raw_input-builtin, reduce-builtin, standarderror-builtin, unicode-builtin, xrange-builtin, coerce-method, delslice-method, getslice-method, setslice-method, no-absolute-import, old-division, dict-iter-method, dict-view-method, next-method-called, metaclass-assignment, indexing-exception, raising-string, reload-builtin, oct-method, hex-method, nonzero-method, cmp-method, input-builtin, round-builtin, intern-builtin, unichr-builtin, map-builtin-not-iterating, zip-builtin-not-iterating, range-builtin-not-iterating, filter-builtin-not-iterating, using-cmp-argument, eq-without-hash, div-method, idiv-method, rdiv-method, exception-message-attribute, invalid-str-codec, sys-max-int, deprecated-str-translate-call, global-statement, broad-except, logging-not-lazy, C, R - -# Enable the message, report, category or checker with the given id(s). You can -# either give multiple identifier separated by comma (,) or put this option -# multiple time (only on the command line, not in the configuration file where -# it should appear only once). See also the "--disable" option for examples. -enable= - - -[REPORTS] - -# Python expression which should return a note less than 10 (10 is the highest -# note). You have access to the variables errors warning, statement which -# respectively contain the number of errors / warnings messages and the total -# number of statements analyzed. This is used by the global evaluation report -# (RP0004). -evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) - -# Template used to display messages. This is a python new-style format string -# used to format the message information. See doc for all details -#msg-template= - -# Set the output format. Available formats are text, parseable, colorized, json -# and msvs (visual studio).You can also give a reporter class, eg -# mypackage.mymodule.MyReporterClass. -#output-format=parseable -output-format=text - -# Tells whether to display a full report or only the messages -reports=no - -# Activate the evaluation score. 
-score=yes - - -[REFACTORING] - -# Maximum number of nested blocks for function / method body -max-nested-blocks=5 - - -[BASIC] - -# Naming hint for argument names -argument-name-hint=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$ - -# Regular expression matching correct argument names -argument-rgx=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$ - -# Naming hint for attribute names -attr-name-hint=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$ - -# Regular expression matching correct attribute names -attr-rgx=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$ - -# Bad variable names which should always be refused, separated by a comma -bad-names=foo,bar,baz,toto,tutu,tata - -# Naming hint for class attribute names -class-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$ - -# Regular expression matching correct class attribute names -class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$ - -# Naming hint for class names -class-name-hint=[A-Z_][a-zA-Z0-9]+$ - -# Regular expression matching correct class names -class-rgx=[A-Z_][a-zA-Z0-9]+$ - -# Naming hint for constant names -const-name-hint=(([A-Z_][A-Z0-9_]*)|(__.*__))$ - -# Regular expression matching correct constant names -const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$ - -# Minimum line length for functions/classes that require docstrings, shorter -# ones are exempt. -docstring-min-length=-1 - -# Naming hint for function names -function-name-hint=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$ - -# Regular expression matching correct function names -function-rgx=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$ - -# Good variable names which should always be accepted, separated by a comma -good-names=i,j,k,ex,Run,_ - -# Include a hint for the correct naming format with invalid-name -include-naming-hint=no - -# Naming hint for inline iteration names -inlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$ - -# Regular expression matching correct inline iteration names -inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$ - -# Naming hint for method names -method-name-hint=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$ - -# Regular expression matching correct method names -method-rgx=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$ - -# Naming hint for module names -module-name-hint=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ - -# Regular expression matching correct module names -module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ - -# Colon-delimited sets of names that determine each other's naming style when -# the name regexes allow several styles. -name-group= - -# Regular expression which should only match function or class names that do -# not require a docstring. -no-docstring-rgx=^_ - -# List of decorators that produce properties, such as abc.abstractproperty. Add -# to this list to register other decorators that produce valid properties. -property-classes=abc.abstractproperty - -# Naming hint for variable names -variable-name-hint=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$ - -# Regular expression matching correct variable names -variable-rgx=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$ - - -[FORMAT] - -# Expected format of line ending, e.g. empty (any line ending), LF or CRLF. -expected-line-ending-format= - -# Regexp for a line that is allowed to be longer than the limit. -ignore-long-lines=^\s*(# )??$ - -# Number of spaces of indent required inside a hanging or continued line. -indent-after-paren=4 - -# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 -# tab). -indent-string=' ' - -# Maximum number of characters on a single line. 
-max-line-length=80 - -# Maximum number of lines in a module -max-module-lines=1000 - -# List of optional constructs for which whitespace checking is disabled. `dict- -# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}. -# `trailing-comma` allows a space between comma and closing bracket: (a, ). -# `empty-line` allows space-only lines. -no-space-check=trailing-comma,dict-separator - -# Allow the body of a class to be on the same line as the declaration if body -# contains single statement. -single-line-class-stmt=no - -# Allow the body of an if to be on the same line as the test if there is no -# else. -single-line-if-stmt=no - - -[LOGGING] - -# Logging modules to check that the string format arguments are in logging -# function parameter format -logging-modules=logging - - -[MISCELLANEOUS] - -# List of note tags to take in consideration, separated by a comma. -notes=XXX, - - -[SIMILARITIES] - -# Ignore comments when computing similarities. -ignore-comments=yes - -# Ignore docstrings when computing similarities. -ignore-docstrings=yes - -# Ignore imports when computing similarities. -ignore-imports=yes - -# Minimum lines number of a similarity. -min-similarity-lines=4 - - -[SPELLING] - -# Spelling dictionary name. Available dictionaries: none. To make it working -# install python-enchant package. -spelling-dict= - -# List of comma separated words that should not be checked. -spelling-ignore-words= - -# A path to a file that contains private dictionary; one word per line. -spelling-private-dict-file= - -# Tells whether to store unknown words to indicated private dictionary in -# --spelling-private-dict-file option instead of raising a message. -spelling-store-unknown-words=no - - -[TYPECHECK] - -# List of decorators that produce context managers, such as -# contextlib.contextmanager. Add to this list to register other decorators that -# produce valid context managers. -contextmanager-decorators=contextlib.contextmanager - -# List of members which are set dynamically and missed by pylint inference -# system, and so shouldn't trigger E1101 when accessed. Python regular -# expressions are accepted. -generated-members= - -# Tells whether missing members accessed in mixin class should be ignored. A -# mixin class is detected if its name ends with "mixin" (case insensitive). -ignore-mixin-members=yes - -# This flag controls whether pylint should warn about no-member and similar -# checks whenever an opaque object is returned when inferring. The inference -# can return multiple potential results while evaluating a Python object, but -# some branches might not be evaluated, which results in partial inference. In -# that case, it might be useful to still emit no-member and other checks for -# the rest of the inferred objects. -ignore-on-opaque-inference=yes - -# List of class names for which member attributes should not be checked (useful -# for classes with dynamically set attributes). This supports the use of -# qualified names. -ignored-classes=optparse.Values,thread._local,_thread._local - -# List of module names for which member attributes should not be checked -# (useful for modules/projects where namespaces are manipulated during runtime -# and thus existing member attributes cannot be deduced by static analysis. It -# supports qualified module names, as well as Unix pattern matching. -ignored-modules= - -# Show a hint with possible names when a member name was not found. The aspect -# of finding the hint is based on edit distance. 
-missing-member-hint=yes - -# The minimum edit distance a name should have in order to be considered a -# similar match for a missing member name. -missing-member-hint-distance=1 - -# The total number of similar names that should be taken in consideration when -# showing a hint for a missing member. -missing-member-max-choices=1 - - -[VARIABLES] - -# List of additional names supposed to be defined in builtins. Remember that -# you should avoid to define new builtins when possible. -additional-builtins= - -# Tells whether unused global variables should be treated as a violation. -allow-global-unused-variables=yes - -# List of strings which can identify a callback function by name. A callback -# name must start or end with one of those strings. -callbacks=cb_,_cb - -# A regular expression matching the name of dummy variables (i.e. expectedly -# not used). -dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_|junk - -# Argument names that match this expression will be ignored. Default to name -# with leading underscore -ignored-argument-names=_.*|^ignored_|^unused_ - -# Tells whether we should check for unused import in __init__ files. -init-import=no - -# List of qualified module names which can have objects that can redefine -# builtins. -redefining-builtins-modules=future.builtins - - -[CLASSES] - -# List of method names used to declare (i.e. assign) instance attributes. -defining-attr-methods=__init__,__new__,setUp - -# List of member names, which should be excluded from the protected access -# warning. -exclude-protected=_asdict, _fields, _replace, _source, _make, _generate_and_write_metadata, _delete_obsolete_metadata, _log_status_of_top_level_roles, _load_top_level_metadata, _strip_version_number, _delegated_roles, _remove_invalid_and_duplicate_signatures, _repository_name, _targets_directory - -# List of valid names for the first argument in a class method. -valid-classmethod-first-arg=cls - -# List of valid names for the first argument in a metaclass class method. -valid-metaclass-classmethod-first-arg=mcs - - -[DESIGN] - -# Maximum number of arguments for function / method -max-args=5 - -# Maximum number of attributes for a class (see R0902). -max-attributes=7 - -# Maximum number of boolean expressions in a if statement -max-bool-expr=5 - -# Maximum number of branch for function / method body -max-branches=12 - -# Maximum number of locals for function / method body -max-locals=15 - -# Maximum number of parents for a class (see R0901). -max-parents=7 - -# Maximum number of public methods for a class (see R0904). -max-public-methods=20 - -# Maximum number of return / yield for function / method body -max-returns=6 - -# Maximum number of statements in function / method body -max-statements=50 - -# Minimum number of public methods for a class (see R0903). -min-public-methods=2 - - -[IMPORTS] - -# Allow wildcard imports from modules that define __all__. -allow-wildcard-with-all=no - -# Analyse import fallback blocks. This can be used to support both Python 2 and -# 3 compatible code, which means that the block might have code that exists -# only in one or another interpreter, leading to false positives when analysed. -analyse-fallback-blocks=no - -# Deprecated modules which should not be used, separated by a comma -deprecated-modules=regsub,TERMIOS,Bastion,rexec - -# Create a graph of external dependencies in the given file (report RP0402 must -# not be disabled) -ext-import-graph= - -# Create a graph of every (i.e. 
internal and external) dependencies in the -# given file (report RP0402 must not be disabled) -import-graph= - -# Create a graph of internal dependencies in the given file (report RP0402 must -# not be disabled) -int-import-graph= - -# Force import order to recognize a module as part of the standard -# compatibility libraries. -known-standard-library= - -# Force import order to recognize a module as part of a third party library. -known-third-party=enchant - - -[EXCEPTIONS] - -# Exceptions that will emit a warning when being caught. Defaults to -# "Exception" -overgeneral-exceptions=Exception diff --git a/pyproject.toml b/pyproject.toml index 16d7675fc7..7dea5d93d6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -7,8 +7,6 @@ build-backend = "setuptools.build_meta" # Read more here: https://black.readthedocs.io/en/stable/usage_and_configuration/the_basics.html#configuration-via-a-file [tool.black] line-length=80 -# TODO: remove "excludes" after deleting old test files -exclude="tests/.*old.py" # Isort section # Read more here: https://pycqa.github.io/isort/docs/configuration/config_files.html @@ -16,8 +14,6 @@ exclude="tests/.*old.py" profile="black" line_length=80 known_first_party = ["tuf"] -# TODO: remove "skip_glob" after deleting old test files -skip_glob="*old.py" # Pylint section @@ -59,8 +55,6 @@ module-rgx="^(_?[a-z][a-z0-9_]*|__init__)$" no-docstring-rgx="(__.*__|main|test.*|.*test|.*Test)$" variable-rgx="^[a-z][a-z0-9_]*$" docstring-min-length=10 -# TODO: remove "ignore-patterns" after deleting old test files -ignore-patterns=".*_old.py" [tool.pylint.logging] logging-format-style="old" @@ -83,8 +77,6 @@ disallow_untyped_defs = "True" disallow_untyped_calls = "True" show_error_codes = "True" disable_error_code = ["attr-defined"] -# TODO: remove "exclude" after deleting old test files -exclude=".*_old.py" [[tool.mypy.overrides]] module = [ diff --git a/setup.cfg b/setup.cfg index 0d4202b679..1076a7173c 100644 --- a/setup.cfg +++ b/setup.cfg @@ -33,9 +33,6 @@ license_files = LICENSE LICENSE-MIT [options] packages = find: -scripts = - tuf/scripts/repo.py - tuf/scripts/client.py python_requires = ~=3.7 install_requires = requests>=2.19.1 diff --git a/tests/.coveragerc b/tests/.coveragerc index dd9c57e8ab..2c8c989206 100644 --- a/tests/.coveragerc +++ b/tests/.coveragerc @@ -2,9 +2,6 @@ branch = True omit = - # Command-line scripts. - */tuf/scripts/client.py - */tuf/scripts/repo.py */tests/* */site-packages/* diff --git a/tests/fast_server_exit_old.py b/tests/fast_server_exit_old.py deleted file mode 100644 index b54b7b9230..0000000000 --- a/tests/fast_server_exit_old.py +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2020, TUF contributors -# SPDX-License-Identifier: MIT OR Apache-2.0 - -""" - - fast_server_exit.py - - - Martin Vrachev. - - - October 29, 2020. - - - See LICENSE-MIT OR LICENSE for licensing information. - - - Used for tests in tests/test_utils.py. -""" - -import sys - -sys.exit(0) diff --git a/tests/repository_data/generate_old.py b/tests/repository_data/generate_old.py deleted file mode 100755 index e131329ec9..0000000000 --- a/tests/repository_data/generate_old.py +++ /dev/null @@ -1,163 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2014 - 2017, New York University and the TUF contributors -# SPDX-License-Identifier: MIT OR Apache-2.0 - -""" - - generate.py - - - Vladimir Diaz - - - February 26, 2014. - - - See LICENSE-MIT.txt OR LICENSE-APACHE.txt for licensing information. 
- - - Provide a set of pre-generated key files and a basic repository that unit - tests can use in their test cases. The pre-generated files created by this - script should be copied by the unit tests as needed. The original versions - should be preserved. 'tuf/tests/repository_data/' will store the files - generated. 'generate.py' should not require re-execution if the - pre-generated repository files have already been created, unless they need to - change in some way. -""" - -import shutil -import datetime -import optparse -import stat - -from tuf.repository_tool import * - -import securesystemslib - -parser = optparse.OptionParser() -parser.add_option("-k","--keys", action='store_true', dest="should_generate_keys", - help="Generate a new set of keys", default=False) -parser.add_option("-d","--dry-run", action='store_true', dest="dry_run", - help="Do not write the files, just run", default=False) -(options, args) = parser.parse_args() - - -repository = create_new_repository('repository') - -root_key_file = 'keystore/root_key' -targets_key_file = 'keystore/targets_key' -snapshot_key_file = 'keystore/snapshot_key' -timestamp_key_file = 'keystore/timestamp_key' -delegation_key_file = 'keystore/delegation_key' - - -if options.should_generate_keys and not options.dry_run: - # Generate and save the top-level role keys, including the delegated roles. - # The unit tests should only have to import the keys they need from these - # pre-generated key files. - # Generate public and private key files for the top-level roles, and two - # delegated roles (these number of keys should be sufficient for most of the - # unit tests). Unit tests may generate additional keys, if needed. - generate_and_write_rsa_keypair(password='password', filepath=root_key_file) - generate_and_write_ed25519_keypair(password='password', filepath=targets_key_file) - generate_and_write_ed25519_keypair(password='password', filepath=snapshot_key_file) - generate_and_write_ed25519_keypair(password='password', filepath=timestamp_key_file) - generate_and_write_ed25519_keypair(password='password', filepath=delegation_key_file) - -# Import the public keys. These keys are needed so that metadata roles are -# assigned verification keys, which clients use to verify the signatures created -# by the corresponding private keys. -root_public = import_rsa_publickey_from_file(root_key_file + '.pub') -targets_public = import_ed25519_publickey_from_file(targets_key_file + '.pub') -snapshot_public = import_ed25519_publickey_from_file(snapshot_key_file + '.pub') -timestamp_public = import_ed25519_publickey_from_file(timestamp_key_file + '.pub') -delegation_public = import_ed25519_publickey_from_file(delegation_key_file + '.pub') - -# Import the private keys. These private keys are needed to generate the -# signatures included in metadata. -root_private = import_rsa_privatekey_from_file(root_key_file, 'password') -targets_private = import_ed25519_privatekey_from_file(targets_key_file, 'password') -snapshot_private = import_ed25519_privatekey_from_file(snapshot_key_file, 'password') -timestamp_private = import_ed25519_privatekey_from_file(timestamp_key_file, 'password') -delegation_private = import_ed25519_privatekey_from_file(delegation_key_file, 'password') - -# Add the verification keys to the top-level roles. 
-repository.root.add_verification_key(root_public)
-repository.targets.add_verification_key(targets_public)
-repository.snapshot.add_verification_key(snapshot_public)
-repository.timestamp.add_verification_key(timestamp_public)
-
-# Load the signing keys, previously imported, for the top-level roles so that
-# valid metadata can be written.
-repository.root.load_signing_key(root_private)
-repository.targets.load_signing_key(targets_private)
-repository.snapshot.load_signing_key(snapshot_private)
-repository.timestamp.load_signing_key(timestamp_private)
-
-# Create the target files (downloaded by clients) whose file size and digest
-# are specified in the 'targets.json' file.
-target1_filepath = 'repository/targets/file1.txt'
-securesystemslib.util.ensure_parent_dir(target1_filepath)
-target2_filepath = 'repository/targets/file2.txt'
-securesystemslib.util.ensure_parent_dir(target2_filepath)
-target3_filepath = 'repository/targets/file3.txt'
-securesystemslib.util.ensure_parent_dir(target3_filepath)
-
-if not options.dry_run:
-  with open(target1_filepath, 'wt') as file_object:
-    file_object.write('This is an example target file.')
-  # Because this file's permissions are stored in the custom attribute of the
-  # target's metadata, the file must be created with the same permissions
-  # regardless of the umask value on the host system generating the data.
-  os.chmod(target1_filepath, 0o644)
-
-  with open(target2_filepath, 'wt') as file_object:
-    file_object.write('This is another example target file.')
-
-  with open(target3_filepath, 'wt') as file_object:
-    file_object.write('This is role1\'s target file.')
-
-# Add target files to the top-level 'targets.json' role. These target files
-# should already exist. 'target1_filepath' contains additional information
-# about the target (i.e., file permissions in octal format.)
-octal_file_permissions = oct(os.stat(target1_filepath).st_mode)[4:]
-file_permissions = {'file_permissions': octal_file_permissions}
-repository.targets.add_target(os.path.basename(target1_filepath), file_permissions)
-repository.targets.add_target(os.path.basename(target2_filepath))
-
-repository.targets.delegate('role1', [delegation_public],
-    [os.path.basename(target3_filepath)])
-repository.targets('role1').add_target(os.path.basename(target3_filepath))
-repository.targets('role1').load_signing_key(delegation_private)
-
-repository.targets('role1').delegate('role2', [delegation_public], [])
-repository.targets('role2').load_signing_key(delegation_private)
-
-# Set the top-level expiration times far into the future so that the metadata
-# does not expire anytime soon, or else the tests fail. Unit tests may modify
-# the expiration datetimes (of the copied files), if they wish.
-repository.root.expiration = datetime.datetime(2030, 1, 1, 0, 0)
-repository.targets.expiration = datetime.datetime(2030, 1, 1, 0, 0)
-repository.snapshot.expiration = datetime.datetime(2030, 1, 1, 0, 0)
-repository.timestamp.expiration = datetime.datetime(2030, 1, 1, 0, 0)
-repository.targets('role1').expiration = datetime.datetime(2030, 1, 1, 0, 0)
-repository.targets('role2').expiration = datetime.datetime(2030, 1, 1, 0, 0)
-
-# Create the actual metadata files, which are saved to 'metadata.staged'.
-if not options.dry_run:
-  repository.writeall()
-
-# Move 'metadata.staged' to 'metadata' and create the client folder. The
-# client folder includes the required directory structure and metadata files
-# that clients need to successfully load a 'tuf.client.updater' object.
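A brief aside before the script continues: the `oct(...)[4:]` slice used above to record file permissions is easy to misread. For a regular file, `st_mode` also encodes the file type (`0o100000`), so its string form is something like `'0o100644'`, and dropping the first four characters leaves the familiar `'0644'`. A minimal, self-contained check:

```python
import os
import stat
import tempfile

# Create a throwaway file and give it well-known permissions.
handle, path = tempfile.mkstemp()
os.close(handle)
os.chmod(path, 0o644)  # note: largely a no-op on Windows

mode = os.stat(path).st_mode
print(oct(mode))                # '0o100644' for a regular file on POSIX
print(oct(mode)[4:])            # '0644' -- the slice used by the script above
print(oct(stat.S_IMODE(mode)))  # '0o644' -- the more explicit alternative

os.remove(path)
```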
-staged_metadata_directory = 'repository/metadata.staged' -metadata_directory = 'repository/metadata' -if not options.dry_run: - shutil.copytree(staged_metadata_directory, metadata_directory) - -# Create the client files (required directory structure and minimal metadata) -# as expected by 'tuf.client.updater'. -if not options.dry_run: - create_tuf_client_directory('repository', os.path.join('client', 'test_repository1')) diff --git a/tests/repository_data/generate_project_data_old.py b/tests/repository_data/generate_project_data_old.py deleted file mode 100755 index db3d00dfdb..0000000000 --- a/tests/repository_data/generate_project_data_old.py +++ /dev/null @@ -1,109 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2014 - 2017, New York University and the TUF contributors -# SPDX-License-Identifier: MIT OR Apache-2.0 - -""" - - generate_project_data.py - - - Santiago Torres - - - January 22, 2014. - - - See LICENSE-MIT.txt OR LICENSE-APACHE.txt for licensing information. - - - Generate a pre-fabricated set of metadata files for - 'test_developer_tool_old.py' test cases. -""" - -import shutil -import datetime -import optparse -import os - -from tuf.developer_tool import * - -import securesystemslib - -parser = optparse.OptionParser() - -parser.add_option("-d","--dry-run", action='store_true', dest="dry_run", - help="Do not write the files, just run", default=False) -(options, args) = parser.parse_args() - - -project_key_file = 'keystore/root_key' -targets_key_file = 'keystore/targets_key' -delegation_key_file = 'keystore/delegation_key' - -# The files we use for signing in the unit tests should exist, if they are not -# populated, run 'generate.py'. -assert os.path.exists(project_key_file) -assert os.path.exists(targets_key_file) -assert os.path.exists(delegation_key_file) - -# Import the public keys. These keys are needed so that metadata roles are -# assigned verification keys, which clients use to verify the signatures created -# by the corresponding private keys. -project_public = import_rsa_publickey_from_file(project_key_file + '.pub') -targets_public = import_ed25519_publickey_from_file(targets_key_file + '.pub') -delegation_public = import_ed25519_publickey_from_file(delegation_key_file + '.pub') - -# Import the private keys. These private keys are needed to generate the -# signatures included in metadata. -project_private = import_rsa_privatekey_from_file(project_key_file, 'password') -targets_private = import_ed25519_privatekey_from_file(targets_key_file, 'password') -delegation_private = import_ed25519_privatekey_from_file(delegation_key_file, 'password') - -os.mkdir("project") -os.mkdir("project/targets") - -# Create the target files (downloaded by clients) whose file size and digest -# are specified in the 'targets.json' file. 
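The comment above refers to the length and digest that targets metadata records for each file. A rough, self-contained approximation of that fileinfo is sketched below, using hashlib directly; the scripts in this diff go through securesystemslib helpers instead. The script resumes afterwards with the creation of the target files.

```python
import hashlib
import os

def make_fileinfo(path):
    # Rough approximation of the {'length': ..., 'hashes': ...} pair that
    # targets metadata stores for every target file.
    sha256 = hashlib.sha256()
    with open(path, 'rb') as fileobj:
        for block in iter(lambda: fileobj.read(4096), b''):
            sha256.update(block)
    return {'length': os.path.getsize(path),
            'hashes': {'sha256': sha256.hexdigest()}}

# Example: make_fileinfo('project/targets/file1.txt')
```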
-target1_filepath = 'project/targets/file1.txt'
-securesystemslib.util.ensure_parent_dir(target1_filepath)
-target2_filepath = 'project/targets/file2.txt'
-securesystemslib.util.ensure_parent_dir(target2_filepath)
-target3_filepath = 'project/targets/file3.txt'
-securesystemslib.util.ensure_parent_dir(target3_filepath)
-
-if not options.dry_run:
-  with open(target1_filepath, 'wt') as file_object:
-    file_object.write('This is an example target file.')
-
-  with open(target2_filepath, 'wt') as file_object:
-    file_object.write('This is another example target file.')
-
-  with open(target3_filepath, 'wt') as file_object:
-    file_object.write('This is role1\'s target file.')
-
-
-project = create_new_project("test-flat", 'project/test-flat', 'prefix', 'project/targets')
-
-# Add target files to the top-level project role. These target files should
-# already exist.
-project.add_target('file1.txt')
-project.add_target('file2.txt')
-
-# Add one key to the project.
-project.add_verification_key(project_public)
-project.load_signing_key(project_private)
-
-# Add the delegated role keys.
-project.delegate('role1', [delegation_public], [target3_filepath])
-project('role1').load_signing_key(delegation_private)
-
-# Set the project expiration time far into the future so that its metadata does
-# not expire anytime soon, or else the tests fail. Unit tests may modify the
-# expiration datetimes (of the copied files), if they wish.
-project.expiration = datetime.datetime(2030, 1, 1, 0, 0)
-project('role1').expiration = datetime.datetime(2030, 1, 1, 0, 0)
-
-# Create the actual metadata files, which are saved to 'metadata.staged'.
-if not options.dry_run:
-  project.write()
diff --git a/tests/simple_https_server_old.py b/tests/simple_https_server_old.py
deleted file mode 100755
index bf29d0dac6..0000000000
--- a/tests/simple_https_server_old.py
+++ /dev/null
@@ -1,59 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2014 - 2017, New York University and the TUF contributors
-# SPDX-License-Identifier: MIT OR Apache-2.0
-
-"""
-<Program Name>
-  simple_https_server_old.py
-
-<Author>
-  Vladimir Diaz.
-
-<Started>
-  June 17, 2014
-
-<Copyright>
-  See LICENSE-MIT OR LICENSE for licensing information.
-
-<Purpose>
-  Provide a simple https server that can be used by the unit tests. For
-  example, 'download.py' can connect to the https server started by this module
-  to verify that https downloads are permitted.
- - - ssl.SSLContext.wrap_socket: - https://docs.python.org/3/library/ssl.html#ssl.SSLContext.wrap_socket - - SimpleHTTPServer: - http://docs.python.org/library/simplehttpserver.html#module-SimpleHTTPServer -""" - -import sys -import ssl -import os -import http.server - -keyfile = os.path.join('ssl_certs', 'ssl_cert.key') -certfile = os.path.join('ssl_certs', 'ssl_cert.crt') - - -if len(sys.argv) > 1 and os.path.exists(sys.argv[1]): - certfile = sys.argv[1] - -httpd = http.server.HTTPServer(('localhost', 0), - http.server.SimpleHTTPRequestHandler) - -context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER) -context.load_cert_chain(certfile, keyfile) -httpd.socket = context.wrap_socket(httpd.socket, server_side=True) - -port_message = 'bind succeeded, server port is: ' \ - + str(httpd.server_address[1]) -print(port_message) - -if len(sys.argv) > 1 and certfile != sys.argv[1]: - print('simple_https_server_old: cert file was not found: ' + sys.argv[1] + - '; using default: ' + certfile + " certfile") - -httpd.serve_forever() diff --git a/tests/slow_retrieval_server_old.py b/tests/slow_retrieval_server_old.py deleted file mode 100755 index c2586f2ef4..0000000000 --- a/tests/slow_retrieval_server_old.py +++ /dev/null @@ -1,63 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2012 - 2017, New York University and the TUF contributors -# SPDX-License-Identifier: MIT OR Apache-2.0 - -""" - - slow_retrieval_server_old.py - - - Konstantin Andrianov. - - - March 13, 2012. - - - See LICENSE-MIT OR LICENSE for licensing information. - - - Server that throttles data by sending one byte at a time (specified time - interval 'DELAY'). The server is used in 'test_slow_retrieval_attack_old.py'. -""" - -import os -import time -import http.server - - - -# HTTP request handler. -class Handler(http.server.BaseHTTPRequestHandler): - - # Overwrite do_GET. - def do_GET(self): - current_dir = os.getcwd() - try: - filepath = os.path.join(current_dir, self.path.lstrip('/')) - data = None - with open(filepath, 'r') as fileobj: - data = fileobj.read() - - self.send_response(200) - self.send_header('Content-length', str(len(data))) - self.end_headers() - - # Before sending any data, the server does nothing for a long time. 
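This throttling (the `DELAY` constant follows just below) is exactly what a downloader must defend against. A rough sketch of a client-side countermeasure is shown here; the use of `requests` and the specific thresholds are illustrative assumptions, not this repository's actual download logic:

```python
import time

import requests  # assumed HTTP client; tuf itself declares requests as a dependency

MIN_AVERAGE_RATE = 50  # bytes per second; an illustrative threshold


def download_with_rate_check(url, expected_length):
    """Abort if the server trickles bytes out too slowly."""
    start = time.time()
    received = 0
    # The timeout bounds both the connect and each individual read.
    with requests.get(url, stream=True, timeout=5) as response:
        response.raise_for_status()
        for chunk in response.iter_content(chunk_size=1024):
            received += len(chunk)
            elapsed = time.time() - start
            if elapsed > 1 and received / elapsed < MIN_AVERAGE_RATE:
                raise IOError('average transfer rate too low: '
                              'possible slow retrieval attack')
            if received >= expected_length:
                break
    return received
```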
- DELAY = 40 - time.sleep(DELAY) - self.wfile.write((data.encode('utf-8'))) - - except IOError as e: - self.send_error(404, 'File Not Found!') - - - -if __name__ == '__main__': - server_address = ('localhost', 0) - - httpd = http.server.HTTPServer(server_address, Handler) - port_message = 'bind succeeded, server port is: ' \ - + str(httpd.server_address[1]) - print(port_message) - httpd.serve_forever() diff --git a/tests/ssl_certs/proxy_ca.crt b/tests/ssl_certs/proxy_ca.crt deleted file mode 100644 index f079e58b7c..0000000000 --- a/tests/ssl_certs/proxy_ca.crt +++ /dev/null @@ -1,17 +0,0 @@ ------BEGIN CERTIFICATE----- -MIICpDCCAYwCCQCFr/EhHmzVajANBgkqhkiG9w0BAQsFADAUMRIwEAYDVQQDDAlw -cm94eTIgQ0EwHhcNMTgwOTIwMTkyOTQ2WhcNMjgwOTE3MTkyOTQ2WjAUMRIwEAYD -VQQDDAlwcm94eTIgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC/ -rVOeqSzJb01Vyliw3dnfLJsWfDfs/Lq5HLn+Xqnzl6MqnYirDqHzTErD3vl8lo/o -OJrziO0vYCWGXEylRQlZp+P37bLToSWiVqWZ8pH6CAh+AhA3WtegN5JwTgIUSP7A -aDlxuZrXlJM50QVlXJIPkc74M8ALz0nu5zmyWkGFvmTYS8503T8cXs9Alr4Bo++9 -Ilixv6lW4QS7FKTeQXlI49K4TeGGGsfmEO6Uj4WTUkwMZym9wfiqtaWc6I9ZMese -WmU3LuufY+pFCdjsdMWDJpYc+HabTSrbgXSF5Iq9a84Xuum39qhVpYhBwBtLk3ye -cxZmIxde1vnkWAitJFETAgMBAAEwDQYJKoZIhvcNAQELBQADggEBAKV09r/x3WyO -McH0RU4WRVzvQN5F0e7swpDlLUX7YnfvpPEkavqQfmrL1cYyEDgsm/347Gvcs1Aa -iaT77axYroXOvCEJ3DxZdzUErKH6Jr3MmHKcZ/L35u6ZXKnmx/edFjdWr6ENkjuZ -NVvKbTrm4cl6Wy4bXkp6b24rBa9IFJncOouSkIvHENEcH//OD4xeTK8vSJTJ9nmw -TiJ0TjCRujtJWC6yb03ZV32VbeiHa1zLlZhcyKqUtt81dLti5t5+L2hAAVCcnEgI -DBWQdlRs/wilHGWVBo/9srOoMNsmvecTBpLH2JyC5VZ1+faYLPrNlgkWgHIFOTTi -h4ByR95Wbi8= ------END CERTIFICATE----- diff --git a/tests/ssl_certs/proxy_ca.key b/tests/ssl_certs/proxy_ca.key deleted file mode 100644 index 0e08b82d76..0000000000 --- a/tests/ssl_certs/proxy_ca.key +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEpQIBAAKCAQEAv61TnqksyW9NVcpYsN3Z3yybFnw37Py6uRy5/l6p85ejKp2I -qw6h80xKw975fJaP6Dia84jtL2AlhlxMpUUJWafj9+2y06ElolalmfKR+ggIfgIQ -N1rXoDeScE4CFEj+wGg5cbma15STOdEFZVySD5HO+DPAC89J7uc5slpBhb5k2EvO -dN0/HF7PQJa+AaPvvSJYsb+pVuEEuxSk3kF5SOPSuE3hhhrH5hDulI+Fk1JMDGcp -vcH4qrWlnOiPWTHrHlplNy7rn2PqRQnY7HTFgyaWHPh2m00q24F0heSKvWvOF7rp -t/aoVaWIQcAbS5N8nnMWZiMXXtb55FgIrSRREwIDAQABAoIBACxJObbA064+3xlh -RRioSXx86+BIFwvUYLgAYSDacl3rvTFNcJRFLznteKDE1dPpXZqD6Zk3G8YEauce -UD8nMj/awJs5+kVXSEC30E8/cmbYkE284E5J2OQVsunrvCM/skx2SD90aMhCdbm4 -B40h1EVwpOdH3alc3XIrTnNc0yK5MWAu41qwkxYxXHmW9Y0L8AjZve9JBrnKsJMB -ETEZFhHgi/IWtfh5PLbJO2dbSe7Nqo4ikyWo3r5b3yvuphFz1il88ZLjJ5nDmtlH -is7sk7pd0tYNsK1Di5G1ku50XvcbOE4F7mOVCxICTwjN+sdyG8o+AVlgbTKBo/JF -uEhthCECgYEA/3YXS9mAEujlstrV4VOksYWtySSrLHC56tLjj8cHVPJ1qkzT4OOC -X9TsWReDG4J8/t0DOHn+5dnhnqGcYjMMAQx095KHU1bQGrcRdmi6cjnNLTvfEbge -IcJTYG5P7NpLfLjB3DOGqFR4o0iz4K9ZLTYJc+BaCB9qJBEw6nuoP+sCgYEAwBTN -WpRDrmch0+LFPQwboLwtEPiFscTj8SInV0KsI/MK8+5Sm+tXS8PQHYJYcECEQxQM -2gfyM8vy33UP4yn4edJGWlaz7a4hyDxn944vv2fBQ3vjJTNz3X3skkhZ2/F+ZW9e -SFxPj+Vbif8VTEU+wK0f5SUmpRec4E7y3fq+kXkCgYEAib8ZbLLI1mlygfBx51/8 -rCRSwuTcz8ew2CgCwGInV+ys+bkXfmnuwNHE531AGrNPxvVRaUCO602C1NB7zI+N -53raDyyZf5yN9fnElr592l3EfqGL9Lf8t2NbJeIVgrdqgMP29E9sSpPRwOnQ5FRo -l3JNwoe0xDB8QRpr7+PhoyUCgYEAp+GGmmR7wzLgnhDV00WB4DqYKP0N3RH5KAhx -2hKr4b/LEuh5y00mP1Il06TZJ0M8VmRv1yCa0CqxXB00hZdpVRAz7UFagaJwZFJn -jDb6BJDqmdDt9tXBrxUgb7pMz6+CiaWNAjGsWFheaX5JXyAmeMDX369Y13KL6oEW -RG2jogECgYEA/1vLZcWNK/0yd4ClU+Xbu8xC29q82JUMsaazHtbgSNlOfo9LMQlH -z6xBiMYfHZ/SiHCy9RsO8GD4caXiF0RsTVnhqjSRJf3EARamufelNsu2ApLclkSN -fzSoB7ZHddGaYKYpXkGzcwFcKd/QjAlHm1yIsZu4B52AhCxC/WS2X54= ------END RSA PRIVATE KEY----- diff --git a/tests/ssl_certs/proxy_cert.key 
b/tests/ssl_certs/proxy_cert.key deleted file mode 100644 index 7693865610..0000000000 --- a/tests/ssl_certs/proxy_cert.key +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEpAIBAAKCAQEAzZO36nZvb9wLxBNB2cZyHqcX5poChJd1YnFBtxbtQwiISxid -eGdiWImQE80vpUyTQbI7TxM+w1xZeEeu4PXuYrOgdTDRFEnjM2mteG+3WpHQBN4H -xoah0msp3046fMkYqcEvhvHbsc5DAWgLK4JFHQPtG/+CIH0ZY+lBBPQhFIhBLYkt -YxNVqwpsXOGreASSw6mO6cVehCuVFJQO5NnI1sCAvp3SeosMKeIcDZxpZWmZhSwH -n3Rj6RMNM66C8zG4YlpvIniGzgV4UiW8XrTUG8HmzQ2295IcfB4No2DZeJDSR9oq -jOkyqJXll+tSiAMuzBRtTQKvGZ5bpZWW4XELEQIDAQABAoIBAQCAfW2cjD4GimCI -QwkLlq9JXWLg7S3ZtdjWmLdcOmY9WZ3mYhI6aVPcxs5Ysgyvonb/vui2+e5mqNf7 -B8LUNKK06lTGKqbjqXLqdYjJF/pgD3cXM7dkbE3EeNqJChogWIijwW11SMHqFmNn -A6LHpPqRshyHPWIV8FroSagr8nKio5BjUEuUiQUUAmSJPGN5qUhdIWXcQu8R1JB8 -9qqqtwPR4FELbFVGI2vYHaSWGnf9V0boPOsfFXWbSq/Ksj3Lm3gAqMtlAeOFu84l -fhP9RkgeXfaCXq0VaOM83UDgLqXm4Ni4wAMKRLwNs4LzumqMM/dfUTn+mGncj33q -idp5qnDhAoGBAOXkwuf60F7aBbo98A0vWZli2CbkspsJz2J573pf+lVWI+ZHBZLI -MOM2DgCOEIUfa2TIMkwFr2t9x6uXlACEwFbEtEBpM4J5qUHgGtXZIsnTsv3qUg/C -L89cNrMddOuuRkxQbyK1QMYZZmZQjSKG2jW6m1KING+shtkOzQ/P9ildAoGBAOTs -DLyyPeEZPj1UMqxVNmeYYRfWnt+YyTPulOIbSuFN0DhZPNLsjrhSxvDwe/3sYH/p -nKdjnlFlx8frz9wtkCt0hWvY0pG2Zam4IBCvreFN7rSvpzHwUAK3oXic2TRKKu1m -xUPZqMJwnWAPX+XxGFn0m7UJj+95VTEOJ2d12ClFAoGAdexXMgmM8uqg/3yf8xNz -wWNbfu/W0gJBN8FWXw52aWmrNob9y+IWeaYTnqNAxBhuzR6H9kkAR4IYduNkzrNJ -ufhigZu1CVuAv8LF4SXlW2PVL7wPZff08Efb4xrcC7y0YJbtuv8Af90tkpQFIU3N -Brx2yeoGA7aa4SJfe5nwKh0CgYAo1yP+lh4MBqDf+CGCNUGbgcfwpM17PprGtQ3C -uPPG9kbrhqAfUSy1Ha94VK8KQh2FNHxKMK+R/gKCXEOdGFPcLNGQyAHpFQ1WFg9C -atUumOS5P40oj6L2mSQpjHIDrieyat9Ol4pQBh9Nf/Cv6S9a/RS6W5ZeNttIASpu -fsutsQKBgQCq+BFeDYJH4f+C1233W3PXM0P1ivj+9TJMRUP63RRay6rv2ZTZXyPc -Rx6Lv4OVWh9VMfv1kHRloJ1GKEBo/uD3nid1WqoNxpXv1iwxeGtjXkFHfvCB7Ruu -vTyQhJQQ7WSCJJOfarstusIn0udOG3MLRgG4X1pPQghyS1AT8NUglw== ------END RSA PRIVATE KEY----- diff --git a/tests/ssl_certs/ssl_cert.crt b/tests/ssl_certs/ssl_cert.crt deleted file mode 100644 index 4812078bcf..0000000000 --- a/tests/ssl_certs/ssl_cert.crt +++ /dev/null @@ -1,28 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIE1TCCAz2gAwIBAgIJAKqz8ew7Z44mMA0GCSqGSIb3DQEBCwUAMIGAMQswCQYD -VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxETAPBgNVBAcMCEJyb29rbHluMQww -CgYDVQQKDANOWVUxKTAnBgNVBAsMIENvbXB1dGVyIFNjaWVuY2UgYW5kIEVuZ2lu -ZWVyaW5nMRIwEAYDVQQDDAlsb2NhbGhvc3QwHhcNMTYwMTI3MjEyMTMxWhcNMjYw -MTI0MjEyMTMxWjCBgDELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5ldyBZb3JrMREw -DwYDVQQHDAhCcm9va2x5bjEMMAoGA1UECgwDTllVMSkwJwYDVQQLDCBDb21wdXRl -ciBTY2llbmNlIGFuZCBFbmdpbmVlcmluZzESMBAGA1UEAwwJbG9jYWxob3N0MIIB -ojANBgkqhkiG9w0BAQEFAAOCAY8AMIIBigKCAYEAxyFVeRsWnb1UlCKBks2azM9W -9K+J/ZkzdSb6eCxOIxv79M/Ug54CfWqkySSaQejsu0U/gJxkFYRvwQAy5lATrspY -2kyiWYiggWXFDWz+i8ETPkL9zn59v13sNIpT/IXQj0S3Mr9ZnsUn1qCyEOOIxJxZ -lyuV/M/XP1DP4tArhEvrex12V6MQIK+8fYzEjHG/W7vIIet+wTStIR8ArvVQi0Kv -PbbGCfrZ+e+gq+UpBLBuAfMzM95TW+YJ5duMchie2n6LDmOeegA4jMEv2ppeOr8Q -JJtZuKpXWVbJvLg81yrDjr1rAwJR/WQrnk8GQWPCyPLneAA4mJbi75LqjLxn0AoJ -b3kzLfGEMJJEWXspxNg06bLQU948hB4L7nKARq6s7KoESjEV+/L4koMPWJoNq6fx -OUVw2+S3ITNrDctecRQ1j3RGVPaj5l6bn03C7KV9uRrfqFY3OUjn7A0kDczvRnmr -e1BZIpe+mfGFB+Uu7JiQoBv6I6fqyrdH9rX1LUKlAgMBAAGjUDBOMB0GA1UdDgQW -BBT8LvRkvodP9bR/bBs/aI+AydRIvTAfBgNVHSMEGDAWgBT8LvRkvodP9bR/bBs/ -aI+AydRIvTAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBgQC6kwuSEF0Y -5yLMf1TKfVkeBaZ4tOqR2kzpggzPPog+JcfIQgVmI2QTUDritHWFIM4YUwQ/00WU -uol2BCUpgaLci5gNgyTw8p538Q5cZHXE3kK/CWJA4zKag+oHdmXzGjMalqzvPuVJ -9VdtPrwHhB0Xntf72iWWhE2dIn1QZqVmJ/8hhIU8cQ91pIqTjYjhrYE/GhGH7HMW -bRiRolt37VxbzfXjEBMqVH6fOQq0piTRxwTNPBFp6JO5mRakRmWRvN3dnR8J9qXi 
-6tQhNNn2uQIpPlKlqVQnh5j5YxFrb50b0FCjDw+eNilXP93yjV4+lWK2QZychcGl -6/7Wu8snZkJCImPbwmcT80XSKesf918zIkauekWiaJE02+ljNtbM7MUAE+XLsKJy -NFGzpyZJ9LihGC/eeVl7K+xqC41jGVOXOOHtbDMbIQfaEZd1nPvy3+V/tublv+am -jPSlj/FW3bLTkjF0OspFjHvJeCeAJdM9kJdYfZoahd6kcejGJc+vjXE= ------END CERTIFICATE----- diff --git a/tests/ssl_certs/ssl_cert.key b/tests/ssl_certs/ssl_cert.key deleted file mode 100644 index b483851d7b..0000000000 --- a/tests/ssl_certs/ssl_cert.key +++ /dev/null @@ -1,39 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIG4wIBAAKCAYEAxyFVeRsWnb1UlCKBks2azM9W9K+J/ZkzdSb6eCxOIxv79M/U -g54CfWqkySSaQejsu0U/gJxkFYRvwQAy5lATrspY2kyiWYiggWXFDWz+i8ETPkL9 -zn59v13sNIpT/IXQj0S3Mr9ZnsUn1qCyEOOIxJxZlyuV/M/XP1DP4tArhEvrex12 -V6MQIK+8fYzEjHG/W7vIIet+wTStIR8ArvVQi0KvPbbGCfrZ+e+gq+UpBLBuAfMz -M95TW+YJ5duMchie2n6LDmOeegA4jMEv2ppeOr8QJJtZuKpXWVbJvLg81yrDjr1r -AwJR/WQrnk8GQWPCyPLneAA4mJbi75LqjLxn0AoJb3kzLfGEMJJEWXspxNg06bLQ -U948hB4L7nKARq6s7KoESjEV+/L4koMPWJoNq6fxOUVw2+S3ITNrDctecRQ1j3RG -VPaj5l6bn03C7KV9uRrfqFY3OUjn7A0kDczvRnmre1BZIpe+mfGFB+Uu7JiQoBv6 -I6fqyrdH9rX1LUKlAgMBAAECggGAEogMn0ehFC7xdxO7AUF3HYZSLlVDv0EJo+Zr -utFMuEG7ce4Bdfo3exp4mWt5m5akqUzpevuS6Nm5WLm/AuYC3upf2Hj3RuPLJB+n -dfdlvPXL56huXFAzPaLs/3q8FC0T2rFnZyadnYP1kCjGSYITUVDHmaTpwWxKOM85 -eX8r/ZTfJkb4o3E+Z/xSy1BVXkibqVrRZi63Th2r2wA6nQ2hYERlcJXY2kbpEDR3 -vGeIKLKOmknawwH2uf+vfh+vc1LNE7p9C5w16ex0OcmCo6G1ln7/dcwmXmcS3M0S -Bax5Jzu5ozaJFL9G59o0AUGJoZj9Gj9leeKPZvShsGcA0JmBMQiLIdhgRwj0B83x -HrYXTZ6P5BjJmwrIv4mGdv2bHV20pbWKAATUwo8EVBzylipexhhAtQJ5B6OsPDPS -HTluaEC2niD6lE613uRnzzbjw4SlwkoMLE0aqOhQyWIPS9/8oRjTzQi4otL7Dt69 -oMrVhmSfxUqZhh2R3KMHDcMKt5nBAoHBAOXkDovYOhTMD3ei0WbKpbSB1sJp5t2d -/9gVil4nWLa4ahw7/TsZi3Co+c9gD2UJku1L9JbOy6TVZ2LoXOybLvIJfeAjNdYH -vi/ElG7498fgsSyw6bua/1VEd7VtbtpWJIQt1LdJG1+O3ZbJNTY6tbLbYVuy4FIO -e/484F8kdZ9PtRsn+I0I7kfoYJ2IFoM0UWgwQETOBguBCua43ZnHoxrvyHKABAO+ -Iuvw4RBZKphGVxMCEjvTCB9S/CpGCRAkkQKBwQDdvu3reA/lVdFDN56VNUn0u3vr -zPSoiOjojlHDyWVAWiLB9I0qaE61UMvVgChM8VkmjhHYQEW6Cj0XMZMkCnsfKDQn -TYF16jt/sTteWSTcx0PTeiCGs3yM5wK4B8q9coOlzSqDd39mjDIFiUz4e+44OIcU -+ISc8pGbwxw0W8qRwIUJPTSVoaUZDnupuR/IE48q8CTPT1Gf00sMLWuv3SYuFHKX -djpcMLWVf4HclIY6y3BqNIZ0JaUAOd+OZT2kdtUCgcBLWPwLics/lcJcC9lmP3Ug -PI4PGna4nFiGkkjPo0XIXZkpt9+/xxeUzU1TUsC49PJbJFH+O7kzRV6lZFNQmWxB -mCrRk7jJdbA4J84esStFL7fiVfnFq3+UiuRRapSyqxk82WimyidWopSuHzR5mbSD -8rNuQqqTOnwZUAqaJHEIzi8lv2wPjaXLm7ZO65O1XShxZZ8q7fu9OYZBKMY46N3k -rkKchKjMMT1w53pcyVzUm/leGYewY/J9kc1kbZ/60oECgcEAj/qdzwt4/sa3BncB -wA4GxCJL9zJwFVI4MG/gRUjqNluQP/GDC2sI2A/rGeiJwlPfN/p9ObWZ0I8/VWT6 -DifEA9n96xsXGTIKigHQ85TcK4Iy1whwQCYgk/iXOljM2i+VrT1HAm+/yBz1icS5 -ton5hoWlqAcpTCLwSnvoP1Lud67ScspL73Aym89cmjo6mZWhmxasP/NXo3f1PaXs -SxdD6B2cvh2lDSEPdk+BSXEiquBXUI5kUtvyg/AP6Qxxdu01AoHAO05qTh9zokkT -yg0sZf4Z5i01em2ys4ZhQjhhbw+I5lIO76e/ZyUWpEZusBVd9TV5BHgiATOHw4yr -nbjEZKwLEb3SXoHl3/CD/l9vWk4gKAYDJdW+oPZttDlkp6dfPJVDupQwLhrxXYmE -fgs4WFmY3Q5b1wut2pnSs1UEPDqJBvykt59gFgn7yVwyTy8VLihNVtH4mwVPYXha -jz2T6BzRAPlYqx/FpkK2YHHNcyj+HFtnBUMMzacnSl/aXpJgHTKw ------END RSA PRIVATE KEY----- diff --git a/tests/ssl_certs/ssl_cert_2.crt b/tests/ssl_certs/ssl_cert_2.crt deleted file mode 100644 index 6d6fb63a53..0000000000 --- a/tests/ssl_certs/ssl_cert_2.crt +++ /dev/null @@ -1,30 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIFOTCCA6GgAwIBAgIJAO+bbero+zKtMA0GCSqGSIb3DQEBCwUAMIGAMQswCQYD -VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxETAPBgNVBAcMCEJyb29rbHluMQww -CgYDVQQKDANOWVUxKTAnBgNVBAsMIENvbXB1dGVyIFNjaWVuY2UgYW5kIEVuZ2lu -ZWVyaW5nMRIwEAYDVQQDDAlsb2NhbGhvc3QwHhcNMTgwOTI2MTgwMDAzWhcNMzgw -OTIxMTgwMDAzWjCBgDELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5ldyBZb3JrMREw 
-DwYDVQQHDAhCcm9va2x5bjEMMAoGA1UECgwDTllVMSkwJwYDVQQLDCBDb21wdXRl -ciBTY2llbmNlIGFuZCBFbmdpbmVlcmluZzESMBAGA1UEAwwJbG9jYWxob3N0MIIB -ojANBgkqhkiG9w0BAQEFAAOCAY8AMIIBigKCAYEAxyFVeRsWnb1UlCKBks2azM9W -9K+J/ZkzdSb6eCxOIxv79M/Ug54CfWqkySSaQejsu0U/gJxkFYRvwQAy5lATrspY -2kyiWYiggWXFDWz+i8ETPkL9zn59v13sNIpT/IXQj0S3Mr9ZnsUn1qCyEOOIxJxZ -lyuV/M/XP1DP4tArhEvrex12V6MQIK+8fYzEjHG/W7vIIet+wTStIR8ArvVQi0Kv -PbbGCfrZ+e+gq+UpBLBuAfMzM95TW+YJ5duMchie2n6LDmOeegA4jMEv2ppeOr8Q -JJtZuKpXWVbJvLg81yrDjr1rAwJR/WQrnk8GQWPCyPLneAA4mJbi75LqjLxn0AoJ -b3kzLfGEMJJEWXspxNg06bLQU948hB4L7nKARq6s7KoESjEV+/L4koMPWJoNq6fx -OUVw2+S3ITNrDctecRQ1j3RGVPaj5l6bn03C7KV9uRrfqFY3OUjn7A0kDczvRnmr -e1BZIpe+mfGFB+Uu7JiQoBv6I6fqyrdH9rX1LUKlAgMBAAGjgbMwgbAwgZ8GA1Ud -IwSBlzCBlKGBhqSBgzCBgDELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5ldyBZb3Jr -MREwDwYDVQQHDAhCcm9va2x5bjEMMAoGA1UECgwDTllVMSkwJwYDVQQLDCBDb21w -dXRlciBTY2llbmNlIGFuZCBFbmdpbmVlcmluZzESMBAGA1UEAwwJbG9jYWxob3N0 -ggkA75tt6uj7Mq0wDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAYEAFWcl -1tAmt/3DJDjk0ppF62jbwcEOu1N9Nono9a70ojAQYYuMC7Ditw6rLbeXS8tP8ae/ -drlci3VxlE5PpmAjuP67Uv2CuGu/2iMqa99AWZ4mVN+x4YL6awvYs8ea6I1Xe8tQ -5+RqvNA+QtnjtfOeb6yWQBAGrc2eTX87IzqvV/EewkdKAs4GZUWG1Zjv3effqjTO -qRX94ltW1GWud7fVcqpZLOaK9U+4IaI2nNHuCtWODoyQmMoVApXyig/YQqFe0eyj -76m1T+2SZLRtn0xn1fTHuLZ2bdtTMZ7k5PTAKnBNEn1Rr9MAS+WEASN1ZyoQ3reL -VYrgkMTrrXPO8bdDTvP7z1Jzv5Cq9WMHFvOLfnj/vN9ZPH6w4QT3Zb97SAAOSPK/ -gzOzRtIe+hqCYBh/cwMoeeoAzes/nJgorj3IOTu8JXmtZrZGrdLIhu2Q8U+yKasf -+TUrr6xdcJI/fyVM5BVelpGhqHzzOQe1tO4VYQlAVaaVvFidDPHqTI2/S272 ------END CERTIFICATE----- diff --git a/tests/ssl_certs/ssl_cert_expired.crt b/tests/ssl_certs/ssl_cert_expired.crt deleted file mode 100644 index f0b79cb95a..0000000000 --- a/tests/ssl_certs/ssl_cert_expired.crt +++ /dev/null @@ -1,30 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIFOTCCA6GgAwIBAgIJALtyUsChEIJpMA0GCSqGSIb3DQEBCwUAMIGAMQswCQYD -VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxETAPBgNVBAcMCEJyb29rbHluMQww -CgYDVQQKDANOWVUxKTAnBgNVBAsMIENvbXB1dGVyIFNjaWVuY2UgYW5kIEVuZ2lu -ZWVyaW5nMRIwEAYDVQQDDAlsb2NhbGhvc3QwHhcNMTgwOTI2MTc0NTM2WhcNMTgw -OTI1MTc0NTM2WjCBgDELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5ldyBZb3JrMREw -DwYDVQQHDAhCcm9va2x5bjEMMAoGA1UECgwDTllVMSkwJwYDVQQLDCBDb21wdXRl -ciBTY2llbmNlIGFuZCBFbmdpbmVlcmluZzESMBAGA1UEAwwJbG9jYWxob3N0MIIB -ojANBgkqhkiG9w0BAQEFAAOCAY8AMIIBigKCAYEAxyFVeRsWnb1UlCKBks2azM9W -9K+J/ZkzdSb6eCxOIxv79M/Ug54CfWqkySSaQejsu0U/gJxkFYRvwQAy5lATrspY -2kyiWYiggWXFDWz+i8ETPkL9zn59v13sNIpT/IXQj0S3Mr9ZnsUn1qCyEOOIxJxZ -lyuV/M/XP1DP4tArhEvrex12V6MQIK+8fYzEjHG/W7vIIet+wTStIR8ArvVQi0Kv -PbbGCfrZ+e+gq+UpBLBuAfMzM95TW+YJ5duMchie2n6LDmOeegA4jMEv2ppeOr8Q -JJtZuKpXWVbJvLg81yrDjr1rAwJR/WQrnk8GQWPCyPLneAA4mJbi75LqjLxn0AoJ -b3kzLfGEMJJEWXspxNg06bLQU948hB4L7nKARq6s7KoESjEV+/L4koMPWJoNq6fx -OUVw2+S3ITNrDctecRQ1j3RGVPaj5l6bn03C7KV9uRrfqFY3OUjn7A0kDczvRnmr -e1BZIpe+mfGFB+Uu7JiQoBv6I6fqyrdH9rX1LUKlAgMBAAGjgbMwgbAwgZ8GA1Ud -IwSBlzCBlKGBhqSBgzCBgDELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5ldyBZb3Jr -MREwDwYDVQQHDAhCcm9va2x5bjEMMAoGA1UECgwDTllVMSkwJwYDVQQLDCBDb21w -dXRlciBTY2llbmNlIGFuZCBFbmdpbmVlcmluZzESMBAGA1UEAwwJbG9jYWxob3N0 -ggkAu3JSwKEQgmkwDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAYEAW4I1 -TacdFv3L9ENFkSLciPb7zFMckLUZfk/P+4VjdapWrfuydO4W/ogMxA4DK09thTsK -N/BgcExyKjDldGUfUv57Tqv3v2E5kbygNcNtP53fwMz3y+7QourzkDE5HWciw1Lb -hmbnCBTzt/UioSBdJnAH29GWpSS+Jzu745sRaI48AS/J5ApH2aVEnNQTCE7v1LNH -2bTTPYl3eDXiD8yOhvyiW1F4y2BSFbQRH/3aE6Goe4A75m8sX50+JlOgjyyQnAMf -vbfvZsjGfqdXv9Qpci50qKCFxHJLXXNAUbX3fDgKE+RoZUNZnmn2VDgJYnToz6on -RcVnppV09kmSjHXZBT04XXUA0vG3p+oU0TO4puJlePVf4Oz23/DRCPHSfVWgMeB2 
-c1PpKit4+Bz7mypnsWVw8kk//l0GJ1cHnkkZElKJtPEB7I587jgTCDcN811TGNBc -rLLd/JwtYAvi1CPFt2ICGDvA4AKLY3rBNg5z1DrSE/iom1NTC00SFZJztYiX ------END CERTIFICATE----- diff --git a/tests/ssl_certs/ssl_cert_wronghost.crt b/tests/ssl_certs/ssl_cert_wronghost.crt deleted file mode 100644 index df7bfa37a6..0000000000 --- a/tests/ssl_certs/ssl_cert_wronghost.crt +++ /dev/null @@ -1,31 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIFRTCCA62gAwIBAgIJAKY6b706lpuDMA0GCSqGSIb3DQEBCwUAMIGEMQswCQYD -VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxETAPBgNVBAcMCEJyb29rbHluMQww -CgYDVQQKDANOWVUxKTAnBgNVBAsMIENvbXB1dGVyIFNjaWVuY2UgYW5kIEVuZ2lu -ZWVyaW5nMRYwFAYDVQQDDA1ub3RteWhvc3RuYW1lMB4XDTE4MDkxMjE2NTkxN1oX -DTM4MDkwNzE2NTkxN1owgYQxCzAJBgNVBAYTAlVTMREwDwYDVQQIDAhOZXcgWW9y -azERMA8GA1UEBwwIQnJvb2tseW4xDDAKBgNVBAoMA05ZVTEpMCcGA1UECwwgQ29t -cHV0ZXIgU2NpZW5jZSBhbmQgRW5naW5lZXJpbmcxFjAUBgNVBAMMDW5vdG15aG9z -dG5hbWUwggGiMA0GCSqGSIb3DQEBAQUAA4IBjwAwggGKAoIBgQDHIVV5GxadvVSU -IoGSzZrMz1b0r4n9mTN1Jvp4LE4jG/v0z9SDngJ9aqTJJJpB6Oy7RT+AnGQVhG/B -ADLmUBOuyljaTKJZiKCBZcUNbP6LwRM+Qv3Ofn2/Xew0ilP8hdCPRLcyv1mexSfW -oLIQ44jEnFmXK5X8z9c/UM/i0CuES+t7HXZXoxAgr7x9jMSMcb9bu8gh637BNK0h -HwCu9VCLQq89tsYJ+tn576Cr5SkEsG4B8zMz3lNb5gnl24xyGJ7afosOY556ADiM -wS/aml46vxAkm1m4qldZVsm8uDzXKsOOvWsDAlH9ZCueTwZBY8LI8ud4ADiYluLv -kuqMvGfQCglveTMt8YQwkkRZeynE2DTpstBT3jyEHgvucoBGrqzsqgRKMRX78viS -gw9Ymg2rp/E5RXDb5LchM2sNy15xFDWPdEZU9qPmXpufTcLspX25Gt+oVjc5SOfs -DSQNzO9Geat7UFkil76Z8YUH5S7smJCgG/ojp+rKt0f2tfUtQqUCAwEAAaOBtzCB -tDCBowYDVR0jBIGbMIGYoYGKpIGHMIGEMQswCQYDVQQGEwJVUzERMA8GA1UECAwI -TmV3IFlvcmsxETAPBgNVBAcMCEJyb29rbHluMQwwCgYDVQQKDANOWVUxKTAnBgNV -BAsMIENvbXB1dGVyIFNjaWVuY2UgYW5kIEVuZ2luZWVyaW5nMRYwFAYDVQQDDA1u -b3RteWhvc3RuYW1lggkApjpvvTqWm4MwDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0B -AQsFAAOCAYEAvpBMce3kxwo9W0o4RqezkSxnNyax0ezbUNodIkx5kbzX09qQLqhK -SkhQY3CNmtrpsczUg1W2nldxioEouwfTlhi15H98E/8XytpGaHO7Rnbtq8nkOp3E -N1+DMfFR95OynbHSd7bfK9UEmH1CmCnttvCuQkLTxDCpEsQNAxvmU/yDONoDr+cu -jGo80XTnYTqHl5/UtGbCS4SAIdWgrXTIqVvY/eF+mR+3nQEYjBuqW0cNfXLyYLXH -XMc6qtfGX1P+NRWtlrWgGQmc0fry+GczRHMJuKtJMV2xZzPJAJqwwvj3Fjz8HNGu -ZX3kVdbkDjf8is2cWgyZqDecqPHDBW4Ey539s/5eurgOkEvhriS4/9RnVhgdzduj -nRdXkD10ficrFcBQO0KaTWT+iFBc9duuYPuLRyRTye5p3t0liOikH2XrRXs4IBfz -2mT4npXQl1liNixcCf/yUEUOSQAJDG6aRjDjD4SZBUPDLjfqKLid8M0BpLQrks9L -5hAg1WZXorY6 ------END CERTIFICATE----- diff --git a/tests/test_arbitrary_package_attack_old.py b/tests/test_arbitrary_package_attack_old.py deleted file mode 100755 index 0791751220..0000000000 --- a/tests/test_arbitrary_package_attack_old.py +++ /dev/null @@ -1,287 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2012 - 2017, New York University and the TUF contributors -# SPDX-License-Identifier: MIT OR Apache-2.0 - -""" - - test_arbitrary_package_attack_old.py - - - Konstantin Andrianov. - - - February 22, 2012. - - March 21, 2014. - Refactored to use the 'unittest' module (test conditions in code, rather - than verifying text output), use pre-generated repository files, and - discontinue use of the old repository tools. -vladimir.v.diaz - - - See LICENSE-MIT OR LICENSE for licensing information. - - - Simulate an arbitrary package attack, where an updater client attempts to - download a malicious file. TUF and non-TUF client scenarios are tested. - - There is no difference between 'updates' and 'target' files. 
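Before the test code resumes below, it is worth sketching the check that lets the TUF client catch this attack: every download is validated against length and hash values taken from signed metadata. The sketch is illustrative only, using hashlib in place of the securesystemslib helpers the test imports:

```python
import hashlib

def verify_target(path, trusted_length, trusted_sha256):
    # 'trusted_length' and 'trusted_sha256' come from signed targets
    # metadata, which an attacker who only controls the repository's
    # target files cannot forge.
    with open(path, 'rb') as fileobj:
        data = fileobj.read()
    if len(data) != trusted_length:
        raise ValueError('length mismatch: possible arbitrary package attack')
    if hashlib.sha256(data).hexdigest() != trusted_sha256:
        raise ValueError('hash mismatch: possible arbitrary package attack')
```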
-""" - -import os -import tempfile -import shutil -import json -import logging -import unittest -import sys -from urllib import request - -import tuf -import tuf.formats -import tuf.roledb -import tuf.keydb -import tuf.log -import tuf.client.updater as updater -import tuf.unittest_toolbox as unittest_toolbox - -from tests import utils - -import securesystemslib - -logger = logging.getLogger(__name__) - - -class TestArbitraryPackageAttack(unittest_toolbox.Modified_TestCase): - - @classmethod - def setUpClass(cls): - # Create a temporary directory to store the repository, metadata, and target - # files. 'temporary_directory' must be deleted in TearDownModule() so that - # temporary files are always removed, even when exceptions occur. - cls.temporary_directory = tempfile.mkdtemp(dir=os.getcwd()) - - # Launch a SimpleHTTPServer (serves files in the current directory). - # Test cases will request metadata and target files that have been - # pre-generated in 'tuf/tests/repository_data', which will be served by the - # SimpleHTTPServer launched here. The test cases of this unit test assume - # the pre-generated metadata files have a specific structure, such - # as a delegated role 'targets/role1', three target files, five key files, - # etc. - cls.server_process_handler = utils.TestServerProcess(log=logger) - - - - @classmethod - def tearDownClass(cls): - # Cleans the resources and flush the logged lines (if any). - cls.server_process_handler.clean() - - # Remove the temporary repository directory, which should contain all the - # metadata, targets, and key files generated of all the test cases. - shutil.rmtree(cls.temporary_directory) - - - - - def setUp(self): - # We are inheriting from custom class. - unittest_toolbox.Modified_TestCase.setUp(self) - - self.repository_name = 'test_repository1' - - # Copy the original repository files provided in the test folder so that - # any modifications made to repository files are restricted to the copies. - # The 'repository_data' directory is expected to exist in 'tuf/tests/'. - original_repository_files = os.path.join(os.getcwd(), 'repository_data') - temporary_repository_root = \ - self.make_temp_directory(directory=self.temporary_directory) - - # The original repository, keystore, and client directories will be copied - # for each test case. - original_repository = os.path.join(original_repository_files, 'repository') - original_client = os.path.join(original_repository_files, 'client') - - # Save references to the often-needed client repository directories. - # Test cases need these references to access metadata and target files. - self.repository_directory = \ - os.path.join(temporary_repository_root, 'repository') - self.client_directory = os.path.join(temporary_repository_root, 'client') - - # Copy the original 'repository', 'client', and 'keystore' directories - # to the temporary repository the test cases can use. - shutil.copytree(original_repository, self.repository_directory) - shutil.copytree(original_client, self.client_directory) - - # Set the url prefix required by the 'tuf/client/updater.py' updater. - # 'path/to/tmp/repository' -> 'localhost:8001/tmp/repository'. - repository_basepath = self.repository_directory[len(os.getcwd()):] - url_prefix = 'http://' + utils.TEST_HOST_ADDRESS + ':' \ - + str(self.server_process_handler.port) + repository_basepath - - # Setting 'tuf.settings.repository_directory' with the temporary client - # directory copied from the original repository files. 
- tuf.settings.repositories_directory = self.client_directory - self.repository_mirrors = {'mirror1': {'url_prefix': url_prefix, - 'metadata_path': 'metadata', - 'targets_path': 'targets'}} - - # Create the repository instance. The test cases will use this client - # updater to refresh metadata, fetch target files, etc. - self.repository_updater = updater.Updater(self.repository_name, - self.repository_mirrors) - - - - def tearDown(self): - # updater.Updater() populates the roledb with the name "test_repository1" - tuf.roledb.clear_roledb(clear_all=True) - tuf.keydb.clear_keydb(clear_all=True) - - # Logs stdout and stderr from the sever subprocess. - self.server_process_handler.flush_log() - - # Remove temporary directory - unittest_toolbox.Modified_TestCase.tearDown(self) - - - - def test_without_tuf(self): - # Verify that a target file replaced with a malicious version is downloaded - # by a non-TUF client (i.e., a non-TUF client that does not verify hashes, - # detect mix-and-mix attacks, etc.) A tuf client, on the other hand, should - # detect that the downloaded target file is invalid. - - # Test: Download a valid target file from the repository. - # Ensure the target file to be downloaded has not already been downloaded, - # and generate its file size and digest. The file size and digest is needed - # to check that the malicious file was indeed downloaded. - target_path = os.path.join(self.repository_directory, 'targets', 'file1.txt') - client_target_path = os.path.join(self.client_directory, 'file1.txt') - self.assertFalse(os.path.exists(client_target_path)) - length, hashes = securesystemslib.util.get_file_details(target_path) - fileinfo = tuf.formats.make_targets_fileinfo(length, hashes) - - url_prefix = self.repository_mirrors['mirror1']['url_prefix'] - url_file = os.path.join(url_prefix, 'targets', 'file1.txt') - - # On Windows, the URL portion should not contain back slashes. - request.urlretrieve(url_file.replace('\\', '/'), client_target_path) - - self.assertTrue(os.path.exists(client_target_path)) - length, hashes = securesystemslib.util.get_file_details(client_target_path) - download_fileinfo = tuf.formats.make_targets_fileinfo(length, hashes) - self.assertEqual(fileinfo, download_fileinfo) - - # Test: Download a target file that has been modified by an attacker. - with open(target_path, 'wt') as file_object: - file_object.write('add malicious content.') - length, hashes = securesystemslib.util.get_file_details(target_path) - malicious_fileinfo = tuf.formats.make_targets_fileinfo(length, hashes) - - # On Windows, the URL portion should not contain back slashes. - request.urlretrieve(url_file.replace('\\', '/'), client_target_path) - - length, hashes = securesystemslib.util.get_file_details(client_target_path) - download_fileinfo = tuf.formats.make_targets_fileinfo(length, hashes) - - # Verify 'download_fileinfo' is unequal to the original trusted version. - self.assertNotEqual(download_fileinfo, fileinfo) - - # Verify 'download_fileinfo' is equal to the malicious version. - self.assertEqual(download_fileinfo, malicious_fileinfo) - - - - def test_with_tuf(self): - # Verify that a target file (on the remote repository) modified by an - # attacker is not downloaded by the TUF client. - # First test that the valid target file is successfully downloaded. 
- file1_fileinfo = self.repository_updater.get_one_valid_targetinfo('file1.txt') - destination = os.path.join(self.client_directory) - self.repository_updater.download_target(file1_fileinfo, destination) - client_target_path = os.path.join(destination, 'file1.txt') - self.assertTrue(os.path.exists(client_target_path)) - - # Modify 'file1.txt' and confirm that the TUF client rejects it. - target_path = os.path.join(self.repository_directory, 'targets', 'file1.txt') - with open(target_path, 'wt') as file_object: - file_object.write('malicious content, size 33 bytes.') - - try: - self.repository_updater.download_target(file1_fileinfo, destination) - - except tuf.exceptions.NoWorkingMirrorError as exception: - url_prefix = self.repository_mirrors['mirror1']['url_prefix'] - url_file = os.path.join(url_prefix, 'targets', 'file1.txt') - - # Verify that only one exception is raised for 'url_file'. - self.assertTrue(len(exception.mirror_errors), 1) - - # Verify that the expected 'tuf.exceptions.DownloadLengthMismatchError' exception - # is raised for 'url_file'. - self.assertTrue(url_file.replace('\\', '/') in exception.mirror_errors) - self.assertTrue( - isinstance(exception.mirror_errors[url_file.replace('\\', '/')], - securesystemslib.exceptions.BadHashError)) - - else: - self.fail('TUF did not prevent an arbitrary package attack.') - - - def test_with_tuf_and_metadata_tampering(self): - # Test that a TUF client does not download a malicious target file, and a - # 'targets.json' metadata file that has also been modified by the attacker. - # The attacker does not attach a valid signature to 'targets.json' - - # An attacker modifies 'file1.txt'. - target_path = os.path.join(self.repository_directory, 'targets', 'file1.txt') - with open(target_path, 'wt') as file_object: - file_object.write('malicious content, size 33 bytes.') - - # An attacker also tries to add the malicious target's length and digest - # to its metadata file. - length, hashes = securesystemslib.util.get_file_details(target_path) - - metadata_path = \ - os.path.join(self.repository_directory, 'metadata', 'targets.json') - - metadata = securesystemslib.util.load_json_file(metadata_path) - metadata['signed']['targets']['file1.txt']['hashes'] = hashes - metadata['signed']['targets']['file1.txt']['length'] = length - - tuf.formats.check_signable_object_format(metadata) - - with open(metadata_path, 'wb') as file_object: - file_object.write(json.dumps(metadata, indent=1, - separators=(',', ': '), sort_keys=True).encode('utf-8')) - - # Verify that the malicious 'targets.json' is not downloaded. Perform - # a refresh of top-level metadata to demonstrate that the malicious - # 'targets.json' is not downloaded. - try: - self.repository_updater.refresh() - file1_fileinfo = self.repository_updater.get_one_valid_targetinfo('file1.txt') - destination = os.path.join(self.client_directory) - self.repository_updater.download_target(file1_fileinfo, destination) - - except tuf.exceptions.NoWorkingMirrorError as exception: - url_prefix = self.repository_mirrors['mirror1']['url_prefix'] - url_file = os.path.join(url_prefix, 'targets', 'file1.txt') - - # Verify that an exception raised for only the malicious 'url_file'. - self.assertTrue(len(exception.mirror_errors), 1) - - # Verify that the specific and expected mirror exception is raised. 
- self.assertTrue(url_file.replace('\\', '/') in exception.mirror_errors) - self.assertTrue( - isinstance(exception.mirror_errors[url_file.replace('\\', '/')], - securesystemslib.exceptions.BadHashError)) - - else: - self.fail('TUF did not prevent an arbitrary package attack.') - - -if __name__ == '__main__': - utils.configure_test_logging(sys.argv) - unittest.main() diff --git a/tests/test_developer_tool_old.py b/tests/test_developer_tool_old.py deleted file mode 100755 index bec0d62e8f..0000000000 --- a/tests/test_developer_tool_old.py +++ /dev/null @@ -1,428 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2014 - 2017, New York University and the TUF contributors -# SPDX-License-Identifier: MIT OR Apache-2.0 - -""" - - test_developer_tool_old.py. - - - Santiago Torres Arias - Zane Fisher - - - January 22, 2014. - - - See LICENSE-MIT OR LICENSE for licensing information. - - - Unit test for the 'developer_tool.py' module. -""" - -import os -import unittest -import logging -import tempfile -import shutil -import sys - -import tuf -import tuf.log -import tuf.roledb -import tuf.keydb -import tuf.developer_tool as developer_tool -import tuf.exceptions - -import securesystemslib -import securesystemslib.exceptions - -from tuf.developer_tool import METADATA_DIRECTORY_NAME -from tuf.developer_tool import TARGETS_DIRECTORY_NAME - -from tests import utils - -logger = logging.getLogger(__name__) - -developer_tool.disable_console_log_messages() - -class TestProject(unittest.TestCase): - - tmp_dir = None - - @classmethod - def setUpClass(cls): - cls.tmp_dir = tempfile.mkdtemp(dir = os.getcwd()) - - - @classmethod - def tearDownClass(cls): - shutil.rmtree(cls.tmp_dir) - - - def tearDown(self): - # called after every test case - tuf.roledb.clear_roledb(clear_all=True) - tuf.keydb.clear_keydb(clear_all=True) - - - def test_create_new_project(self): - # Test cases for the create_new_project function. In this test we will - # check input, correct file creation and format. We also check - # that a proper object is generated. We will use the normal layout for this - # test suite. - - # Create a local subfolder for this test. - local_tmp = tempfile.mkdtemp(dir = self.tmp_dir) - - # These are the usual values we will be throwing to the function, however - # we will swap these for nulls or malformed values every now and then to - # test input. - project_name = 'test_suite' - metadata_directory = local_tmp - location_in_repository = '/prefix' - targets_directory = None - key = None - - # Create a blank project. - project = developer_tool.create_new_project(project_name, metadata_directory, - location_in_repository) - - self.assertTrue(isinstance(project, developer_tool.Project)) - self.assertTrue(project.layout_type == 'repo-like') - self.assertTrue(project.prefix == location_in_repository) - self.assertTrue(project.project_name == project_name) - self.assertTrue(project.metadata_directory == - os.path.join(metadata_directory,METADATA_DIRECTORY_NAME)) - self.assertTrue(project.targets_directory == - os.path.join(metadata_directory,TARGETS_DIRECTORY_NAME)) - - # Create a blank project without a prefix. 
- project = developer_tool.create_new_project(project_name, metadata_directory) - self.assertTrue(isinstance(project, developer_tool.Project)) - self.assertTrue(project.layout_type == 'repo-like') - self.assertTrue(project.prefix == '') - self.assertTrue(project.project_name == project_name) - self.assertTrue(project.metadata_directory == - os.path.join(metadata_directory,METADATA_DIRECTORY_NAME)) - self.assertTrue(project.targets_directory == - os.path.join(metadata_directory,TARGETS_DIRECTORY_NAME)) - - # Create a blank project without a valid metadata directory. - self.assertRaises(securesystemslib.exceptions.FormatError, developer_tool.create_new_project, - 0, metadata_directory, location_in_repository) - self.assertRaises(securesystemslib.exceptions.FormatError, developer_tool.create_new_project, - project_name, 0, location_in_repository) - self.assertRaises(securesystemslib.exceptions.FormatError, developer_tool.create_new_project, - project_name, metadata_directory, 0) - - - # Create a new project with a flat layout. - targets_directory = tempfile.mkdtemp(dir = local_tmp) - metadata_directory = tempfile.mkdtemp(dir = local_tmp) - project = developer_tool.create_new_project(project_name, metadata_directory, - location_in_repository, targets_directory) - self.assertTrue(isinstance(project, developer_tool.Project)) - self.assertTrue(project.layout_type == 'flat') - self.assertTrue(project.prefix == location_in_repository) - self.assertTrue(project.project_name == project_name) - self.assertTrue(project.metadata_directory == metadata_directory) - self.assertTrue(project.targets_directory == targets_directory) - - # Finally, check that if targets_directory is set, it is valid. - self.assertRaises(securesystemslib.exceptions.FormatError, developer_tool.create_new_project, - project_name, metadata_directory, location_in_repository, 0) - - # Copy a key to our workspace and create a new project with it. - keystore_path = os.path.join('repository_data','keystore') - - # I will use the same key as the one provided in the repository - # tool tests for the root role, but this is not a root role... - root_key_path = os.path.join(keystore_path,'root_key.pub') - project_key = developer_tool.import_rsa_publickey_from_file(root_key_path) - - # Test create new project with a key added by default. - project = developer_tool.create_new_project(project_name, metadata_directory, - location_in_repository, targets_directory, project_key) - - self.assertTrue(isinstance(project, developer_tool.Project)) - self.assertTrue(project.layout_type == 'flat') - self.assertTrue(project.prefix == location_in_repository) - self.assertTrue(project.project_name == project_name) - self.assertTrue(project.metadata_directory == metadata_directory) - self.assertTrue(project.targets_directory == targets_directory) - self.assertTrue(len(project.keys) == 1) - self.assertTrue(project.keys[0] == project_key['keyid']) - - # Try to write to an invalid location. The OSError should be re-raised by - # create_new_project(). 
- shutil.rmtree(targets_directory)
- tuf.roledb.clear_roledb()
- tuf.keydb.clear_keydb()
-
- metadata_directory = '/'
- valid_metadata_directory_name = developer_tool.METADATA_DIRECTORY_NAME
- developer_tool.METADATA_DIRECTORY_NAME = '/'
-
- try:
- developer_tool.create_new_project(project_name, metadata_directory,
- location_in_repository, targets_directory, project_key)
-
- except (OSError, tuf.exceptions.RepositoryError):
- pass
-
- developer_tool.METADATA_DIRECTORY_NAME = valid_metadata_directory_name
-
-
-
- def test_load_project(self):
- # This test case first loads an existing project and verifies the loaded
- # object. It then tries to load a nonexistent project and expects the
- # correct exception to be raised. Finally, it overwrites the existing
- # prefix of the loaded project.
-
- # Create a local subfolder for this test.
- local_tmp = tempfile.mkdtemp(dir = self.tmp_dir)
-
- # Test non-existent project filepath.
- nonexistent_path = os.path.join(local_tmp, 'nonexistent')
- self.assertRaises(securesystemslib.exceptions.StorageError,
- developer_tool.load_project, nonexistent_path)
-
- # Copy the pregenerated metadata.
- project_data_filepath = os.path.join('repository_data', 'project')
- target_project_data_filepath = os.path.join(local_tmp, 'project')
- shutil.copytree('repository_data/project', target_project_data_filepath)
-
- # Properly load a project.
- repo_filepath = os.path.join(local_tmp, 'project', 'test-flat')
- new_targets_path = os.path.join(local_tmp, 'project', 'targets')
- project = developer_tool.load_project(repo_filepath,
- new_targets_location = new_targets_path)
- self.assertTrue(project._targets_directory == new_targets_path)
- self.assertTrue(project.layout_type == 'flat')
-
- # Load a project overwriting the prefix.
- project = developer_tool.load_project(repo_filepath, prefix='new')
- self.assertTrue(project.prefix == 'new')
-
- # Load a project whose metadata file has been corrupted (the file still
- # exists, but no longer contains valid JSON).
- file_to_corrupt = os.path.join(repo_filepath, 'test-flat.json')
- with open(file_to_corrupt, 'wt') as fp:
- fp.write('this is not a json file')
-
- self.assertRaises(securesystemslib.exceptions.Error, developer_tool.load_project, repo_filepath)
-
-
-
-
- def test_add_verification_keys(self):
- # Create a new project instance.
- project = developer_tool.Project('test_verification_keys', 'somepath',
- 'someotherpath', 'prefix')
-
- # Add an invalid verification key.
- self.assertRaises(securesystemslib.exceptions.FormatError, project.add_verification_key, 'invalid')
-
- # Add a verification key.
- # - load it first
- keystore_path = os.path.join('repository_data', 'keystore')
- first_verification_key_path = os.path.join(keystore_path,'root_key.pub')
- first_verification_key = \
- developer_tool.import_rsa_publickey_from_file(first_verification_key_path)
-
- project.add_verification_key(first_verification_key)
-
-
- # Add another verification key (an exception is expected.)
- second_verification_key_path = os.path.join(keystore_path, 'snapshot_key.pub')
- second_verification_key = \
- developer_tool.import_ed25519_publickey_from_file(second_verification_key_path)
-
- self.assertRaises(securesystemslib.exceptions.Error,
- project.add_verification_key,(second_verification_key))
-
-
-
- # Add a verification key for the delegation.
- project.delegate('somedelegation', [], [])
- project('somedelegation').add_verification_key(first_verification_key)
- project('somedelegation').add_verification_key(second_verification_key)
-
-
- # Add a subdelegation beneath the delegation.
- project('somedelegation').delegate('somesubdelegation', [], []) - project('somesubdelegation').add_verification_key(first_verification_key) - project('somesubdelegation').add_verification_key(second_verification_key) - - - def test_write(self): - - # Create tmp directory. - local_tmp = tempfile.mkdtemp(dir=self.tmp_dir) - - # Create new project inside tmp directory. - project = developer_tool.create_new_project('new_project', local_tmp, - 'prefix'); - - # Create some target files inside the tmp directory. - target_filepath = os.path.join(local_tmp, 'targets', 'test_target') - with open(target_filepath, 'wt') as fp: - fp.write('testing file') - - - # Add the targets. - project.add_target(os.path.basename(target_filepath)) - - # Add verification keys. - keystore_path = os.path.join('repository_data', 'keystore') - project_key_path = os.path.join(keystore_path, 'root_key.pub') - project_key = \ - developer_tool.import_rsa_publickey_from_file(project_key_path) - - - # Call status (for the sake of doing it and to improve test coverage by - # executing its statements.) - project.status() - - project.add_verification_key(project_key) - - - # Add another verification key (should expect exception.) - delegation_key_path = os.path.join(keystore_path, 'snapshot_key.pub') - delegation_key = \ - developer_tool.import_ed25519_publickey_from_file(delegation_key_path) - - # Add a subdelegation. - subdelegation_key_path = os.path.join(keystore_path, 'timestamp_key.pub') - subdelegation_key = \ - developer_tool.import_ed25519_publickey_from_file(subdelegation_key_path) - - # Add a delegation. - project.delegate('delegation', [delegation_key], []) - project('delegation').delegate('subdelegation', [subdelegation_key], []) - - # call write (except) - self.assertRaises(securesystemslib.exceptions.Error, project.write, ()) - - # Call status (for the sake of doing it and executing its statements.) - project.status() - - # Load private keys. - project_private_key_path = os.path.join(keystore_path, 'root_key') - project_private_key = \ - developer_tool.import_rsa_privatekey_from_file(project_private_key_path, - 'password') - - delegation_private_key_path = os.path.join(keystore_path, 'snapshot_key') - delegation_private_key = \ - developer_tool.import_ed25519_privatekey_from_file(delegation_private_key_path, - 'password') - - subdelegation_private_key_path = \ - os.path.join(keystore_path, 'timestamp_key') - subdelegation_private_key = \ - developer_tool.import_ed25519_privatekey_from_file(subdelegation_private_key_path, - 'password') - - # Test partial write. - # backup everything (again) - # + backup targets. - targets_backup = project.target_files - - # + backup delegations. - delegations_backup = \ - tuf.roledb.get_delegated_rolenames(project.project_name) - - # + backup layout type. - layout_type_backup = project.layout_type - - # + backup keyids. - keys_backup = project.keys - delegation_keys_backup = project('delegation').keys - - # + backup the prefix. - prefix_backup = project.prefix - - # + backup the name. - name_backup = project.project_name - - # Write and reload. - self.assertRaises(securesystemslib.exceptions.Error, project.write) - project.write(write_partial=True) - - project = developer_tool.load_project(local_tmp) - - # Check against backup. 
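test_write() above backs up half a dozen project attributes one by one, and the checks that follow re-assert each of them after a write/reload cycle. As a condensed sketch of the same round-trip pattern (the helper is hypothetical; the attribute names are the ones the test itself uses):

```python
import tuf.roledb

def snapshot_state(project):
    # Capture the attributes the test compares across a
    # write()/load_project() round trip.
    return {
        'targets': sorted(project.target_files.keys()),
        'delegations': tuf.roledb.get_delegated_rolenames(project.project_name),
        'layout_type': project.layout_type,
        'keys': list(project.keys),
        'prefix': project.prefix,
        'name': project.project_name,
    }

# Usage sketch:
#   before = snapshot_state(project)
#   project.write(write_partial=True)
#   project = developer_tool.load_project(local_tmp)
#   assert snapshot_state(project) == before
```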
- self.assertEqual(list(project.target_files.keys()), list(targets_backup.keys()))
- new_delegations = tuf.roledb.get_delegated_rolenames(project.project_name)
- self.assertEqual(new_delegations, delegations_backup)
- self.assertEqual(project.layout_type, layout_type_backup)
- self.assertEqual(project.keys, keys_backup)
-
- self.assertEqual(project('delegation').keys, delegation_keys_backup)
-
- self.assertEqual(project.prefix, prefix_backup)
- self.assertEqual(project.project_name, name_backup)
-
- roleinfo = tuf.roledb.get_roleinfo(project.project_name)
-
- self.assertEqual(roleinfo['partial_loaded'], True)
-
-
-
- # Load signing keys.
- project('delegation').load_signing_key(delegation_private_key)
-
- project.status()
-
- project.load_signing_key(project_private_key)
-
- # Backup everything.
- # + backup targets.
- targets_backup = project.target_files
-
- # + backup delegations.
- delegations_backup = \
- tuf.roledb.get_delegated_rolenames(project.project_name)
-
- # + backup layout type.
- layout_type_backup = project.layout_type
-
- # + backup keyids.
- keys_backup = project.keys
- delegation_keys_backup = project('delegation').keys
-
- # + backup the prefix.
- prefix_backup = project.prefix
-
- # + backup the name.
- name_backup = project.project_name
-
- # Call status (for the sake of doing it.)
- project.status()
-
- # Call write.
- project.write()
-
- # Call load.
- project = developer_tool.load_project(local_tmp)
-
-
- # Check against backup.
- self.assertEqual(list(project.target_files.keys()), list(targets_backup.keys()))
-
- new_delegations = tuf.roledb.get_delegated_rolenames(project.project_name)
- self.assertEqual(new_delegations, delegations_backup)
- self.assertEqual(project.layout_type, layout_type_backup)
- self.assertEqual(project.keys, keys_backup)
- self.assertEqual(project('delegation').keys, delegation_keys_backup)
- self.assertEqual(project.prefix, prefix_backup)
- self.assertEqual(project.project_name, name_backup)
-
-
-
-if __name__ == '__main__':
- utils.configure_test_logging(sys.argv)
- unittest.main()
diff --git a/tests/test_download_old.py b/tests/test_download_old.py
deleted file mode 100755
index 4af22738de..0000000000
--- a/tests/test_download_old.py
+++ /dev/null
@@ -1,392 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2014 - 2017, New York University and the TUF contributors
-# SPDX-License-Identifier: MIT OR Apache-2.0
-
-"""
-
- test_download_old.py
-
-
- Konstantin Andrianov.
-
-
- March 26, 2012.
-
-
- See LICENSE-MIT OR LICENSE for licensing information.
-
-
- Unit test for 'download.py'.
-
- NOTE: Make sure test_download_old.py is run in the 'tuf/tests/' directory.
- Otherwise, the module that launches the simple server will not be found.
-
- TODO: Adopt the environment variable management from test_proxy_use.py here.
-"""
-
-import hashlib
-import logging
-import os
-import sys
-import unittest
-import urllib3
-import warnings
-
-import tuf
-import tuf.download as download
-import tuf.requests_fetcher
-import tuf.log
-import tuf.unittest_toolbox as unittest_toolbox
-import tuf.exceptions
-
-from tests import utils
-
-import requests.exceptions
-
-import securesystemslib
-
-logger = logging.getLogger(__name__)
-
-
-class TestDownload(unittest_toolbox.Modified_TestCase):
- def setUp(self):
- """
- Create a temporary file and launch a simple server in the
- current working directory.
- """
-
- unittest_toolbox.Modified_TestCase.setUp(self)
-
- # Making a temporary file.
- current_dir = os.getcwd() - target_filepath = self.make_temp_data_file(directory=current_dir) - self.target_fileobj = open(target_filepath, 'r') - self.target_data = self.target_fileobj.read() - self.target_data_length = len(self.target_data) - - # Launch a SimpleHTTPServer (serves files in the current dir). - self.server_process_handler = utils.TestServerProcess(log=logger) - - rel_target_filepath = os.path.basename(target_filepath) - self.url = 'http://' + utils.TEST_HOST_ADDRESS + ':' \ - + str(self.server_process_handler.port) + '/' + rel_target_filepath - - # Computing hash of target file data. - m = hashlib.md5() - m.update(self.target_data.encode('utf-8')) - digest = m.hexdigest() - self.target_hash = {'md5':digest} - - # Initialize the default fetcher for the download - self.fetcher = tuf.requests_fetcher.RequestsFetcher() - - - - # Stop server process and perform clean up. - def tearDown(self): - # Cleans the resources and flush the logged lines (if any). - self.server_process_handler.clean() - - self.target_fileobj.close() - - # Remove temp directory - unittest_toolbox.Modified_TestCase.tearDown(self) - - - # Test: Normal case. - def test_download_url_to_tempfileobj(self): - - download_file = download.safe_download - with download_file(self.url, self.target_data_length, self.fetcher) as temp_fileobj: - temp_fileobj.seek(0) - temp_file_data = temp_fileobj.read().decode('utf-8') - self.assertEqual(self.target_data, temp_file_data) - self.assertEqual(self.target_data_length, len(temp_file_data)) - - - # Test: Download url in more than one chunk. - def test_download_url_in_chunks(self): - - # Set smaller chunk size to ensure that the file will be downloaded - # in more than one chunk - default_chunk_size = tuf.settings.CHUNK_SIZE - tuf.settings.CHUNK_SIZE = 4 - # We don't have access to chunks from download_file() - # so we just confirm that the expectation of more than one chunk is - # correct and verify that no errors are raised during download - chunks_count = self.target_data_length/tuf.settings.CHUNK_SIZE - self.assertGreater(chunks_count, 1) - - download_file = download.safe_download - with download_file(self.url, self.target_data_length, self.fetcher) as temp_fileobj: - temp_fileobj.seek(0) - temp_file_data = temp_fileobj.read().decode('utf-8') - self.assertEqual(self.target_data, temp_file_data) - self.assertEqual(self.target_data_length, len(temp_file_data)) - - # Restore default settings - tuf.settings.CHUNK_SIZE = default_chunk_size - - - # Test: Incorrect lengths. - def test_download_url_to_tempfileobj_and_lengths(self): - # We do *not* catch - # 'securesystemslib.exceptions.DownloadLengthMismatchError' in the - # following two calls because the file at 'self.url' contains enough bytes - # to satisfy the smaller number of required bytes requested. - # safe_download() and unsafe_download() will only log a warning when the - # the server-reported length of the file does not match the - # required_length. 'updater.py' *does* verify the hashes of downloaded - # content. - download.safe_download(self.url, self.target_data_length - 4, self.fetcher).close() - download.unsafe_download(self.url, self.target_data_length - 4, self.fetcher).close() - - # We catch 'tuf.exceptions.DownloadLengthMismatchError' for safe_download() - # because it will not download more bytes than requested (in this case, a - # length greater than the size of the target file). 
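For orientation, here is a rough sketch of the length-capping behavior these download tests probe, written against plain `requests` rather than tuf's actual download module; a strict variant like safe_download() would raise once more than `required_length` bytes arrive, instead of silently truncating:

```python
import requests

def download_capped(url, required_length, chunk_size=8192):
    # Read at most `required_length` bytes so that a malicious or
    # misconfigured server cannot feed the client endless data; content
    # hashes are verified separately by the updater.
    received = b''
    with requests.get(url, stream=True, timeout=30) as response:
        response.raise_for_status()
        for chunk in response.iter_content(chunk_size):
            received += chunk
            if len(received) >= required_length:
                break
    return received[:required_length]
```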
- self.assertRaises(tuf.exceptions.DownloadLengthMismatchError, - download.safe_download, self.url, self.target_data_length + 1, self.fetcher) - - # Calling unsafe_download() with a mismatched length should not raise an - # exception. - download.unsafe_download(self.url, self.target_data_length + 1, self.fetcher).close() - - - - def test_download_url_to_tempfileobj_and_performance(self): - - """ - # Measuring performance of 'auto_flush = False' vs. 'auto_flush = True' - # in download._download_file() during write. No change was observed. - star_cpu = time.clock() - star_real = time.time() - - temp_fileobj = download_file(self.url, - self.target_data_length) - - end_cpu = time.clock() - end_real = time.time() - - self.assertEqual(self.target_data, temp_fileobj.read()) - self.assertEqual(self.target_data_length, len(temp_fileobj.read())) - temp_fileobj.close() - - print "Performance cpu time: "+str(end_cpu - star_cpu) - print "Performance real time: "+str(end_real - star_real) - - # TODO: [Not urgent] Show the difference by setting write(auto_flush=False) - """ - - - # Test: Incorrect/Unreachable URLs. - def test_download_url_to_tempfileobj_and_urls(self): - - download_file = download.safe_download - unsafe_download_file = download.unsafe_download - - with self.assertRaises(securesystemslib.exceptions.FormatError): - download_file(None, self.target_data_length, self.fetcher) - - url = 'http://' + utils.TEST_HOST_ADDRESS + ':' \ - + str(self.server_process_handler.port) + '/' + self.random_string() - with self.assertRaises(tuf.exceptions.FetcherHTTPError) as cm: - download_file(url, self.target_data_length, self.fetcher) - self.assertEqual(cm.exception.status_code, 404) - - url1 = 'http://' + utils.TEST_HOST_ADDRESS + ':' \ - + str(self.server_process_handler.port + 1) + '/' + self.random_string() - with self.assertRaises(requests.exceptions.ConnectionError): - download_file(url1, self.target_data_length, self.fetcher) - - # Specify an unsupported URI scheme. - url_with_unsupported_uri = self.url.replace('http', 'file') - self.assertRaises(requests.exceptions.InvalidSchema, download_file, url_with_unsupported_uri, - self.target_data_length, self.fetcher) - self.assertRaises(requests.exceptions.InvalidSchema, unsafe_download_file, - url_with_unsupported_uri, self.target_data_length, self.fetcher) - - - - - - ''' - # This test uses sites on the internet, requiring a net connection to succeed. - # Since this is the only such test in TUF, I'm not going to enable it... but - # it's here in case it's useful for diagnosis. - def test_https_validation(self): - """ - Use some known URLs on the net to ensure that TUF download checks SSL - certificates appropriately. - """ - # We should never get as far as the target file download itself, so the - # length we pass to safe_download and unsafe_download shouldn't matter. - irrelevant_length = 10 - - for bad_url in [ - 'https://expired.badssl.com/', # expired certificate - 'https://wrong.host.badssl.com/', ]: # hostname verification fail - - with self.assertRaises(requests.exceptions.SSLError): - download.safe_download(bad_url, irrelevant_length) - - with self.assertRaises(requests.exceptions.SSLError): - download.unsafe_download(bad_url, irrelevant_length) - ''' - - - - - def test_https_connection(self): - """ - Try various HTTPS downloads using trusted and untrusted certificates with - and without the correct hostname listed in the SSL certificate. - """ - # Make a temporary file to be served to the client. 
- current_directory = os.getcwd() - target_filepath = self.make_temp_data_file(directory=current_directory) - - with open(target_filepath, 'r') as target_file_object: - target_data_length = len(target_file_object.read()) - - # These cert files provide various test cases: - # good: A valid cert from an older generation of test_download.py tests. - # good2: A valid cert made simultaneous to the bad certs below, with the - # same settings otherwise, tested here in case the difference - # between the way the new bad certs and the old good cert were - # generated turns out to matter at some point. - # bad: An otherwise-valid cert with the wrong hostname. The good certs - # list "localhost", but this lists "notmyhostname". - # expired: An otherwise-valid cert but which is expired (no valid dates - # exist, fwiw: startdate > enddate). - good_cert_fname = os.path.join('ssl_certs', 'ssl_cert.crt') - good2_cert_fname = os.path.join('ssl_certs', 'ssl_cert_2.crt') - bad_cert_fname = os.path.join('ssl_certs', 'ssl_cert_wronghost.crt') - expired_cert_fname = os.path.join('ssl_certs', 'ssl_cert_expired.crt') - - # Launch four HTTPS servers (serve files in the current dir). - # 1: we expect to operate correctly - # 2: also good; uses a slightly different cert (controls for the cert - # generation method used for the next two, in case it comes to matter) - # 3: run with an HTTPS certificate with an unexpected hostname - # 4: run with an HTTPS certificate that is expired - # Be sure to offset from the port used in setUp to avoid collision. - - - good_https_server_handler = utils.TestServerProcess(log=logger, - server='simple_https_server_old.py', - extra_cmd_args=[good_cert_fname]) - good2_https_server_handler = utils.TestServerProcess(log=logger, - server='simple_https_server_old.py', - extra_cmd_args=[good2_cert_fname]) - bad_https_server_handler = utils.TestServerProcess(log=logger, - server='simple_https_server_old.py', - extra_cmd_args=[bad_cert_fname]) - expd_https_server_handler = utils.TestServerProcess(log=logger, - server='simple_https_server_old.py', - extra_cmd_args=[expired_cert_fname]) - - suffix = '/' + os.path.basename(target_filepath) - good_https_url = 'https://localhost:' \ - + str(good_https_server_handler.port) + suffix - good2_https_url = 'https://localhost:' \ - + str(good2_https_server_handler.port) + suffix - bad_https_url = 'https://localhost:' \ - + str(bad_https_server_handler.port) + suffix - expired_https_url = 'https://localhost:' \ - + str(expd_https_server_handler.port) + suffix - - # Download the target file using an HTTPS connection. - - # Use try-finally solely to ensure that the server processes are killed. - try: - # Trust the certfile that happens to use a different hostname than we - # will expect. - os.environ['REQUESTS_CA_BUNDLE'] = bad_cert_fname - # Clear sessions to ensure that the certificate we just specified is used. - # TODO: Confirm necessity of this session clearing and lay out mechanics. - self.fetcher._sessions = {} - - # Try connecting to the server process with the bad cert while trusting - # the bad cert. Expect failure because even though we trust it, the - # hostname we're connecting to does not match the hostname in the cert. 
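The helper script `simple_https_server_old.py` is not part of this diff; below is a plausible minimal sketch of what such a helper does, serving the current directory over TLS with a caller-supplied certificate. The separate `keyfile` argument is an assumption here; the test's `.crt` fixtures may bundle the private key:

```python
import http.server
import ssl

def serve_https(certfile, keyfile=None, port=0):
    # Serve the current working directory over TLS, presenting the given
    # certificate. port=0 asks the OS for a free port.
    httpd = http.server.HTTPServer(
        ('localhost', port), http.server.SimpleHTTPRequestHandler)
    context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    context.load_cert_chain(certfile=certfile, keyfile=keyfile)
    httpd.socket = context.wrap_socket(httpd.socket, server_side=True)
    httpd.serve_forever()
```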
- logger.info('Trying HTTPS download of target file: ' + bad_https_url) - with warnings.catch_warnings(): - # We're ok with a slightly fishy localhost cert - warnings.filterwarnings('ignore', - category=urllib3.exceptions.SubjectAltNameWarning) - - with self.assertRaises(requests.exceptions.SSLError): - download.safe_download(bad_https_url, target_data_length, self.fetcher) - with self.assertRaises(requests.exceptions.SSLError): - download.unsafe_download(bad_https_url, target_data_length, self.fetcher) - - # Try connecting to the server processes with the good certs while not - # trusting the good certs (trusting the bad cert instead). Expect failure - # because even though the server's cert file is otherwise OK, we don't - # trust it. - logger.info('Trying HTTPS download of target file: ' + good_https_url) - with self.assertRaises(requests.exceptions.SSLError): - download.safe_download(good_https_url, target_data_length, self.fetcher) - with self.assertRaises(requests.exceptions.SSLError): - download.unsafe_download(good_https_url, target_data_length, self.fetcher) - - logger.info('Trying HTTPS download of target file: ' + good2_https_url) - with self.assertRaises(requests.exceptions.SSLError): - download.safe_download(good2_https_url, target_data_length, self.fetcher) - with self.assertRaises(requests.exceptions.SSLError): - download.unsafe_download(good2_https_url, target_data_length, self.fetcher) - - - # Configure environment to now trust the certfile that is expired. - os.environ['REQUESTS_CA_BUNDLE'] = expired_cert_fname - # Clear sessions to ensure that the certificate we just specified is used. - # TODO: Confirm necessity of this session clearing and lay out mechanics. - self.fetcher._sessions = {} - - # Try connecting to the server process with the expired cert while - # trusting the expired cert. Expect failure because even though we trust - # it, it is expired. - logger.info('Trying HTTPS download of target file: ' + expired_https_url) - with self.assertRaises(requests.exceptions.SSLError): - download.safe_download(expired_https_url, target_data_length, self.fetcher) - with self.assertRaises(requests.exceptions.SSLError): - download.unsafe_download(expired_https_url, target_data_length, self.fetcher) - - - # Try connecting to the server processes with the good certs while - # trusting the appropriate good certs. Expect success. - # TODO: expand testing to switch expected certificates back and forth a - # bit more while clearing / not clearing sessions. - os.environ['REQUESTS_CA_BUNDLE'] = good_cert_fname - # Clear sessions to ensure that the certificate we just specified is used. - # TODO: Confirm necessity of this session clearing and lay out mechanics. - self.fetcher._sessions = {} - logger.info('Trying HTTPS download of target file: ' + good_https_url) - download.safe_download(good_https_url, target_data_length, self.fetcher).close() - download.unsafe_download(good_https_url, target_data_length,self.fetcher).close() - - os.environ['REQUESTS_CA_BUNDLE'] = good2_cert_fname - # Clear sessions to ensure that the certificate we just specified is used. - # TODO: Confirm necessity of this session clearing and lay out mechanics. 
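A note on the REQUESTS_CA_BUNDLE juggling above: `requests` can also pin the trusted CA bundle per request through its `verify` parameter, which avoids both the process-wide environment change and the session-cache flush, at the cost of bypassing the fetcher's own session handling:

```python
import requests

# Using the names from the test above (placeholder values shown): trust
# this one certificate for this one request, with no environment variable
# to set and no cached session to reset afterwards.
good_https_url = 'https://localhost:8443/testfile'  # placeholder URL
good_cert_fname = 'ssl_certs/ssl_cert.crt'          # cert fixture path

response = requests.get(good_https_url, verify=good_cert_fname, timeout=30)
response.raise_for_status()
```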
- self.fetcher._sessions = {} - logger.info('Trying HTTPS download of target file: ' + good2_https_url) - download.safe_download(good2_https_url, target_data_length, self.fetcher).close() - download.unsafe_download(good2_https_url, target_data_length, self.fetcher).close() - - finally: - for proc_handler in [ - good_https_server_handler, - good2_https_server_handler, - bad_https_server_handler, - expd_https_server_handler]: - - # Cleans the resources and flush the logged lines (if any). - proc_handler.clean() - - - -# Run unit test. -if __name__ == '__main__': - utils.configure_test_logging(sys.argv) - unittest.main() diff --git a/tests/test_endless_data_attack_old.py b/tests/test_endless_data_attack_old.py deleted file mode 100755 index aafed1a26c..0000000000 --- a/tests/test_endless_data_attack_old.py +++ /dev/null @@ -1,272 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2012 - 2017, New York University and the TUF contributors -# SPDX-License-Identifier: MIT OR Apache-2.0 - -""" - - test_endless_data_attack_old.py - - - Konstantin Andrianov. - - - March 13, 2012. - - April 3, 2014. - Refactored to use the 'unittest' module (test conditions in code, rather - than verifying text output), use pre-generated repository files, and - discontinue use of the old repository tools. Minor edits to the test cases. - -vladimir.v.diaz - - - See LICENSE-MIT OR LICENSE for licensing information. - - - Simulate an endless data attack, where an updater client tries to download a - target file modified by an attacker to contain a large amount of data (a TUF - client should only download up to the file's expected length). TUF and - non-TUF client scenarios are tested. - - There is no difference between 'updates' and 'target' files. -""" - -import os -import tempfile -import shutil -import json -import logging -import unittest -import sys -from urllib import request - -import tuf -import tuf.formats -import tuf.log -import tuf.client.updater as updater -import tuf.unittest_toolbox as unittest_toolbox -import tuf.roledb - -from tests import utils - -import securesystemslib - -logger = logging.getLogger(__name__) - - -class TestEndlessDataAttack(unittest_toolbox.Modified_TestCase): - - @classmethod - def setUpClass(cls): - # Create a temporary directory to store the repository, metadata, and target - # files. 'temporary_directory' must be deleted in TearDownModule() so that - # temporary files are always removed, even when exceptions occur. - cls.temporary_directory = tempfile.mkdtemp(dir=os.getcwd()) - - # Launch a SimpleHTTPServer (serves files in the current directory). - # Test cases will request metadata and target files that have been - # pre-generated in 'tuf/tests/repository_data', which will be served by the - # SimpleHTTPServer launched here. The test cases of this unit test assume - # the pre-generated metadata files have a specific structure, such - # as a delegated role 'targets/role1', three target files, five key files, - # etc. - cls.server_process_handler = utils.TestServerProcess(log=logger) - - - - @classmethod - def tearDownClass(cls): - # Cleans the resources and flush the logged lines (if any). - cls.server_process_handler.clean() - - # Remove the temporary repository directory, which should contain all the - # metadata, targets, and key files generated of all the test cases. - shutil.rmtree(cls.temporary_directory) - - - - - def setUp(self): - # We are inheriting from custom class. 
- unittest_toolbox.Modified_TestCase.setUp(self)
-
- self.repository_name = 'test_repository1'
-
- # Copy the original repository files provided in the test folder so that
- # any modifications made to repository files are restricted to the copies.
- # The 'repository_data' directory is expected to exist in 'tuf/tests/'.
- original_repository_files = os.path.join(os.getcwd(), 'repository_data')
- temporary_repository_root = \
- self.make_temp_directory(directory=self.temporary_directory)
-
- # The original repository, keystore, and client directories will be copied
- # for each test case.
- original_repository = os.path.join(original_repository_files, 'repository')
- original_client = os.path.join(original_repository_files, 'client')
-
- # Save references to the often-needed client repository directories.
- # Test cases need these references to access metadata and target files.
- self.repository_directory = \
- os.path.join(temporary_repository_root, 'repository')
- self.client_directory = os.path.join(temporary_repository_root, 'client')
-
- # Copy the original 'repository', 'client', and 'keystore' directories
- # to the temporary repository the test cases can use.
- shutil.copytree(original_repository, self.repository_directory)
- shutil.copytree(original_client, self.client_directory)
-
- # Set the url prefix required by the 'tuf/client/updater.py' updater.
- # 'path/to/tmp/repository' -> 'localhost:8001/tmp/repository'.
- repository_basepath = self.repository_directory[len(os.getcwd()):]
- url_prefix = 'http://' + utils.TEST_HOST_ADDRESS + ':' \
- + str(self.server_process_handler.port) + repository_basepath
-
- # Set 'tuf.settings.repositories_directory' to the temporary client
- # directory copied from the original repository files.
- tuf.settings.repositories_directory = self.client_directory
- self.repository_mirrors = {'mirror1': {'url_prefix': url_prefix,
- 'metadata_path': 'metadata',
- 'targets_path': 'targets'}}
-
- # Create the repository instance. The test cases will use this client
- # updater to refresh metadata, fetch target files, etc.
- self.repository_updater = updater.Updater(self.repository_name,
- self.repository_mirrors)
-
-
- def tearDown(self):
- tuf.roledb.clear_roledb(clear_all=True)
- tuf.keydb.clear_keydb(clear_all=True)
-
- # Log stdout and stderr from the server subprocess.
- self.server_process_handler.flush_log()
-
- # Remove temporary directory
- unittest_toolbox.Modified_TestCase.tearDown(self)
-
-
- def test_without_tuf(self):
- # Verify that a target file replaced with a larger malicious version (to
- # simulate an endless data attack) is downloaded by a non-TUF client (i.e.,
- # a non-TUF client that does not verify hashes, detect mix-and-match
- # attacks, etc.) A TUF client, on the other hand, should only download
- # target files up to their expected lengths, as explicitly specified in
- # metadata or in 'tuf.settings.py' (when retrieving 'timestamp.json' and
- # 'root.json' unsafely).
-
- # Test: Download a valid target file from the repository.
- # Ensure the target file to be downloaded has not already been downloaded,
- # and generate its file size and digest. The file size and digest are
- # needed to verify that the malicious file was indeed downloaded.
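The `(length, hashes)` pairs used throughout this test come from `securesystemslib.util.get_file_details()`. For readers without securesystemslib at hand, a roughly equivalent standard-library sketch (SHA-256 is assumed as the default algorithm):

```python
import hashlib
import os

def file_details(filepath):
    # Return the byte length and a digest dict for `filepath`, mirroring
    # the (length, hashes) tuple the test unpacks from get_file_details().
    sha256 = hashlib.sha256()
    with open(filepath, 'rb') as fileobj:
        for block in iter(lambda: fileobj.read(8192), b''):
            sha256.update(block)
    return os.path.getsize(filepath), {'sha256': sha256.hexdigest()}
```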
- target_path = os.path.join(self.repository_directory, 'targets', 'file1.txt') - client_target_path = os.path.join(self.client_directory, 'file1.txt') - self.assertFalse(os.path.exists(client_target_path)) - length, hashes = securesystemslib.util.get_file_details(target_path) - fileinfo = tuf.formats.make_targets_fileinfo(length, hashes) - - url_prefix = self.repository_mirrors['mirror1']['url_prefix'] - url_file = os.path.join(url_prefix, 'targets', 'file1.txt') - - # On Windows, the URL portion should not contain backslashes. - request.urlretrieve(url_file.replace('\\', '/'), client_target_path) - - self.assertTrue(os.path.exists(client_target_path)) - length, hashes = securesystemslib.util.get_file_details(client_target_path) - download_fileinfo = tuf.formats.make_targets_fileinfo(length, hashes) - self.assertEqual(fileinfo, download_fileinfo) - - # Test: Download a target file that has been modified by an attacker with - # extra data. - with open(target_path, 'a') as file_object: - file_object.write('append large amount of data' * 100000) - large_length, hashes = securesystemslib.util.get_file_details(target_path) - malicious_fileinfo = tuf.formats.make_targets_fileinfo(large_length, hashes) - - # Is the modified file actually larger? - self.assertTrue(large_length > length) - - # On Windows, the URL portion should not contain backslashes. - request.urlretrieve(url_file.replace('\\', '/'), client_target_path) - - length, hashes = securesystemslib.util.get_file_details(client_target_path) - download_fileinfo = tuf.formats.make_targets_fileinfo(length, hashes) - - # Verify 'download_fileinfo' is unequal to the original trusted version. - self.assertNotEqual(download_fileinfo, fileinfo) - - # Verify 'download_fileinfo' is equal to the malicious version. - self.assertEqual(download_fileinfo, malicious_fileinfo) - - - - def test_with_tuf(self): - # Verify that a target file (on the remote repository) modified by an - # attacker, to contain a large amount of extra data, is not downloaded by - # the TUF client. First test that the valid target file is successfully - # downloaded. - file1_fileinfo = self.repository_updater.get_one_valid_targetinfo('file1.txt') - destination = os.path.join(self.client_directory) - self.repository_updater.download_target(file1_fileinfo, destination) - client_target_path = os.path.join(destination, 'file1.txt') - self.assertTrue(os.path.exists(client_target_path)) - - # Verify the client's downloaded file matches the repository's. - target_path = os.path.join(self.repository_directory, 'targets', 'file1.txt') - length, hashes = securesystemslib.util.get_file_details(client_target_path) - fileinfo = tuf.formats.make_targets_fileinfo(length, hashes) - - length, hashes = securesystemslib.util.get_file_details(client_target_path) - download_fileinfo = tuf.formats.make_targets_fileinfo(length, hashes) - self.assertEqual(fileinfo, download_fileinfo) - - # Modify 'file1.txt' and confirm that the TUF client only downloads up to - # the expected file length. - with open(target_path, 'a') as file_object: - file_object.write('append large amount of data' * 10000) - - # Is the modified file actually larger? - large_length, hashes = securesystemslib.util.get_file_details(target_path) - self.assertTrue(large_length > length) - - os.remove(client_target_path) - self.repository_updater.download_target(file1_fileinfo, destination) - - # A large amount of data has been appended to the original content. 
The
- # extra data appended should be discarded by the client, so the downloaded
- # file size and hash should not have changed.
- length, hashes = securesystemslib.util.get_file_details(client_target_path)
- download_fileinfo = tuf.formats.make_targets_fileinfo(length, hashes)
- self.assertEqual(fileinfo, download_fileinfo)
-
- # Test that the TUF client also refuses to download overly large metadata
- # files.
- timestamp_path = os.path.join(self.repository_directory, 'metadata',
- 'timestamp.json')
-
- original_length, hashes = securesystemslib.util.get_file_details(timestamp_path)
-
- with open(timestamp_path, 'r+') as file_object:
- timestamp_content = securesystemslib.util.load_json_file(timestamp_path)
- large_data = 'LargeTimestamp' * 10000
- timestamp_content['signed']['_type'] = large_data
- json.dump(timestamp_content, file_object, indent=1, sort_keys=True)
-
-
- modified_length, hashes = securesystemslib.util.get_file_details(timestamp_path)
- self.assertTrue(modified_length > original_length)
-
- # Does the TUF client cap an unsafely fetched 'timestamp.json' at the
- # upper limit? 'timestamp.json' must not be greater than
- # 'tuf.settings.DEFAULT_TIMESTAMP_REQUIRED_LENGTH'.
- try:
- self.repository_updater.refresh()
-
- except tuf.exceptions.NoWorkingMirrorError as exception:
- for mirror_url, mirror_error in exception.mirror_errors.items():
- self.assertTrue(isinstance(mirror_error, securesystemslib.exceptions.Error))
-
- else:
- self.fail('TUF did not prevent an endless data attack.')
-
-
-if __name__ == '__main__':
- utils.configure_test_logging(sys.argv)
- unittest.main()
diff --git a/tests/test_extraneous_dependencies_attack_old.py b/tests/test_extraneous_dependencies_attack_old.py
deleted file mode 100755
index f086e7e86f..0000000000
--- a/tests/test_extraneous_dependencies_attack_old.py
+++ /dev/null
@@ -1,214 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2013 - 2017, New York University and the TUF contributors
-# SPDX-License-Identifier: MIT OR Apache-2.0
-
-"""
-
- test_extraneous_dependencies_attack_old.py
-
-
- Zane Fisher.
-
-
- August 19, 2013.
-
- April 6, 2014.
- Refactored to use the 'unittest' module (test conditions in code, rather
- than verifying text output), use pre-generated repository files, and
- discontinue use of the old repository tools. Modified the previous scenario
- simulated for the mix-and-match attack. The metadata that specifies the
- dependencies of a project is now modified (it was previously a text file).
- -vladimir.v.diaz
-
-
- See LICENSE-MIT OR LICENSE for licensing information.
-
-
- Simulate an extraneous dependencies attack. The client attempts to download
- a file that lists all the target dependencies: one legitimate dependency,
- and one extraneous dependency. A client should not download an extraneous
- target dependency even if it is found on the repository. Valid targets are
- listed and verified by TUF metadata, such as 'targets.txt'.
-
- There is no difference between 'updates' and 'target' files.
-""" - -import os -import tempfile -import shutil -import json -import logging -import unittest -import sys - -import tuf.formats -import tuf.log -import tuf.client.updater as updater -import tuf.roledb -import tuf.keydb -import tuf.unittest_toolbox as unittest_toolbox - -from tests import utils - -import securesystemslib - -logger = logging.getLogger(__name__) - - - -class TestExtraneousDependenciesAttack(unittest_toolbox.Modified_TestCase): - - @classmethod - def setUpClass(cls): - # Create a temporary directory to store the repository, metadata, and target - # files. 'temporary_directory' must be deleted in TearDownModule() so that - # temporary files are always removed, even when exceptions occur. - cls.temporary_directory = tempfile.mkdtemp(dir=os.getcwd()) - - # Launch a SimpleHTTPServer (serves files in the current directory). - # Test cases will request metadata and target files that have been - # pre-generated in 'tuf/tests/repository_data', which will be served by the - # SimpleHTTPServer launched here. The test cases of this unit test assume - # the pre-generated metadata files have a specific structure, such - # as a delegated role 'targets/role1', three target files, five key files, - # etc. - cls.server_process_handler = utils.TestServerProcess(log=logger) - - - - @classmethod - def tearDownClass(cls): - # Cleans the resources and flush the logged lines (if any). - cls.server_process_handler.clean() - - # Remove the temporary repository directory, which should contain all the - # metadata, targets, and key files generated of all the test cases. - shutil.rmtree(cls.temporary_directory) - - - - - def setUp(self): - # We are inheriting from custom class. - unittest_toolbox.Modified_TestCase.setUp(self) - - self.repository_name = 'test_repository1' - - # Copy the original repository files provided in the test folder so that - # any modifications made to repository files are restricted to the copies. - # The 'repository_data' directory is expected to exist in 'tuf/tests/'. - original_repository_files = os.path.join(os.getcwd(), 'repository_data') - temporary_repository_root = \ - self.make_temp_directory(directory=self.temporary_directory) - - # The original repository, keystore, and client directories will be copied - # for each test case. - original_repository = os.path.join(original_repository_files, 'repository') - original_client = os.path.join(original_repository_files, 'client') - original_keystore = os.path.join(original_repository_files, 'keystore') - - # Save references to the often-needed client repository directories. - # Test cases need these references to access metadata and target files. - self.repository_directory = \ - os.path.join(temporary_repository_root, 'repository') - self.client_directory = os.path.join(temporary_repository_root, 'client') - self.keystore_directory = os.path.join(temporary_repository_root, 'keystore') - - # Copy the original 'repository', 'client', and 'keystore' directories - # to the temporary repository the test cases can use. - shutil.copytree(original_repository, self.repository_directory) - shutil.copytree(original_client, self.client_directory) - shutil.copytree(original_keystore, self.keystore_directory) - - # Set the url prefix required by the 'tuf/client/updater.py' updater. - # 'path/to/tmp/repository' -> 'localhost:8001/tmp/repository'. 
- repository_basepath = self.repository_directory[len(os.getcwd()):]
- url_prefix = 'http://' + utils.TEST_HOST_ADDRESS + ':' \
- + str(self.server_process_handler.port) + repository_basepath
-
- # Set 'tuf.settings.repositories_directory' to the temporary client
- # directory copied from the original repository files.
- tuf.settings.repositories_directory = self.client_directory
- self.repository_mirrors = {'mirror1': {'url_prefix': url_prefix,
- 'metadata_path': 'metadata',
- 'targets_path': 'targets'}}
-
- # Create the repository instance. The test cases will use this client
- # updater to refresh metadata, fetch target files, etc.
- self.repository_updater = updater.Updater(self.repository_name,
- self.repository_mirrors)
-
-
- def tearDown(self):
- tuf.roledb.clear_roledb(clear_all=True)
- tuf.keydb.clear_keydb(clear_all=True)
-
- # Log stdout and stderr from the server subprocess.
- self.server_process_handler.flush_log()
-
- # Remove temporary directory
- unittest_toolbox.Modified_TestCase.tearDown(self)
-
-
- def test_with_tuf(self):
- # An attacker tries to trick a client into installing an extraneous target
- # file (a valid file on the repository, in this case) by listing it in the
- # project's metadata file. For the purposes of test_with_tuf(),
- # 'role1.json' is treated as the metadata file that indicates all
- # the files needed to install/update the 'role1' project. The attacker
- # simply adds the extraneous target file to 'role1.json', which the TUF
- # client should reject as improperly signed.
- role1_filepath = os.path.join(self.repository_directory, 'metadata',
- 'role1.json')
- file1_filepath = os.path.join(self.repository_directory, 'targets',
- 'file1.txt')
- length, hashes = securesystemslib.util.get_file_details(file1_filepath)
-
- role1_metadata = securesystemslib.util.load_json_file(role1_filepath)
- role1_metadata['signed']['targets']['/file2.txt'] = {}
- role1_metadata['signed']['targets']['/file2.txt']['hashes'] = hashes
- role1_metadata['signed']['targets']['/file2.txt']['length'] = length
-
- tuf.formats.check_signable_object_format(role1_metadata)
-
- with open(role1_filepath, 'wt') as file_object:
- json.dump(role1_metadata, file_object, indent=1, sort_keys=True)
-
- # Uninstall the metadata of the top-level roles so that the client can
- # download and detect the invalid 'role1.json'.
- os.remove(os.path.join(self.client_directory, self.repository_name,
- 'metadata', 'current', 'snapshot.json'))
- os.remove(os.path.join(self.client_directory, self.repository_name,
- 'metadata', 'current', 'targets.json'))
- os.remove(os.path.join(self.client_directory, self.repository_name,
- 'metadata', 'current', 'timestamp.json'))
- os.remove(os.path.join(self.client_directory, self.repository_name,
- 'metadata', 'current', 'role1.json'))
-
- # Verify that the TUF client rejects the invalid metadata and refuses to
- # continue the update process.
- self.repository_updater.refresh()
-
- try:
- with utils.ignore_deprecation_warnings('tuf.client.updater'):
- self.repository_updater.targets_of_role('role1')
-
- # Verify that a 'securesystemslib.exceptions.BadSignatureError' exception
- # is raised by each mirror.
- except tuf.exceptions.NoWorkingMirrorError as exception:
- for mirror_url, mirror_error in exception.mirror_errors.items():
- url_prefix = self.repository_mirrors['mirror1']['url_prefix']
- url_file = os.path.join(url_prefix, 'metadata', 'role1.json')
-
- # Verify that 'role1.json' is the culprit.
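Why the tampering above must fail, in miniature: the signatures on 'role1.json' cover a canonical serialization of its 'signed' object, so inserting '/file2.txt' changes the signed bytes and invalidates every existing signature. A self-contained sketch, with a hash standing in for actual signature verification:

```python
import hashlib
import json

def payload_digest(signed):
    # Stand-in for sign/verify: digest a canonical serialization of the
    # 'signed' object, as a signature scheme effectively does.
    canonical = json.dumps(signed, sort_keys=True, separators=(',', ':'))
    return hashlib.sha256(canonical.encode('utf-8')).hexdigest()

signed = {'_type': 'targets', 'targets': {'/file1.txt': {'length': 31}}}
trusted = payload_digest(signed)

# The attacker's edit: list one extra, extraneous target file.
signed['targets']['/file2.txt'] = {'length': 39}

# The signed bytes changed, so the original signature no longer verifies.
assert payload_digest(signed) != trusted
```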
- self.assertEqual(url_file.replace('\\', '/'), mirror_url) - self.assertTrue(isinstance(mirror_error, securesystemslib.exceptions.BadSignatureError)) - - else: - self.fail('TUF did not prevent an extraneous dependencies attack.') - - -if __name__ == '__main__': - utils.configure_test_logging(sys.argv) - unittest.main() diff --git a/tests/test_fetcher_ng.py b/tests/test_fetcher_ng.py index 78a8f1c68a..ee491af850 100644 --- a/tests/test_fetcher_ng.py +++ b/tests/test_fetcher_ng.py @@ -20,14 +20,13 @@ import urllib3.exceptions from tests import utils -from tuf import unittest_toolbox from tuf.api import exceptions from tuf.ngclient._internal.requests_fetcher import RequestsFetcher logger = logging.getLogger(__name__) -class TestFetcher(unittest_toolbox.Modified_TestCase): +class TestFetcher(unittest.TestCase): """Test RequestsFetcher class.""" server_process_handler: ClassVar[utils.TestServerProcess] @@ -48,29 +47,26 @@ def setUp(self) -> None: current working directory. """ - unittest_toolbox.Modified_TestCase.setUp(self) - # Making a temporary data file. - current_dir = os.getcwd() - target_filepath = self.make_temp_data_file(directory=current_dir) - - with open(target_filepath, "r", encoding="utf8") as target_fileobj: - self.file_contents = target_fileobj.read() - self.file_length = len(self.file_contents) + self.file_contents = b"junk data" + self.file_length = len(self.file_contents) + with tempfile.NamedTemporaryFile( + dir=os.getcwd(), delete=False + ) as self.target_file: + self.target_file.write(self.file_contents) - self.rel_target_filepath = os.path.basename(target_filepath) self.url_prefix = ( f"http://{utils.TEST_HOST_ADDRESS}:" f"{str(self.server_process_handler.port)}" ) - self.url = f"{self.url_prefix}/{self.rel_target_filepath}" + target_filename = os.path.basename(self.target_file.name) + self.url = f"{self.url_prefix}/{target_filename}" # Instantiate a concrete instance of FetcherInterface self.fetcher = RequestsFetcher() def tearDown(self) -> None: - # Remove temporary directory - unittest_toolbox.Modified_TestCase.tearDown(self) + os.remove(self.target_file.name) # Simple fetch. 
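The test_fetcher_ng.py hunk above replaces the `unittest_toolbox` temp-file helpers with `tempfile.NamedTemporaryFile(delete=False)`. The `delete=False` is load-bearing: on Windows a `NamedTemporaryFile` cannot be reopened by name while it is still open, so the fixture is written and closed up front, served by name, and removed explicitly in `tearDown()`. The pattern in isolation:

```python
import os
import tempfile
import unittest

class FixtureExample(unittest.TestCase):
    def setUp(self):
        # Write the fixture, then close it (at the end of the `with`) so a
        # file server can open it by name; delete=False keeps it on disk.
        with tempfile.NamedTemporaryFile(dir=os.getcwd(), delete=False) as f:
            f.write(b'junk data')
            self.path = f.name

    def tearDown(self):
        # With delete=False, cleanup is the test's responsibility.
        os.remove(self.path)

    def test_fixture_exists(self):
        self.assertTrue(os.path.exists(self.path))
```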
def test_fetch(self) -> None: @@ -79,9 +75,7 @@ def test_fetch(self) -> None: temp_file.write(chunk) temp_file.seek(0) - self.assertEqual( - self.file_contents, temp_file.read().decode("utf-8") - ) + self.assertEqual(self.file_contents, temp_file.read()) # URL data downloaded in more than one chunk def test_fetch_in_chunks(self) -> None: @@ -102,16 +96,14 @@ def test_fetch_in_chunks(self) -> None: chunks_count += 1 temp_file.seek(0) - self.assertEqual( - self.file_contents, temp_file.read().decode("utf-8") - ) + self.assertEqual(self.file_contents, temp_file.read()) # Check that we calculate chunks as expected self.assertEqual(chunks_count, expected_chunks_count) # Incorrect URL parsing def test_url_parsing(self) -> None: with self.assertRaises(exceptions.DownloadError): - self.fetcher.fetch(self.random_string()) + self.fetcher.fetch("missing-scheme-and-hostname-in-url") # File not found error def test_http_error(self) -> None: @@ -148,12 +140,12 @@ def test_session_get_timeout(self, mock_session_get: Any) -> None: # Simple bytes download def test_download_bytes(self) -> None: data = self.fetcher.download_bytes(self.url, self.file_length) - self.assertEqual(self.file_contents, data.decode("utf-8")) + self.assertEqual(self.file_contents, data) # Download file smaller than required max_length def test_download_bytes_upper_length(self) -> None: data = self.fetcher.download_bytes(self.url, self.file_length + 4) - self.assertEqual(self.file_contents, data.decode("utf-8")) + self.assertEqual(self.file_contents, data) # Download a file bigger than expected def test_download_bytes_length_mismatch(self) -> None: diff --git a/tests/test_fetcher_old.py b/tests/test_fetcher_old.py deleted file mode 100644 index 10e43354bf..0000000000 --- a/tests/test_fetcher_old.py +++ /dev/null @@ -1,134 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2021, New York University and the TUF contributors -# SPDX-License-Identifier: MIT OR Apache-2.0 - -"""Unit test for RequestsFetcher. -""" - -import io -import logging -import math -import os -import sys -import tempfile -import unittest - -import tuf -import tuf.exceptions -import tuf.requests_fetcher -from tests import utils -from tuf import unittest_toolbox - -logger = logging.getLogger(__name__) - - -class TestFetcher(unittest_toolbox.Modified_TestCase): - """Unit tests for RequestFetcher.""" - - def setUp(self): - """ - Create a temporary file and launch a simple server in the - current working directory. - """ - - unittest_toolbox.Modified_TestCase.setUp(self) - - # Making a temporary file. - current_dir = os.getcwd() - target_filepath = self.make_temp_data_file(directory=current_dir) - with open(target_filepath, "r", encoding="utf8") as target_fileobj: - self.file_contents = target_fileobj.read() - self.file_length = len(self.file_contents) - - # Launch a SimpleHTTPServer (serves files in the current dir). - self.server_process_handler = utils.TestServerProcess(log=logger) - - rel_target_filepath = os.path.basename(target_filepath) - self.url = ( - "http://" - + utils.TEST_HOST_ADDRESS - + ":" - + str(self.server_process_handler.port) - + "/" - + rel_target_filepath - ) - - # Create a temporary file where the target file chunks are written - # during fetching - # pylint: disable-next=consider-using-with - self.temp_file = tempfile.TemporaryFile() - self.fetcher = tuf.requests_fetcher.RequestsFetcher() - - # Stop server process and perform clean up. - def tearDown(self): - # Cleans the resources and flush the logged lines (if any). 
- self.server_process_handler.clean() - - self.temp_file.close() - - # Remove temporary directory - unittest_toolbox.Modified_TestCase.tearDown(self) - - # Test: Normal case. - def test_fetch(self): - for chunk in self.fetcher.fetch(self.url, self.file_length): - self.temp_file.write(chunk) - - self.temp_file.seek(0) - temp_file_data = self.temp_file.read().decode("utf-8") - self.assertEqual(self.file_contents, temp_file_data) - - # Test if fetcher downloads file up to a required length - def test_fetch_restricted_length(self): - for chunk in self.fetcher.fetch(self.url, self.file_length - 4): - self.temp_file.write(chunk) - - self.temp_file.seek(0, io.SEEK_END) - self.assertEqual(self.temp_file.tell(), self.file_length - 4) - - # Test that fetcher does not download more than actual file length - def test_fetch_upper_length(self): - for chunk in self.fetcher.fetch(self.url, self.file_length + 4): - self.temp_file.write(chunk) - - self.temp_file.seek(0, io.SEEK_END) - self.assertEqual(self.temp_file.tell(), self.file_length) - - # Test incorrect URL parsing - def test_url_parsing(self): - with self.assertRaises(tuf.exceptions.URLParsingError): - self.fetcher.fetch(self.random_string(), self.file_length) - - # Test: Normal case with url data downloaded in more than one chunk - def test_fetch_in_chunks(self): - # Set smaller chunk size to ensure that the file will be downloaded - # in more than one chunk - default_chunk_size = tuf.settings.CHUNK_SIZE - tuf.settings.CHUNK_SIZE = 4 - - # expected_chunks_count: 3 - expected_chunks_count = math.ceil( - self.file_length / tuf.settings.CHUNK_SIZE - ) - self.assertEqual(expected_chunks_count, 3) - - chunks_count = 0 - for chunk in self.fetcher.fetch(self.url, self.file_length): - self.temp_file.write(chunk) - chunks_count += 1 - - self.temp_file.seek(0) - temp_file_data = self.temp_file.read().decode("utf-8") - self.assertEqual(self.file_contents, temp_file_data) - # Check that we calculate chunks as expected - self.assertEqual(chunks_count, expected_chunks_count) - - # Restore default settings - tuf.settings.CHUNK_SIZE = default_chunk_size - - -# Run unit test. -if __name__ == "__main__": - utils.configure_test_logging(sys.argv) - unittest.main() diff --git a/tests/test_formats_old.py b/tests/test_formats_old.py deleted file mode 100755 index 498be2d107..0000000000 --- a/tests/test_formats_old.py +++ /dev/null @@ -1,971 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2012 - 2017, New York University and the TUF contributors -# SPDX-License-Identifier: MIT OR Apache-2.0 - -""" - - test_formats_old.py - - - Vladimir Diaz - - - October 2012. - - - See LICENSE-MIT OR LICENSE for licensing information. - - - Unit test for 'formats.py' -""" - -import unittest -import datetime -import sys -import os - -import tuf -import tuf.formats - -from tests import utils - -import securesystemslib -import securesystemslib.util - - -class TestFormats(unittest.TestCase): - def setUp(self): - pass - - - - def tearDown(self): - pass - - - - def test_schemas(self): - # Test conditions for valid schemas. 
- valid_schemas = { - 'ISO8601_DATETIME_SCHEMA': (securesystemslib.formats.ISO8601_DATETIME_SCHEMA, - '1985-10-21T13:20:00Z'), - - 'UNIX_TIMESTAMP_SCHEMA': (securesystemslib.formats.UNIX_TIMESTAMP_SCHEMA, 499137720), - - 'HASH_SCHEMA': (securesystemslib.formats.HASH_SCHEMA, 'A4582BCF323BCEF'), - - 'HASHDICT_SCHEMA': (securesystemslib.formats.HASHDICT_SCHEMA, - {'sha256': 'A4582BCF323BCEF'}), - - 'HEX_SCHEMA': (securesystemslib.formats.HEX_SCHEMA, 'A4582BCF323BCEF'), - - 'KEYID_SCHEMA': (securesystemslib.formats.KEYID_SCHEMA, '123456789abcdef'), - - 'KEYIDS_SCHEMA': (securesystemslib.formats.KEYIDS_SCHEMA, - ['123456789abcdef', '123456789abcdef']), - - 'SCHEME_SCHEMA': (securesystemslib.formats.SCHEME_SCHEMA, 'rsassa-pss-sha256'), - - 'RELPATH_SCHEMA': (tuf.formats.RELPATH_SCHEMA, 'metadata/root/'), - - 'RELPATHS_SCHEMA': (tuf.formats.RELPATHS_SCHEMA, - ['targets/role1/', 'targets/role2/']), - - 'PATH_SCHEMA': (securesystemslib.formats.PATH_SCHEMA, '/home/someuser/'), - - 'PATHS_SCHEMA': (securesystemslib.formats.PATHS_SCHEMA, - ['/home/McFly/', '/home/Tannen/']), - - 'URL_SCHEMA': (securesystemslib.formats.URL_SCHEMA, - 'https://www.updateframework.com/'), - - 'VERSION_SCHEMA': (tuf.formats.VERSION_SCHEMA, - {'major': 1, 'minor': 0, 'fix': 8}), - - 'LENGTH_SCHEMA': (tuf.formats.LENGTH_SCHEMA, 8), - - 'NAME_SCHEMA': (securesystemslib.formats.NAME_SCHEMA, 'Marty McFly'), - - 'BOOLEAN_SCHEMA': (securesystemslib.formats.BOOLEAN_SCHEMA, True), - - 'THRESHOLD_SCHEMA': (tuf.formats.THRESHOLD_SCHEMA, 1), - - 'ROLENAME_SCHEMA': (tuf.formats.ROLENAME_SCHEMA, 'Root'), - - 'RSAKEYBITS_SCHEMA': (securesystemslib.formats.RSAKEYBITS_SCHEMA, 4096), - - 'PASSWORD_SCHEMA': (securesystemslib.formats.PASSWORD_SCHEMA, 'secret'), - - 'PASSWORDS_SCHEMA': (securesystemslib.formats.PASSWORDS_SCHEMA, ['pass1', 'pass2']), - - 'KEYVAL_SCHEMA': (securesystemslib.formats.KEYVAL_SCHEMA, - {'public': 'pubkey', 'private': 'privkey'}), - - 'KEY_SCHEMA': (securesystemslib.formats.KEY_SCHEMA, - {'keytype': 'rsa', - 'scheme': 'rsassa-pss-sha256', - 'keyval': {'public': 'pubkey', - 'private': 'privkey'}}), - - 'RSAKEY_SCHEMA': (securesystemslib.formats.RSAKEY_SCHEMA, - {'keytype': 'rsa', - 'scheme': 'rsassa-pss-sha256', - 'keyid': '123456789abcdef', - 'keyval': {'public': 'pubkey', - 'private': 'privkey'}}), - - 'TARGETS_FILEINFO_SCHEMA': (tuf.formats.TARGETS_FILEINFO_SCHEMA, - {'length': 1024, - 'hashes': {'sha256': 'A4582BCF323BCEF'}, - 'custom': {'type': 'paintjob'}}), - - 'METADATA_FILEINFO_SCHEMA': (tuf.formats.METADATA_FILEINFO_SCHEMA, - {'length': 1024, - 'hashes': {'sha256': 'A4582BCF323BCEF'}, - 'version': 1}), - - 'FILEDICT_SCHEMA': (tuf.formats.FILEDICT_SCHEMA, - {'metadata/root.json': {'length': 1024, - 'hashes': {'sha256': 'ABCD123'}, - 'custom': {'type': 'metadata'}}}), - - 'TARGETINFO_SCHEMA': (tuf.formats.TARGETINFO_SCHEMA, - {'filepath': 'targets/target1.gif', - 'fileinfo': {'length': 1024, - 'hashes': {'sha256': 'ABCD123'}, - 'custom': {'type': 'target'}}}), - - 'TARGETINFOS_SCHEMA': (tuf.formats.TARGETINFOS_SCHEMA, - [{'filepath': 'targets/target1.gif', - 'fileinfo': {'length': 1024, - 'hashes': {'sha256': 'ABCD123'}, - 'custom': {'type': 'target'}}}]), - - 'SIGNATURE_SCHEMA': (securesystemslib.formats.SIGNATURE_SCHEMA, - {'keyid': '123abc', - 'sig': 'A4582BCF323BCEF'}), - - 'SIGNATURESTATUS_SCHEMA': (tuf.formats.SIGNATURESTATUS_SCHEMA, - {'threshold': 1, - 'good_sigs': ['123abc'], - 'bad_sigs': ['123abc'], - 'unknown_sigs': ['123abc'], - 'untrusted_sigs': ['123abc'], - 'unknown_signing_schemes': 
['123abc']}), - - 'SIGNABLE_SCHEMA': (tuf.formats.SIGNABLE_SCHEMA, - {'signed': 'signer', - 'signatures': [{'keyid': '123abc', - 'sig': 'A4582BCF323BCEF'}]}), - - 'KEYDICT_SCHEMA': (securesystemslib.formats.KEYDICT_SCHEMA, - {'123abc': {'keytype': 'rsa', - 'scheme': 'rsassa-pss-sha256', - 'keyval': {'public': 'pubkey', - 'private': 'privkey'}}}), - - 'KEYDB_SCHEMA': (tuf.formats.KEYDB_SCHEMA, - {'123abc': {'keytype': 'rsa', - 'scheme': 'rsassa-pss-sha256', - 'keyid': '123456789abcdef', - 'keyval': {'public': 'pubkey', - 'private': 'privkey'}}}), - - 'SCPCONFIG_SCHEMA': (tuf.formats.SCPCONFIG_SCHEMA, - {'general': {'transfer_module': 'scp', - 'metadata_path': '/path/meta.json', - 'targets_directory': '/targets'}, - 'scp': {'host': 'http://localhost:8001', - 'user': 'McFly', - 'identity_file': '/home/.ssh/file', - 'remote_directory': '/home/McFly'}}), - - 'RECEIVECONFIG_SCHEMA': (tuf.formats.RECEIVECONFIG_SCHEMA, - {'general': {'transfer_module': 'scp', - 'pushroots': ['/pushes'], - 'repository_directory': '/repo', - 'metadata_directory': '/repo/meta', - 'targets_directory': '/repo/targets', - 'backup_directory': '/repo/backup'}}), - - 'ROLE_SCHEMA': (tuf.formats.ROLE_SCHEMA, - {'keyids': ['123abc'], - 'threshold': 1, - 'paths': ['path1/', 'path2']}), - - 'ROLEDICT_SCHEMA': (tuf.formats.ROLEDICT_SCHEMA, - {'root': {'keyids': ['123abc'], - 'threshold': 1, - 'paths': ['path1/', 'path2']}}), - - 'ROOT_SCHEMA': (tuf.formats.ROOT_SCHEMA, - {'_type': 'root', - 'spec_version': '1.0.0', - 'version': 8, - 'consistent_snapshot': False, - 'expires': '1985-10-21T13:20:00Z', - 'keys': {'123abc': {'keytype': 'rsa', - 'scheme': 'rsassa-pss-sha256', - 'keyval': {'public': 'pubkey', - 'private': 'privkey'}}}, - 'roles': {'root': {'keyids': ['123abc'], - 'threshold': 1, - 'paths': ['path1/', 'path2']}}}), - - 'TARGETS_SCHEMA': (tuf.formats.TARGETS_SCHEMA, - {'_type': 'targets', - 'spec_version': '1.0.0', - 'version': 8, - 'expires': '1985-10-21T13:20:00Z', - 'targets': {'metadata/targets.json': {'length': 1024, - 'hashes': {'sha256': 'ABCD123'}, - 'custom': {'type': 'metadata'}}}, - 'delegations': {'keys': {'123abc': {'keytype':'rsa', - 'scheme': 'rsassa-pss-sha256', - 'keyval': {'public': 'pubkey', - 'private': 'privkey'}}}, - 'roles': [{'name': 'root', 'keyids': ['123abc'], - 'threshold': 1, - 'paths': ['path1/', 'path2']}]}}), - - 'SNAPSHOT_SCHEMA': (tuf.formats.SNAPSHOT_SCHEMA, - {'_type': 'snapshot', - 'spec_version': '1.0.0', - 'version': 8, - 'expires': '1985-10-21T13:20:00Z', - 'meta': {'snapshot.json': {'version': 1024}}}), - - 'TIMESTAMP_SCHEMA': (tuf.formats.TIMESTAMP_SCHEMA, - {'_type': 'timestamp', - 'spec_version': '1.0.0', - 'version': 8, - 'expires': '1985-10-21T13:20:00Z', - 'meta': {'metadattimestamp.json': {'length': 1024, - 'hashes': {'sha256': 'AB1245'}, - 'version': 1}}}), - - 'MIRROR_SCHEMA': (tuf.formats.MIRROR_SCHEMA, - {'url_prefix': 'http://localhost:8001', - 'metadata_path': 'metadata/', - 'targets_path': 'targets/', - 'confined_target_dirs': ['path1/', 'path2/'], - 'custom': {'type': 'mirror'}}), - - 'MIRROR_SCHEMA_NO_CONFINED_TARGETS': (tuf.formats.MIRROR_SCHEMA, - {'url_prefix': 'http://localhost:8001', - 'metadata_path': 'metadata/', - 'targets_path': 'targets/', - 'custom': {'type': 'mirror'}}), - - 'MIRRORDICT_SCHEMA': (tuf.formats.MIRRORDICT_SCHEMA, - {'mirror1': {'url_prefix': 'http://localhost:8001', - 'metadata_path': 'metadata/', - 'targets_path': 'targets/', - 'confined_target_dirs': ['path1/', 'path2/'], - 'custom': {'type': 'mirror'}}}), - - 'MIRRORLIST_SCHEMA': 
(tuf.formats.MIRRORLIST_SCHEMA,
-                          {'_type': 'mirrors',
-                           'version': 8,
-                           'spec_version': '1.0.0',
-                           'expires': '1985-10-21T13:20:00Z',
-                           'mirrors': [{'url_prefix': 'http://localhost:8001',
-                           'metadata_path': 'metadata/',
-                           'targets_path': 'targets/',
-                           'confined_target_dirs': ['path1/', 'path2/'],
-                           'custom': {'type': 'mirror'}}]})}
-
-    # Iterate 'valid_schemas', ensuring each 'valid_schema' correctly matches
-    # its respective 'schema_type'.
-    for schema_name, (schema_type, valid_schema) in valid_schemas.items():
-      if not schema_type.matches(valid_schema):
-        print('bad schema: ' + repr(valid_schema))
-      self.assertEqual(True, schema_type.matches(valid_schema))
-
-    # Test conditions for invalid schemas.
-    # Set the 'valid_schema' of 'valid_schemas' to an invalid
-    # value and test that it does not match 'schema_type'.
-    for schema_name, (schema_type, valid_schema) in valid_schemas.items():
-      invalid_schema = 0xBAD
-      if isinstance(schema_type, securesystemslib.schema.Integer):
-        invalid_schema = 'BAD'
-      self.assertEqual(False, schema_type.matches(invalid_schema))
-
-
-  def test_specification_version_schema(self):
-    """Test valid and invalid SPECIFICATION_VERSION_SCHEMAs, using examples
-    from 'regex101.com/r/Ly7O1x/3/', referenced by
-    'semver.org/spec/v2.0.0.html'. """
-    valid_schemas = [
-        "0.0.4",
-        "1.2.3",
-        "10.20.30",
-        "1.1.2-prerelease+meta",
-        "1.1.2+meta",
-        "1.1.2+meta-valid",
-        "1.0.0-alpha",
-        "1.0.0-beta",
-        "1.0.0-alpha.beta",
-        "1.0.0-alpha.beta.1",
-        "1.0.0-alpha.1",
-        "1.0.0-alpha0.valid",
-        "1.0.0-alpha.0valid",
-        "1.0.0-alpha-a.b-c-somethinglong+build.1-aef.1-its-okay",
-        "1.0.0-rc.1+build.1",
-        "2.0.0-rc.1+build.123",
-        "1.2.3-beta",
-        "10.2.3-DEV-SNAPSHOT",
-        "1.2.3-SNAPSHOT-123",
-        "1.0.0",
-        "2.0.0",
-        "1.1.7",
-        "2.0.0+build.1848",
-        "2.0.1-alpha.1227",
-        "1.0.0-alpha+beta",
-        "1.2.3----RC-SNAPSHOT.12.9.1--.12+788",
-        "1.2.3----R-S.12.9.1--.12+meta",
-        "1.2.3----RC-SNAPSHOT.12.9.1--.12",
-        "1.0.0+0.build.1-rc.10000aaa-kk-0.1",
-        "99999999999999999999999.999999999999999999.99999999999999999",
-        "1.0.0-0A.is.legal"]
-
-    for valid_schema in valid_schemas:
-      self.assertTrue(
-          tuf.formats.SPECIFICATION_VERSION_SCHEMA.matches(valid_schema),
-          "'{}' should match 'SPECIFICATION_VERSION_SCHEMA'.".format(
-          valid_schema))
-
-    invalid_schemas = [
-        "1",
-        "1.2",
-        "1.2.3-0123",
-        "1.2.3-0123.0123",
-        "1.1.2+.123",
-        "+invalid",
-        "-invalid",
-        "-invalid+invalid",
-        "-invalid.01",
-        "alpha",
-        "alpha.beta",
-        "alpha.beta.1",
-        "alpha.1",
-        "alpha+beta",
-        "alpha_beta",
-        "alpha.",
-        "alpha..",
-        "beta",
-        "1.0.0-alpha_beta",
-        "-alpha.",
-        "1.0.0-alpha..",
-        "1.0.0-alpha..1",
-        "1.0.0-alpha...1",
-        "1.0.0-alpha....1",
-        "1.0.0-alpha.....1",
-        "1.0.0-alpha......1",
-        "1.0.0-alpha.......1",
-        "01.1.1",
-        "1.01.1",
-        "1.1.01",
-        "1.2",
-        "1.2.3.DEV",
-        "1.2-SNAPSHOT",
-        "1.2.31.2.3----RC-SNAPSHOT.12.09.1--..12+788",
-        "1.2-RC-SNAPSHOT",
-        "-1.0.3-gamma+b7718",
-        "+justmeta",
-        "9.8.7+meta+meta",
-        "9.8.7-whatever+meta+meta",
-        "99999999999999999999999.999999999999999999.99999999999999999----RC-SNAPSHOT.12.09.1--------------------------------..12"]
-
-    for invalid_schema in invalid_schemas:
-      self.assertFalse(
-          tuf.formats.SPECIFICATION_VERSION_SCHEMA.matches(invalid_schema),
-          "'{}' should not match 'SPECIFICATION_VERSION_SCHEMA'.".format(
-          invalid_schema))
-
-
-  def test_build_dict_conforming_to_schema(self):
-    # Test construction of a few metadata formats using
-    # build_dict_conforming_to_schema().
-
-    # Try the wrong type of schema object.
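# A hedged sketch of what SPECIFICATION_VERSION_SCHEMA effectively checks.
# The pattern below is a simplified semver matcher for illustration only: it
# accepts the MAJOR.MINOR.PATCH core (no leading zeros) plus optional
# -prerelease/+build parts, but unlike the full grammar from semver.org it
# does not reject numeric prerelease identifiers with leading zeros, such as
# the '1.2.3-0123' case in the invalid list above.
import re

_SIMPLE_SEMVER = re.compile(
    r'^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)'
    r'(?:-[0-9A-Za-z.-]+)?(?:\+[0-9A-Za-z.-]+)?$')

assert _SIMPLE_SEMVER.match('1.0.0-alpha+beta')
assert not _SIMPLE_SEMVER.match('01.1.1')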
- STRING_SCHEMA = securesystemslib.schema.AnyString() - - with self.assertRaises(ValueError): - tuf.formats.build_dict_conforming_to_schema( - STRING_SCHEMA, string='some string') - - # Try building Timestamp metadata. - spec_version = tuf.SPECIFICATION_VERSION - version = 8 - length = 88 - hashes = {'sha256': '3c7fe3eeded4a34'} - expires = '1985-10-21T13:20:00Z' - filedict = {'snapshot.json': {'length': length, 'hashes': hashes, 'version': 1}} - - - # Try with and without _type and spec_version, both of which are - # automatically populated if they are not included. - self.assertTrue(tuf.formats.TIMESTAMP_SCHEMA.matches( # both - tuf.formats.build_dict_conforming_to_schema( - tuf.formats.TIMESTAMP_SCHEMA, - _type='timestamp', - spec_version=spec_version, - version=version, - expires=expires, - meta=filedict))) - self.assertTrue(tuf.formats.TIMESTAMP_SCHEMA.matches( # neither - tuf.formats.build_dict_conforming_to_schema( - tuf.formats.TIMESTAMP_SCHEMA, - version=version, - expires=expires, - meta=filedict))) - self.assertTrue(tuf.formats.TIMESTAMP_SCHEMA.matches( # one - tuf.formats.build_dict_conforming_to_schema( - tuf.formats.TIMESTAMP_SCHEMA, - spec_version=spec_version, - version=version, - expires=expires, - meta=filedict))) - self.assertTrue(tuf.formats.TIMESTAMP_SCHEMA.matches( # the other - tuf.formats.build_dict_conforming_to_schema( - tuf.formats.TIMESTAMP_SCHEMA, - _type='timestamp', - version=version, - expires=expires, - meta=filedict))) - - - # Try test arguments for invalid Timestamp creation. - bad_spec_version = 123 - bad_version = 'eight' - bad_expires = '2000' - bad_filedict = 123 - with self.assertRaises(securesystemslib.exceptions.FormatError): - tuf.formats.build_dict_conforming_to_schema( - tuf.formats.TIMESTAMP_SCHEMA, - _type='timestamp', - spec_version=bad_spec_version, - version=version, - expires=expires, - meta=filedict) - - with self.assertRaises(securesystemslib.exceptions.FormatError): - tuf.formats.build_dict_conforming_to_schema( - tuf.formats.TIMESTAMP_SCHEMA, - _type='timestamp', - spec_version=spec_version, - version=bad_version, - expires=expires, - meta=filedict) - - with self.assertRaises(securesystemslib.exceptions.FormatError): - tuf.formats.build_dict_conforming_to_schema( - tuf.formats.TIMESTAMP_SCHEMA, - _type='timestamp', - spec_version=spec_version, - version=version, - expires=bad_expires, - meta=filedict) - - with self.assertRaises(securesystemslib.exceptions.FormatError): - tuf.formats.build_dict_conforming_to_schema( - tuf.formats.TIMESTAMP_SCHEMA, - _type='timestamp', - spec_version=spec_version, - version=version, - expires=expires, - meta=bad_filedict) - - with self.assertRaises(ValueError): - tuf.formats.build_dict_conforming_to_schema(123) - - - # Try building Root metadata. - consistent_snapshot = False - - keydict = {'123abc': {'keytype': 'rsa', - 'scheme': 'rsassa-pss-sha256', - 'keyval': {'public': 'pubkey', - 'private': 'privkey'}}} - - roledict = {'root': {'keyids': ['123abc'], - 'threshold': 1, - 'paths': ['path1/', 'path2']}} - - - self.assertTrue(tuf.formats.ROOT_SCHEMA.matches( - tuf.formats.build_dict_conforming_to_schema( - tuf.formats.ROOT_SCHEMA, - _type='root', - spec_version=spec_version, - version=version, - expires=expires, - keys=keydict, - roles=roledict, - consistent_snapshot=consistent_snapshot))) - - - # Additional test arguments for invalid Root creation. 
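# A hypothetical stand-in illustrating the contract exercised above: omitted
# '_type' and 'spec_version' arguments are auto-populated, and the finished
# dict must match the schema or a FormatError is raised. The real
# build_dict_conforming_to_schema() derives its defaults from the schema
# itself; this sketch hard-codes them for the Timestamp case only.
import securesystemslib.exceptions
import tuf
import tuf.formats

def build_timestamp_dict_sketch(**kwargs):
  kwargs.setdefault('_type', 'timestamp')
  kwargs.setdefault('spec_version', tuf.SPECIFICATION_VERSION)
  if not tuf.formats.TIMESTAMP_SCHEMA.matches(kwargs):
    raise securesystemslib.exceptions.FormatError(
        'arguments do not conform to TIMESTAMP_SCHEMA')
  return kwargs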
- bad_keydict = 123 - bad_roledict = 123 - - # TODO: Later on, write a test looper that takes pairs of key-value args - # to substitute in on each run to shorten this.... There's a lot of - # test code that looks like this, and it'd be easier to use a looper. - - with self.assertRaises(securesystemslib.exceptions.FormatError): - tuf.formats.build_dict_conforming_to_schema( - tuf.formats.ROOT_SCHEMA, - _type='root', - spec_version=bad_spec_version, - version=version, - expires=expires, - keys=keydict, - roles=roledict, - consistent_snapshot=consistent_snapshot) - - with self.assertRaises(securesystemslib.exceptions.FormatError): - tuf.formats.build_dict_conforming_to_schema( - tuf.formats.ROOT_SCHEMA, - _type='root', - spec_version=spec_version, - version=bad_version, - expires=expires, - keys=keydict, - roles=roledict, - consistent_snapshot=consistent_snapshot) - - with self.assertRaises(securesystemslib.exceptions.FormatError): - tuf.formats.build_dict_conforming_to_schema( - tuf.formats.ROOT_SCHEMA, - _type='root', - spec_version=spec_version, - version=version, - expires=bad_expires, - keys=keydict, - roles=roledict, - consistent_snapshot=consistent_snapshot) - - with self.assertRaises(securesystemslib.exceptions.FormatError): - tuf.formats.build_dict_conforming_to_schema( - tuf.formats.ROOT_SCHEMA, - _type='root', - spec_version=spec_version, - version=version, - expires=expires, - keys=bad_keydict, - roles=roledict, - consistent_snapshot=consistent_snapshot) - - with self.assertRaises(securesystemslib.exceptions.FormatError): - tuf.formats.build_dict_conforming_to_schema( - tuf.formats.ROOT_SCHEMA, - _type='root', - spec_version=spec_version, - version=version, - expires=expires, - keys=keydict, - roles=bad_roledict, - consistent_snapshot=consistent_snapshot) - - with self.assertRaises(TypeError): - tuf.formats.build_dict_conforming_to_schema( - tuf.formats.ROOT_SCHEMA, 'bad') - - with self.assertRaises(ValueError): - tuf.formats.build_dict_conforming_to_schema( - 'bad', - _type='root', - spec_version=spec_version, - version=version, - expires=expires, - keys=keydict, - roles=roledict, - consistent_snapshot=consistent_snapshot) - - - - # Try building Snapshot metadata. - versiondict = {'targets.json' : {'version': version}} - - self.assertTrue(tuf.formats.SNAPSHOT_SCHEMA.matches( - tuf.formats.build_dict_conforming_to_schema( - tuf.formats.SNAPSHOT_SCHEMA, - _type='snapshot', - spec_version=spec_version, - version=version, - expires=expires, - meta=versiondict))) - - # Additional test arguments for invalid Snapshot creation. 
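# One shape the TODO's looper could take (a hypothetical helper, not part of
# tuf.formats, written here as a free function taking the test case):
# substitute one bad value at a time into otherwise-valid keyword arguments
# and expect a FormatError for each substitution.
def assert_each_bad_kwarg_raises(self, schema, good_kwargs, bad_kwargs):
  for name, bad_value in bad_kwargs.items():
    kwargs = dict(good_kwargs, **{name: bad_value})
    with self.assertRaises(securesystemslib.exceptions.FormatError):
      tuf.formats.build_dict_conforming_to_schema(schema, **kwargs)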
- bad_versiondict = 123 - - with self.assertRaises(securesystemslib.exceptions.FormatError): - tuf.formats.build_dict_conforming_to_schema( - tuf.formats.SNAPSHOT_SCHEMA, - _type='snapshot', - spec_version=bad_spec_version, - version=version, - expires=expires, - meta=versiondict) - - with self.assertRaises(securesystemslib.exceptions.FormatError): - tuf.formats.build_dict_conforming_to_schema( - tuf.formats.SNAPSHOT_SCHEMA, - _type='snapshot', - spec_version=spec_version, - version=bad_version, - expires=expires, - meta=versiondict) - - with self.assertRaises(securesystemslib.exceptions.FormatError): - tuf.formats.build_dict_conforming_to_schema( - tuf.formats.SNAPSHOT_SCHEMA, - _type='snapshot', - spec_version=spec_version, - version=version, - expires=bad_expires, - meta=versiondict) - - with self.assertRaises(securesystemslib.exceptions.FormatError): - tuf.formats.build_dict_conforming_to_schema( - tuf.formats.SNAPSHOT_SCHEMA, - _type='snapshot', - spec_version=spec_version, - version=version, - expires=expires, - meta=bad_versiondict) - - - - # Try building Targets metadata. - filedict = {'metadata/targets.json': {'length': 1024, - 'hashes': {'sha256': 'ABCD123'}, - 'custom': {'type': 'metadata'}}} - - delegations = {'keys': {'123abc': {'keytype':'rsa', - 'scheme': 'rsassa-pss-sha256', - 'keyval': {'public': 'pubkey', - 'private': 'privkey'}}}, - 'roles': [{'name': 'root', 'keyids': ['123abc'], - 'threshold': 1, 'paths': ['path1/', 'path2']}]} - - - self.assertTrue(tuf.formats.TARGETS_SCHEMA.matches( - tuf.formats.build_dict_conforming_to_schema( - tuf.formats.TARGETS_SCHEMA, - _type='targets', - spec_version=spec_version, - version=version, - expires=expires, - targets=filedict, - delegations=delegations))) - - # Try with no delegations included (should work, since they're optional). - self.assertTrue(tuf.formats.TARGETS_SCHEMA.matches( - tuf.formats.build_dict_conforming_to_schema( - tuf.formats.TARGETS_SCHEMA, - _type='targets', - spec_version=spec_version, - version=version, - expires=expires, - targets=filedict))) - - - # Additional test arguments for invalid Targets creation. 
-    bad_filedict = 123
-    bad_delegations = 123
-
-    with self.assertRaises(securesystemslib.exceptions.FormatError):
-      tuf.formats.build_dict_conforming_to_schema(
-          tuf.formats.TARGETS_SCHEMA,
-          _type='targets',
-          spec_version=spec_version,
-          version=bad_version,
-          expires=expires,
-          targets=filedict,
-          delegations=delegations)
-
-    with self.assertRaises(securesystemslib.exceptions.FormatError):
-      tuf.formats.build_dict_conforming_to_schema(
-          tuf.formats.TARGETS_SCHEMA,
-          _type='targets',
-          spec_version=spec_version,
-          version=version,
-          expires=bad_expires,
-          targets=filedict,
-          delegations=delegations)
-
-    with self.assertRaises(securesystemslib.exceptions.FormatError):
-      tuf.formats.build_dict_conforming_to_schema(
-          tuf.formats.TARGETS_SCHEMA,
-          _type='targets',
-          spec_version=spec_version,
-          version=version,
-          expires=expires,
-          targets=bad_filedict,
-          delegations=delegations)
-
-    with self.assertRaises(securesystemslib.exceptions.FormatError):
-      tuf.formats.build_dict_conforming_to_schema(
-          tuf.formats.TARGETS_SCHEMA,
-          _type='targets',
-          spec_version=spec_version,
-          version=version,
-          expires=expires,
-          targets=filedict,
-          delegations=bad_delegations)
-
-
-
-  def test_expiry_string_to_datetime(self):
-    dt = tuf.formats.expiry_string_to_datetime('1985-10-21T13:20:00Z')
-    self.assertEqual(dt, datetime.datetime(1985, 10, 21, 13, 20, 0))
-    dt = tuf.formats.expiry_string_to_datetime('2038-01-19T03:14:08Z')
-    self.assertEqual(dt, datetime.datetime(2038, 1, 19, 3, 14, 8))
-
-    # First 3 fail via securesystemslib schema, last one because of strptime()
-    invalid_inputs = [
-        '2038-1-19T03:14:08Z', # leading zeros not optional
-        '2038-01-19T031408Z', # strict time parsing
-        '2038-01-19T03:14:08Z-06:00', # timezone not allowed
-        '2038-13-19T03:14:08Z', # too many months
-    ]
-    for invalid_input in invalid_inputs:
-      with self.assertRaises(securesystemslib.exceptions.FormatError):
-        tuf.formats.expiry_string_to_datetime(invalid_input)
-
-
-
-  def test_unix_timestamp_to_datetime(self):
-    # Test conditions for valid arguments.
-    UNIX_TIMESTAMP_SCHEMA = securesystemslib.formats.UNIX_TIMESTAMP_SCHEMA
-    self.assertIsInstance(
-        tuf.formats.unix_timestamp_to_datetime(499137720), datetime.datetime)
-    datetime_object = datetime.datetime(1985, 10, 26, 1, 22)
-    self.assertEqual(datetime_object, tuf.formats.unix_timestamp_to_datetime(499137720))
-
-    # Test conditions for invalid arguments.
-    self.assertRaises(securesystemslib.exceptions.FormatError, tuf.formats.unix_timestamp_to_datetime, 'bad')
-    self.assertRaises(securesystemslib.exceptions.FormatError, tuf.formats.unix_timestamp_to_datetime, 1000000000000000000000)
-    self.assertRaises(securesystemslib.exceptions.FormatError, tuf.formats.unix_timestamp_to_datetime, -1)
-    self.assertRaises(securesystemslib.exceptions.FormatError, tuf.formats.unix_timestamp_to_datetime, ['5'])
-
-
-
-  def test_datetime_to_unix_timestamp(self):
-    # Test conditions for valid arguments.
-    datetime_object = datetime.datetime(2015, 10, 21, 19, 28)
-    self.assertEqual(1445455680, tuf.formats.datetime_to_unix_timestamp(datetime_object))
-
-    # Test conditions for invalid arguments.
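# What the conversions tested here reduce to (a hedged sketch; the real
# functions also schema-check their inputs and re-raise ValueError as
# securesystemslib.exceptions.FormatError):
import calendar
import datetime

def expiry_string_to_datetime_sketch(expiry):
  # The ISO 8601 schema regex enforces the zero-padded 'Z' layout first;
  # strptime() then rejects impossible values such as month 13.
  return datetime.datetime.strptime(expiry, '%Y-%m-%dT%H:%M:%SZ')

def unix_timestamp_to_datetime_sketch(timestamp):
  # e.g. 499137720 -> datetime.datetime(1985, 10, 26, 1, 22)
  return datetime.datetime.utcfromtimestamp(timestamp)

def datetime_to_unix_timestamp_sketch(datetime_object):
  # e.g. datetime.datetime(2015, 10, 21, 19, 28) -> 1445455680
  return calendar.timegm(datetime_object.timetuple())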
- self.assertRaises(securesystemslib.exceptions.FormatError, tuf.formats.datetime_to_unix_timestamp, 'bad') - self.assertRaises(securesystemslib.exceptions.FormatError, tuf.formats.datetime_to_unix_timestamp, 1000000000000000000000) - self.assertRaises(securesystemslib.exceptions.FormatError, tuf.formats.datetime_to_unix_timestamp, ['1']) - - - - def test_format_base64(self): - # Test conditions for valid arguments. - data = 'updateframework'.encode('utf-8') - self.assertEqual('dXBkYXRlZnJhbWV3b3Jr', tuf.formats.format_base64(data)) - self.assertTrue(isinstance(tuf.formats.format_base64(data), str)) - - # Test conditions for invalid arguments. - self.assertRaises(securesystemslib.exceptions.FormatError, tuf.formats.format_base64, 123) - self.assertRaises(securesystemslib.exceptions.FormatError, tuf.formats.format_base64, True) - self.assertRaises(securesystemslib.exceptions.FormatError, tuf.formats.format_base64, ['123']) - - - def test_parse_base64(self): - # Test conditions for valid arguments. - base64 = 'dXBkYXRlZnJhbWV3b3Jr' - self.assertEqual(b'updateframework', tuf.formats.parse_base64(base64)) - self.assertTrue(isinstance(tuf.formats.parse_base64(base64), bytes)) - - # Test conditions for invalid arguments. - self.assertRaises(securesystemslib.exceptions.FormatError, tuf.formats.parse_base64, 123) - self.assertRaises(securesystemslib.exceptions.FormatError, tuf.formats.parse_base64, True) - self.assertRaises(securesystemslib.exceptions.FormatError, tuf.formats.parse_base64, ['123']) - self.assertRaises(securesystemslib.exceptions.FormatError, tuf.formats.parse_base64, '/') - - - - def test_make_signable(self): - # Test conditions for expected make_signable() behavior. - SIGNABLE_SCHEMA = tuf.formats.SIGNABLE_SCHEMA - root_file = os.path.join('repository_data', 'repository', 'metadata', - 'root.json') - root = securesystemslib.util.load_json_file(root_file) - self.assertTrue(SIGNABLE_SCHEMA.matches(tuf.formats.make_signable(root))) - signable = tuf.formats.make_signable(root) - self.assertEqual('root', tuf.formats.check_signable_object_format(signable)) - - self.assertEqual(signable, tuf.formats.make_signable(signable)) - - # Test conditions for miscellaneous arguments. - self.assertTrue(SIGNABLE_SCHEMA.matches(tuf.formats.make_signable('123'))) - self.assertTrue(SIGNABLE_SCHEMA.matches(tuf.formats.make_signable(123))) - - - - - - def test_make_targets_fileinfo(self): - # Test conditions for valid arguments. - length = 1024 - hashes = {'sha256': 'A4582BCF323BCEF', 'sha512': 'A4582BCF323BFEF'} - custom = {'type': 'paintjob'} - - TARGETS_FILEINFO_SCHEMA = tuf.formats.TARGETS_FILEINFO_SCHEMA - make_targets_fileinfo = tuf.formats.make_targets_fileinfo - self.assertTrue(TARGETS_FILEINFO_SCHEMA.matches(make_targets_fileinfo(length, hashes, custom))) - self.assertTrue(TARGETS_FILEINFO_SCHEMA.matches(make_targets_fileinfo(length, hashes))) - - # Test conditions for invalid arguments. 
- bad_length = 'bad' - bad_hashes = 'bad' - bad_custom = 'bad' - - self.assertRaises(securesystemslib.exceptions.FormatError, make_targets_fileinfo, - bad_length, hashes, custom) - self.assertRaises(securesystemslib.exceptions.FormatError, make_targets_fileinfo, - length, bad_hashes, custom) - self.assertRaises(securesystemslib.exceptions.FormatError, make_targets_fileinfo, - length, hashes, bad_custom) - self.assertRaises(securesystemslib.exceptions.FormatError, make_targets_fileinfo, - bad_length, hashes) - self.assertRaises(securesystemslib.exceptions.FormatError, make_targets_fileinfo, - length, bad_hashes) - - - - def test_make_metadata_fileinfo(self): - # Test conditions for valid arguments. - length = 1024 - hashes = {'sha256': 'A4582BCF323BCEF', 'sha512': 'A4582BCF323BFEF'} - version = 8 - - METADATA_FILEINFO_SCHEMA = tuf.formats.METADATA_FILEINFO_SCHEMA - make_metadata_fileinfo = tuf.formats.make_metadata_fileinfo - self.assertTrue(METADATA_FILEINFO_SCHEMA.matches(make_metadata_fileinfo( - version, length, hashes))) - self.assertTrue(METADATA_FILEINFO_SCHEMA.matches(make_metadata_fileinfo(version))) - - # Test conditions for invalid arguments. - bad_version = 'bad' - bad_length = 'bad' - bad_hashes = 'bad' - - self.assertRaises(securesystemslib.exceptions.FormatError, make_metadata_fileinfo, - bad_version, length, hashes) - self.assertRaises(securesystemslib.exceptions.FormatError, make_metadata_fileinfo, - version, bad_length, hashes) - self.assertRaises(securesystemslib.exceptions.FormatError, make_metadata_fileinfo, - version, length, bad_hashes) - self.assertRaises(securesystemslib.exceptions.FormatError, make_metadata_fileinfo, - bad_version) - - - - def test_make_versioninfo(self): - # Test conditions for valid arguments. - version_number = 8 - versioninfo = {'version': version_number} - - VERSIONINFO_SCHEMA = tuf.formats.VERSIONINFO_SCHEMA - make_versioninfo = tuf.formats.make_versioninfo - self.assertTrue(VERSIONINFO_SCHEMA.matches(make_versioninfo(version_number))) - - # Test conditions for invalid arguments. - bad_version_number = '8' - - self.assertRaises(securesystemslib.exceptions.FormatError, make_versioninfo, bad_version_number) - - - - - - def test_expected_meta_rolename(self): - # Test conditions for valid arguments. - expected_rolename = tuf.formats.expected_meta_rolename - - self.assertEqual('root', expected_rolename('Root')) - self.assertEqual('targets', expected_rolename('Targets')) - self.assertEqual('snapshot', expected_rolename('Snapshot')) - self.assertEqual('timestamp', expected_rolename('Timestamp')) - self.assertEqual('mirrors', expected_rolename('Mirrors')) - self.assertEqual('targets role', expected_rolename('Targets Role')) - self.assertEqual('root', expected_rolename('Root')) - - # Test conditions for invalid arguments. - self.assertRaises(securesystemslib.exceptions.FormatError, expected_rolename, 123) - self.assertRaises(securesystemslib.exceptions.FormatError, expected_rolename, tuf.formats.ROOT_SCHEMA) - self.assertRaises(securesystemslib.exceptions.FormatError, expected_rolename, True) - - - - def test_check_signable_object_format(self): - # Test condition for a valid argument. - root_file = os.path.join('repository_data', 'repository', 'metadata', - 'root.json') - root = securesystemslib.util.load_json_file(root_file) - root = tuf.formats.make_signable(root) - self.assertEqual('root', tuf.formats.check_signable_object_format(root)) - - # Test conditions for invalid arguments. 
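# A minimal sketch of the signable envelope these assertions revolve around
# (grounded in SIGNABLE_SCHEMA earlier in this file): make_signable() wraps a
# role dict as {'signed': ..., 'signatures': []} and leaves an object that is
# already signable untouched, hence make_signable(signable) == signable above.
def make_signable_sketch(obj):
  if isinstance(obj, dict) and set(obj.keys()) == {'signed', 'signatures'}:
    return obj
  return {'signed': obj, 'signatures': []}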
- check_signable = tuf.formats.check_signable_object_format - self.assertRaises(securesystemslib.exceptions.FormatError, check_signable, 'root') - self.assertRaises(securesystemslib.exceptions.FormatError, check_signable, 123) - self.assertRaises(securesystemslib.exceptions.FormatError, check_signable, tuf.formats.ROOT_SCHEMA) - self.assertRaises(securesystemslib.exceptions.FormatError, check_signable, True) - - saved_type = root['signed']['_type'] - del root['signed']['_type'] - self.assertRaises(securesystemslib.exceptions.FormatError, check_signable, root) - root['signed']['_type'] = saved_type - - root['signed']['_type'] = 'Root' - self.assertRaises(securesystemslib.exceptions.FormatError, check_signable, root) - root['signed']['_type'] = 'root' - - del root['signed']['expires'] - self.assertRaises(securesystemslib.exceptions.FormatError, check_signable, root) - - - - def test_encode_canonical(self): - # Test conditions for valid arguments. - encode = securesystemslib.formats.encode_canonical - result = [] - output = result.append - bad_output = 123 - - self.assertEqual('""', encode("")) - self.assertEqual('[1,2,3]', encode([1, 2, 3])) - self.assertEqual('[1,2,3]', encode([1,2,3])) - self.assertEqual('[]', encode([])) - self.assertEqual('{"A":[99]}', encode({"A": [99]})) - self.assertEqual('{"x":3,"y":2}', encode({"x": 3, "y": 2})) - - self.assertEqual('{"x":3,"y":null}', encode({"x": 3, "y": None})) - - # Condition where 'encode()' sends the result to the callable - # 'output'. - self.assertEqual(None, encode([1, 2, 3], output)) - self.assertEqual('[1,2,3]', ''.join(result)) - - # Test conditions for invalid arguments. - self.assertRaises(securesystemslib.exceptions.FormatError, encode, tuf.formats.ROOT_SCHEMA) - self.assertRaises(securesystemslib.exceptions.FormatError, encode, 8.0) - self.assertRaises(securesystemslib.exceptions.FormatError, encode, {"x": 8.0}) - self.assertRaises(securesystemslib.exceptions.FormatError, encode, 8.0, output) - - self.assertRaises(securesystemslib.exceptions.FormatError, encode, {"x": securesystemslib.exceptions.FormatError}) - - -# Run unit test. -if __name__ == '__main__': - utils.configure_test_logging(sys.argv) - unittest.main() diff --git a/tests/test_indefinite_freeze_attack_old.py b/tests/test_indefinite_freeze_attack_old.py deleted file mode 100755 index 69d063a60d..0000000000 --- a/tests/test_indefinite_freeze_attack_old.py +++ /dev/null @@ -1,461 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2012 - 2017, New York University and the TUF contributors -# SPDX-License-Identifier: MIT OR Apache-2.0 - -""" - - test_indefinite_freeze_attack_old.py - - - Konstantin Andrianov. - - - March 10, 2012. - - April 1, 2014. - Refactored to use the 'unittest' module (test conditions in code, rather - than verifying text output), use pre-generated repository files, and - discontinue use of the old repository tools. -vladimir.v.diaz - - March 9, 2016. - Additional test added relating to issue: - https://github.com/theupdateframework/python-tuf/issues/322 - If a metadata file is not updated (no indication of a new version - available), the expiration of the pre-existing, locally trusted metadata - must still be detected. This additional test complains if such does not - occur, and accompanies code in tuf.client.updater:refresh() to detect it. - -sebastien.awwad - - - See LICENSE-MIT OR LICENSE for licensing information. - - - Simulate an indefinite freeze attack. 
In an indefinite freeze attack,
-  an attacker is able to respond to the client's requests with the same,
-  outdated metadata without the client being aware.
-"""
-
-import os
-import time
-import tempfile
-import shutil
-import json
-import logging
-import unittest
-import sys
-from urllib import request
-import unittest.mock as mock
-
-import tuf.formats
-import tuf.log
-import tuf.client.updater as updater
-import tuf.repository_tool as repo_tool
-import tuf.unittest_toolbox as unittest_toolbox
-import tuf.roledb
-import tuf.keydb
-import tuf.exceptions
-
-from tests import utils
-
-import securesystemslib
-
-# The repository tool is imported and logs console messages by default. Disable
-# console log messages generated by this unit test.
-repo_tool.disable_console_log_messages()
-
-logger = logging.getLogger(__name__)
-
-
-class TestIndefiniteFreezeAttack(unittest_toolbox.Modified_TestCase):
-
-  @classmethod
-  def setUpClass(cls):
-    # Create a temporary directory to store the repository, metadata, and target
-    # files.  'temporary_directory' must be deleted in TearDownModule() so that
-    # temporary files are always removed, even when exceptions occur.
-    cls.temporary_directory = tempfile.mkdtemp(dir=os.getcwd())
-
-    # Launch a SimpleHTTPServer (serves files in the current directory).
-    # Test cases will request metadata and target files that have been
-    # pre-generated in 'tuf/tests/repository_data', which will be served by the
-    # SimpleHTTPServer launched here.  The test cases of this unit test assume
-    # the pre-generated metadata files have a specific structure, such
-    # as a delegated role 'targets/role1', three target files, five key files,
-    # etc.
-    cls.server_process_handler = utils.TestServerProcess(log=logger)
-
-
-
-  @classmethod
-  def tearDownClass(cls):
-    # Clean the resources and flush the logged lines (if any).
-    cls.server_process_handler.clean()
-
-    # Remove the temporary repository directory, which should contain all the
-    # metadata, targets, and key files generated by all the test cases.
-    shutil.rmtree(cls.temporary_directory)
-
-
-
-
-  def setUp(self):
-    # We are inheriting from custom class.
-    unittest_toolbox.Modified_TestCase.setUp(self)
-    self.repository_name = 'test_repository1'
-
-    # Copy the original repository files provided in the test folder so that
-    # any modifications made to repository files are restricted to the copies.
-    # The 'repository_data' directory is expected to exist in 'tuf/tests/'.
-    original_repository_files = os.path.join(os.getcwd(), 'repository_data')
-    temporary_repository_root = \
-      self.make_temp_directory(directory=self.temporary_directory)
-
-    # The original repository, keystore, and client directories will be copied
-    # for each test case.
-    original_repository = os.path.join(original_repository_files, 'repository')
-    original_client = os.path.join(original_repository_files, 'client')
-    original_keystore = os.path.join(original_repository_files, 'keystore')
-
-    # Save references to the often-needed client repository directories.
-    # Test cases need these references to access metadata and target files.
-    self.repository_directory = \
-      os.path.join(temporary_repository_root, 'repository')
-    self.client_directory = os.path.join(temporary_repository_root, 'client')
-    self.keystore_directory = os.path.join(temporary_repository_root, 'keystore')
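# The defence this test module exercises, in outline (a hedged sketch, not
# the updater's actual code path): on every refresh the client must compare
# each trusted role's 'expires' field against the current time, even when
# the mirror offered no newer version of that role's metadata.
import datetime

import tuf.exceptions

def ensure_not_expired_sketch(signed, now=None):
  now = now or datetime.datetime.utcnow()
  expires = datetime.datetime.strptime(signed['expires'], '%Y-%m-%dT%H:%M:%SZ')
  if expires <= now:
    raise tuf.exceptions.ExpiredMetadataError(
        'metadata expired at ' + signed['expires'])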
-    # Copy the original 'repository', 'client', and 'keystore' directories
-    # to the temporary repository the test cases can use.
-    shutil.copytree(original_repository, self.repository_directory)
-    shutil.copytree(original_client, self.client_directory)
-    shutil.copytree(original_keystore, self.keystore_directory)
-
-    # Set the url prefix required by the 'tuf/client/updater.py' updater.
-    # 'path/to/tmp/repository' -> 'localhost:8001/tmp/repository'.
-    repository_basepath = self.repository_directory[len(os.getcwd()):]
-    url_prefix = 'http://' + utils.TEST_HOST_ADDRESS + ':' \
-        + str(self.server_process_handler.port) + repository_basepath
-
-    # Setting 'tuf.settings.repositories_directory' with the temporary client
-    # directory copied from the original repository files.
-    tuf.settings.repositories_directory = self.client_directory
-    self.repository_mirrors = {'mirror1': {'url_prefix': url_prefix,
-                                           'metadata_path': 'metadata',
-                                           'targets_path': 'targets'}}
-
-    # Create the repository instance.  The test cases will use this client
-    # updater to refresh metadata, fetch target files, etc.
-    self.repository_updater = updater.Updater(self.repository_name,
-                                              self.repository_mirrors)
-
-
-  def tearDown(self):
-    tuf.roledb.clear_roledb(clear_all=True)
-    tuf.keydb.clear_keydb(clear_all=True)
-
-    # Logs stdout and stderr from the server subprocess.
-    self.server_process_handler.flush_log()
-
-    # Remove temporary directory
-    unittest_toolbox.Modified_TestCase.tearDown(self)
-
-
-  def test_without_tuf(self):
-    # Without TUF, Test 1 and Test 2 are functionally equivalent, so we skip
-    # Test 1 and only perform Test 2.
-    #
-    # Test 1: If we find that the timestamp acquired from a mirror indicates
-    #         that there is no new snapshot file, and our current snapshot
-    #         file is expired, is it recognized as such?
-    # Test 2: If an expired timestamp is downloaded, is it recognized as such?
-
-
-    # Test 2 Begin:
-    #
-    # 'timestamp.json' specifies the latest version of the repository files.  A
-    # client should only accept the same version of this file up to a certain
-    # point, or else it cannot detect that new files are available for
-    # download.  Modify the repository's 'timestamp.json' so that it expires
-    # soon, copy it over to the client, and attempt to re-fetch the same
-    # expired version.
-    #
-    # A non-TUF client (without a way to detect when metadata has expired) is
-    # expected to download the same version, and thus the same outdated files.
-    # Verify that the downloaded 'timestamp.json' contains the same file size
-    # and hash as the one available locally.
-
-    timestamp_path = os.path.join(self.repository_directory, 'metadata',
-                                  'timestamp.json')
-
-    timestamp_metadata = securesystemslib.util.load_json_file(timestamp_path)
-    expiry_time = time.time() - 10
-    expires = tuf.formats.unix_timestamp_to_datetime(int(expiry_time))
-    expires = expires.isoformat() + 'Z'
-    timestamp_metadata['signed']['expires'] = expires
-    tuf.formats.check_signable_object_format(timestamp_metadata)
-
-    with open(timestamp_path, 'wb') as file_object:
-      # Explicitly specify the JSON separators for Python 2 + 3 consistency.
- timestamp_content = \ - json.dumps(timestamp_metadata, indent=1, separators=(',', ': '), - sort_keys=True).encode('utf-8') - file_object.write(timestamp_content) - - client_timestamp_path = os.path.join(self.client_directory, 'timestamp.json') - shutil.copy(timestamp_path, client_timestamp_path) - - length, hashes = securesystemslib.util.get_file_details(timestamp_path) - fileinfo = tuf.formats.make_targets_fileinfo(length, hashes) - - url_prefix = self.repository_mirrors['mirror1']['url_prefix'] - url_file = os.path.join(url_prefix, 'metadata', 'timestamp.json') - - request.urlretrieve(url_file.replace('\\', '/'), client_timestamp_path) - - length, hashes = securesystemslib.util.get_file_details(client_timestamp_path) - download_fileinfo = tuf.formats.make_targets_fileinfo(length, hashes) - - # Verify 'download_fileinfo' is equal to the current local file. - self.assertEqual(download_fileinfo, fileinfo) - - - def test_with_tuf(self): - # Three tests are conducted here. - # - # Test 1: If we find that the timestamp acquired from a mirror indicates - # that there is no new snapshot file, and our current snapshot - # file is expired, is it recognized as such? - # Test 2: If an expired timestamp is downloaded, is it recognized as such? - # Test 3: If an expired Snapshot is downloaded, is it (1) rejected? (2) the - # local Snapshot file deleted? (3) and is the client able to recover when - # given a new, valid Snapshot? - - - # Test 1 Begin: - # - # Addresses this issue: https://github.com/theupdateframework/python-tuf/issues/322 - # - # If time has passed and our snapshot or targets role is expired, and - # the mirror whose timestamp we fetched doesn't indicate the existence of a - # new snapshot version, we still need to check that it's expired and notify - # the software update system / application / user. This test creates that - # scenario. The correct behavior is to raise an exception. - # - # Background: Expiration checks (updater._ensure_not_expired) were - # previously conducted when the metadata file was downloaded. If no new - # metadata file was downloaded, no expiry check would occur. In particular, - # while root was checked for expiration at the beginning of each - # updater.refresh() cycle, and timestamp was always checked because it was - # always fetched, snapshot and targets were never checked if the user did - # not receive evidence that they had changed. This bug allowed a class of - # freeze attacks. - # That bug was fixed and this test tests that fix going forward. - - # Modify the timestamp file on the remote repository. 'timestamp.json' - # must be properly updated and signed with 'repository_tool.py', otherwise - # the client will reject it as invalid metadata. - - # Load the repository - repository = repo_tool.load_repository(self.repository_directory) - - # Load the snapshot and timestamp keys - key_file = os.path.join(self.keystore_directory, 'timestamp_key') - timestamp_private = repo_tool.import_ed25519_privatekey_from_file(key_file, - 'password') - repository.timestamp.load_signing_key(timestamp_private) - key_file = os.path.join(self.keystore_directory, 'snapshot_key') - snapshot_private = repo_tool.import_ed25519_privatekey_from_file(key_file, - 'password') - repository.snapshot.load_signing_key(snapshot_private) - - # sign snapshot with expiry in near future (earlier than e.g. 
timestamp)
-    expiry = int(time.time() + 60*60)
-    repository.snapshot.expiration = tuf.formats.unix_timestamp_to_datetime(
-        expiry)
-    repository.mark_dirty(['snapshot', 'timestamp'])
-    repository.writeall()
-
-    # And move the staged metadata to the "live" metadata.
-    shutil.rmtree(os.path.join(self.repository_directory, 'metadata'))
-    shutil.copytree(os.path.join(self.repository_directory, 'metadata.staged'),
-                    os.path.join(self.repository_directory, 'metadata'))
-
-    # Refresh metadata on the client.  For this refresh, none of the metadata
-    # has expired yet.
-    logger.info('Test: Refreshing #1 - Initial metadata refresh occurring.')
-    self.repository_updater.refresh()
-
-    logger.info('Test: Refreshing #2 - refresh after local snapshot expiry.')
-
-    # mock current time to one second after snapshot expiry
-    mock_time = mock.Mock()
-    mock_time.return_value = expiry + 1
-    with mock.patch('time.time', mock_time):
-      try:
-        self.repository_updater.refresh() # We expect this to fail!
-
-      except tuf.exceptions.ExpiredMetadataError:
-        logger.info('Test: Refresh #2 - failed as expected.  Expired local'
-            ' snapshot case generated a tuf.exceptions.ExpiredMetadataError'
-            ' exception as expected.  Test pass.')
-
-      else:
-        self.fail('TUF failed to detect expired stale snapshot metadata.  Freeze'
-            ' attack successful.')
-
-
-
-
-    # Test 2 Begin:
-    #
-    # 'timestamp.json' specifies the latest version of the repository files.
-    # A client should only accept the same version of this file up to a certain
-    # point, or else it cannot detect that new files are available for download.
-    # Modify the repository's 'timestamp.json' so that it is about to expire,
-    # copy it over to the client, wait a moment until it expires, and attempt to
-    # re-fetch the same expired version.
-
-    # The same scenario as in test_without_tuf() is followed here, except with
-    # a TUF client.  The TUF client performs a refresh of top-level metadata,
-    # which includes 'timestamp.json', and should detect a freeze attack if
-    # the repository serves an outdated 'timestamp.json'.
-
-    # Modify the timestamp file on the remote repository.  'timestamp.json'
-    # must be properly updated and signed with 'repository_tool.py', otherwise
-    # the client will reject it as invalid metadata.  The resulting
-    # 'timestamp.json' should be valid metadata, but expired (as intended).
-    repository = repo_tool.load_repository(self.repository_directory)
-
-    key_file = os.path.join(self.keystore_directory, 'timestamp_key')
-    timestamp_private = repo_tool.import_ed25519_privatekey_from_file(key_file,
-        'password')
-
-    repository.timestamp.load_signing_key(timestamp_private)
-
-    # Set timestamp metadata to expire soon.
-    # We cannot set the timestamp expiration with
-    # 'repository.timestamp.expiration = ...' with already-expired timestamp
-    # metadata because of consistency checks that occur during that assignment.
-    expiry_time = time.time() + 60*60
-    datetime_object = tuf.formats.unix_timestamp_to_datetime(int(expiry_time))
-    repository.timestamp.expiration = datetime_object
-    repository.writeall()
-
-    # Move the staged metadata to the "live" metadata.
- shutil.rmtree(os.path.join(self.repository_directory, 'metadata')) - shutil.copytree(os.path.join(self.repository_directory, 'metadata.staged'), - os.path.join(self.repository_directory, 'metadata')) - - # mock current time to one second after timestamp expiry - mock_time = mock.Mock() - mock_time.return_value = expiry_time + 1 - with mock.patch('time.time', mock_time): - try: - self.repository_updater.refresh() # We expect NoWorkingMirrorError. - - except tuf.exceptions.NoWorkingMirrorError as e: - # Make sure the contained error is ExpiredMetadataError - for mirror_url, mirror_error in e.mirror_errors.items(): - self.assertTrue(isinstance(mirror_error, tuf.exceptions.ExpiredMetadataError)) - - else: - self.fail('TUF failed to detect expired, stale timestamp metadata.' - ' Freeze attack successful.') - - - - - # Test 3 Begin: - # - # Serve the client expired Snapshot. The client should reject the given, - # expired Snapshot and the locally trusted one, which should now be out of - # date. - # After the attack, attempt to re-issue a valid Snapshot to verify that - # the client is still able to update. A bug previously caused snapshot - # expiration or replay to result in an indefinite freeze; see - # github.com/theupdateframework/python-tuf/issues/736 - repository = repo_tool.load_repository(self.repository_directory) - - ts_key_file = os.path.join(self.keystore_directory, 'timestamp_key') - snapshot_key_file = os.path.join(self.keystore_directory, 'snapshot_key') - timestamp_private = repo_tool.import_ed25519_privatekey_from_file( - ts_key_file, 'password') - snapshot_private = repo_tool.import_ed25519_privatekey_from_file( - snapshot_key_file, 'password') - - repository.timestamp.load_signing_key(timestamp_private) - repository.snapshot.load_signing_key(snapshot_private) - - # Set ts to expire in 1 month. - ts_expiry_time = time.time() + 2630000 - - # Set snapshot to expire in 1 hour. - snapshot_expiry_time = time.time() + 60*60 - - ts_datetime_object = tuf.formats.unix_timestamp_to_datetime( - int(ts_expiry_time)) - snapshot_datetime_object = tuf.formats.unix_timestamp_to_datetime( - int(snapshot_expiry_time)) - repository.timestamp.expiration = ts_datetime_object - repository.snapshot.expiration = snapshot_datetime_object - repository.writeall() - - # Move the staged metadata to the "live" metadata. - shutil.rmtree(os.path.join(self.repository_directory, 'metadata')) - shutil.copytree(os.path.join(self.repository_directory, 'metadata.staged'), - os.path.join(self.repository_directory, 'metadata')) - - # mock current time to one second after snapshot expiry - mock_time = mock.Mock() - mock_time.return_value = snapshot_expiry_time + 1 - with mock.patch('time.time', mock_time): - try: - # We expect the following refresh() to raise a NoWorkingMirrorError. - self.repository_updater.refresh() - - except tuf.exceptions.NoWorkingMirrorError as e: - # Make sure the contained error is ExpiredMetadataError - for mirror_url, mirror_error in e.mirror_errors.items(): - self.assertTrue(isinstance(mirror_error, tuf.exceptions.ExpiredMetadataError)) - self.assertTrue(mirror_url.endswith('snapshot.json')) - - else: - self.fail('TUF failed to detect expired, stale Snapshot metadata.' - ' Freeze attack successful.') - - # The client should have rejected the malicious Snapshot metadata, and - # distrusted the local snapshot file that is no longer valid. 
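# Error-aggregation pattern relied on in Tests 2 and 3 above (sketch): when
# every mirror fails, the updater raises a single NoWorkingMirrorError whose
# 'mirror_errors' attribute maps each mirror URL to the underlying exception,
# so a test can assert on the per-mirror cause.
def assert_all_mirrors_expired(exc):
  for mirror_url, mirror_error in exc.mirror_errors.items():
    assert isinstance(mirror_error, tuf.exceptions.ExpiredMetadataError)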
- self.assertTrue('snapshot' not in self.repository_updater.metadata['current']) - self.assertEqual(sorted(['root', 'targets', 'timestamp']), - sorted(self.repository_updater.metadata['current'])) - - # Verify that the client is able to recover from the malicious Snapshot. - # Re-sign a valid Snapshot file that the client should accept. - repository = repo_tool.load_repository(self.repository_directory) - - repository.timestamp.load_signing_key(timestamp_private) - repository.snapshot.load_signing_key(snapshot_private) - - # Set snapshot to expire in 1 month. - snapshot_expiry_time = time.time() + 2630000 - - snapshot_datetime_object = tuf.formats.unix_timestamp_to_datetime( - int(snapshot_expiry_time)) - repository.snapshot.expiration = snapshot_datetime_object - repository.writeall() - - # Move the staged metadata to the "live" metadata. - shutil.rmtree(os.path.join(self.repository_directory, 'metadata')) - shutil.copytree(os.path.join(self.repository_directory, 'metadata.staged'), - os.path.join(self.repository_directory, 'metadata')) - - # Verify that the client accepts the valid metadata file. - self.repository_updater.refresh() - self.assertTrue('snapshot' in self.repository_updater.metadata['current']) - self.assertEqual(sorted(['root', 'targets', 'timestamp', 'snapshot']), - sorted(self.repository_updater.metadata['current'])) - - - -if __name__ == '__main__': - utils.configure_test_logging(sys.argv) - unittest.main() diff --git a/tests/test_key_revocation_integration_old.py b/tests/test_key_revocation_integration_old.py deleted file mode 100755 index 8cb77f127f..0000000000 --- a/tests/test_key_revocation_integration_old.py +++ /dev/null @@ -1,495 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2016 - 2017, New York University and the TUF contributors -# SPDX-License-Identifier: MIT OR Apache-2.0 - -""" - - test_key_revocation_integration_old.py - - - Vladimir Diaz. - - - April 28, 2016. - - - See LICENSE-MIT OR LICENSE for licensing information. - - - Integration test that verifies top-level roles are updated after all of their - keys have been revoked. There are unit tests in 'test_repository_tool_old.py' - that verify key and role revocation of specific roles, but these should be - expanded to verify key revocations over the span of multiple snapshots of the - repository. - - The 'unittest_toolbox.py' module was created to provide additional testing - tools, such as automatically deleting temporary files created in test cases. - For more information on the additional testing tools, see - 'tests/unittest_toolbox.py'. -""" - -import os -import shutil -import tempfile -import logging -import unittest -import sys - -import tuf -import tuf.log -import tuf.roledb -import tuf.keydb -import tuf.repository_tool as repo_tool -import tuf.unittest_toolbox as unittest_toolbox -import tuf.client.updater as updater - -from tests import utils - -import securesystemslib - -logger = logging.getLogger(__name__) -repo_tool.disable_console_log_messages() - - -class TestKeyRevocation(unittest_toolbox.Modified_TestCase): - - @classmethod - def setUpClass(cls): - # Create a temporary directory to store the repository, metadata, and target - # files. 'temporary_directory' must be deleted in TearDownModule() so that - # temporary files are always removed, even when exceptions occur. - cls.temporary_directory = tempfile.mkdtemp(dir=os.getcwd()) - - # Launch a SimpleHTTPServer (serves files in the current directory). 
Test - # cases will request metadata and target files that have been pre-generated - # in 'tuf/tests/repository_data', which will be served by the - # SimpleHTTPServer launched here. The test cases of - # 'test_key_revocation.py' assume the pre-generated metadata files have a - # specific structure, such as a delegated role, three target files, five - # key files, etc. - cls.server_process_handler = utils.TestServerProcess(log=logger) - - - - @classmethod - def tearDownClass(cls): - # Cleans the resources and flush the logged lines (if any). - cls.server_process_handler.clean() - - # Remove the temporary repository directory, which should contain all the - # metadata, targets, and key files generated for the test cases. - shutil.rmtree(cls.temporary_directory) - - - - - def setUp(self): - # We are inheriting from custom class. - unittest_toolbox.Modified_TestCase.setUp(self) - - self.repository_name = 'test_repository1' - - # Copy the original repository files provided in the test folder so that - # any modifications made to repository files are restricted to the copies. - # The 'repository_data' directory is expected to exist in 'tuf.tests/'. - original_repository_files = os.path.join(os.getcwd(), 'repository_data') - temporary_repository_root = \ - self.make_temp_directory(directory=self.temporary_directory) - - # The original repository, keystore, and client directories will be copied - # for each test case. - original_repository = os.path.join(original_repository_files, 'repository') - original_keystore = os.path.join(original_repository_files, 'keystore') - original_client = os.path.join(original_repository_files, 'client') - - # Save references to the often-needed client repository directories. - # Test cases need these references to access metadata and target files. - self.repository_directory = \ - os.path.join(temporary_repository_root, 'repository') - self.keystore_directory = \ - os.path.join(temporary_repository_root, 'keystore') - self.client_directory = os.path.join(temporary_repository_root, 'client') - self.client_metadata = os.path.join(self.client_directory, - self.repository_name, 'metadata') - self.client_metadata_current = os.path.join(self.client_metadata, 'current') - self.client_metadata_previous = os.path.join(self.client_metadata, 'previous') - - # Copy the original 'repository', 'client', and 'keystore' directories - # to the temporary repository the test cases can use. - shutil.copytree(original_repository, self.repository_directory) - shutil.copytree(original_client, self.client_directory) - shutil.copytree(original_keystore, self.keystore_directory) - - # 'path/to/tmp/repository' -> 'localhost:8001/tmp/repository'. - repository_basepath = self.repository_directory[len(os.getcwd()):] - url_prefix = 'http://' + utils.TEST_HOST_ADDRESS + ':' \ - + str(self.server_process_handler.port) + repository_basepath - - # Setting 'tuf.settings.repository_directory' with the temporary client - # directory copied from the original repository files. - tuf.settings.repositories_directory = self.client_directory - - self.repository_mirrors = {'mirror1': {'url_prefix': url_prefix, - 'metadata_path': 'metadata', - 'targets_path': 'targets'}} - - # Creating repository instance. The test cases will use this client - # updater to refresh metadata, fetch target files, etc. 
-    self.repository_updater = updater.Updater(self.repository_name,
-                                              self.repository_mirrors)
-
-    # Metadata role keys are needed by the test cases to make changes to the
-    # repository (e.g., adding a new target file to 'targets.json' and then
-    # requesting a refresh()).
-    self.role_keys = _load_role_keys(self.keystore_directory)
-
-
-
-  def tearDown(self):
-    tuf.roledb.clear_roledb(clear_all=True)
-    tuf.keydb.clear_keydb(clear_all=True)
-
-    # Logs stdout and stderr from the server subprocess.
-    self.server_process_handler.flush_log()
-
-    # Remove temporary directory
-    unittest_toolbox.Modified_TestCase.tearDown(self)
-
-
-  # UNIT TESTS.
-  def test_timestamp_key_revocation(self):
-    # First verify that the Timestamp role is properly signed.  Calling
-    # refresh() should not raise an exception.
-    self.repository_updater.refresh()
-
-    # There should only be one key for Timestamp.  Store the keyid to later
-    # verify that it has been revoked.
-    timestamp_roleinfo = tuf.roledb.get_roleinfo('timestamp', self.repository_name)
-    timestamp_keyid = timestamp_roleinfo['keyids']
-    self.assertEqual(len(timestamp_keyid), 1)
-
-    # Remove 'timestamp_keyid' and add a new key.  Verify that the client
-    # detects the removal and addition of keys to the Timestamp role.
-    repository = repo_tool.load_repository(self.repository_directory)
-    repository.timestamp.remove_verification_key(self.role_keys['timestamp']['public'])
-    repository.timestamp.add_verification_key(self.role_keys['snapshot']['public'])
-
-    # Root, Snapshot, and Timestamp must be rewritten.  Root must be written
-    # because the timestamp key has changed; Snapshot, because Root has
-    # changed; and Timestamp, because it must sign its metadata with a new key.
-    repository.root.load_signing_key(self.role_keys['root']['private'])
-    repository.snapshot.load_signing_key(self.role_keys['snapshot']['private'])
-    repository.timestamp.load_signing_key(self.role_keys['snapshot']['private'])
-    repository.writeall()
-
-
-    # Move the staged metadata to the "live" metadata.
-    shutil.rmtree(os.path.join(self.repository_directory, 'metadata'))
-    shutil.copytree(os.path.join(self.repository_directory, 'metadata.staged'),
-                    os.path.join(self.repository_directory, 'metadata'))
-
-    # The client performs a refresh of top-level metadata to get the latest
-    # changes.
-    self.repository_updater.refresh()
-
-    # Verify that the client is able to recognize that a new set of keys have
-    # been added to the Timestamp role.
-    # First, has 'timestamp_keyid' been removed?
-    timestamp_roleinfo = tuf.roledb.get_roleinfo('timestamp', self.repository_name)
-    self.assertTrue(timestamp_keyid not in timestamp_roleinfo['keyids'])
-
-    # Second, is Timestamp's new key correct?  The new key should be Snapshot's.
-
-    self.assertEqual(len(timestamp_roleinfo['keyids']), 1)
-    snapshot_roleinfo = tuf.roledb.get_roleinfo('snapshot', self.repository_name)
-    self.assertEqual(timestamp_roleinfo['keyids'], snapshot_roleinfo['keyids'])
-
-
-
-  def test_snapshot_key_revocation(self):
-    # First verify that the Snapshot role is properly signed.  Calling
-    # refresh() should not raise an exception.
-    self.repository_updater.refresh()
-
-    # There should only be one key for Snapshot.  Store the keyid to later
-    # verify that it has been revoked.
-    snapshot_roleinfo = tuf.roledb.get_roleinfo('snapshot', self.repository_name)
-    snapshot_keyid = snapshot_roleinfo['keyids']
-    self.assertEqual(len(snapshot_keyid), 1)
-
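# What the client-side checks in these revocation tests boil down to (a
# hedged sketch, not a helper the module defines): after refresh(), the
# role's trusted keyids must no longer contain the revoked keyid and must
# contain the replacement.
def assert_key_rotated(roleinfo, revoked_keyid, new_keyid):
  assert revoked_keyid not in roleinfo['keyids']
  assert new_keyid in roleinfo['keyids']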
-    # Remove 'snapshot_keyid' and add a new key.  Verify that the client
-    # detects the removal and addition of keys to the Snapshot role.
-    repository = repo_tool.load_repository(self.repository_directory)
-    repository.snapshot.remove_verification_key(self.role_keys['snapshot']['public'])
-    repository.snapshot.add_verification_key(self.role_keys['timestamp']['public'])
-
-    # Root, Snapshot, and Timestamp must be rewritten.  Root must be written
-    # because the snapshot key has changed; Snapshot, because Root has
-    # changed; and Timestamp, because Snapshot has changed.
-    repository.root.load_signing_key(self.role_keys['root']['private'])
-    # Note: we added Timestamp's key to the Snapshot role.
-    repository.snapshot.load_signing_key(self.role_keys['timestamp']['private'])
-    repository.timestamp.load_signing_key(self.role_keys['timestamp']['private'])
-    repository.writeall()
-
-
-    # Move the staged metadata to the "live" metadata.
-    shutil.rmtree(os.path.join(self.repository_directory, 'metadata'))
-    shutil.copytree(os.path.join(self.repository_directory, 'metadata.staged'),
-                    os.path.join(self.repository_directory, 'metadata'))
-
-    # The client performs a refresh of top-level metadata to get the latest
-    # changes.
-    self.repository_updater.refresh()
-
-    # Verify that the client is able to recognize that a new set of keys have
-    # been added to the Snapshot role.
-    # First, has 'snapshot_keyid' been removed?
-    snapshot_roleinfo = tuf.roledb.get_roleinfo('snapshot', self.repository_name)
-    self.assertTrue(snapshot_keyid not in snapshot_roleinfo['keyids'])
-
-    # Second, is Snapshot's new key correct?  The new key should be
-    # Timestamp's.
-    self.assertEqual(len(snapshot_roleinfo['keyids']), 1)
-    timestamp_roleinfo = tuf.roledb.get_roleinfo('timestamp', self.repository_name)
-    self.assertEqual(snapshot_roleinfo['keyids'], timestamp_roleinfo['keyids'])
-
-
-
-
-
-  def test_targets_key_revocation(self):
-    # First verify that the Targets role is properly signed.  Calling
-    # refresh() should not raise an exception.
-    self.repository_updater.refresh()
-
-    # There should only be one key for Targets.  Store the keyid to later
-    # verify that it has been revoked.
-    targets_roleinfo = tuf.roledb.get_roleinfo('targets', self.repository_name)
-    targets_keyid = targets_roleinfo['keyids']
-    self.assertEqual(len(targets_keyid), 1)
-
-    # Remove 'targets_keyid' and add a new key.  Verify that the client
-    # detects the removal and addition of keys to the Targets role.
-    repository = repo_tool.load_repository(self.repository_directory)
-    repository.targets.remove_verification_key(self.role_keys['targets']['public'])
-    repository.targets.add_verification_key(self.role_keys['timestamp']['public'])
-
-    # Root, Snapshot, and Timestamp must be rewritten.  Root must be written
-    # because the targets key has changed; Snapshot, because Root has
-    # changed; and Timestamp, because Snapshot has changed.
-    repository.root.load_signing_key(self.role_keys['root']['private'])
-    # Note: we added Timestamp's key to the Targets role.
-    repository.targets.load_signing_key(self.role_keys['timestamp']['private'])
-    repository.snapshot.load_signing_key(self.role_keys['snapshot']['private'])
-    repository.timestamp.load_signing_key(self.role_keys['timestamp']['private'])
-    repository.writeall()
-
-
-    # Move the staged metadata to the "live" metadata.
-    shutil.rmtree(os.path.join(self.repository_directory, 'metadata'))
-    shutil.copytree(os.path.join(self.repository_directory, 'metadata.staged'),
-                    os.path.join(self.repository_directory, 'metadata'))
-
-    # The client performs a refresh of top-level metadata to get the latest
-    # changes.
-    self.repository_updater.refresh()
-
-    # Verify that the client is able to recognize that a new set of keys have
-    # been added to the Targets role.
-    # First, has 'targets_keyid' been removed?
-    targets_roleinfo = tuf.roledb.get_roleinfo('targets', self.repository_name)
-    self.assertTrue(targets_keyid not in targets_roleinfo['keyids'])
-
-    # Second, is Targets' new key correct?  The new key should be
-    # Timestamp's.
-    self.assertEqual(len(targets_roleinfo['keyids']), 1)
-    timestamp_roleinfo = tuf.roledb.get_roleinfo('timestamp', self.repository_name)
-    self.assertEqual(targets_roleinfo['keyids'], timestamp_roleinfo['keyids'])
-
-
-
-  def test_root_key_revocation(self):
-    # First verify that the Root role is properly signed.  Calling
-    # refresh() should not raise an exception.
-    self.repository_updater.refresh()
-
-    # There should only be one key for Root.  Store the keyid to later verify
-    # that it has been revoked.
-    root_roleinfo = tuf.roledb.get_roleinfo('root', self.repository_name)
-    root_keyid = root_roleinfo['keyids']
-    self.assertEqual(len(root_keyid), 1)
-
-    # Remove 'root_keyid' and add a new key.  Verify that the client detects
-    # the removal and addition of keys to the Root file.
-    repository = repo_tool.load_repository(self.repository_directory)
-
-    repository.root.add_verification_key(self.role_keys['snapshot']['public'])
-    repository.root.add_verification_key(self.role_keys['targets']['public'])
-    repository.root.add_verification_key(self.role_keys['timestamp']['public'])
-
-    # Root, Snapshot, and Timestamp must be rewritten.  Root must be written
-    # because the root keys have changed; Snapshot, because Root has
-    # changed; and Timestamp, because Snapshot has changed.
-    repository.root.load_signing_key(self.role_keys['snapshot']['private'])
-    repository.root.load_signing_key(self.role_keys['targets']['private'])
-    repository.root.load_signing_key(self.role_keys['timestamp']['private'])
-
-    # Note: We added the Snapshot, Targets, and Timestamp keys to the Root role.
-    # The Root's expected private key has not been loaded yet, so that we can
-    # verify that refresh() correctly raises a
-    # securesystemslib.exceptions.BadSignatureError exception.
-    repository.snapshot.load_signing_key(self.role_keys['snapshot']['private'])
-    repository.timestamp.load_signing_key(self.role_keys['timestamp']['private'])
-
-    # Root's version number = 2 after the following writeall().
-    repository.writeall()
-
-    # Move the staged metadata to the "live" metadata.
-    shutil.rmtree(os.path.join(self.repository_directory, 'metadata'))
-    shutil.copytree(os.path.join(self.repository_directory, 'metadata.staged'),
-                    os.path.join(self.repository_directory, 'metadata'))
-
-    # Note well: The client should reject the new Root file because the
-    # repository has revoked the only Root key that the client trusts.
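# The rule behind that rejection (a hedged sketch of the spec's root
# rotation check; 'verify_role_signatures' is a hypothetical callback, not
# an API this module provides): root version N+1 must be verifiable both by
# the root keys trusted in version N and by the keys version N+1 itself
# lists, each at their respective thresholds.
def root_rotation_is_valid(old_root, new_root, verify_role_signatures):
  return (verify_role_signatures(old_root['roles']['root'], new_root) and
          verify_role_signatures(new_root['roles']['root'], new_root))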
-    try:
-      self.repository_updater.refresh()
-
-    except tuf.exceptions.NoWorkingMirrorError as exception:
-      for mirror_exception in exception.mirror_errors.values():
-        self.assertTrue(isinstance(mirror_exception,
-            securesystemslib.exceptions.BadSignatureError))
-
-    repository.root.add_verification_key(self.role_keys['root']['public'])
-    repository.root.load_signing_key(self.role_keys['root']['private'])
-
-    # Root, Snapshot, and Timestamp should be dirty.
-    repository.dirty_roles()
-    repository.write('root', increment_version_number=False)
-    repository.write('snapshot')
-    repository.write('timestamp')
-
-    # Move the staged metadata to the "live" metadata.
-    shutil.rmtree(os.path.join(self.repository_directory, 'metadata'))
-    shutil.copytree(os.path.join(self.repository_directory, 'metadata.staged'),
-        os.path.join(self.repository_directory, 'metadata'))
-
-    # Root's version number = 2...
-    # The client successfully performs a refresh of top-level metadata to get
-    # the latest changes.
-    self.repository_updater.refresh()
-    self.assertEqual(self.repository_updater.metadata['current']['root']['version'], 2)
-
-    # Revoke the snapshot and targets keys (added to root) so that multiple
-    # new versions of Root are created.  Discontinue signing with the old
-    # root key now that the client has successfully updated (note: the old
-    # Root key was revoked, but the repository continued signing with it to
-    # allow the client to update).
-    repository.root.remove_verification_key(self.role_keys['root']['public'])
-    repository.root.unload_signing_key(self.role_keys['root']['private'])
-    repository.root.remove_verification_key(self.role_keys['snapshot']['public'])
-    repository.root.unload_signing_key(self.role_keys['snapshot']['private'])
-    repository.writeall()
-
-    # Move the staged metadata to the "live" metadata.
-    shutil.rmtree(os.path.join(self.repository_directory, 'metadata'))
-    shutil.copytree(os.path.join(self.repository_directory, 'metadata.staged'),
-        os.path.join(self.repository_directory, 'metadata'))
-
-    # Root's version number = 3...
-    self.repository_updater.refresh()
-
-    repository.root.remove_verification_key(self.role_keys['targets']['public'])
-    repository.root.unload_signing_key(self.role_keys['targets']['private'])
-
-    # The following should fail because root rotation requires the new Root
-    # to be signed with the previous self.role_keys['targets'] key.
-    self.assertRaises(tuf.exceptions.UnsignedMetadataError,
-        repository.writeall)
-
-    repository.root.load_signing_key(self.role_keys['targets']['private'])
-    repository.writeall()
-
-    # Move the staged metadata to the "live" metadata.
-    shutil.rmtree(os.path.join(self.repository_directory, 'metadata'))
-    shutil.copytree(os.path.join(self.repository_directory, 'metadata.staged'),
-        os.path.join(self.repository_directory, 'metadata'))
-
-    # Root's version number = 4...
-    self.repository_updater.refresh()
-    self.assertEqual(self.repository_updater.metadata['current']['root']['version'], 4)
-
-    # Verify that the client is able to recognize that a new key has been
-    # added to the Root role.
-    # First, has 'root_keyid' been removed?
-    root_roleinfo = tuf.roledb.get_roleinfo('root', self.repository_name)
-    self.assertTrue(root_keyid not in root_roleinfo['keyids'])
-
-    # Second, is Root's new key correct?  The new key should be
-    # Timestamp's.
-    self.assertEqual(len(root_roleinfo['keyids']), 1)
-    timestamp_roleinfo = tuf.roledb.get_roleinfo('timestamp', self.repository_name)
-    self.assertEqual(root_roleinfo['keyids'], timestamp_roleinfo['keyids'])
-
-
-
-def _load_role_keys(keystore_directory):
-
-  # Populate and return a 'role_keys' dictionary by importing the required
-  # public and private keys of 'tuf/tests/repository_data/'.  The role keys
-  # are needed when modifying the remote repository used by the test cases
-  # in this unit test.  The pre-generated key files in
-  # 'repository_data/keystore' are all encrypted with a 'password' passphrase.
-  EXPECTED_KEYFILE_PASSWORD = 'password'
-
-  # Store and return the cryptography keys of the top-level roles, including
-  # one delegated role.
-  root_key_file = os.path.join(keystore_directory, 'root_key')
-  targets_key_file = os.path.join(keystore_directory, 'targets_key')
-  snapshot_key_file = os.path.join(keystore_directory, 'snapshot_key')
-  timestamp_key_file = os.path.join(keystore_directory, 'timestamp_key')
-  delegation_key_file = os.path.join(keystore_directory, 'delegation_key')
-
-  role_keys = {'root': {}, 'targets': {}, 'snapshot': {}, 'timestamp': {},
-      'role1': {}}
-
-  # Import the top-level and delegated role public keys.
-  role_keys['root']['public'] = \
-    repo_tool.import_rsa_publickey_from_file(root_key_file + '.pub')
-  role_keys['targets']['public'] = \
-    repo_tool.import_ed25519_publickey_from_file(targets_key_file + '.pub')
-  role_keys['snapshot']['public'] = \
-    repo_tool.import_ed25519_publickey_from_file(snapshot_key_file + '.pub')
-  role_keys['timestamp']['public'] = \
-    repo_tool.import_ed25519_publickey_from_file(timestamp_key_file + '.pub')
-  role_keys['role1']['public'] = \
-    repo_tool.import_ed25519_publickey_from_file(delegation_key_file + '.pub')
-
-  # Import the private keys of the top-level and delegated roles.
-  role_keys['root']['private'] = \
-    repo_tool.import_rsa_privatekey_from_file(root_key_file,
-        EXPECTED_KEYFILE_PASSWORD)
-  role_keys['targets']['private'] = \
-    repo_tool.import_ed25519_privatekey_from_file(targets_key_file,
-        EXPECTED_KEYFILE_PASSWORD)
-  role_keys['snapshot']['private'] = \
-    repo_tool.import_ed25519_privatekey_from_file(snapshot_key_file,
-        EXPECTED_KEYFILE_PASSWORD)
-  role_keys['timestamp']['private'] = \
-    repo_tool.import_ed25519_privatekey_from_file(timestamp_key_file,
-        EXPECTED_KEYFILE_PASSWORD)
-  role_keys['role1']['private'] = \
-    repo_tool.import_ed25519_privatekey_from_file(delegation_key_file,
-        EXPECTED_KEYFILE_PASSWORD)
-
-  return role_keys
-
-
-
-if __name__ == '__main__':
-  utils.configure_test_logging(sys.argv)
-  unittest.main()
diff --git a/tests/test_keydb_old.py b/tests/test_keydb_old.py
deleted file mode 100755
index b76b5c0f39..0000000000
--- a/tests/test_keydb_old.py
+++ /dev/null
@@ -1,407 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2012 - 2017, New York University and the TUF contributors
-# SPDX-License-Identifier: MIT OR Apache-2.0
-
-"""
-<Program Name>
-  test_keydb_old.py
-
-<Author>
-  Vladimir Diaz <vladimir.v.diaz@gmail.com>
-
-<Started>
-  October 2012.
-
-<Copyright>
-  See LICENSE-MIT OR LICENSE for licensing information.
-
-<Purpose>
-  Unit test for 'keydb.py'.
-"""
-
-import unittest
-import logging
-import sys
-
-import tuf
-import tuf.formats
-import securesystemslib.keys
-import securesystemslib.settings
-import tuf.keydb
-import tuf.log
-
-from tests import utils
-
-logger = logging.getLogger(__name__)
-
-
-# Generate the three keys to use in our test cases.
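-# (Key generation is comparatively slow, so the keys are created once at
-# import time and shared by all test cases.  Each entry is a securesystemslib
-# key dict, roughly of the form {'keytype': 'rsa', 'scheme': ...,
-# 'keyid': ..., 'keyval': {'public': ..., 'private': ...}}.)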
-KEYS = [] -for junk in range(3): - rsa_key = securesystemslib.keys.generate_rsa_key(2048) - rsa_key['keyid_hash_algorithms'] = securesystemslib.settings.HASH_ALGORITHMS - KEYS.append(rsa_key) - - - -class TestKeydb(unittest.TestCase): - def setUp(self): - tuf.keydb.clear_keydb(clear_all=True) - - - - def tearDown(self): - tuf.keydb.clear_keydb(clear_all=True) - - - - def test_create_keydb(self): - # Test condition for normal behaviour. - repository_name = 'example_repository' - - # The keydb dictionary should contain only the 'default' repository entry. - self.assertTrue('default' in tuf.keydb._keydb_dict) - self.assertEqual(1, len(tuf.keydb._keydb_dict)) - - - tuf.keydb.create_keydb(repository_name) - self.assertEqual(2, len(tuf.keydb._keydb_dict)) - - # Verify that a keydb cannot be created for a name that already exists. - self.assertRaises(securesystemslib.exceptions.InvalidNameError, tuf.keydb.create_keydb, repository_name) - - # Ensure that the key database for 'example_repository' is deleted so that - # the key database is returned to its original, default state. - tuf.keydb.remove_keydb(repository_name) - - - - def test_remove_keydb(self): - # Test condition for expected behaviour. - rsakey = KEYS[0] - keyid = KEYS[0]['keyid'] - - repository_name = 'example_repository' - self.assertRaises(securesystemslib.exceptions.InvalidNameError, tuf.keydb.remove_keydb, 'default') - - tuf.keydb.create_keydb(repository_name) - tuf.keydb.remove_keydb(repository_name) - - # tuf.keydb.remove_keydb() logs a warning if a keydb for a non-existent - # repository is specified. - tuf.keydb.remove_keydb(repository_name) - - # Test condition for improperly formatted argument, and unexpected argument. - self.assertRaises(securesystemslib.exceptions.FormatError, tuf.keydb.remove_keydb, 123) - self.assertRaises(TypeError, tuf.keydb.remove_keydb, rsakey, 123) - - - - def test_clear_keydb(self): - # Test condition ensuring 'clear_keydb()' clears the keydb database. - # Test the length of the keydb before and after adding a key. - self.assertEqual(0, len(tuf.keydb._keydb_dict['default'])) - rsakey = KEYS[0] - keyid = KEYS[0]['keyid'] - tuf.keydb._keydb_dict['default'][keyid] = rsakey - self.assertEqual(1, len(tuf.keydb._keydb_dict['default'])) - tuf.keydb.clear_keydb() - self.assertEqual(0, len(tuf.keydb._keydb_dict['default'])) - - # Test condition for unexpected argument. - self.assertRaises(TypeError, tuf.keydb.clear_keydb, 'default', False, 'unexpected_argument') - - # Test condition for improperly formatted arguments. - self.assertRaises(securesystemslib.exceptions.FormatError, tuf.keydb.clear_keydb, 0) - self.assertRaises(securesystemslib.exceptions.FormatError, tuf.keydb.clear_keydb, 'default', 0) - - # Test condition for non-existent repository name. - self.assertRaises(securesystemslib.exceptions.InvalidNameError, tuf.keydb.clear_keydb, 'non-existent') - - # Test condition for keys added to a non-default key database. Unlike the - # test conditions above, this test makes use of the public functions - # add_key(), create_keydb(), and get_key() to more easily verify - # clear_keydb()'s behaviour. 
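-    # (Internally, tuf.keydb._keydb_dict maps a repository name to a
-    # {keyid: key_dict} dictionary, e.g. _keydb_dict['default'][keyid],
-    # which is why these tests can inspect and populate it directly.)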
- rsakey = KEYS[0] - keyid = KEYS[0]['keyid'] - repository_name = 'example_repository' - tuf.keydb.create_keydb(repository_name) - self.assertRaises(tuf.exceptions.UnknownKeyError, tuf.keydb.get_key, keyid, repository_name) - tuf.keydb.add_key(rsakey, keyid, repository_name) - self.assertEqual(rsakey, tuf.keydb.get_key(keyid, repository_name)) - - tuf.keydb.clear_keydb(repository_name) - self.assertRaises(tuf.exceptions.UnknownKeyError, tuf.keydb.get_key, keyid, repository_name) - - # Remove 'repository_name' from the key database to revert it back to its - # original, default state (i.e., only the 'default' repository exists). - tuf.keydb.remove_keydb(repository_name) - - - - def test_get_key(self): - # Test conditions using valid 'keyid' arguments. - rsakey = KEYS[0] - keyid = KEYS[0]['keyid'] - tuf.keydb._keydb_dict['default'][keyid] = rsakey - rsakey2 = KEYS[1] - keyid2 = KEYS[1]['keyid'] - tuf.keydb._keydb_dict['default'][keyid2] = rsakey2 - - self.assertEqual(rsakey, tuf.keydb.get_key(keyid)) - self.assertEqual(rsakey2, tuf.keydb.get_key(keyid2)) - self.assertNotEqual(rsakey2, tuf.keydb.get_key(keyid)) - self.assertNotEqual(rsakey, tuf.keydb.get_key(keyid2)) - - # Test conditions using invalid arguments. - self.assertRaises(securesystemslib.exceptions.FormatError, tuf.keydb.get_key, None) - self.assertRaises(securesystemslib.exceptions.FormatError, tuf.keydb.get_key, 123) - self.assertRaises(securesystemslib.exceptions.FormatError, tuf.keydb.get_key, ['123']) - self.assertRaises(securesystemslib.exceptions.FormatError, tuf.keydb.get_key, {'keyid': '123'}) - self.assertRaises(securesystemslib.exceptions.FormatError, tuf.keydb.get_key, '') - self.assertRaises(securesystemslib.exceptions.FormatError, tuf.keydb.get_key, keyid, 123) - - # Test condition using a 'keyid' that has not been added yet. - keyid3 = KEYS[2]['keyid'] - self.assertRaises(tuf.exceptions.UnknownKeyError, tuf.keydb.get_key, keyid3) - - # Test condition for a key added to a non-default repository. - repository_name = 'example_repository' - rsakey3 = KEYS[2] - tuf.keydb.create_keydb(repository_name) - tuf.keydb.add_key(rsakey3, keyid3, repository_name) - - # Test condition for a key added to a non-existent repository. - self.assertRaises(securesystemslib.exceptions.InvalidNameError, tuf.keydb.get_key, - keyid, 'non-existent') - - # Verify that 'rsakey3' is added to the expected repository name. - # If not supplied, the 'default' repository name is searched. - self.assertRaises(tuf.exceptions.UnknownKeyError, tuf.keydb.get_key, keyid3) - self.assertEqual(rsakey3, tuf.keydb.get_key(keyid3, repository_name)) - - # Remove the 'example_repository' so that other test functions have access - # to a default state of the keydb. - tuf.keydb.remove_keydb(repository_name) - - - - def test_add_key(self): - # Test conditions using valid 'keyid' arguments. - rsakey = KEYS[0] - keyid = KEYS[0]['keyid'] - rsakey2 = KEYS[1] - keyid2 = KEYS[1]['keyid'] - rsakey3 = KEYS[2] - keyid3 = KEYS[2]['keyid'] - self.assertEqual(None, tuf.keydb.add_key(rsakey, keyid)) - self.assertEqual(None, tuf.keydb.add_key(rsakey2, keyid2)) - self.assertEqual(None, tuf.keydb.add_key(rsakey3)) - - self.assertEqual(rsakey, tuf.keydb.get_key(keyid)) - self.assertEqual(rsakey2, tuf.keydb.get_key(keyid2)) - self.assertEqual(rsakey3, tuf.keydb.get_key(keyid3)) - - # Test conditions using arguments with invalid formats. 
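-    # (add_key() validates its arguments before storing anything: a
-    # malformed key dict or keyid raises FormatError, and a keyid that does
-    # not match the key raises securesystemslib.exceptions.Error, as the
-    # conditions below demonstrate.)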
- tuf.keydb.clear_keydb() - rsakey3['keytype'] = 'bad_keytype' - - self.assertRaises(securesystemslib.exceptions.FormatError, tuf.keydb.add_key, None, keyid) - self.assertRaises(securesystemslib.exceptions.FormatError, tuf.keydb.add_key, '', keyid) - self.assertRaises(securesystemslib.exceptions.FormatError, tuf.keydb.add_key, ['123'], keyid) - self.assertRaises(securesystemslib.exceptions.FormatError, tuf.keydb.add_key, {'a': 'b'}, keyid) - self.assertRaises(securesystemslib.exceptions.FormatError, tuf.keydb.add_key, rsakey, {'keyid': ''}) - self.assertRaises(securesystemslib.exceptions.FormatError, tuf.keydb.add_key, rsakey, 123) - self.assertRaises(securesystemslib.exceptions.FormatError, tuf.keydb.add_key, rsakey, False) - self.assertRaises(securesystemslib.exceptions.FormatError, tuf.keydb.add_key, rsakey, ['keyid']) - self.assertRaises(securesystemslib.exceptions.FormatError, tuf.keydb.add_key, rsakey3, keyid3) - rsakey3['keytype'] = 'rsa' - self.assertRaises(securesystemslib.exceptions.FormatError, tuf.keydb.add_key, rsakey3, keyid3, 123) - - # Test conditions where keyid does not match the rsakey. - self.assertRaises(securesystemslib.exceptions.Error, tuf.keydb.add_key, rsakey, keyid2) - self.assertRaises(securesystemslib.exceptions.Error, tuf.keydb.add_key, rsakey2, keyid) - - # Test conditions using keyids that have already been added. - tuf.keydb.add_key(rsakey, keyid) - tuf.keydb.add_key(rsakey2, keyid2) - self.assertRaises(tuf.exceptions.KeyAlreadyExistsError, tuf.keydb.add_key, rsakey) - self.assertRaises(tuf.exceptions.KeyAlreadyExistsError, tuf.keydb.add_key, rsakey2) - - # Test condition for key added to the keydb of a non-default repository. - repository_name = 'example_repository' - tuf.keydb.create_keydb(repository_name) - self.assertRaises(tuf.exceptions.UnknownKeyError, tuf.keydb.get_key, keyid3, repository_name) - tuf.keydb.add_key(rsakey3, keyid3, repository_name) - self.assertRaises(tuf.exceptions.UnknownKeyError, tuf.keydb.get_key, keyid3) - self.assertEqual(rsakey3, tuf.keydb.get_key(keyid3, repository_name)) - - # Test condition for key added to the keydb of a non-existent repository. - self.assertRaises(securesystemslib.exceptions.InvalidNameError, tuf.keydb.add_key, - rsakey3, keyid3, 'non-existent') - - # Reset the keydb to its original, default state. Other test functions - # expect only the 'default' repository to exist. - tuf.keydb.remove_keydb(repository_name) - - - - def test_remove_key(self): - # Test conditions using valid keyids. - rsakey = KEYS[0] - keyid = KEYS[0]['keyid'] - rsakey2 = KEYS[1] - keyid2 = KEYS[1]['keyid'] - rsakey3 = KEYS[2] - keyid3 = KEYS[2]['keyid'] - tuf.keydb.add_key(rsakey, keyid) - tuf.keydb.add_key(rsakey2, keyid2) - tuf.keydb.add_key(rsakey3, keyid3) - - self.assertEqual(None, tuf.keydb.remove_key(keyid)) - self.assertEqual(None, tuf.keydb.remove_key(keyid2)) - - # Ensure the keys were actually removed. - self.assertRaises(tuf.exceptions.UnknownKeyError, tuf.keydb.get_key, keyid) - self.assertRaises(tuf.exceptions.UnknownKeyError, tuf.keydb.get_key, keyid2) - - # Test for 'keyid' not in keydb. - self.assertRaises(tuf.exceptions.UnknownKeyError, tuf.keydb.remove_key, keyid) - - # Test condition for unknown key argument. - self.assertRaises(tuf.exceptions.UnknownKeyError, tuf.keydb.remove_key, '1') - - # Test condition for removal of keys from a non-default repository. 
- repository_name = 'example_repository' - tuf.keydb.create_keydb(repository_name) - tuf.keydb.add_key(rsakey, keyid, repository_name) - self.assertRaises(securesystemslib.exceptions.InvalidNameError, tuf.keydb.remove_key, keyid, 'non-existent') - tuf.keydb.remove_key(keyid, repository_name) - self.assertRaises(tuf.exceptions.UnknownKeyError, tuf.keydb.remove_key, keyid, repository_name) - - # Reset the keydb so that subsequent tests have access to the original, - # default keydb. - tuf.keydb.remove_keydb(repository_name) - - # Test conditions for arguments with invalid formats. - self.assertRaises(securesystemslib.exceptions.FormatError, tuf.keydb.remove_key, None) - self.assertRaises(securesystemslib.exceptions.FormatError, tuf.keydb.remove_key, '') - self.assertRaises(securesystemslib.exceptions.FormatError, tuf.keydb.remove_key, 123) - self.assertRaises(securesystemslib.exceptions.FormatError, tuf.keydb.remove_key, ['123']) - self.assertRaises(securesystemslib.exceptions.FormatError, tuf.keydb.remove_key, keyid, 123) - self.assertRaises(securesystemslib.exceptions.FormatError, tuf.keydb.remove_key, {'bad': '123'}) - self.assertRaises(securesystemslib.exceptions.Error, tuf.keydb.remove_key, rsakey3) - - - - def test_create_keydb_from_root_metadata(self): - # Test condition using a valid 'root_metadata' argument. - rsakey = KEYS[0] - keyid = KEYS[0]['keyid'] - rsakey2 = KEYS[1] - keyid2 = KEYS[1]['keyid'] - - keydict = {keyid: rsakey, keyid2: rsakey2} - - roledict = {'Root': {'keyids': [keyid], 'threshold': 1}, - 'Targets': {'keyids': [keyid2, keyid], 'threshold': 1}} - version = 8 - consistent_snapshot = False - expires = '1985-10-21T01:21:00Z' - - root_metadata = tuf.formats.build_dict_conforming_to_schema( - tuf.formats.ROOT_SCHEMA, - _type='root', - spec_version='1.0.0', - version=version, - expires=expires, - keys=keydict, - roles=roledict, - consistent_snapshot=consistent_snapshot) - - self.assertEqual(None, tuf.keydb.create_keydb_from_root_metadata(root_metadata)) - tuf.keydb.create_keydb_from_root_metadata(root_metadata) - - # Ensure 'keyid' and 'keyid2' were added to the keydb database. - self.assertEqual(rsakey, tuf.keydb.get_key(keyid)) - self.assertEqual(rsakey2, tuf.keydb.get_key(keyid2)) - - # Verify that the keydb is populated for a non-default repository. - repository_name = 'example_repository' - tuf.keydb.create_keydb_from_root_metadata(root_metadata, repository_name) - - # Test conditions for arguments with invalid formats. - self.assertRaises(securesystemslib.exceptions.FormatError, - tuf.keydb.create_keydb_from_root_metadata, None) - self.assertRaises(securesystemslib.exceptions.FormatError, - tuf.keydb.create_keydb_from_root_metadata, '') - self.assertRaises(securesystemslib.exceptions.FormatError, - tuf.keydb.create_keydb_from_root_metadata, 123) - self.assertRaises(securesystemslib.exceptions.FormatError, - tuf.keydb.create_keydb_from_root_metadata, ['123']) - self.assertRaises(securesystemslib.exceptions.FormatError, - tuf.keydb.create_keydb_from_root_metadata, {'bad': '123'}) - self.assertRaises(securesystemslib.exceptions.FormatError, - tuf.keydb.create_keydb_from_root_metadata, root_metadata, 123) - - # Verify that a keydb cannot be created for a non-existent repository name. - tuf.keydb.create_keydb_from_root_metadata(root_metadata, 'non-existent') - - # Remove the 'non-existent' and 'example_repository' key database so that - # subsequent test functions have access to a default keydb. 
-    tuf.keydb.remove_keydb(repository_name)
-    tuf.keydb.remove_keydb('non-existent')
-
-
-    # Test conditions for correctly formatted 'root_metadata' arguments but
-    # containing incorrect keyids or key types.  In these conditions, the keys
-    # should not be added to the keydb database and a warning should be logged.
-    tuf.keydb.clear_keydb()
-
-    # 'keyid' does not match 'rsakey2'.
-    # In this case, the key will still be added to the keydb.
-    keydict[keyid] = rsakey2
-
-    # Key with invalid keytype.
-    rsakey3 = KEYS[2]
-    keyid3 = KEYS[2]['keyid']
-    rsakey3['keytype'] = 'bad_keytype'
-    keydict[keyid3] = rsakey3
-
-    version = 8
-    expires = '1985-10-21T01:21:00Z'
-
-    root_metadata = tuf.formats.build_dict_conforming_to_schema(
-        tuf.formats.ROOT_SCHEMA,
-        _type='root',
-        spec_version='1.0.0',
-        version=version,
-        expires=expires,
-        keys=keydict,
-        roles=roledict,
-        consistent_snapshot=consistent_snapshot)
-
-    self.assertEqual(None, tuf.keydb.create_keydb_from_root_metadata(root_metadata))
-
-    # Ensure only 'keyid2' and 'keyid' were added to the keydb database.
-    # 'keyid3' should not be stored.
-    self.maxDiff = None
-    self.assertEqual(rsakey2, tuf.keydb.get_key(keyid2))
-
-    test_key = rsakey2
-    test_key['keyid'] = keyid
-    self.assertEqual(test_key, tuf.keydb.get_key(keyid))
-
-    self.assertRaises(tuf.exceptions.UnknownKeyError, tuf.keydb.get_key, keyid3)
-
-    # Reset values.
-    rsakey3['keytype'] = 'rsa'
-    rsakey2['keyid'] = keyid2
-
-
-
-# Run unit test.
-if __name__ == '__main__':
-  utils.configure_test_logging(sys.argv)
-  unittest.main()
diff --git a/tests/test_log_old.py b/tests/test_log_old.py
deleted file mode 100755
index a92661b305..0000000000
--- a/tests/test_log_old.py
+++ /dev/null
@@ -1,210 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2014 - 2017, New York University and the TUF contributors
-# SPDX-License-Identifier: MIT OR Apache-2.0
-
-"""
-<Program Name>
-  test_log_old.py
-
-<Author>
-  Vladimir Diaz <vladimir.v.diaz@gmail.com>
-
-<Started>
-  May 1, 2014.
-
-<Copyright>
-  See LICENSE-MIT OR LICENSE for licensing information.
-
-<Purpose>
-  Unit test for 'log.py'.
-"""
-
-import logging
-import unittest
-import os
-import shutil
-import sys
-import importlib
-
-import tuf
-import tuf.log
-import tuf.settings
-
-import securesystemslib
-import securesystemslib.util
-
-from tests import utils
-
-
-# We explicitly create a logger which is a child of the tuf hierarchy,
-# instead of using the standard getLogger(__name__) pattern, because the
-# tests are not part of the tuf hierarchy and we are testing functionality
-# of the tuf package explicitly enabled on the tuf hierarchy.
-logger = logging.getLogger('tuf.test_log')
-
-log_levels = [logging.CRITICAL, logging.ERROR, logging.WARNING,
-    logging.INFO, logging.DEBUG]
-
-
-class TestLog(unittest.TestCase):
-
-  def setUp(self):
-    # Store the current log level so it can be restored after the test.
-    self._initial_level = logging.getLogger('tuf').level
-
-  def tearDown(self):
-    tuf.log.remove_console_handler()
-    tuf.log.disable_file_logging()
-    logging.getLogger('tuf').level = self._initial_level
-
-
-
-
-  def test_set_log_level(self):
-    # Test normal case.
-    global log_levels
-    global logger
-
-    tuf.log.set_log_level()
-    self.assertTrue(logger.isEnabledFor(logging.DEBUG))
-
-    for level in log_levels:
-      tuf.log.set_log_level(level)
-      self.assertTrue(logger.isEnabledFor(level))
-
-    # Test for improperly formatted argument.
-    self.assertRaises(securesystemslib.exceptions.FormatError, tuf.log.set_log_level, '123')
-
-    # Test for invalid argument.
- self.assertRaises(securesystemslib.exceptions.FormatError, tuf.log.set_log_level, 51) - - - - def test_set_filehandler_log_level(self): - # Normal case. Default log level. - # A file handler is not set by default. Add one now before attempting to - # set the log level. - self.assertRaises(tuf.exceptions.Error, tuf.log.set_filehandler_log_level) - tuf.log.enable_file_logging() - tuf.log.set_filehandler_log_level() - - # Expected log levels. - for level in log_levels: - tuf.log.set_log_level(level) - - # Test that the log level of the file handler cannot be set because - # file logging is disabled (via tuf.settings.ENABLE_FILE_LOGGING). - tuf.settings.ENABLE_FILE_LOGGING = False - importlib.reload(tuf.log) - - # Test for improperly formatted argument. - self.assertRaises(securesystemslib.exceptions.FormatError, tuf.log.set_filehandler_log_level, '123') - - # Test for invalid argument. - self.assertRaises(securesystemslib.exceptions.FormatError, tuf.log.set_filehandler_log_level, 51) - - - def test_set_console_log_level(self): - # Test setting a console log level without first adding one. - self.assertRaises(securesystemslib.exceptions.Error, tuf.log.set_console_log_level) - - # Normal case. Default log level. Setting the console log level first - # requires adding a console logger. - tuf.log.add_console_handler() - tuf.log.set_console_log_level() - - # Expected log levels. - for level in log_levels: - tuf.log.set_console_log_level(level) - - # Test for improperly formatted argument. - self.assertRaises(securesystemslib.exceptions.FormatError, tuf.log.set_console_log_level, '123') - - # Test for invalid argument. - self.assertRaises(securesystemslib.exceptions.FormatError, tuf.log.set_console_log_level, 51) - - - - - - def test_add_console_handler(self): - # Normal case. Default log level. - tuf.log.add_console_handler() - - # Adding a console handler when one has already been added. - tuf.log.add_console_handler() - - # Expected log levels. - for level in log_levels: - tuf.log.set_console_log_level(level) - - # Test for improperly formatted argument. - self.assertRaises(securesystemslib.exceptions.FormatError, tuf.log.add_console_handler, '123') - - # Test for invalid argument. - self.assertRaises(securesystemslib.exceptions.FormatError, tuf.log.add_console_handler, 51) - - # Test that an exception is printed to the console. Note: A stack trace - # is not included in the exception output because 'log.py' applies a filter - # to minimize the amount of output to the console. - try: - raise TypeError('Test exception output in the console.') - - except TypeError as e: - logger.exception(e) - - - def test_remove_console_handler(self): - # Normal case. - tuf.log.remove_console_handler() - - # Removing a console handler that has not been added. Logs a warning. - tuf.log.remove_console_handler() - - - def test_enable_file_logging(self): - # Normal case. - if os.path.exists(tuf.settings.LOG_FILENAME): - shutil.move( - tuf.settings.LOG_FILENAME, tuf.settings.LOG_FILENAME + '.backup') - - tuf.log.enable_file_logging() - self.assertTrue(os.path.exists(tuf.settings.LOG_FILENAME)) - if os.path.exists(tuf.settings.LOG_FILENAME + '.backup'): - shutil.move( - tuf.settings.LOG_FILENAME + '.backup', tuf.settings.LOG_FILENAME) - - # The file logger must first be unset before attempting to re-add it. 
-    self.assertRaises(tuf.exceptions.Error, tuf.log.enable_file_logging)
-
-    tuf.log.disable_file_logging()
-    tuf.log.enable_file_logging('my_log_file.log')
-    logger.debug('testing file logging')
-    self.assertTrue(os.path.exists('my_log_file.log'))
-
-    # Test for an improperly formatted argument.
-    tuf.log.disable_file_logging()
-    self.assertRaises(securesystemslib.exceptions.FormatError,
-        tuf.log.enable_file_logging, 1)
-
-
-  def test_disable_file_logging(self):
-    # Normal case.
-    tuf.log.enable_file_logging('my.log')
-    logger.debug('debug message')
-    junk, hashes = securesystemslib.util.get_file_details('my.log')
-    tuf.log.disable_file_logging()
-    logger.debug('new debug message')
-    junk, hashes2 = securesystemslib.util.get_file_details('my.log')
-    self.assertEqual(hashes, hashes2)
-
-    # An exception should not be raised if an attempt is made to disable
-    # the file logger when it has already been disabled.
-    tuf.log.disable_file_logging()
-
-
-# Run unit test.
-if __name__ == '__main__':
-  utils.configure_test_logging(sys.argv)
-  unittest.main()
diff --git a/tests/test_mirrors_old.py b/tests/test_mirrors_old.py
deleted file mode 100755
index 0d530154c6..0000000000
--- a/tests/test_mirrors_old.py
+++ /dev/null
@@ -1,138 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2012 - 2017, New York University and the TUF contributors
-# SPDX-License-Identifier: MIT OR Apache-2.0
-
-"""
-<Program Name>
-  test_mirrors_old.py
-
-<Author>
-  Konstantin Andrianov.
-
-<Started>
-  March 26, 2012.
-
-<Copyright>
-  See LICENSE-MIT OR LICENSE for licensing information.
-
-<Purpose>
-  Unit test for 'mirrors.py'.
-"""
-
-import unittest
-import sys
-
-import tuf.mirrors as mirrors
-import tuf.unittest_toolbox as unittest_toolbox
-
-from tests import utils
-
-import securesystemslib
-import securesystemslib.util
-
-
-class TestMirrors(unittest_toolbox.Modified_TestCase):
-
-  def setUp(self):
-
-    unittest_toolbox.Modified_TestCase.setUp(self)
-
-    self.mirrors = \
-      {'mirror1': {'url_prefix' : 'http://mirror1.com',
-                   'metadata_path' : 'metadata',
-                   'targets_path' : 'targets'},
-       'mirror2': {'url_prefix' : 'http://mirror2.com',
-                   'metadata_path' : 'metadata',
-                   'targets_path' : 'targets',
-                   'confined_target_dirs' : ['targets/release/',
-                                             'targets/release/']},
-       'mirror3': {'url_prefix' : 'http://mirror3.com',
-                   'targets_path' : 'targets',
-                   'confined_target_dirs' : ['targets/release/v2/']},
-       # confined_target_dirs = [] means that none of the targets on
-       # that mirror is available.
-       'mirror4': {'url_prefix' : 'http://mirror4.com',
-                   'metadata_path' : 'metadata',
-                   'confined_target_dirs' : []},
-       # Make sure we are testing when confined_target_dirs is [''] which means
-       # that all targets are available on that mirror.
-       'mirror5': {'url_prefix' : 'http://mirror5.com',
-                   'targets_path' : 'targets',
-                   'confined_target_dirs' : ['']}
-      }
-
-
-
-  def test_get_list_of_mirrors(self):
-    # Test: Normal case.
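-    # (get_list_of_mirrors() forms candidate URLs of the form
-    # <url_prefix>/<metadata_path or targets_path>/<file_path>.  A mirror is
-    # skipped if it lacks the relevant path or if, for target files, its
-    # 'confined_target_dirs' does not cover the requested file; [''] means
-    # every target is available.)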
-
-    # 2 matches: mirrors without effective target directory confinement
-    # ('mirror1' sets no 'confined_target_dirs'; 'mirror5' uses ['']).
-    mirror_list = mirrors.get_list_of_mirrors('target', 'a.txt', self.mirrors)
-    self.assertEqual(len(mirror_list), 2)
-    self.assertTrue(self.mirrors['mirror1']['url_prefix']+'/targets/a.txt' in \
-        mirror_list)
-    self.assertTrue(self.mirrors['mirror5']['url_prefix']+'/targets/a.txt' in \
-        mirror_list)
-
-    mirror_list = mirrors.get_list_of_mirrors('target', 'a/b', self.mirrors)
-    self.assertEqual(len(mirror_list), 2)
-    self.assertTrue(self.mirrors['mirror1']['url_prefix']+'/targets/a/b' in \
-        mirror_list)
-    self.assertTrue(self.mirrors['mirror5']['url_prefix']+'/targets/a/b' in \
-        mirror_list)
-
-    # 3 matches: two mirrors with non-confined targets and one with matching
-    # confinement.
-    mirror_list = mirrors.get_list_of_mirrors('target', 'release/v2/c', self.mirrors)
-    self.assertEqual(len(mirror_list), 3)
-    self.assertTrue(self.mirrors['mirror1']['url_prefix']+'/targets/release/v2/c' in \
-        mirror_list)
-    self.assertTrue(self.mirrors['mirror3']['url_prefix']+'/targets/release/v2/c' in \
-        mirror_list)
-    self.assertTrue(self.mirrors['mirror5']['url_prefix']+'/targets/release/v2/c' in \
-        mirror_list)
-
-    # 3 matches: metadata found on 3 mirrors.
-    mirror_list = mirrors.get_list_of_mirrors('meta', 'release.txt', self.mirrors)
-    self.assertEqual(len(mirror_list), 3)
-    self.assertTrue(self.mirrors['mirror1']['url_prefix']+'/metadata/release.txt' in \
-        mirror_list)
-    self.assertTrue(self.mirrors['mirror2']['url_prefix']+'/metadata/release.txt' in \
-        mirror_list)
-    self.assertTrue(self.mirrors['mirror4']['url_prefix']+'/metadata/release.txt' in \
-        mirror_list)
-
-    # No matches.
-    del self.mirrors['mirror1']
-    del self.mirrors['mirror5']
-    mirror_list = mirrors.get_list_of_mirrors('target', 'a/b', self.mirrors)
-    self.assertFalse(mirror_list)
-
-
-    # Test: Invalid 'file_type'.
-    self.assertRaises(securesystemslib.exceptions.Error, mirrors.get_list_of_mirrors,
-        self.random_string(), 'a', self.mirrors)
-
-    self.assertRaises(securesystemslib.exceptions.Error, mirrors.get_list_of_mirrors,
-        12345, 'a', self.mirrors)
-
-    # Test: Improperly formatted 'file_path'.
-    self.assertRaises(securesystemslib.exceptions.FormatError, mirrors.get_list_of_mirrors,
-        'meta', 12345, self.mirrors)
-
-    # Test: Improperly formatted 'mirrors_dict' object.
-    self.assertRaises(securesystemslib.exceptions.FormatError, mirrors.get_list_of_mirrors,
-        'meta', 'a', 12345)
-
-    self.assertRaises(securesystemslib.exceptions.FormatError, mirrors.get_list_of_mirrors,
-        'meta', 'a', ['a'])
-
-    self.assertRaises(securesystemslib.exceptions.FormatError, mirrors.get_list_of_mirrors,
-        'meta', 'a', {'a':'b'})
-
-
-
-# Run the unittests.
-if __name__ == '__main__':
-  utils.configure_test_logging(sys.argv)
-  unittest.main()
diff --git a/tests/test_mix_and_match_attack_old.py b/tests/test_mix_and_match_attack_old.py
deleted file mode 100755
index cc033c291e..0000000000
--- a/tests/test_mix_and_match_attack_old.py
+++ /dev/null
@@ -1,236 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2012 - 2017, New York University and the TUF contributors
-# SPDX-License-Identifier: MIT OR Apache-2.0
-
-"""
-<Program Name>
-  test_mix_and_match_attack_old.py
-
-<Author>
-  Konstantin Andrianov.
-
-<Started>
-  March 27, 2012.
-
-  April 6, 2014.
-    Refactored to use the 'unittest' module (test conditions in code, rather
-    than verifying text output), use pre-generated repository files, and
-    discontinue use of the old repository tools.  Modify the previous scenario
-    simulated for the mix-and-match attack.
-    -vladimir.v.diaz
-
-<Copyright>
-  See LICENSE-MIT OR LICENSE for licensing information.
-
-<Purpose>
-  Simulate a mix-and-match attack.  In a mix-and-match attack, an attacker is
-  able to trick clients into using a combination of metadata that never existed
-  together on the repository at the same time.
-
-  Note: There is no difference between 'updates' and 'target' files.
-"""
-
-import os
-import tempfile
-import shutil
-import logging
-import unittest
-import sys
-
-import tuf.exceptions
-import tuf.log
-import tuf.client.updater as updater
-import tuf.repository_tool as repo_tool
-import tuf.unittest_toolbox as unittest_toolbox
-import tuf.roledb
-import tuf.keydb
-
-from tests import utils
-
-
-# The repository tool is imported and logs console messages by default.
-# Disable console log messages generated by this unit test.
-repo_tool.disable_console_log_messages()
-
-logger = logging.getLogger(__name__)
-
-
-
-class TestMixAndMatchAttack(unittest_toolbox.Modified_TestCase):
-
-  @classmethod
-  def setUpClass(cls):
-    # Create a temporary directory to store the repository, metadata, and
-    # target files.  'temporary_directory' must be deleted in tearDownClass()
-    # so that temporary files are always removed, even when exceptions occur.
-    cls.temporary_directory = tempfile.mkdtemp(dir=os.getcwd())
-
-    # Launch a SimpleHTTPServer (serves files in the current directory).
-    # Test cases will request metadata and target files that have been
-    # pre-generated in 'tuf/tests/repository_data', which will be served by the
-    # SimpleHTTPServer launched here.  The test cases of this unit test assume
-    # the pre-generated metadata files have a specific structure, such
-    # as a delegated role 'targets/role1', three target files, five key files,
-    # etc.
-    cls.server_process_handler = utils.TestServerProcess(log=logger)
-
-
-
-  @classmethod
-  def tearDownClass(cls):
-    # Clean up the resources and flush the logged lines (if any).
-    cls.server_process_handler.clean()
-
-    # Remove the temporary repository directory, which should contain all the
-    # metadata, targets, and key files generated by all the test cases.
-    shutil.rmtree(cls.temporary_directory)
-
-
-
-
-  def setUp(self):
-    # We are inheriting from a custom class.
-    unittest_toolbox.Modified_TestCase.setUp(self)
-
-    self.repository_name = 'test_repository1'
-
-    # Copy the original repository files provided in the test folder so that
-    # any modifications made to repository files are restricted to the copies.
-    # The 'repository_data' directory is expected to exist in 'tuf/tests/'.
-    original_repository_files = os.path.join(os.getcwd(), 'repository_data')
-    temporary_repository_root = \
-      self.make_temp_directory(directory=self.temporary_directory)
-
-    # The original repository, keystore, and client directories will be copied
-    # for each test case.
-    original_repository = os.path.join(original_repository_files, 'repository')
-    original_client = os.path.join(original_repository_files, 'client')
-    original_keystore = os.path.join(original_repository_files, 'keystore')
-
-    # Save references to the often-needed client repository directories.
-    # Test cases need these references to access metadata and target files.
-    self.repository_directory = \
-      os.path.join(temporary_repository_root, 'repository')
-    self.client_directory = os.path.join(temporary_repository_root, 'client')
-    self.keystore_directory = os.path.join(temporary_repository_root, 'keystore')
-
-    # Copy the original 'repository', 'client', and 'keystore' directories
-    # to the temporary repository the test cases can use.
-    shutil.copytree(original_repository, self.repository_directory)
-    shutil.copytree(original_client, self.client_directory)
-    shutil.copytree(original_keystore, self.keystore_directory)
-
-    # Set the url prefix required by the 'tuf/client/updater.py' updater.
-    # 'path/to/tmp/repository' -> 'localhost:8001/tmp/repository'.
-    repository_basepath = self.repository_directory[len(os.getcwd()):]
-    url_prefix = 'http://' + utils.TEST_HOST_ADDRESS + ':' \
-        + str(self.server_process_handler.port) + repository_basepath
-
-    # Set 'tuf.settings.repositories_directory' to the temporary client
-    # directory copied from the original repository files.
-    tuf.settings.repositories_directory = self.client_directory
-    self.repository_mirrors = {'mirror1': {'url_prefix': url_prefix,
-        'metadata_path': 'metadata',
-        'targets_path': 'targets'}}
-
-    # Create the repository instance.  The test cases will use this client
-    # updater to refresh metadata, fetch target files, etc.
-    self.repository_updater = updater.Updater(self.repository_name,
-        self.repository_mirrors)
-
-
-  def tearDown(self):
-    tuf.roledb.clear_roledb(clear_all=True)
-    tuf.keydb.clear_keydb(clear_all=True)
-
-    # Logs stdout and stderr from the server subprocess.
-    self.server_process_handler.flush_log()
-
-    # Remove temporary directory.
-    unittest_toolbox.Modified_TestCase.tearDown(self)
-
-
-  def test_with_tuf(self):
-    # Scenario:
-    # An attacker tries to trick the client into installing files indicated by
-    # a previous release of its corresponding metadata.  The outdated metadata
-    # is properly named and was previously valid, but is no longer current
-    # according to the latest 'snapshot.json' role.  Generate a new snapshot of
-    # the repository after modifying a target file of 'role1.json'.
-    # Backup 'role1.json' (the delegated role to be updated, and then inserted
-    # again for the mix-and-match attack).
-    role1_path = os.path.join(self.repository_directory, 'metadata', 'role1.json')
-    backup_role1 = os.path.join(self.repository_directory, 'role1.json.backup')
-    shutil.copy(role1_path, backup_role1)
-
-    # Backup 'file3.txt', specified by 'role1.json'.
-    file3_path = os.path.join(self.repository_directory, 'targets', 'file3.txt')
-    shutil.copy(file3_path, file3_path + '.backup')
-
-    # Re-generate the required metadata on the remote repository.  The affected
-    # metadata must be properly updated and signed with 'repository_tool.py',
-    # otherwise the client will reject it as invalid.
-    repository = repo_tool.load_repository(self.repository_directory)
-
-    # Load the signing keys so that newly generated metadata is properly signed.
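-    # (Only the keys along the affected chain are needed: modifying 'role1'
-    # produces a new role1.json, snapshot.json must record role1's new
-    # version, and timestamp.json must sign for the new snapshot.json.)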
-    timestamp_keyfile = os.path.join(self.keystore_directory, 'timestamp_key')
-    role1_keyfile = os.path.join(self.keystore_directory, 'delegation_key')
-    snapshot_keyfile = os.path.join(self.keystore_directory, 'snapshot_key')
-    timestamp_private = \
-      repo_tool.import_ed25519_privatekey_from_file(timestamp_keyfile, 'password')
-    role1_private = \
-      repo_tool.import_ed25519_privatekey_from_file(role1_keyfile, 'password')
-    snapshot_private = \
-      repo_tool.import_ed25519_privatekey_from_file(snapshot_keyfile, 'password')
-
-    repository.targets('role1').load_signing_key(role1_private)
-    repository.snapshot.load_signing_key(snapshot_private)
-    repository.timestamp.load_signing_key(timestamp_private)
-
-    # Modify a 'role1.json' target file, and add it to its metadata so that a
-    # new version is generated.
-    with open(file3_path, 'wt') as file_object:
-      file_object.write('This is role2\'s target file.')
-    repository.targets('role1').add_target(os.path.basename(file3_path))
-
-    repository.writeall()
-
-    # Move the staged metadata to the "live" metadata.
-    shutil.rmtree(os.path.join(self.repository_directory, 'metadata'))
-    shutil.copytree(os.path.join(self.repository_directory, 'metadata.staged'),
-        os.path.join(self.repository_directory, 'metadata'))
-
-    # Insert the previously valid 'role1.json'.  The TUF client should reject it.
-    shutil.move(backup_role1, role1_path)
-
-    # Verify that the TUF client detects unexpected metadata (previously valid,
-    # but not up-to-date with the latest snapshot of the repository) and
-    # refuses to continue the update process.  Refresh top-level metadata so
-    # that the client is aware of the latest snapshot of the repository.
-    self.repository_updater.refresh()
-
-    try:
-      with utils.ignore_deprecation_warnings('tuf.client.updater'):
-        self.repository_updater.targets_of_role('role1')
-
-    # Verify that the specific
-    # 'tuf.exceptions.BadVersionNumberError' exception is raised by
-    # each mirror.
-    except tuf.exceptions.NoWorkingMirrorError as exception:
-      for mirror_url, mirror_error in exception.mirror_errors.items():
-        url_prefix = self.repository_mirrors['mirror1']['url_prefix']
-        url_file = os.path.join(url_prefix, 'metadata', 'role1.json')
-
-        # Verify that 'role1.json' is the culprit.
-        self.assertEqual(url_file.replace('\\', '/'), mirror_url)
-        self.assertTrue(isinstance(
-            mirror_error, tuf.exceptions.BadVersionNumberError))
-
-    else:
-      self.fail('TUF did not prevent a mix-and-match attack.')
-
-
-if __name__ == '__main__':
-  utils.configure_test_logging(sys.argv)
-  unittest.main()
diff --git a/tests/test_multiple_repositories_integration_old.py b/tests/test_multiple_repositories_integration_old.py
deleted file mode 100755
index 6387764894..0000000000
--- a/tests/test_multiple_repositories_integration_old.py
+++ /dev/null
@@ -1,268 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2017, New York University and the TUF contributors
-# SPDX-License-Identifier: MIT OR Apache-2.0
-
-"""
-<Program Name>
-  test_multiple_repositories_integration_old.py
-
-<Author>
-  Vladimir Diaz <vladimir.v.diaz@gmail.com>
-
-<Started>
-  February 2, 2017
-
-<Copyright>
-  See LICENSE-MIT OR LICENSE for licensing information.
-
-<Purpose>
-  Verify that clients and the repository tools are able to keep track of
-  multiple repositories and separate sets of metadata for each.
-""" - -import os -import tempfile -import logging -import shutil -import unittest -import json -import sys - -import tuf -import tuf.log -import tuf.roledb -import tuf.client.updater as updater -import tuf.settings -import tuf.unittest_toolbox as unittest_toolbox -import tuf.repository_tool as repo_tool - -from tests import utils - -import securesystemslib - -logger = logging.getLogger(__name__) - -repo_tool.disable_console_log_messages() - - -class TestMultipleRepositoriesIntegration(unittest_toolbox.Modified_TestCase): - - def setUp(self): - # Modified_Testcase can handle temp dir removal - unittest_toolbox.Modified_TestCase.setUp(self) - self.temporary_directory = self.make_temp_directory(directory=os.getcwd()) - - # Copy the original repository files provided in the test folder so that - # any modifications made to repository files are restricted to the copies. - # The 'repository_data' directory is expected to exist in 'tuf/tests/'. - original_repository_files = os.path.join(os.getcwd(), 'repository_data') - - self.temporary_repository_root = tempfile.mkdtemp(dir=self.temporary_directory) - - # The original repository, keystore, and client directories will be copied - # for each test case. - original_repository = os.path.join(original_repository_files, 'repository') - original_client = os.path.join(original_repository_files, 'client', 'test_repository1') - original_keystore = os.path.join(original_repository_files, 'keystore') - original_map_file = os.path.join(original_repository_files, 'map.json') - - # Save references to the often-needed client repository directories. - # Test cases need these references to access metadata and target files. - self.repository_directory = os.path.join(self.temporary_repository_root, - 'repository_server1') - self.repository_directory2 = os.path.join(self.temporary_repository_root, - 'repository_server2') - - # Setting 'tuf.settings.repositories_directory' with the temporary client - # directory copied from the original repository files. - tuf.settings.repositories_directory = self.temporary_repository_root - - self.repository_name = 'test_repository1' - self.repository_name2 = 'test_repository2' - - self.client_directory = os.path.join(self.temporary_repository_root, - self.repository_name) - self.client_directory2 = os.path.join(self.temporary_repository_root, - self.repository_name2) - - self.keystore_directory = os.path.join(self.temporary_repository_root, 'keystore') - self.map_file = os.path.join(self.client_directory, 'map.json') - self.map_file2 = os.path.join(self.client_directory2, 'map.json') - - # Copy the original 'repository', 'client', and 'keystore' directories - # to the temporary repository the test cases can use. - shutil.copytree(original_repository, self.repository_directory) - shutil.copytree(original_repository, self.repository_directory2) - shutil.copytree(original_client, self.client_directory) - shutil.copytree(original_client, self.client_directory2) - shutil.copyfile(original_map_file, self.map_file) - shutil.copyfile(original_map_file, self.map_file2) - shutil.copytree(original_keystore, self.keystore_directory) - - # Launch a SimpleHTTPServer (serves files in the current directory). - # Test cases will request metadata and target files that have been - # pre-generated in 'tuf/tests/repository_data', which will be served by the - # SimpleHTTPServer launched here. 
-    # SimpleHTTPServer launched here.  The test cases of this unit test assume
-    # the pre-generated metadata files have a specific structure, such
-    # as a delegated role 'targets/role1', three target files, five key files,
-    # etc.
-
-    # The absolute path of simple_server.py is computed here because the
-    # server subprocess is launched with a different working directory, in
-    # which case the script otherwise cannot be found.
-    SIMPLE_SERVER_PATH = os.path.join(os.getcwd(), 'simple_server.py')
-
-    # Create a subprocess running a server.
-    self.server_process_handler = utils.TestServerProcess(log=logger,
-        server=SIMPLE_SERVER_PATH, popen_cwd=self.repository_directory)
-
-    logger.debug('Server process started.')
-
-    # Create a subprocess running a second server.
-    self.server_process_handler2 = utils.TestServerProcess(log=logger,
-        server=SIMPLE_SERVER_PATH, popen_cwd=self.repository_directory2)
-
-    logger.debug('Server process 2 started.')
-
-    url_prefix = \
-        'http://' + utils.TEST_HOST_ADDRESS + ':' + \
-        str(self.server_process_handler.port)
-    url_prefix2 = \
-        'http://' + utils.TEST_HOST_ADDRESS + ':' + \
-        str(self.server_process_handler2.port)
-
-    self.repository_mirrors = {'mirror1': {'url_prefix': url_prefix,
-        'metadata_path': 'metadata',
-        'targets_path': 'targets'}}
-
-    self.repository_mirrors2 = {'mirror1': {'url_prefix': url_prefix2,
-        'metadata_path': 'metadata',
-        'targets_path': 'targets'}}
-
-    # Create the repository instances.  The test cases will use these client
-    # updaters to refresh metadata, fetch target files, etc.
-    self.repository_updater = updater.Updater(self.repository_name,
-        self.repository_mirrors)
-    self.repository_updater2 = updater.Updater(self.repository_name2,
-        self.repository_mirrors2)
-
-
-  def tearDown(self):
-    # Clean up the resources and flush the logged lines (if any).
-    self.server_process_handler.clean()
-    self.server_process_handler2.clean()
-
-    # updater.Updater() populates the roledb with the name "test_repository1".
-    tuf.roledb.clear_roledb(clear_all=True)
-    tuf.keydb.clear_keydb(clear_all=True)
-
-    # Remove top-level temporary directory.
-    unittest_toolbox.Modified_TestCase.tearDown(self)
-
-
-  def test_update(self):
-    self.assertEqual('test_repository1', str(self.repository_updater))
-    self.assertEqual('test_repository2', str(self.repository_updater2))
-
-    self.assertEqual(sorted(['role1', 'root', 'snapshot', 'targets', 'timestamp']),
-        sorted(tuf.roledb.get_rolenames('test_repository1')))
-
-    self.assertEqual(sorted(['role1', 'root', 'snapshot', 'targets', 'timestamp']),
-        sorted(tuf.roledb.get_rolenames('test_repository2')))
-
-    # Note: refresh() resets the known metadata and updates the latest
-    # top-level metadata.
-    self.repository_updater.refresh()
-
-    self.assertEqual(sorted(['root', 'snapshot', 'targets', 'timestamp']),
-        sorted(tuf.roledb.get_rolenames('test_repository1')))
-
-    # test_repository2 wasn't refreshed and should still know about delegated
-    # roles.
-    self.assertEqual(sorted(['root', 'role1', 'snapshot', 'targets', 'timestamp']),
-        sorted(tuf.roledb.get_rolenames('test_repository2')))
-
-    # 'role1.json' should be downloaded, because it provides info for the
-    # requested 'file3.txt'.
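-    # (get_one_valid_targetinfo() walks the delegation graph starting from
-    # 'targets', downloading delegated metadata such as 'role1.json' on
-    # demand, which is why the delegated rolenames reappear in the roledb
-    # after the call below.)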
- valid_targetinfo = self.repository_updater.get_one_valid_targetinfo('file3.txt') - - self.assertEqual(sorted(['role2', 'role1', 'root', 'snapshot', 'targets', 'timestamp']), - sorted(tuf.roledb.get_rolenames('test_repository1'))) - - - - - def test_repository_tool(self): - - self.assertEqual(self.repository_name, str(self.repository_updater)) - self.assertEqual(self.repository_name2, str(self.repository_updater2)) - - repository = repo_tool.load_repository(self.repository_directory, - self.repository_name) - repository2 = repo_tool.load_repository(self.repository_directory2, - self.repository_name2) - - repository.timestamp.version = 88 - self.assertEqual(['timestamp'], tuf.roledb.get_dirty_roles( - self.repository_name)) - self.assertEqual([], tuf.roledb.get_dirty_roles(self.repository_name2)) - - repository2.timestamp.version = 100 - self.assertEqual(['timestamp'], tuf.roledb.get_dirty_roles( - self.repository_name2)) - - key_file = os.path.join(self.keystore_directory, 'timestamp_key') - timestamp_private = repo_tool.import_ed25519_privatekey_from_file(key_file, "password") - - repository.timestamp.load_signing_key(timestamp_private) - repository2.timestamp.load_signing_key(timestamp_private) - - repository.write('timestamp', increment_version_number=False) - repository2.write('timestamp', increment_version_number=False) - - # And move the staged metadata to the "live" metadata. - shutil.rmtree(os.path.join(self.repository_directory, 'metadata')) - shutil.rmtree(os.path.join(self.repository_directory2, 'metadata')) - - shutil.copytree(os.path.join(self.repository_directory, 'metadata.staged'), - os.path.join(self.repository_directory, 'metadata')) - shutil.copytree(os.path.join(self.repository_directory2, 'metadata.staged'), - os.path.join(self.repository_directory2, 'metadata')) - - # Verify that the client retrieves the expected updates. - logger.info('Downloading timestamp from server 1.') - self.repository_updater.refresh() - - self.assertEqual( - 88, self.repository_updater.metadata['current']['timestamp']['version']) - logger.info('Downloading timestamp from server 2.') - self.repository_updater2.refresh() - - self.assertEqual( - 100, self.repository_updater2.metadata['current']['timestamp']['version']) - - # Test the behavior of the multi-repository updater. - map_file = securesystemslib.util.load_json_file(self.map_file) - map_file['repositories'][self.repository_name] = ['http://localhost:' \ - + str(self.server_process_handler.port)] - map_file['repositories'][self.repository_name2] = ['http://localhost:' \ - + str(self.server_process_handler2.port)] - with open(self.map_file, 'w') as file_object: - file_object.write(json.dumps(map_file)) - - # Try to load a non-existent map file. 
-    self.assertRaises(tuf.exceptions.Error, updater.MultiRepoUpdater, 'bad_path')
-
-    multi_repo_updater = updater.MultiRepoUpdater(self.map_file)
-    valid_targetinfo = multi_repo_updater.get_valid_targetinfo('file3.txt')
-
-    for my_updater, my_targetinfo in valid_targetinfo.items():
-      my_updater.download_target(my_targetinfo, self.temporary_directory)
-      self.assertTrue(os.path.exists(os.path.join(self.temporary_directory, 'file3.txt')))
-
-
-
-if __name__ == '__main__':
-  utils.configure_test_logging(sys.argv)
-  unittest.main()
diff --git a/tests/test_replay_attack_old.py b/tests/test_replay_attack_old.py
deleted file mode 100755
index 92dc3ba466..0000000000
--- a/tests/test_replay_attack_old.py
+++ /dev/null
@@ -1,321 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2012 - 2017, New York University and the TUF contributors
-# SPDX-License-Identifier: MIT OR Apache-2.0
-
-"""
-<Program Name>
-  test_replay_attack_old.py
-
-<Author>
-  Konstantin Andrianov.
-
-<Started>
-  February 22, 2012.
-
-  April 5, 2014.
-    Refactored to use the 'unittest' module (test conditions in code, rather
-    than verifying text output), use pre-generated repository files, and
-    discontinue use of the old repository tools.  Expanded comments.
-    -vladimir.v.diaz
-
-<Copyright>
-  See LICENSE-MIT OR LICENSE for licensing information.
-
-<Purpose>
-  Simulate a replay, or rollback, attack.  In a replay attack, a client is
-  tricked into installing software that is older than that which the client
-  previously knew to be available.
-
-  Note: There is no difference between 'updates' and 'target' files.
-"""
-
-import os
-import tempfile
-import datetime
-import shutil
-import logging
-import unittest
-import sys
-from urllib import request
-
-import tuf.formats
-import tuf.log
-import tuf.client.updater as updater
-import tuf.repository_tool as repo_tool
-import tuf.unittest_toolbox as unittest_toolbox
-
-from tests import utils
-
-import securesystemslib
-
-
-# The repository tool is imported and logs console messages by default.
-# Disable console log messages generated by this unit test.
-repo_tool.disable_console_log_messages()
-
-logger = logging.getLogger(__name__)
-
-
-
-class TestReplayAttack(unittest_toolbox.Modified_TestCase):
-
-  @classmethod
-  def setUpClass(cls):
-    # Create a temporary directory to store the repository, metadata, and target
-    # files.  'temporary_directory' must be deleted in tearDownClass() so that
-    # temporary files are always removed, even when exceptions occur.
-    cls.temporary_directory = tempfile.mkdtemp(dir=os.getcwd())
-
-    # Launch a SimpleHTTPServer (serves files in the current directory).
-    # Test cases will request metadata and target files that have been
-    # pre-generated in 'tuf/tests/repository_data', which will be served by the
-    # SimpleHTTPServer launched here.  The test cases of this unit test assume
-    # the pre-generated metadata files have a specific structure, such
-    # as a delegated role 'targets/role1', three target files, five key files,
-    # etc.
-    cls.server_process_handler = utils.TestServerProcess(log=logger)
-
-
-
-  @classmethod
-  def tearDownClass(cls):
-    # Clean up the resources and flush the logged lines (if any).
-    cls.server_process_handler.clean()
-
-    # Remove the temporary repository directory, which should contain all the
-    # metadata, targets, and key files generated by all the test cases.
-    shutil.rmtree(cls.temporary_directory)
-
-
-
-
-  def setUp(self):
-    # We are inheriting from a custom class.
-    unittest_toolbox.Modified_TestCase.setUp(self)
-
-    self.repository_name = 'test_repository1'
-
-    # Copy the original repository files provided in the test folder so that
-    # any modifications made to repository files are restricted to the copies.
-    # The 'repository_data' directory is expected to exist in 'tuf/tests/'.
-    original_repository_files = os.path.join(os.getcwd(), 'repository_data')
-    temporary_repository_root = \
-      self.make_temp_directory(directory=self.temporary_directory)
-
-    # The original repository, keystore, and client directories will be copied
-    # for each test case.
-    original_repository = os.path.join(original_repository_files, 'repository')
-    original_client = os.path.join(original_repository_files, 'client')
-    original_keystore = os.path.join(original_repository_files, 'keystore')
-
-    # Save references to the often-needed client repository directories.
-    # Test cases need these references to access metadata and target files.
-    self.repository_directory = \
-      os.path.join(temporary_repository_root, 'repository')
-    self.client_directory = os.path.join(temporary_repository_root, 'client')
-    self.keystore_directory = os.path.join(temporary_repository_root, 'keystore')
-
-    # Copy the original 'repository', 'client', and 'keystore' directories
-    # to the temporary repository the test cases can use.
-    shutil.copytree(original_repository, self.repository_directory)
-    shutil.copytree(original_client, self.client_directory)
-    shutil.copytree(original_keystore, self.keystore_directory)
-
-    # Set the url prefix required by the 'tuf/client/updater.py' updater.
-    # 'path/to/tmp/repository' -> 'localhost:8001/tmp/repository'.
-    repository_basepath = self.repository_directory[len(os.getcwd()):]
-    url_prefix = 'http://' + utils.TEST_HOST_ADDRESS + ':' \
-        + str(self.server_process_handler.port) + repository_basepath
-
-    # Set 'tuf.settings.repositories_directory' to the temporary client
-    # directory copied from the original repository files.
-    tuf.settings.repositories_directory = self.client_directory
-    self.repository_mirrors = {'mirror1': {'url_prefix': url_prefix,
-        'metadata_path': 'metadata',
-        'targets_path': 'targets'}}
-
-    # Create the repository instance.  The test cases will use this client
-    # updater to refresh metadata, fetch target files, etc.
-    self.repository_updater = updater.Updater(self.repository_name,
-        self.repository_mirrors)
-
-
-  def tearDown(self):
-    tuf.roledb.clear_roledb(clear_all=True)
-    tuf.keydb.clear_keydb(clear_all=True)
-
-    # Logs stdout and stderr from the server subprocess.
-    self.server_process_handler.flush_log()
-
-    # Remove temporary directory.
-    unittest_toolbox.Modified_TestCase.tearDown(self)
-
-
-  def test_without_tuf(self):
-    # Scenario:
-    # 'timestamp.json' specifies the latest version of the repository files.
-    # A client should only accept the same version number (specified in the
-    # file) of the metadata, or greater.  A version number less than the one
-    # currently trusted should be rejected.  A non-TUF client may use a
-    # different mechanism for determining versions of metadata, but version
-    # numbers are used in this integration test because that is what TUF uses.
-    #
-    # Modify the repository's 'timestamp.json' so that a new version is
-    # generated and accepted by the client, and backup the previous version.
-    # The previous version is then returned the next time the client requests
-    # an update.
A non-TUF - # client (without a way to detect older versions of metadata, and thus - # updates) is expected to download older metadata and outdated files. - # Verify that the older version of 'timestamp.json' is downloaded by the - # non-TUF client. - - # Back up the current version of 'timestamp'. It will be used as the - # outdated version returned to the client. The repository tool removes - # obsolete metadata, so do *not* save the backup version in the - # repository's metadata directory. - timestamp_path = os.path.join(self.repository_directory, 'metadata', - 'timestamp.json') - backup_timestamp = os.path.join(self.repository_directory, - 'timestamp.json.backup') - shutil.copy(timestamp_path, backup_timestamp) - - # The fileinfo of the previous version is saved to verify that it is indeed - # accepted by the non-TUF client. - length, hashes = securesystemslib.util.get_file_details(backup_timestamp) - previous_fileinfo = tuf.formats.make_targets_fileinfo(length, hashes) - - # Modify the timestamp file on the remote repository. - repository = repo_tool.load_repository(self.repository_directory) - key_file = os.path.join(self.keystore_directory, 'timestamp_key') - timestamp_private = repo_tool.import_ed25519_privatekey_from_file(key_file, - 'password') - repository.timestamp.load_signing_key(timestamp_private) - - # Set an arbitrary expiration so that the repository tool generates a new - # version. - repository.timestamp.expiration = datetime.datetime(2030, 1, 1, 12, 12) - repository.writeall() - - # Move the staged metadata to the "live" metadata. - shutil.rmtree(os.path.join(self.repository_directory, 'metadata')) - shutil.copytree(os.path.join(self.repository_directory, 'metadata.staged'), - os.path.join(self.repository_directory, 'metadata')) - - # Save the fileinfo of the newly generated version to verify that it is - # saved by the client. - length, hashes = securesystemslib.util.get_file_details(timestamp_path) - new_fileinfo = tuf.formats.make_targets_fileinfo(length, hashes) - - url_prefix = self.repository_mirrors['mirror1']['url_prefix'] - url_file = os.path.join(url_prefix, 'metadata', 'timestamp.json') - client_timestamp_path = os.path.join(self.client_directory, - self.repository_name, 'metadata', 'current', 'timestamp.json') - - # On Windows, the URL portion should not contain back slashes. - request.urlretrieve(url_file.replace('\\', '/'), client_timestamp_path) - - length, hashes = securesystemslib.util.get_file_details(client_timestamp_path) - download_fileinfo = tuf.formats.make_targets_fileinfo(length, hashes) - - # Verify 'download_fileinfo' is equal to the new version. - self.assertEqual(download_fileinfo, new_fileinfo) - - # Restore the previous version of 'timestamp.json' on the remote repository - # and verify that the non-TUF client downloads it (expected, but not ideal). - shutil.move(backup_timestamp, timestamp_path) - - # On Windows, the URL portion should not contain back slashes. - request.urlretrieve(url_file.replace('\\', '/'), client_timestamp_path) - - length, hashes = securesystemslib.util.get_file_details(client_timestamp_path) - download_fileinfo = tuf.formats.make_targets_fileinfo(length, hashes) - - # Verify 'download_fileinfo' is equal to the previous version.
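- # Without TUF, the client keeps no record of a minimum trusted version - # number, so the replayed (older) 'timestamp.json' is accepted without - # complaint; the assertions below confirm this.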
- self.assertEqual(download_fileinfo, previous_fileinfo) - self.assertNotEqual(download_fileinfo, new_fileinfo) - - - - def test_with_tuf(self): - # The same scenario outlined in test_without_tuf() is followed here, except - # with a TUF client (the scenario is described in the opening comment - # block of that test case). The TUF client performs a refresh of top-level - # metadata, which also includes 'timestamp.json'. - - # Back up the current version of 'timestamp'. It will be used as the - # outdated version returned to the client. The repository tool removes - # obsolete metadata, so do *not* save the backup version in the - # repository's metadata directory. - timestamp_path = os.path.join(self.repository_directory, 'metadata', - 'timestamp.json') - backup_timestamp = os.path.join(self.repository_directory, - 'timestamp.json.backup') - shutil.copy(timestamp_path, backup_timestamp) - - # The fileinfo of the previous version is saved so that the replayed - # version can later be identified. - length, hashes = securesystemslib.util.get_file_details(backup_timestamp) - previous_fileinfo = tuf.formats.make_targets_fileinfo(length, hashes) - - # Modify the timestamp file on the remote repository. - repository = repo_tool.load_repository(self.repository_directory) - key_file = os.path.join(self.keystore_directory, 'timestamp_key') - timestamp_private = repo_tool.import_ed25519_privatekey_from_file(key_file, - 'password') - repository.timestamp.load_signing_key(timestamp_private) - - # Set an arbitrary expiration so that the repository tool generates a new - # version. - repository.timestamp.expiration = datetime.datetime(2030, 1, 1, 12, 12) - repository.writeall() - - # Move the staged metadata to the "live" metadata. - shutil.rmtree(os.path.join(self.repository_directory, 'metadata')) - shutil.copytree(os.path.join(self.repository_directory, 'metadata.staged'), - os.path.join(self.repository_directory, 'metadata')) - - # Save the fileinfo of the newly generated version to verify that it is - # saved by the client. - length, hashes = securesystemslib.util.get_file_details(timestamp_path) - new_fileinfo = tuf.formats.make_targets_fileinfo(length, hashes) - - # Refresh top-level metadata, including 'timestamp.json'. Installation of - # the new version of 'timestamp.json' is expected. - self.repository_updater.refresh() - - client_timestamp_path = os.path.join(self.client_directory, - self.repository_name, 'metadata', 'current', 'timestamp.json') - length, hashes = securesystemslib.util.get_file_details(client_timestamp_path) - download_fileinfo = tuf.formats.make_targets_fileinfo(length, hashes) - - # Verify 'download_fileinfo' is equal to the new version. - self.assertEqual(download_fileinfo, new_fileinfo) - - # Restore the previous version of 'timestamp.json' on the remote - # repository. This time the TUF client should detect the rollback. - shutil.move(backup_timestamp, timestamp_path) - logger.info('Moving the timestamp.json backup to the current version.') - - # Verify that the TUF client detects replayed metadata and refuses to - # continue the update process. - try: - self.repository_updater.refresh() - - # Verify that the specific 'tuf.exceptions.ReplayedMetadataError' is raised by each - # mirror.
- except tuf.exceptions.NoWorkingMirrorError as exception: - for mirror_url, mirror_error in exception.mirror_errors.items(): - url_prefix = self.repository_mirrors['mirror1']['url_prefix'] - url_file = os.path.join(url_prefix, 'metadata', 'timestamp.json') - - # Verify that 'timestamp.json' is the culprit. - self.assertEqual(url_file.replace('\\', '/'), mirror_url) - self.assertTrue(isinstance(mirror_error, tuf.exceptions.ReplayedMetadataError)) - - else: - self.fail('TUF did not prevent a replay attack.') - - -if __name__ == '__main__': - utils.configure_test_logging(sys.argv) - unittest.main() diff --git a/tests/test_repository_lib_old.py b/tests/test_repository_lib_old.py deleted file mode 100755 index aa784a2e37..0000000000 --- a/tests/test_repository_lib_old.py +++ /dev/null @@ -1,1102 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2014 - 2017, New York University and the TUF contributors -# SPDX-License-Identifier: MIT OR Apache-2.0 - -""" - - test_repository_lib_old.py - - - Vladimir Diaz - - - June 1, 2014. - - - See LICENSE-MIT OR LICENSE for licensing information. - - - Unit test for 'repository_lib.py'. -""" - -import os -import time -import datetime -import logging -import tempfile -import json -import shutil -import unittest -import copy -import sys - -import tuf -import tuf.formats -import tuf.log -import tuf.formats -import tuf.roledb -import tuf.keydb -import tuf.settings - -import tuf.repository_lib as repo_lib -import tuf.repository_tool as repo_tool - -from tests import utils - -import securesystemslib -import securesystemslib.exceptions -import securesystemslib.rsa_keys -import securesystemslib.interface -import securesystemslib.storage - -logger = logging.getLogger(__name__) - -repo_lib.disable_console_log_messages() - -TOP_LEVEL_METADATA_FILES = ['root.json', 'targets.json', 'timestamp.json', - 'snapshot.json'] - - -class TestRepositoryToolFunctions(unittest.TestCase): - @classmethod - def setUpClass(cls): - # Create a temporary directory to store the repository, metadata, and target - # files. 'temporary_directory' must be deleted in TearDownClass() so that - # temporary files are always removed, even when exceptions occur. - tuf.roledb.clear_roledb(clear_all=True) - tuf.keydb.clear_keydb(clear_all=True) - cls.temporary_directory = tempfile.mkdtemp(dir=os.getcwd()) - - - - @classmethod - def tearDownClass(cls): - # Remove the temporary repository directory, which should contain all the - # metadata, targets, and key files generated for the test cases. - tuf.roledb.clear_roledb(clear_all=True) - tuf.keydb.clear_keydb(clear_all=True) - - shutil.rmtree(cls.temporary_directory) - - - def setUp(self): - tuf.roledb.create_roledb('test_repository') - tuf.keydb.create_keydb('test_repository') - - - def tearDown(self): - tuf.roledb.clear_roledb(clear_all=True) - tuf.keydb.clear_keydb(clear_all=True) - - - - def test_import_rsa_privatekey_from_file(self): - # Test normal case. - temporary_directory = tempfile.mkdtemp(dir=self.temporary_directory) - - # Load one of the pre-generated key files from 'tuf/tests/repository_data'. - # 'password' unlocks the pre-generated key files. - key_filepath = os.path.join('repository_data', 'keystore', - 'root_key') - self.assertTrue(os.path.exists(key_filepath)) - - imported_rsa_key = repo_lib.import_rsa_privatekey_from_file(key_filepath, - 'password') - self.assertTrue(securesystemslib.formats.RSAKEY_SCHEMA.matches(imported_rsa_key)) - - - # Test improperly formatted argument. 
- self.assertRaises(securesystemslib.exceptions.FormatError, - repo_lib.import_rsa_privatekey_from_file, 3, 'pw') - - - # Test invalid argument. - # Non-existent key file. - nonexistent_keypath = os.path.join(temporary_directory, - 'nonexistent_keypath') - self.assertRaises(securesystemslib.exceptions.StorageError, - repo_lib.import_rsa_privatekey_from_file, - nonexistent_keypath, 'pw') - - # Invalid key file argument. - invalid_keyfile = os.path.join(temporary_directory, 'invalid_keyfile') - with open(invalid_keyfile, 'wb') as file_object: - file_object.write(b'bad keyfile') - self.assertRaises(securesystemslib.exceptions.CryptoError, repo_lib.import_rsa_privatekey_from_file, - invalid_keyfile, 'pw') - - - - def test_import_ed25519_privatekey_from_file(self): - # Test normal case. - # Generate ed25519 keys that can be imported. - temporary_directory = tempfile.mkdtemp(dir=self.temporary_directory) - ed25519_keypath = os.path.join(temporary_directory, 'ed25519_key') - securesystemslib.interface.generate_and_write_ed25519_keypair( - password='pw', filepath=ed25519_keypath) - - imported_ed25519_key = \ - repo_lib.import_ed25519_privatekey_from_file(ed25519_keypath, 'pw') - self.assertTrue(securesystemslib.formats.ED25519KEY_SCHEMA.matches(imported_ed25519_key)) - - - # Test improperly formatted argument. - self.assertRaises(securesystemslib.exceptions.FormatError, - repo_lib.import_ed25519_privatekey_from_file, 3, 'pw') - - - # Test invalid argument. - # Non-existent key file. - nonexistent_keypath = os.path.join(temporary_directory, - 'nonexistent_keypath') - self.assertRaises(securesystemslib.exceptions.StorageError, - repo_lib.import_ed25519_privatekey_from_file, - nonexistent_keypath, 'pw') - - # Invalid key file argument. - invalid_keyfile = os.path.join(temporary_directory, 'invalid_keyfile') - with open(invalid_keyfile, 'wb') as file_object: - file_object.write(b'bad keyfile') - - self.assertRaises(securesystemslib.exceptions.Error, - repo_lib.import_ed25519_privatekey_from_file, invalid_keyfile, 'pw') - - # Invalid private key imported (contains unexpected keytype.) - imported_ed25519_key['keytype'] = 'invalid_keytype' - - # Use 'rsa_keys.py' to bypass the key format validation performed by - # 'keys.py'. - salt, iterations, derived_key = \ - securesystemslib.rsa_keys._generate_derived_key('pw') - - # Store the derived key info in a dictionary, the object expected - # by the non-public _encrypt() routine. - derived_key_information = {'salt': salt, 'iterations': iterations, - 'derived_key': derived_key} - - # Convert the key object to json string format and encrypt it with the - # derived key. - encrypted_key = securesystemslib.rsa_keys._encrypt( - json.dumps(imported_ed25519_key), derived_key_information) - - with open(ed25519_keypath, 'wb') as file_object: - file_object.write(encrypted_key.encode('utf-8')) - - self.assertRaises(securesystemslib.exceptions.FormatError, - repo_lib.import_ed25519_privatekey_from_file, ed25519_keypath, 'pw') - - - - def test_get_top_level_metadata_filenames(self): - - # Test normal case. - metadata_directory = os.path.join('metadata/') - filenames = {'root.json': metadata_directory + 'root.json', - 'targets.json': metadata_directory + 'targets.json', - 'snapshot.json': metadata_directory + 'snapshot.json', - 'timestamp.json': metadata_directory + 'timestamp.json'} - - self.assertEqual(filenames, - repo_lib.get_top_level_metadata_filenames('metadata/')) - - # If a directory argument is not specified, the current working directory - # is used. 
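- # (The test below passes os.getcwd() explicitly, which is equivalent to the - # documented default.)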
- metadata_directory = os.getcwd() - filenames = {'root.json': os.path.join(metadata_directory, 'root.json'), - 'targets.json': os.path.join(metadata_directory, 'targets.json'), - 'snapshot.json': os.path.join(metadata_directory, 'snapshot.json'), - 'timestamp.json': os.path.join(metadata_directory, 'timestamp.json')} - self.assertEqual(filenames, - repo_lib.get_top_level_metadata_filenames(metadata_directory)) - - - # Test improperly formatted argument. - self.assertRaises(securesystemslib.exceptions.FormatError, - repo_lib.get_top_level_metadata_filenames, 3) - - - - def test_get_targets_metadata_fileinfo(self): - # Test normal case. - temporary_directory = tempfile.mkdtemp(dir=self.temporary_directory) - test_filepath = os.path.join(temporary_directory, 'file.txt') - - with open(test_filepath, 'wt') as file_object: - file_object.write('test file') - - # Generate test fileinfo object. It is assumed SHA256 and SHA512 hashes - # are computed by get_targets_metadata_fileinfo(). - file_length = os.path.getsize(test_filepath) - sha256_digest_object = securesystemslib.hash.digest_filename(test_filepath) - sha512_digest_object = securesystemslib.hash.digest_filename(test_filepath, algorithm='sha512') - file_hashes = {'sha256': sha256_digest_object.hexdigest(), - 'sha512': sha512_digest_object.hexdigest()} - fileinfo = {'length': file_length, 'hashes': file_hashes} - self.assertTrue(tuf.formats.TARGETS_FILEINFO_SCHEMA.matches(fileinfo)) - - storage_backend = securesystemslib.storage.FilesystemBackend() - - self.assertEqual(fileinfo, repo_lib.get_targets_metadata_fileinfo(test_filepath, - storage_backend)) - - - # Test improperly formatted argument. - self.assertRaises(securesystemslib.exceptions.FormatError, - repo_lib.get_targets_metadata_fileinfo, 3, - storage_backend) - - - # Test non-existent file. - nonexistent_filepath = os.path.join(temporary_directory, 'oops.txt') - self.assertRaises(securesystemslib.exceptions.Error, - repo_lib.get_targets_metadata_fileinfo, - nonexistent_filepath, storage_backend) - - - - def test_get_target_hash(self): - # Test normal case. - expected_target_hashes = { - '/file1.txt': 'e3a3d89eb3b70ce3fbce6017d7b8c12d4abd5635427a0e8a238f53157df85b3d', - '/README.txt': '8faee106f1bb69f34aaf1df1e3c2e87d763c4d878cb96b91db13495e32ceb0b0', - '/packages/file2.txt': 'c9c4a5cdd84858dd6a23d98d7e6e6b2aec45034946c16b2200bc317c75415e92' - } - for filepath, target_hash in expected_target_hashes.items(): - self.assertTrue(tuf.formats.RELPATH_SCHEMA.matches(filepath)) - self.assertTrue(securesystemslib.formats.HASH_SCHEMA.matches(target_hash)) - self.assertEqual(repo_lib.get_target_hash(filepath), target_hash) - - # Test for improperly formatted argument. - self.assertRaises(securesystemslib.exceptions.FormatError, repo_lib.get_target_hash, 8) - - - - def test_generate_root_metadata(self): - # Test normal case. - # Load the root metadata provided in 'tuf/tests/repository_data/'. - root_filepath = os.path.join('repository_data', 'repository', - 'metadata', 'root.json') - root_signable = securesystemslib.util.load_json_file(root_filepath) - - # generate_root_metadata() expects the top-level roles and keys to be - # available in 'tuf.keydb' and 'tuf.roledb'. 
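- # Populating the role and key databases from existing root metadata is the - # simplest way to satisfy that precondition in a test.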
- tuf.roledb.create_roledb_from_root_metadata(root_signable['signed']) - tuf.keydb.create_keydb_from_root_metadata(root_signable['signed']) - expires = '1985-10-21T01:22:00Z' - - root_metadata = repo_lib.generate_root_metadata(1, expires, - consistent_snapshot=False) - self.assertTrue(tuf.formats.ROOT_SCHEMA.matches(root_metadata)) - - root_keyids = tuf.roledb.get_role_keyids('root') - tuf.keydb._keydb_dict['default'][root_keyids[0]]['keytype'] = 'bad_keytype' - self.assertRaises(securesystemslib.exceptions.Error, repo_lib.generate_root_metadata, 1, - expires, consistent_snapshot=False) - - # Reset the root key's keytype so that the invalid key does not interfere - # with the remaining test conditions. - tuf.keydb._keydb_dict['default'][root_keyids[0]]['keytype'] = 'rsa' - - # Test improperly formatted arguments. - self.assertRaises(securesystemslib.exceptions.FormatError, repo_lib.generate_root_metadata, - '3', expires, False) - self.assertRaises(securesystemslib.exceptions.FormatError, repo_lib.generate_root_metadata, - 1, '3', False) - self.assertRaises(securesystemslib.exceptions.FormatError, repo_lib.generate_root_metadata, - 1, expires, 3) - - # Test for missing required roles and keys. - tuf.roledb.clear_roledb() - tuf.keydb.clear_keydb() - self.assertRaises(securesystemslib.exceptions.Error, repo_lib.generate_root_metadata, - 1, expires, False) - - - - def test_generate_targets_metadata(self): - # Test normal case. - temporary_directory = tempfile.mkdtemp(dir=self.temporary_directory) - targets_directory = os.path.join(temporary_directory, 'targets') - file1_path = os.path.join(targets_directory, 'file.txt') - securesystemslib.util.ensure_parent_dir(file1_path) - - with open(file1_path, 'wt') as file_object: - file_object.write('test file.') - - # Set valid generate_targets_metadata() arguments. Add a custom field for - # the 'target_files' target set below. - version = 1 - datetime_object = datetime.datetime(2030, 1, 1, 12, 0) - expiration_date = datetime_object.isoformat() + 'Z' - file_permissions = oct(os.stat(file1_path).st_mode)[4:] - target_files = {'file.txt': {'custom': {'file_permission': file_permissions}}} - - # Delegations data must be loaded into roledb, since - # generate_targets_metadata() tries to update the keyids and threshold of - # each delegation. - repository_path = os.path.join('repository_data', 'repository') - repository = repo_tool.load_repository(repository_path) - roleinfo = tuf.roledb.get_roleinfo('targets') - delegations = roleinfo['delegations'] - - targets_metadata = repo_lib.generate_targets_metadata(targets_directory, - target_files, version, expiration_date, delegations, False) - self.assertTrue(tuf.formats.TARGETS_SCHEMA.matches(targets_metadata)) - - # Valid arguments with 'delegations' set to None.
- targets_metadata = repo_lib.generate_targets_metadata(targets_directory, - target_files, version, expiration_date, None, False) - self.assertTrue(tuf.formats.TARGETS_SCHEMA.matches(targets_metadata)) - - # Test an update to the targets' delegations. - keystore_path = os.path.join('repository_data', 'keystore') - targets_public_keypath = os.path.join(keystore_path, 'targets_key.pub') - targets_public_key = securesystemslib.interface.\ - import_ed25519_publickey_from_file(targets_public_keypath) - - # Add a new key and threshold to the delegated role. - repository.targets('role1').add_verification_key(targets_public_key) - repository.targets('role1').threshold = 2 - role1_keyids = tuf.roledb.get_role_keyids('role1') - role1_threshold = tuf.roledb.get_role_threshold('role1') - roleinfo = tuf.roledb.get_roleinfo('targets') - delegations = roleinfo['delegations'] - old_delegations = copy.deepcopy(delegations) - - targets_metadata = repo_lib.generate_targets_metadata(targets_directory, - target_files, version, expiration_date, delegations, False) - self.assertNotEqual(old_delegations, delegations) - self.assertEqual(role1_keyids, - targets_metadata['delegations']['roles'][0]['keyids']) - self.assertEqual(role1_threshold, - targets_metadata['delegations']['roles'][0]['threshold']) - for keyid in role1_keyids: - self.assertIn(keyid, targets_metadata['delegations']['keys']) - - - # Verify that a '<digest>.filename' file is saved to 'targets_directory' if - # the 'write_consistent_targets' argument is True. - list_targets_directory = os.listdir(targets_directory) - targets_metadata = repo_lib.generate_targets_metadata(targets_directory, - target_files, version, expiration_date, delegations, - write_consistent_targets=True) - new_list_targets_directory = os.listdir(targets_directory) - - # Verify that 'targets_directory' contains only one extra item. - self.assertEqual(len(list_targets_directory) + 1, - len(new_list_targets_directory)) - - # Verify that an exception is not raised if the target files already exist. - repo_lib.generate_targets_metadata(targets_directory, target_files, - version, expiration_date, delegations, - write_consistent_targets=True) - - - # Verify that 'targets_metadata' contains a 'custom' entry (optional) - # for 'file.txt'. - self.assertTrue('custom' in targets_metadata['targets']['file.txt']) - - # Test improperly formatted arguments. - self.assertRaises(securesystemslib.exceptions.FormatError, repo_lib.generate_targets_metadata, - 3, target_files, version, expiration_date) - self.assertRaises(securesystemslib.exceptions.FormatError, repo_lib.generate_targets_metadata, - targets_directory, 3, version, expiration_date) - self.assertRaises(securesystemslib.exceptions.FormatError, repo_lib.generate_targets_metadata, - targets_directory, target_files, '3', expiration_date) - self.assertRaises(securesystemslib.exceptions.FormatError, repo_lib.generate_targets_metadata, - targets_directory, target_files, version, '3') - - # Improperly formatted 'delegations' and 'write_consistent_targets'. - self.assertRaises(securesystemslib.exceptions.FormatError, repo_lib.generate_targets_metadata, - targets_directory, target_files, version, expiration_date, - 3, False) - self.assertRaises(securesystemslib.exceptions.FormatError, repo_lib.generate_targets_metadata, - targets_directory, target_files, version, expiration_date, - delegations, 3) - - # Test non-existent target file.
- bad_target_file = \ - {'non-existent.txt': {'file_permission': file_permissions}} - - self.assertRaises(securesystemslib.exceptions.Error, repo_lib.generate_targets_metadata, - targets_directory, bad_target_file, version, - expiration_date) - - - # Test use of an existing fileinfo structures - target1_hashes = {'sha256': 'c2986576f5fdfd43944e2b19e775453b96748ec4fe2638a6d2f32f1310967095'} - target2_hashes = {'sha256': '517c0ce943e7274a2431fa5751e17cfd5225accd23e479bfaad13007751e87ef'} - - # Test missing expected field, hashes, when use_existing_fileinfo - target_files = {'file.txt': {'length': 555}} - self.assertRaises(securesystemslib.exceptions.Error, repo_lib.generate_targets_metadata, - targets_directory, target_files, version, expiration_date, - use_existing_fileinfo=True) - - # Test missing expected field, length, when use_existing_fileinfo - target_files = {'file.txt': {'hashes': target1_hashes}} - self.assertRaises(securesystemslib.exceptions.Error, repo_lib.generate_targets_metadata, - targets_directory, target_files, version, expiration_date, - use_existing_fileinfo=True) - - # Test missing both expected fields when use_existing_fileinfo - target_files = {'file.txt': {}} - self.assertRaises(securesystemslib.exceptions.Error, repo_lib.generate_targets_metadata, - targets_directory, target_files, version, expiration_date, - use_existing_fileinfo=True) - - target_files = {'file1.txt': {'custom': {'meta': 'foo'}, - 'hashes': target1_hashes, - 'length': 555}, - 'file2.txt': {'custom': {'meta': 'bar'}, - 'hashes': target2_hashes, - 'length': 42}} - targets_metadata = \ - repo_lib.generate_targets_metadata(targets_directory, target_files, - version, expiration_date, delegations, - False, use_existing_fileinfo=True) - - - def _setup_generate_snapshot_metadata_test(self): - # Test normal case. - temporary_directory = tempfile.mkdtemp(dir=self.temporary_directory) - original_repository_path = os.path.join('repository_data', - 'repository') - repository_directory = os.path.join(temporary_directory, 'repository') - shutil.copytree(original_repository_path, repository_directory) - metadata_directory = os.path.join(repository_directory, - repo_lib.METADATA_STAGED_DIRECTORY_NAME) - - targets_directory = os.path.join(repository_directory, repo_lib.TARGETS_DIRECTORY_NAME) - - version = 1 - expiration_date = '1985-10-21T13:20:00Z' - - # Load a valid repository so that top-level roles exist in roledb and - # generate_snapshot_metadata() has roles to specify in snapshot metadata. - storage_backend = securesystemslib.storage.FilesystemBackend() - repository = repo_tool.Repository(repository_directory, metadata_directory, - targets_directory, storage_backend) - repository_junk = repo_tool.load_repository(repository_directory) - - # Load a valid repository so that top-level roles exist in roledb and - # generate_snapshot_metadata() has roles to specify in snapshot metadata. - storage_backend = securesystemslib.storage.FilesystemBackend() - - # For testing purposes, store an invalid metadata file in the metadata directory - # to verify that it isn't loaded by generate_snapshot_metadata(). Unknown - # metadata file extensions should be ignored. 
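- # An '.xml' extension is chosen because snapshot generation should only - # consider '.json' metadata files; the tests below verify that this file is - # filtered out.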
- invalid_metadata_file = os.path.join(metadata_directory, 'role_file.xml') - with open(invalid_metadata_file, 'w') as file_object: - file_object.write('bad extension on metadata file') - - return metadata_directory, version, expiration_date, \ - storage_backend - - - def test_generate_snapshot_metadata(self): - metadata_directory, version, expiration_date, storage_backend = \ - self._setup_generate_snapshot_metadata_test() - - snapshot_metadata = \ - repo_lib.generate_snapshot_metadata(metadata_directory, version, - expiration_date, - storage_backend, - consistent_snapshot=False) - self.assertTrue(tuf.formats.SNAPSHOT_SCHEMA.matches(snapshot_metadata)) - - - # Test improperly formatted arguments. - self.assertRaises(securesystemslib.exceptions.FormatError, repo_lib.generate_snapshot_metadata, - 3, version, expiration_date, consistent_snapshot=False, - storage_backend=storage_backend) - self.assertRaises(securesystemslib.exceptions.FormatError, repo_lib.generate_snapshot_metadata, - metadata_directory, '3', expiration_date, storage_backend, - consistent_snapshot=False) - self.assertRaises(securesystemslib.exceptions.FormatError, repo_lib.generate_snapshot_metadata, - metadata_directory, version, '3', storage_backend, - consistent_snapshot=False) - self.assertRaises(securesystemslib.exceptions.FormatError, repo_lib.generate_snapshot_metadata, - metadata_directory, version, expiration_date, 3, - storage_backend) - - - - def test_generate_snapshot_metadata_with_length(self): - metadata_directory, version, expiration_date, storage_backend = \ - self._setup_generate_snapshot_metadata_test() - - snapshot_metadata = \ - repo_lib.generate_snapshot_metadata(metadata_directory, version, - expiration_date, - storage_backend, - consistent_snapshot=False, - use_length=True) - self.assertTrue(tuf.formats.SNAPSHOT_SCHEMA.matches(snapshot_metadata)) - - metadata_files_info_dict = snapshot_metadata['meta'] - for metadata_filename in sorted(os.listdir(metadata_directory), reverse=True): - - # In the metadata_directory, there are files with the format - # '1.root.json'. The version prefix should be removed. - stripped_filename, version = \ - repo_lib._strip_version_number(metadata_filename, - consistent_snapshot=True) - - # In the repository, the file "role_file.xml" has been added to make - # sure that non-json files aren't loaded. This file should be filtered. - if stripped_filename.endswith('.json'): - if stripped_filename not in TOP_LEVEL_METADATA_FILES: - # Check that 'length' is calculated but 'hashes' is not. - self.assertIn('length', metadata_files_info_dict[stripped_filename]) - self.assertNotIn('hashes', metadata_files_info_dict[stripped_filename]) - - - - def test_generate_snapshot_metadata_with_hashes(self): - metadata_directory, version, expiration_date, storage_backend = \ - self._setup_generate_snapshot_metadata_test() - - snapshot_metadata = \ - repo_lib.generate_snapshot_metadata(metadata_directory, version, - expiration_date, - storage_backend, - consistent_snapshot=False, - use_hashes=True) - self.assertTrue(tuf.formats.SNAPSHOT_SCHEMA.matches(snapshot_metadata)) - - metadata_files_info_dict = snapshot_metadata['meta'] - for metadata_filename in sorted(os.listdir(metadata_directory), reverse=True): - - # In the metadata_directory, there are files with the format - # '1.root.json'. The version prefix should be removed.
- stripped_filename, version = \ - repo_lib._strip_version_number(metadata_filename, - consistent_snapshot=True) - - # In the repository, the file "role_file.xml" has been added to make - # sure that non-json files aren't loaded. This file should be filtered. - if stripped_filename.endswith('.json'): - if stripped_filename not in TOP_LEVEL_METADATA_FILES: - # Check that 'hashes' is calculated but 'length' is not. - self.assertNotIn('length', metadata_files_info_dict[stripped_filename]) - self.assertIn('hashes', metadata_files_info_dict[stripped_filename]) - - - - def test_generate_snapshot_metadata_with_hashes_and_length(self): - metadata_directory, version, expiration_date, storage_backend = \ - self._setup_generate_snapshot_metadata_test() - - snapshot_metadata = \ - repo_lib.generate_snapshot_metadata(metadata_directory, version, - expiration_date, - storage_backend, - consistent_snapshot=False, - use_length=True, - use_hashes=True) - self.assertTrue(tuf.formats.SNAPSHOT_SCHEMA.matches(snapshot_metadata)) - - metadata_files_info_dict = snapshot_metadata['meta'] - for metadata_filename in sorted(os.listdir(metadata_directory), reverse=True): - - # In the metadata_directory, there are files with the format - # '1.root.json'. The version prefix should be removed. - stripped_filename, version = \ - repo_lib._strip_version_number(metadata_filename, - consistent_snapshot=True) - - # In the repository, the file "role_file.xml" has been added to make - # sure that non-json files aren't loaded. This file should be filtered. - if stripped_filename.endswith('.json'): - if stripped_filename not in TOP_LEVEL_METADATA_FILES: - # Check that both 'length' and 'hashes' are calculated. - self.assertIn('length', metadata_files_info_dict[stripped_filename]) - self.assertIn('hashes', metadata_files_info_dict[stripped_filename]) - - - - def _setup_generate_timestamp_metadata_test(self): - # Test normal case. - repository_name = 'test_repository' - temporary_directory = tempfile.mkdtemp(dir=self.temporary_directory) - original_repository_path = os.path.join('repository_data', - 'repository') - repository_directory = os.path.join(temporary_directory, 'repository') - shutil.copytree(original_repository_path, repository_directory) - metadata_directory = os.path.join(repository_directory, - repo_lib.METADATA_STAGED_DIRECTORY_NAME) - targets_directory = os.path.join(repository_directory, repo_lib.TARGETS_DIRECTORY_NAME) - - snapshot_file_path = os.path.join(metadata_directory, - repo_lib.SNAPSHOT_FILENAME) - - # Set valid generate_timestamp_metadata() arguments. - version = 1 - expiration_date = '1985-10-21T13:20:00Z' - - storage_backend = securesystemslib.storage.FilesystemBackend() - # Load a valid repository so that top-level roles exist in roledb and - # generate_timestamp_metadata() can be exercised.
- repository = repo_tool.Repository(repository_directory, metadata_directory, - targets_directory, storage_backend, repository_name) - - repository_junk = repo_tool.load_repository(repository_directory, - repository_name) - - return snapshot_file_path, version, expiration_date, storage_backend, \ - repository_name - - - def test_generate_timestamp_metadata(self): - snapshot_file_path, version, expiration_date, storage_backend, \ - repository_name = self._setup_generate_timestamp_metadata_test() - - timestamp_metadata = repo_lib.generate_timestamp_metadata(snapshot_file_path, - version, expiration_date, storage_backend, repository_name) - self.assertTrue(tuf.formats.TIMESTAMP_SCHEMA.matches(timestamp_metadata)) - - - # Test improperly formatted arguments. - self.assertRaises(securesystemslib.exceptions.FormatError, - repo_lib.generate_timestamp_metadata, 3, version, expiration_date, - storage_backend, repository_name) - self.assertRaises(securesystemslib.exceptions.FormatError, - repo_lib.generate_timestamp_metadata, snapshot_file_path, '3', - expiration_date, storage_backend, repository_name) - self.assertRaises(securesystemslib.exceptions.FormatError, - repo_lib.generate_timestamp_metadata, snapshot_file_path, version, '3', - storage_backend, repository_name) - - - - def test_generate_timestamp_metadata_without_length(self): - snapshot_file_path, version, expiration_date, storage_backend, \ - repository_name = self._setup_generate_timestamp_metadata_test() - - timestamp_metadata = repo_lib.generate_timestamp_metadata(snapshot_file_path, - version, expiration_date, storage_backend, repository_name, - use_length=False) - self.assertTrue(tuf.formats.TIMESTAMP_SCHEMA.matches(timestamp_metadata)) - - # Check that 'length' is not calculated but 'hashes' is. - timestamp_file_info = timestamp_metadata['meta'] - - self.assertNotIn('length', timestamp_file_info['snapshot.json']) - self.assertIn('hashes', timestamp_file_info['snapshot.json']) - - - - def test_generate_timestamp_metadata_without_hashes(self): - snapshot_file_path, version, expiration_date, storage_backend, \ - repository_name = self._setup_generate_timestamp_metadata_test() - - timestamp_metadata = repo_lib.generate_timestamp_metadata(snapshot_file_path, - version, expiration_date, storage_backend, repository_name, - use_hashes=False) - self.assertTrue(tuf.formats.TIMESTAMP_SCHEMA.matches(timestamp_metadata)) - - # Check that 'hashes' is not calculated but 'length' is. - timestamp_file_info = timestamp_metadata['meta'] - - self.assertIn('length', timestamp_file_info['snapshot.json']) - self.assertNotIn('hashes', timestamp_file_info['snapshot.json']) - - - - def test_generate_timestamp_metadata_without_length_and_hashes(self): - snapshot_file_path, version, expiration_date, storage_backend, \ - repository_name = self._setup_generate_timestamp_metadata_test() - - timestamp_metadata = repo_lib.generate_timestamp_metadata(snapshot_file_path, - version, expiration_date, storage_backend, repository_name, - use_hashes=False, use_length=False) - self.assertTrue(tuf.formats.TIMESTAMP_SCHEMA.matches(timestamp_metadata)) - - # Check that the 'length' and 'hashes' attributes are not added. - timestamp_file_info = timestamp_metadata['meta'] - self.assertNotIn('length', timestamp_file_info['snapshot.json']) - self.assertNotIn('hashes', timestamp_file_info['snapshot.json']) - - - - def test_sign_metadata(self): - # Test normal case.
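- # Note: this test signs root metadata with an RSA key and exercises the - # targets role with an Ed25519 public key, covering both key types present - # in the keystore fixtures.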
- repository_name = 'test_repository' - temporary_directory = tempfile.mkdtemp(dir=self.temporary_directory) - metadata_path = os.path.join('repository_data', 'repository', 'metadata') - keystore_path = os.path.join('repository_data', 'keystore') - root_filename = os.path.join(metadata_path, 'root.json') - root_metadata = securesystemslib.util.load_json_file(root_filename)['signed'] - targets_filename = os.path.join(metadata_path, 'targets.json') - targets_metadata = securesystemslib.util.load_json_file(targets_filename)['signed'] - - tuf.keydb.create_keydb_from_root_metadata(root_metadata, repository_name) - tuf.roledb.create_roledb_from_root_metadata(root_metadata, repository_name) - root_keyids = tuf.roledb.get_role_keyids('root', repository_name) - targets_keyids = tuf.roledb.get_role_keyids('targets', repository_name) - - root_private_keypath = os.path.join(keystore_path, 'root_key') - root_private_key = repo_lib.import_rsa_privatekey_from_file(root_private_keypath, - 'password') - - # Sign with a valid, but not a threshold, key. - targets_public_keypath = os.path.join(keystore_path, 'targets_key.pub') - targets_public_key = securesystemslib.interface.\ - import_ed25519_publickey_from_file(targets_public_keypath) - - # sign_metadata() expects the private key for 'root_metadata' to be in - # 'tuf.keydb'. Remove any public keys that may already be loaded before - # adding the private key, otherwise a KeyAlreadyExistsError exception is - # raised. - tuf.keydb.remove_key(root_private_key['keyid'], - repository_name=repository_name) - tuf.keydb.add_key(root_private_key, repository_name=repository_name) - tuf.keydb.remove_key(targets_public_key['keyid'], repository_name=repository_name) - tuf.keydb.add_key(targets_public_key, repository_name=repository_name) - - # Verify that a valid root signable is generated. - root_signable = repo_lib.sign_metadata(root_metadata, root_keyids, - root_filename, repository_name) - self.assertTrue(tuf.formats.SIGNABLE_SCHEMA.matches(root_signable)) - - # Test for an unset private key (in this case, targets'). - repo_lib.sign_metadata(targets_metadata, targets_keyids, targets_filename, - repository_name) - - # Add an invalid keytype to one of the root keys. - root_keyid = root_keyids[0] - tuf.keydb._keydb_dict[repository_name][root_keyid]['keytype'] = 'bad_keytype' - self.assertRaises(securesystemslib.exceptions.Error, repo_lib.sign_metadata, - root_metadata, root_keyids, root_filename, repository_name) - - # Test improperly formatted arguments. - self.assertRaises(securesystemslib.exceptions.FormatError, - repo_lib.sign_metadata, 3, root_keyids, 'root.json', repository_name) - self.assertRaises(securesystemslib.exceptions.FormatError, - repo_lib.sign_metadata, root_metadata, 3, 'root.json', repository_name) - self.assertRaises(securesystemslib.exceptions.FormatError, - repo_lib.sign_metadata, root_metadata, root_keyids, 3, repository_name) - - - - def test_write_metadata_file(self): - # Test normal case.
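- # The version number is bumped by one below so that write_metadata_file() - # treats the signable as a new metadata version.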
- temporary_directory = tempfile.mkdtemp(dir=self.temporary_directory) - metadata_directory = os.path.join('repository_data', 'repository', 'metadata') - root_filename = os.path.join(metadata_directory, 'root.json') - root_signable = securesystemslib.util.load_json_file(root_filename) - - output_filename = os.path.join(temporary_directory, 'root.json') - version_number = root_signable['signed']['version'] + 1 - - self.assertFalse(os.path.exists(output_filename)) - storage_backend = securesystemslib.storage.FilesystemBackend() - repo_lib.write_metadata_file(root_signable, output_filename, version_number, - consistent_snapshot=False, storage_backend=storage_backend) - self.assertTrue(os.path.exists(output_filename)) - - # Attempt to over-write the previously written metadata file. An exception - # is not raised in this case, only a debug message is logged. - repo_lib.write_metadata_file(root_signable, output_filename, version_number, - consistent_snapshot=False, storage_backend=storage_backend) - - # Test improperly formatted arguments. - self.assertRaises(securesystemslib.exceptions.FormatError, repo_lib.write_metadata_file, - 3, output_filename, version_number, False, storage_backend) - self.assertRaises(securesystemslib.exceptions.FormatError, repo_lib.write_metadata_file, - root_signable, 3, version_number, False, storage_backend) - self.assertRaises(securesystemslib.exceptions.FormatError, repo_lib.write_metadata_file, - root_signable, output_filename, '3', False, storage_backend) - self.assertRaises(securesystemslib.exceptions.FormatError, repo_lib.write_metadata_file, - root_signable, output_filename, storage_backend, version_number, 3) - - - - def test_create_tuf_client_directory(self): - # Test normal case. - temporary_directory = tempfile.mkdtemp(dir=self.temporary_directory) - repository_directory = os.path.join('repository_data', 'repository') - client_directory = os.path.join(temporary_directory, 'client') - - repo_lib.create_tuf_client_directory(repository_directory, client_directory) - - self.assertTrue(os.path.exists(client_directory)) - metadata_directory = os.path.join(client_directory, 'metadata') - current_directory = os.path.join(metadata_directory, 'current') - previous_directory = os.path.join(metadata_directory, 'previous') - self.assertTrue(os.path.exists(client_directory)) - self.assertTrue(os.path.exists(metadata_directory)) - self.assertTrue(os.path.exists(current_directory)) - self.assertTrue(os.path.exists(previous_directory)) - - - # Test improperly formatted arguments. - self.assertRaises(securesystemslib.exceptions.FormatError, - repo_lib.create_tuf_client_directory, 3, client_directory) - self.assertRaises(securesystemslib.exceptions.FormatError, - repo_lib.create_tuf_client_directory, repository_directory, 3) - - - # Test invalid argument (i.e., client directory already exists.) - self.assertRaises(tuf.exceptions.RepositoryError, - repo_lib.create_tuf_client_directory, repository_directory, - client_directory) - - # Test invalid client metadata directory (i.e., non-errno.EEXIST exceptions - # should be re-raised.) - shutil.rmtree(metadata_directory) - - # Save the original metadata directory name so that it can be restored - # after testing. - metadata_directory_name = repo_lib.METADATA_DIRECTORY_NAME - repo_lib.METADATA_DIRECTORY_NAME = '/' - - # Creation of the '/' directory is forbidden on all supported OSs. The '/' - # argument to create_tuf_client_directory should cause it to re-raise a - # non-errno.EEXIST exception. 
- self.assertRaises((OSError, tuf.exceptions.RepositoryError), - repo_lib.create_tuf_client_directory, repository_directory, '/') - - # Restore the metadata directory name in repo_lib. - repo_lib.METADATA_DIRECTORY_NAME = metadata_directory_name - - - - def test__generate_and_write_metadata(self): - # Test for an invalid, or unsupported, rolename. - # Load the root metadata provided in 'tuf/tests/repository_data/'. - repository_name = 'repository_name' - root_filepath = os.path.join('repository_data', 'repository', - 'metadata', 'root.json') - root_signable = securesystemslib.util.load_json_file(root_filepath) - - # _generate_and_write_metadata() expects the top-level roles - # (specifically 'snapshot') and keys to be available in 'tuf.roledb'. - tuf.roledb.create_roledb_from_root_metadata(root_signable['signed'], - repository_name) - tuf.keydb.create_keydb_from_root_metadata(root_signable['signed'], - repository_name) - temporary_directory = tempfile.mkdtemp(dir=self.temporary_directory) - targets_directory = os.path.join(temporary_directory, 'targets') - os.mkdir(targets_directory) - repository_directory = os.path.join(temporary_directory, 'repository') - metadata_directory = os.path.join(repository_directory, - repo_lib.METADATA_STAGED_DIRECTORY_NAME) - targets_metadata = os.path.join('repository_data', 'repository', 'metadata', - 'targets.json') - obsolete_metadata = os.path.join(metadata_directory, 'obsolete_role.json') - securesystemslib.util.ensure_parent_dir(obsolete_metadata) - shutil.copyfile(targets_metadata, obsolete_metadata) - - keystore_path = os.path.join('repository_data', 'keystore') - targets_private_keypath = os.path.join(keystore_path, 'targets_key') - targets_private_key = repo_lib.import_ed25519_privatekey_from_file(targets_private_keypath, - 'password') - tuf.keydb.remove_key(targets_private_key['keyid'], - repository_name=repository_name) - tuf.keydb.add_key(targets_private_key, repository_name=repository_name) - - # Verify that obsolete metadata is deleted (a metadata file exists on - # disk, but its role is unavailable in 'tuf.roledb'). First add the obsolete - # role to 'tuf.roledb' so that its metadata file can be written to disk.
- targets_roleinfo = tuf.roledb.get_roleinfo('targets', repository_name) - targets_roleinfo['version'] = 1 - expiration = \ - tuf.formats.unix_timestamp_to_datetime(int(time.time() + 86400)) - expiration = expiration.isoformat() + 'Z' - targets_roleinfo['expires'] = expiration - targets_roleinfo['signing_keyids'] = targets_roleinfo['keyids'] - tuf.roledb.add_role('obsolete_role', targets_roleinfo, - repository_name=repository_name) - - storage_backend = securesystemslib.storage.FilesystemBackend() - repo_lib._generate_and_write_metadata('obsolete_role', obsolete_metadata, - targets_directory, metadata_directory, storage_backend, - consistent_snapshot=False, filenames=None, - repository_name=repository_name) - - snapshot_filepath = os.path.join('repository_data', 'repository', - 'metadata', 'snapshot.json') - snapshot_signable = securesystemslib.util.load_json_file(snapshot_filepath) - tuf.roledb.remove_role('obsolete_role', repository_name) - self.assertTrue(os.path.exists(os.path.join(metadata_directory, - 'obsolete_role.json'))) - tuf.repository_lib._delete_obsolete_metadata(metadata_directory, - snapshot_signable['signed'], False, repository_name, - storage_backend) - self.assertFalse(os.path.exists(os.path.join(metadata_directory, - 'obsolete_role.json'))) - shutil.copyfile(targets_metadata, obsolete_metadata) - - - - def test__delete_obsolete_metadata(self): - repository_name = 'test_repository' - temporary_directory = tempfile.mkdtemp(dir=self.temporary_directory) - repository_directory = os.path.join(temporary_directory, 'repository') - metadata_directory = os.path.join(repository_directory, - repo_lib.METADATA_STAGED_DIRECTORY_NAME) - os.makedirs(metadata_directory) - snapshot_filepath = os.path.join('repository_data', 'repository', - 'metadata', 'snapshot.json') - snapshot_signable = securesystemslib.util.load_json_file(snapshot_filepath) - storage_backend = securesystemslib.storage.FilesystemBackend() - - # Create role metadata that should not exist in snapshot.json. - role1_filepath = os.path.join('repository_data', 'repository', 'metadata', - 'role1.json') - shutil.copyfile(role1_filepath, os.path.join(metadata_directory, 'role2.json')) - - repo_lib._delete_obsolete_metadata(metadata_directory, - snapshot_signable['signed'], True, repository_name, storage_backend) - - # _delete_obsolete_metadata should never delete root.json. - root_filepath = os.path.join('repository_data', 'repository', 'metadata', - 'root.json') - shutil.copyfile(root_filepath, os.path.join(metadata_directory, 'root.json')) - repo_lib._delete_obsolete_metadata(metadata_directory, - snapshot_signable['signed'], True, repository_name, storage_backend) - self.assertTrue(os.path.exists(os.path.join(metadata_directory, 'root.json'))) - - # Verify that a StorageError is raised for a non-existent metadata - # directory.
- self.assertRaises(securesystemslib.exceptions.StorageError, - repo_lib._delete_obsolete_metadata, 'non-existent', - snapshot_signable['signed'], True, repository_name, storage_backend) - - - def test__load_top_level_metadata(self): - repository_name = 'test_repository' - - temporary_directory = tempfile.mkdtemp(dir=self.temporary_directory) - repository_directory = os.path.join(temporary_directory, 'repository') - metadata_directory = os.path.join(repository_directory, - repo_lib.METADATA_STAGED_DIRECTORY_NAME) - targets_directory = os.path.join(repository_directory, - repo_lib.TARGETS_DIRECTORY_NAME) - shutil.copytree(os.path.join('repository_data', 'repository', 'metadata'), - metadata_directory) - shutil.copytree(os.path.join('repository_data', 'repository', 'targets'), - targets_directory) - - # Add a duplicate signature to the Root file for testing purposes. - root_file = os.path.join(metadata_directory, 'root.json') - signable = securesystemslib.util.load_json_file(os.path.join(metadata_directory, 'root.json')) - signable['signatures'].append(signable['signatures'][0]) - - storage_backend = securesystemslib.storage.FilesystemBackend() - repo_lib.write_metadata_file(signable, root_file, 8, False, storage_backend) - - filenames = repo_lib.get_top_level_metadata_filenames(metadata_directory) - repository = repo_tool.create_new_repository(repository_directory, repository_name) - repo_lib._load_top_level_metadata(repository, filenames, repository_name) - - # Manually add targets delegations to roledb, since - # repository.write('targets') will try to update its delegations. - targets_filepath = os.path.join('repository_data', 'repository', - 'metadata', 'targets.json') - targets_signable = securesystemslib.util.load_json_file(targets_filepath) - delegations = targets_signable['signed']['delegations'] - - roleinfo = {} - roleinfo['name'] = delegations['roles'][0]['name'] - roleinfo['keyids'] = delegations['roles'][0]['keyids'] - roleinfo['threshold'] = delegations['roles'][0]['threshold'] - roleinfo['version'] = 1 - tuf.roledb.add_role('role1', roleinfo, repository_name) - - keystore_path = os.path.join('repository_data', 'keystore') - root_privkey_path = os.path.join(keystore_path, 'root_key') - targets_privkey_path = os.path.join(keystore_path, 'targets_key') - snapshot_privkey_path = os.path.join(keystore_path, 'snapshot_key') - timestamp_privkey_path = os.path.join(keystore_path, 'timestamp_key') - - repository.root.load_signing_key(repo_lib.import_rsa_privatekey_from_file(root_privkey_path, 'password')) - repository.targets.load_signing_key(repo_lib.import_ed25519_privatekey_from_file(targets_privkey_path, 'password')) - repository.snapshot.load_signing_key(repo_lib.import_ed25519_privatekey_from_file(snapshot_privkey_path, 'password')) - repository.timestamp.load_signing_key(repo_lib.import_ed25519_privatekey_from_file(timestamp_privkey_path, 'password')) - - # Partially write all top-level roles (we increase the threshold of each - # top-level role so that they are flagged as partially written).
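- # Raising each threshold above the number of loaded signing keys leaves - # every role short of its signature threshold, which is what causes write() - # to flag it as partially written.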
- repository.root.threshold = repository.root.threshold + 1 - repository.snapshot.threshold = repository.snapshot.threshold + 1 - repository.targets.threshold = repository.targets.threshold + 1 - repository.timestamp.threshold = repository.timestamp.threshold + 1 - repository.write('root') - repository.write('snapshot') - repository.write('targets') - repository.write('timestamp') - - repo_lib._load_top_level_metadata(repository, filenames, repository_name) - - # Attempt to load a repository with missing top-level metadata. - for role_file in os.listdir(metadata_directory): - if role_file.endswith('.json') and not role_file.startswith('root'): - role_filename = os.path.join(metadata_directory, role_file) - os.remove(role_filename) - self.assertRaises(tuf.exceptions.RepositoryError, - repo_lib._load_top_level_metadata, repository, filenames, - repository_name) - - # Remove the required Root file and verify that an exception is raised. - os.remove(os.path.join(metadata_directory, 'root.json')) - self.assertRaises(tuf.exceptions.RepositoryError, - repo_lib._load_top_level_metadata, repository, filenames, - repository_name) - - - - def test__remove_invalid_and_duplicate_signatures(self): - # Remove duplicate PSS signatures (the same key generates valid, but - # different, signatures). First load a valid signable (in this case, the - # root role). - repository_name = 'test_repository' - root_filepath = os.path.join('repository_data', 'repository', - 'metadata', 'root.json') - root_signable = securesystemslib.util.load_json_file(root_filepath) - key_filepath = os.path.join('repository_data', 'keystore', 'root_key') - root_rsa_key = repo_lib.import_rsa_privatekey_from_file(key_filepath, - 'password') - - # Add 'root_rsa_key' to tuf.keydb, since - # _remove_invalid_and_duplicate_signatures() checks for unknown keys in - # tuf.keydb. - tuf.keydb.add_key(root_rsa_key, repository_name=repository_name) - - # Append a new valid, but duplicate, PSS signature, and test that - # duplicates are removed. create_signature() generates a signature for the - # key type of the first argument (i.e., root_rsa_key). - data = securesystemslib.formats.encode_canonical(root_signable['signed']).encode('utf-8') - new_pss_signature = securesystemslib.keys.create_signature(root_rsa_key, - data) - root_signable['signatures'].append(new_pss_signature) - - expected_number_of_signatures = len(root_signable['signatures']) - 1 - tuf.repository_lib._remove_invalid_and_duplicate_signatures(root_signable, - repository_name) - self.assertEqual(len(root_signable['signatures']), - expected_number_of_signatures) - - # Test for an invalid keyid. - root_signable['signatures'][0]['keyid'] = '404' - tuf.repository_lib._remove_invalid_and_duplicate_signatures(root_signable, - repository_name) - - # Re-add a valid signature for the following test condition. - root_signable['signatures'].append(new_pss_signature) - - # Test that an exception is not raised if an invalid sig is present, - # and that the invalid signature is removed from 'root_signable'. - root_signable['signatures'][0]['sig'] = '4040' - invalid_keyid = root_signable['signatures'][0]['keyid'] - tuf.repository_lib._remove_invalid_and_duplicate_signatures(root_signable, - repository_name) - - for signature in root_signable['signatures']: - self.assertFalse(invalid_keyid == signature['keyid']) - - - -# Run the test cases.
-if __name__ == '__main__': - utils.configure_test_logging(sys.argv) - unittest.main() diff --git a/tests/test_repository_tool_old.py b/tests/test_repository_tool_old.py deleted file mode 100755 index 8b04a8814c..0000000000 --- a/tests/test_repository_tool_old.py +++ /dev/null @@ -1,2199 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2014 - 2017, New York University and the TUF contributors -# SPDX-License-Identifier: MIT OR Apache-2.0 - -""" - - test_repository_tool_old.py - - - Vladimir Diaz - - - April 7, 2014. - - - See LICENSE-MIT OR LICENSE for licensing information. - - - Unit test for 'repository_tool.py'. -""" - -import os -import time -import datetime -import unittest -import logging -import tempfile -import shutil -import sys - -import tuf -import tuf.log -import tuf.formats -import tuf.roledb -import tuf.keydb -import tuf.repository_tool as repo_tool - -from tests import utils - -import securesystemslib -import securesystemslib.exceptions -import securesystemslib.storage - -logger = logging.getLogger(__name__) - -repo_tool.disable_console_log_messages() - - -class TestRepository(unittest.TestCase): - @classmethod - def setUpClass(cls): - # Create a temporary directory to store the repository, metadata, and target - # files. 'temporary_directory' must be deleted in TearDownClass() so that - # temporary files are always removed, even when exceptions occur. - cls.temporary_directory = tempfile.mkdtemp(dir=os.getcwd()) - - - @classmethod - def tearDownClass(cls): - # Remove the temporary repository directory, which should contain all the - # metadata, targets, and key files generated for the test cases. - shutil.rmtree(cls.temporary_directory) - - - - def setUp(self): - tuf.roledb.create_roledb('test_repository') - tuf.keydb.create_keydb('test_repository') - - - - def tearDown(self): - tuf.roledb.clear_roledb(clear_all=True) - tuf.keydb.clear_keydb(clear_all=True) - - - def test_init(self): - # Test normal case. - repository_name = 'test_repository' - storage_backend = securesystemslib.storage.FilesystemBackend() - repository = repo_tool.Repository('repository_directory/', - 'metadata_directory/', 'targets_directory/', storage_backend, - repository_name) - self.assertTrue(isinstance(repository.root, repo_tool.Root)) - self.assertTrue(isinstance(repository.snapshot, repo_tool.Snapshot)) - self.assertTrue(isinstance(repository.timestamp, repo_tool.Timestamp)) - self.assertTrue(isinstance(repository.targets, repo_tool.Targets)) - - # Test improperly formatted arguments. 
- self.assertRaises(securesystemslib.exceptions.FormatError, repo_tool.Repository, - storage_backend, 3, 'metadata_directory/', 'targets_directory') - self.assertRaises(securesystemslib.exceptions.FormatError, repo_tool.Repository, - 'repository_directory', storage_backend, 3, 'targets_directory') - self.assertRaises(securesystemslib.exceptions.FormatError, repo_tool.Repository, - 'repository_directory', 'metadata_directory', 3, storage_backend) - self.assertRaises(securesystemslib.exceptions.FormatError, repo_tool.Repository, - 'repository_directory/', 'metadata_directory/', 'targets_directory/', - storage_backend, repository_name, use_timestamp_length=3) - self.assertRaises(securesystemslib.exceptions.FormatError, repo_tool.Repository, - 'repository_directory/', 'metadata_directory/', 'targets_directory/', - storage_backend, repository_name, use_timestamp_length=False, - use_timestamp_hashes=3) - - - - def create_repository_directory(self): - # Create a repository directory and copy in test targets data - temporary_directory = tempfile.mkdtemp(dir=self.temporary_directory) - targets_directory = os.path.join(temporary_directory, 'repository', - repo_tool.TARGETS_DIRECTORY_NAME) - original_targets_directory = os.path.join('repository_data', - 'repository', 'targets') - shutil.copytree(original_targets_directory, targets_directory) - - # In this case, create_new_repository() creates the 'repository/' - # sub-directory in 'temporary_directory' if it does not exist. - return os.path.join(temporary_directory, 'repository') - - - - - def test_writeall(self): - # Test creation of a TUF repository. - # - # 1. Import public and private keys. - # 2. Add verification keys. - # 3. Load signing keys. - # 4. Add target files. - # 5. Perform delegation. - # 6. writeall() - # - # Copy the target files from 'tuf/tests/repository_data' so that writeall() - # has target fileinfo to include in metadata. - repository_name = 'test_repository' - repository_directory = self.create_repository_directory() - metadata_directory = os.path.join(repository_directory, - repo_tool.METADATA_STAGED_DIRECTORY_NAME) - - repository = repo_tool.create_new_repository(repository_directory, repository_name) - - # (1) Load the public and private keys of the top-level roles, and one - # delegated role. - keystore_directory = os.path.join('repository_data', 'keystore') - - # Load the public keys. - root_pubkey_path = os.path.join(keystore_directory, 'root_key.pub') - targets_pubkey_path = os.path.join(keystore_directory, 'targets_key.pub') - snapshot_pubkey_path = os.path.join(keystore_directory, 'snapshot_key.pub') - timestamp_pubkey_path = os.path.join(keystore_directory, 'timestamp_key.pub') - role1_pubkey_path = os.path.join(keystore_directory, 'delegation_key.pub') - - root_pubkey = repo_tool.import_rsa_publickey_from_file(root_pubkey_path) - targets_pubkey = \ - repo_tool.import_ed25519_publickey_from_file(targets_pubkey_path) - snapshot_pubkey = \ - repo_tool.import_ed25519_publickey_from_file(snapshot_pubkey_path) - timestamp_pubkey = \ - repo_tool.import_ed25519_publickey_from_file(timestamp_pubkey_path) - role1_pubkey = repo_tool.import_ed25519_publickey_from_file(role1_pubkey_path) - - # Load the private keys. 
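- # All pre-generated key files in 'repository_data/keystore' are encrypted - # with the password 'password'.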
- root_privkey_path = os.path.join(keystore_directory, 'root_key') - targets_privkey_path = os.path.join(keystore_directory, 'targets_key') - snapshot_privkey_path = os.path.join(keystore_directory, 'snapshot_key') - timestamp_privkey_path = os.path.join(keystore_directory, 'timestamp_key') - role1_privkey_path = os.path.join(keystore_directory, 'delegation_key') - - root_privkey = \ - repo_tool.import_rsa_privatekey_from_file(root_privkey_path, 'password') - targets_privkey = \ - repo_tool.import_ed25519_privatekey_from_file(targets_privkey_path, - 'password') - snapshot_privkey = \ - repo_tool.import_ed25519_privatekey_from_file(snapshot_privkey_path, - 'password') - timestamp_privkey = \ - repo_tool.import_ed25519_privatekey_from_file(timestamp_privkey_path, - 'password') - role1_privkey = \ - repo_tool.import_ed25519_privatekey_from_file(role1_privkey_path, - 'password') - - - # (2) Add top-level verification keys. - repository.root.add_verification_key(root_pubkey) - repository.targets.add_verification_key(targets_pubkey) - repository.snapshot.add_verification_key(snapshot_pubkey) - - # Verify that repository.writeall() fails for insufficient threshold - # of signatures (default threshold = 1). - self.assertRaises(tuf.exceptions.UnsignedMetadataError, repository.writeall) - - repository.timestamp.add_verification_key(timestamp_pubkey) - - - # (3) Load top-level signing keys. - repository.status() - repository.root.load_signing_key(root_privkey) - repository.status() - repository.targets.load_signing_key(targets_privkey) - repository.status() - repository.snapshot.load_signing_key(snapshot_privkey) - repository.status() - - # Verify that repository.writeall() fails for insufficient threshold - # of signatures (default threshold = 1). - self.assertRaises(tuf.exceptions.UnsignedMetadataError, repository.writeall) - - repository.timestamp.load_signing_key(timestamp_privkey) - - - # (4) Add target files. - target1 = 'file1.txt' - target2 = 'file2.txt' - target3 = 'file3.txt' - repository.targets.add_target(target1) - repository.targets.add_target(target2) - - # (5) Perform delegation. - repository.targets.delegate('role1', [role1_pubkey], [target3]) - repository.targets('role1').load_signing_key(role1_privkey) - - # (6) Write repository. - repository.writeall() - - # Verify that the expected metadata is written. - for role in ['root.json', 'targets.json', 'snapshot.json', 'timestamp.json']: - role_filepath = os.path.join(metadata_directory, role) - role_signable = securesystemslib.util.load_json_file(role_filepath) - - # Raise 'securesystemslib.exceptions.FormatError' if 'role_signable' is - # an invalid signable. - tuf.formats.check_signable_object_format(role_signable) - - self.assertTrue(os.path.exists(role_filepath)) - - # Verify the 'role1.json' delegation is also written. - role1_filepath = os.path.join(metadata_directory, 'role1.json') - role1_signable = securesystemslib.util.load_json_file(role1_filepath) - tuf.formats.check_signable_object_format(role1_signable) - - # Verify that an exception is *not* raised for multiple - # repository.writeall(). - repository.writeall() - - # Verify that status() does not raise an exception. - repository.status() - - # Verify that status() does not raise - # 'tuf.exceptions.InsufficientKeysError' if a top-level role - # does not contain a threshold of keys. 
-    targets_roleinfo = tuf.roledb.get_roleinfo('targets', repository_name)
-    old_threshold = targets_roleinfo['threshold']
-    targets_roleinfo['threshold'] = 10
-    tuf.roledb.update_roleinfo('targets', targets_roleinfo,
-        repository_name=repository_name)
-    repository.status()
-
-    # Restore the original threshold values.
-    targets_roleinfo = tuf.roledb.get_roleinfo('targets', repository_name)
-    targets_roleinfo['threshold'] = old_threshold
-    tuf.roledb.update_roleinfo('targets', targets_roleinfo,
-        repository_name=repository_name)
-
-    # Verify that status() does not raise
-    # 'tuf.exceptions.InsufficientKeysError' if a delegated role
-    # does not contain a threshold of keys.
-    role1_roleinfo = tuf.roledb.get_roleinfo('role1', repository_name)
-    old_role1_threshold = role1_roleinfo['threshold']
-    role1_roleinfo['threshold'] = 10
-    tuf.roledb.update_roleinfo('role1', role1_roleinfo,
-        repository_name=repository_name)
-    repository.status()
-
-    # Restore role1's threshold.
-    role1_roleinfo = tuf.roledb.get_roleinfo('role1', repository_name)
-    role1_roleinfo['threshold'] = old_role1_threshold
-    tuf.roledb.update_roleinfo('role1', role1_roleinfo,
-        repository_name=repository_name)
-
-    # Verify that status() does not raise
-    # 'tuf.exceptions.UnsignedMetadataError' if any of the top-level roles
-    # are improperly signed.  Test that 'root' is improperly signed.
-    repository.root.unload_signing_key(root_privkey)
-    repository.root.load_signing_key(targets_privkey)
-    repository.status()
-
-    repository.targets('role1').unload_signing_key(role1_privkey)
-    repository.targets('role1').load_signing_key(targets_privkey)
-    repository.status()
-
-    # Reset Root and 'role1', and verify Targets.
-    repository.root.unload_signing_key(targets_privkey)
-    repository.root.load_signing_key(root_privkey)
-    repository.targets('role1').unload_signing_key(targets_privkey)
-    repository.targets('role1').load_signing_key(role1_privkey)
-    repository.targets.unload_signing_key(targets_privkey)
-    repository.targets.load_signing_key(snapshot_privkey)
-    repository.status()
-
-    # Reset Targets and verify Snapshot.
-    repository.targets.unload_signing_key(snapshot_privkey)
-    repository.targets.load_signing_key(targets_privkey)
-    repository.snapshot.unload_signing_key(snapshot_privkey)
-    repository.snapshot.load_signing_key(timestamp_privkey)
-    repository.status()
-
-    # Reset Snapshot and verify Timestamp.
-    repository.snapshot.unload_signing_key(timestamp_privkey)
-    repository.snapshot.load_signing_key(snapshot_privkey)
-    repository.timestamp.unload_signing_key(timestamp_privkey)
-    repository.timestamp.load_signing_key(root_privkey)
-    repository.status()
-
-    # Reset Timestamp.
-    repository.timestamp.unload_signing_key(root_privkey)
-    repository.timestamp.load_signing_key(timestamp_privkey)
-
-    # Verify that a writeall() fails if a repository is loaded and a change
-    # is made to a role.
-    repo_tool.load_repository(repository_directory, repository_name)
-
-    repository.timestamp.expiration = datetime.datetime(2030, 1, 1, 12, 0)
-    self.assertRaises(tuf.exceptions.UnsignedMetadataError, repository.writeall)
-
-    # Load the required Timestamp key so that a valid repository can be written.
-    repository.timestamp.load_signing_key(timestamp_privkey)
-    repository.writeall()
-
-    # Test creation of a consistent snapshot repository.  Writing a consistent
-    # snapshot modifies the Root metadata, which specifies whether a repository
-    # supports consistent snapshots.  Verify that an exception is raised due to
-    # the missing signature of Root.
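The recovery from that expected failure is the general pattern for turning on consistent snapshots: Root changes, so Root must be able to re-sign, and only roles marked dirty get rewritten. A minimal sketch of the sequence the test performs next, where `repository` and the `*_privkey` objects are the fixtures created earlier in this test:

```python
# Sketch of the consistent-snapshot recovery performed below. Enabling
# consistent snapshots modifies root.json, so Root must re-sign; Snapshot
# and Timestamp change on every write and must re-sign as well.
repository.root.load_signing_key(root_privkey)
repository.snapshot.load_signing_key(snapshot_privkey)
repository.timestamp.load_signing_key(timestamp_privkey)

# writeall() only rewrites roles that are marked dirty.
repository.mark_dirty(['root', 'snapshot', 'timestamp'])
repository.writeall(consistent_snapshot=True)
```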
-    self.assertRaises(tuf.exceptions.UnsignedMetadataError, repository.writeall, True)
-
-    # Make sure the private keys of Root (a new version is required since Root
-    # will change to enable consistent snapshots), Snapshot, role1, and
-    # Timestamp are loaded before writing the consistent snapshot.
-    repository.root.load_signing_key(root_privkey)
-    repository.snapshot.load_signing_key(snapshot_privkey)
-    # Must also load the Targets signing key, because Targets is re-signed
-    # when updating 'role1'.
-    repository.targets.load_signing_key(targets_privkey)
-    repository.targets('role1').load_signing_key(role1_privkey)
-
-    # Verify that a consistent snapshot can be written and loaded.  The roles
-    # above must be marked as dirty, otherwise writeall() will not create a
-    # consistent snapshot for them.
-    repository.mark_dirty(['role1', 'targets', 'root', 'snapshot', 'timestamp'])
-    repository.writeall(consistent_snapshot=True)
-
-    # Verify that the newly written consistent snapshot can be loaded
-    # successfully.
-    repo_tool.load_repository(repository_directory, repository_name)
-
-    # Verify the behavior of marking and unmarking roles as dirty.
-    # We begin by ensuring that writeall() cleared the list of dirty roles.
-    self.assertEqual([], tuf.roledb.get_dirty_roles(repository_name))
-
-    repository.mark_dirty(['root', 'timestamp'])
-    self.assertEqual(['root', 'timestamp'], tuf.roledb.get_dirty_roles(repository_name))
-    repository.unmark_dirty(['root'])
-    self.assertEqual(['timestamp'], tuf.roledb.get_dirty_roles(repository_name))
-
-    # Ensure status() does not leave behind any dirty roles.
-    repository.status()
-    self.assertEqual(['timestamp'], tuf.roledb.get_dirty_roles(repository_name))
-
-    # Test improperly formatted arguments.
-    self.assertRaises(securesystemslib.exceptions.FormatError, repository.writeall, 3)
-
-
-  def test_writeall_no_files(self):
-    # Test writeall() when using pre-supplied fileinfo.
-
-    repository_name = 'test_repository'
-    temporary_directory = tempfile.mkdtemp(dir=self.temporary_directory)
-    repository_directory = os.path.join(temporary_directory, 'repository')
-    targets_directory = os.path.join(repository_directory,
-        repo_tool.TARGETS_DIRECTORY_NAME)
-
-    repository = repo_tool.create_new_repository(repository_directory, repository_name)
-
-    # (1) Load the public and private keys of the top-level roles.
-    keystore_directory = os.path.join('repository_data', 'keystore')
-
-    # Load the public keys.
-    root_pubkey_path = os.path.join(keystore_directory, 'root_key.pub')
-    targets_pubkey_path = os.path.join(keystore_directory, 'targets_key.pub')
-    snapshot_pubkey_path = os.path.join(keystore_directory, 'snapshot_key.pub')
-    timestamp_pubkey_path = os.path.join(keystore_directory, 'timestamp_key.pub')
-
-    root_pubkey = repo_tool.import_rsa_publickey_from_file(root_pubkey_path)
-    targets_pubkey = \
-      repo_tool.import_ed25519_publickey_from_file(targets_pubkey_path)
-    snapshot_pubkey = \
-      repo_tool.import_ed25519_publickey_from_file(snapshot_pubkey_path)
-    timestamp_pubkey = \
-      repo_tool.import_ed25519_publickey_from_file(timestamp_pubkey_path)
-
-    # Load the private keys.
- root_privkey_path = os.path.join(keystore_directory, 'root_key') - targets_privkey_path = os.path.join(keystore_directory, 'targets_key') - snapshot_privkey_path = os.path.join(keystore_directory, 'snapshot_key') - timestamp_privkey_path = os.path.join(keystore_directory, 'timestamp_key') - - root_privkey = \ - repo_tool.import_rsa_privatekey_from_file(root_privkey_path, 'password') - targets_privkey = \ - repo_tool.import_ed25519_privatekey_from_file(targets_privkey_path, - 'password') - snapshot_privkey = \ - repo_tool.import_ed25519_privatekey_from_file(snapshot_privkey_path, - 'password') - timestamp_privkey = \ - repo_tool.import_ed25519_privatekey_from_file(timestamp_privkey_path, - 'password') - - - # (2) Add top-level verification keys. - repository.root.add_verification_key(root_pubkey) - repository.targets.add_verification_key(targets_pubkey) - repository.snapshot.add_verification_key(snapshot_pubkey) - - # Verify that repository.writeall() fails for insufficient threshold - # of signatures (default threshold = 1). - self.assertRaises(tuf.exceptions.UnsignedMetadataError, repository.writeall) - - repository.timestamp.add_verification_key(timestamp_pubkey) - - - # (3) Load top-level signing keys. - repository.status() - repository.root.load_signing_key(root_privkey) - repository.status() - repository.targets.load_signing_key(targets_privkey) - repository.status() - repository.snapshot.load_signing_key(snapshot_privkey) - repository.status() - - # Verify that repository.writeall() fails for insufficient threshold - # of signatures (default threshold = 1). - self.assertRaises(tuf.exceptions.UnsignedMetadataError, repository.writeall) - - repository.timestamp.load_signing_key(timestamp_privkey) - - # Add target fileinfo - target1_hashes = {'sha256': 'c2986576f5fdfd43944e2b19e775453b96748ec4fe2638a6d2f32f1310967095'} - target2_hashes = {'sha256': '517c0ce943e7274a2431fa5751e17cfd5225accd23e479bfaad13007751e87ef'} - target1_fileinfo = tuf.formats.make_targets_fileinfo(555, target1_hashes) - target2_fileinfo = tuf.formats.make_targets_fileinfo(37, target2_hashes) - target1 = 'file1.txt' - target2 = 'file2.txt' - repository.targets.add_target(target1, fileinfo=target1_fileinfo) - repository.targets.add_target(target2, fileinfo=target2_fileinfo) - - repository.writeall(use_existing_fileinfo=True) - - # Verify that the expected metadata is written. - metadata_directory = os.path.join(repository_directory, - repo_tool.METADATA_STAGED_DIRECTORY_NAME) - - for role in ['root.json', 'targets.json', 'snapshot.json', 'timestamp.json']: - role_filepath = os.path.join(metadata_directory, role) - role_signable = securesystemslib.util.load_json_file(role_filepath) - - # Raise 'securesystemslib.exceptions.FormatError' if 'role_signable' is - # an invalid signable. - tuf.formats.check_signable_object_format(role_signable) - - self.assertTrue(os.path.exists(role_filepath)) - - - - def test_get_filepaths_in_directory(self): - # Test normal case. - # Use the pre-generated metadata directory for testing. - # Set 'repo' reference to improve readability. - repo = repo_tool.Repository - metadata_directory = os.path.join('repository_data', - 'repository', 'metadata') - - # Verify the expected filenames. get_filepaths_in_directory() returns - # a list of absolute paths. - metadata_files = repo.get_filepaths_in_directory(metadata_directory) - - # Construct list of file paths expected, determining absolute paths. 
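Before the directory-walk assertions resume, the `use_existing_fileinfo` pattern verified just above is worth isolating: it lets a repository sign for targets whose bytes never touch the local disk, provided the length and hashes are supplied out of band. A condensed sketch, where `repository` stands in for an initialized repo_tool repository and the length and hash are borrowed from the test fixture above:

```python
import tuf.formats

# Length and hashes supplied out of band, instead of being measured from
# a file under targets/ on the local filesystem.
fileinfo = tuf.formats.make_targets_fileinfo(
    555,
    {'sha256': 'c2986576f5fdfd43944e2b19e775453b96748ec4fe2638a6d2f32f1310967095'})

repository.targets.add_target('file1.txt', fileinfo=fileinfo)
repository.writeall(use_existing_fileinfo=True)
```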
- expected_files = [] - for filepath in ['1.root.json', 'root.json', 'targets.json', - 'snapshot.json', 'timestamp.json', 'role1.json', 'role2.json']: - expected_files.append(os.path.abspath(os.path.join( - 'repository_data', 'repository', 'metadata', filepath))) - - self.assertEqual(sorted(expected_files), sorted(metadata_files)) - - - # Test when the 'recursive_walk' argument is True. - # In this case, recursive walk should yield the same results as the - # previous, non-recursive call. - metadata_files = repo.get_filepaths_in_directory(metadata_directory, - recursive_walk=True) - self.assertEqual(sorted(expected_files), sorted(metadata_files)) - - # And this recursive call from the directory above should yield the same - # results as well, plus extra files. - metadata_files = repo.get_filepaths_in_directory( - os.path.join('repository_data', 'repository'), recursive_walk=True) - for expected_file in expected_files: - self.assertIn(expected_file, metadata_files) - # self.assertEqual(sorted(expected_files), sorted(metadata_files)) - - # Now let's check it against the full list of expected files for the parent - # directory.... We'll add to the existing list. Expect the same files in - # metadata.staged/ as in metadata/, and a few target files in targets/ - # This is somewhat redundant with the previous test, but together they're - # probably more future-proof. - for filepath in ['file1.txt', 'file2.txt', 'file3.txt']: - expected_files.append(os.path.abspath(os.path.join( - 'repository_data', 'repository', 'targets', filepath))) - for filepath in [ '1.root.json', 'root.json', 'targets.json', - 'snapshot.json', 'timestamp.json', 'role1.json', 'role2.json']: - expected_files.append(os.path.abspath(os.path.join( - 'repository_data', 'repository', 'metadata.staged', filepath))) - - self.assertEqual(sorted(expected_files), sorted(metadata_files)) - - # Test improperly formatted arguments. - self.assertRaises(securesystemslib.exceptions.FormatError, repo.get_filepaths_in_directory, - 3, recursive_walk=False, followlinks=False) - self.assertRaises(securesystemslib.exceptions.FormatError, repo.get_filepaths_in_directory, - metadata_directory, 3, followlinks=False) - self.assertRaises(securesystemslib.exceptions.FormatError, repo.get_filepaths_in_directory, - metadata_directory, recursive_walk=False, followlinks=3) - - # Test invalid directory argument. - # A non-directory. - self.assertRaises(securesystemslib.exceptions.Error, repo.get_filepaths_in_directory, - os.path.join(metadata_directory, 'root.json')) - temporary_directory = tempfile.mkdtemp(dir=self.temporary_directory) - nonexistent_directory = os.path.join(temporary_directory, 'nonexistent/') - self.assertRaises(securesystemslib.exceptions.Error, repo.get_filepaths_in_directory, - nonexistent_directory, recursive_walk=False, - followlinks=False) - - - - def test_writeall_abstract_storage(self): - # Test creation of a TUF repository with a custom storage backend to ensure - # that functions relying on a storage backend being supplied operate - # correctly - - - class TestStorageBackend(securesystemslib.storage.StorageBackendInterface): - """ - An implementation of securesystemslib.storage.StorageBackendInterface - which mutates filenames on put()/get(), translating filename in memory - to filename + '.tst' on-disk, such that trying to read the - expected/canonical file paths from local storage doesn't find the TUF - metadata files. 
- """ - - from contextlib import contextmanager - - - @contextmanager - def get(self, filepath): - file_object = open(filepath + '.tst', 'rb') - yield file_object - file_object.close() - - - def put(self, fileobj, filepath): - if not fileobj.closed: - fileobj.seek(0) - - with open(filepath + '.tst', 'wb') as destination_file: - shutil.copyfileobj(fileobj, destination_file) - destination_file.flush() - os.fsync(destination_file.fileno()) - - - def remove(self, filepath): - os.remove(filepath + '.tst') - - - def getsize(self, filepath): - return os.path.getsize(filepath + '.tst') - - - def create_folder(self, filepath): - if not filepath: - return - try: - os.makedirs(filepath) - except OSError as err: - pass - - - def list_folder(self, filepath): - contents = [] - files = os.listdir(filepath) - - for fi in files: - if fi.endswith('.tst'): - contents.append(fi.split('.tst')[0]) - else: - contents.append(fi) - - return contents - - - - # Set up the repository directory - repository_name = 'test_repository' - repository_directory = self.create_repository_directory() - metadata_directory = os.path.join(repository_directory, - repo_tool.METADATA_STAGED_DIRECTORY_NAME) - targets_directory = os.path.join(repository_directory, - repo_tool.TARGETS_DIRECTORY_NAME) - - # TestStorageBackend expects all files on disk to have an additional '.tst' - # file extension - for target in os.listdir(targets_directory): - src = os.path.join(targets_directory, target) - dst = os.path.join(targets_directory, target + '.tst') - os.rename(src, dst) - - # (0) Create a repository with TestStorageBackend() - storage_backend = TestStorageBackend() - repository = repo_tool.create_new_repository(repository_directory, - repository_name, - storage_backend) - - # (1) Load the public and private keys of the top-level roles, and one - # delegated role. - keystore_directory = os.path.join('repository_data', 'keystore') - - # Load the public keys. - root_pubkey_path = os.path.join(keystore_directory, 'root_key.pub') - targets_pubkey_path = os.path.join(keystore_directory, 'targets_key.pub') - snapshot_pubkey_path = os.path.join(keystore_directory, 'snapshot_key.pub') - timestamp_pubkey_path = os.path.join(keystore_directory, 'timestamp_key.pub') - - root_pubkey = repo_tool.import_rsa_publickey_from_file(root_pubkey_path) - targets_pubkey = \ - repo_tool.import_ed25519_publickey_from_file(targets_pubkey_path) - snapshot_pubkey = \ - repo_tool.import_ed25519_publickey_from_file(snapshot_pubkey_path) - timestamp_pubkey = \ - repo_tool.import_ed25519_publickey_from_file(timestamp_pubkey_path) - - # Load the private keys. - root_privkey_path = os.path.join(keystore_directory, 'root_key') - targets_privkey_path = os.path.join(keystore_directory, 'targets_key') - snapshot_privkey_path = os.path.join(keystore_directory, 'snapshot_key') - timestamp_privkey_path = os.path.join(keystore_directory, 'timestamp_key') - - root_privkey = \ - repo_tool.import_rsa_privatekey_from_file(root_privkey_path, 'password') - targets_privkey = \ - repo_tool.import_ed25519_privatekey_from_file(targets_privkey_path, - 'password') - snapshot_privkey = \ - repo_tool.import_ed25519_privatekey_from_file(snapshot_privkey_path, - 'password') - timestamp_privkey = \ - repo_tool.import_ed25519_privatekey_from_file(timestamp_privkey_path, - 'password') - - - # (2) Add top-level verification keys. 
- repository.root.add_verification_key(root_pubkey) - repository.targets.add_verification_key(targets_pubkey) - repository.snapshot.add_verification_key(snapshot_pubkey) - repository.timestamp.add_verification_key(timestamp_pubkey) - - - # (3) Load top-level signing keys. - repository.root.load_signing_key(root_privkey) - repository.targets.load_signing_key(targets_privkey) - repository.snapshot.load_signing_key(snapshot_privkey) - repository.timestamp.load_signing_key(timestamp_privkey) - - - # (4) Add target files. - target1 = 'file1.txt' - target2 = 'file2.txt' - target3 = 'file3.txt' - repository.targets.add_target(target1) - repository.targets.add_target(target2) - repository.targets.add_target(target3) - - # (6) Write repository. - repository.writeall() - - - # Ensure all of the metadata files exist at the mutated file location and - # that those files are valid metadata - for role in ['root.json.tst', 'targets.json.tst', 'snapshot.json.tst', - 'timestamp.json.tst']: - role_filepath = os.path.join(metadata_directory, role) - self.assertTrue(os.path.exists(role_filepath)) - - role_signable = securesystemslib.util.load_json_file(role_filepath) - # Raise 'securesystemslib.exceptions.FormatError' if 'role_signable' is - # an invalid signable. - tuf.formats.check_signable_object_format(role_signable) - - - def test_signature_order(self): - """Test signatures are added to metadata in alphabetical order. """ - # Create empty repo dir and init default repo in memory - repo_dir = tempfile.mkdtemp(dir=self.temporary_directory) - repo = repo_tool.create_new_repository(repo_dir) - - # Dedicate any two existing test keys as root signing keys - for key_name in ["targets_key", "snapshot_key"]: - repo.root.load_signing_key( - repo_tool.import_ed25519_privatekey_from_file( - os.path.join("repository_data", "keystore", key_name), - "password")) - - # Write root metadata with two signatures - repo.write("root") - - # Load signed and written json metadata back into memory - root_metadata_path = os.path.join( - repo_dir, repo_tool.METADATA_STAGED_DIRECTORY_NAME, "root.json") - root_metadata = securesystemslib.util.load_json_file(root_metadata_path) - - # Assert signatures are ordered alphabetically (by signing key keyid) - self.assertListEqual( - [sig["keyid"] for sig in root_metadata["signatures"]], - [ - "59a4df8af818e9ed7abe0764c0b47b4240952aa0d179b5b78346c470ac30278d", - "65171251a9aff5a8b3143a813481cb07f6e0de4eb197c767837fe4491b739093" - ]) - - - -class TestMetadata(unittest.TestCase): - def setUp(self): - # Inherit from the repo_tool.Metadata() base class. All of the methods - # to be tested in TestMetadata require at least 1 role, so create it here - # and set its roleinfo. - - tuf.roledb.create_roledb('test_repository') - tuf.keydb.create_keydb('test_repository') - - class MetadataRole(repo_tool.Metadata): - def __init__(self): - super(MetadataRole, self).__init__() - - self._rolename = 'metadata_role' - self._repository_name = 'test_repository' - - # Expire in 86400 seconds (1 day). 
-        expiration = \
-          tuf.formats.unix_timestamp_to_datetime(int(time.time() + 86400))
-        expiration = expiration.isoformat() + 'Z'
-        roleinfo = {'keyids': [], 'signing_keyids': [], 'threshold': 1,
-                    'signatures': [], 'version': 0,
-                    'consistent_snapshot': False,
-                    'expires': expiration,
-                    'partial_loaded': False}
-
-        tuf.roledb.add_role(self._rolename, roleinfo,
-            repository_name='test_repository')
-
-    self.metadata = MetadataRole()
-
-
-
-  def tearDown(self):
-    tuf.roledb.clear_roledb(clear_all=True)
-    tuf.keydb.clear_keydb(clear_all=True)
-    self.metadata = None
-
-
-
-  def test_rolename(self):
-    base_metadata = repo_tool.Metadata()
-
-    self.assertEqual(base_metadata.rolename, None)
-
-    # Test the sub-classed MetadataRole().
-    self.assertEqual(self.metadata.rolename, 'metadata_role')
-
-
-
-  def test_version(self):
-    # Test version getter, and the default version number.
-    self.assertEqual(self.metadata.version, 0)
-
-    # Test version setter, and verify updated version number.
-    self.metadata.version = 8
-    self.assertEqual(self.metadata.version, 8)
-
-
-
-  def test_threshold(self):
-    # Test threshold getter, and the default threshold number.
-    self.assertEqual(self.metadata.threshold, 1)
-
-    # Test threshold setter, and verify updated threshold number.
-    self.metadata.threshold = 3
-    self.assertEqual(self.metadata.threshold, 3)
-
-
-
-  def test_expiration(self):
-    # Test expiration getter.
-    expiration = self.metadata.expiration
-    self.assertTrue(isinstance(expiration, datetime.datetime))
-
-    # Test expiration setter.
-    self.metadata.expiration = datetime.datetime(2030, 1, 1, 12, 0)
-    expiration = self.metadata.expiration
-    self.assertTrue(isinstance(expiration, datetime.datetime))
-
-    # Test the setter with a datetime that carries microseconds, forcing a
-    # non-zero microseconds value if we are unlucky enough to get a 0.
-    expiration = datetime.datetime.today() + datetime.timedelta(weeks = 1)
-    if expiration.microsecond == 0:
-      expiration = expiration.replace(microsecond = 1)
-
-    self.metadata.expiration = expiration
-    new_expiration = self.metadata.expiration
-    self.assertTrue(isinstance(new_expiration, datetime.datetime))
-
-    # Check that the microseconds value of the expiration is truncated.
-    self.assertTrue(new_expiration.microsecond == 0)
-
-    # Test improperly formatted datetime.
-    try:
-      self.metadata.expiration = '3'
-
-    except securesystemslib.exceptions.FormatError:
-      pass
-
-    else:
-      self.fail('Setter failed to detect improperly formatted datetime.')
-
-
-    # Test invalid argument (i.e., expiration has already expired.)
-    expired_datetime = tuf.formats.unix_timestamp_to_datetime(int(time.time() - 1))
-    try:
-      self.metadata.expiration = expired_datetime
-
-    except securesystemslib.exceptions.Error:
-      pass
-
-    else:
-      self.fail('Setter failed to detect an expired datetime.')
-
-
-
-  def test_keys(self):
-    # Test default case, where a verification key has not been added.
-    self.assertEqual(self.metadata.keys, [])
-
-
-    # Test keys() getter after a verification key has been loaded.
-    key_path = os.path.join('repository_data',
-        'keystore', 'snapshot_key.pub')
-    key_object = repo_tool.import_ed25519_publickey_from_file(key_path)
-    self.metadata.add_verification_key(key_object)
-
-    keyid = key_object['keyid']
-    self.assertEqual([keyid], self.metadata.keys)
-
-
-
-  def test_signing_keys(self):
-    # Test default case, where a signing key has not been added.
-    self.assertEqual(self.metadata.signing_keys, [])
-
-
-    # Test signing_keys() getter after a signing key has been loaded.
-    key_path = os.path.join('repository_data',
-        'keystore', 'root_key')
-    key_object = repo_tool.import_rsa_privatekey_from_file(key_path, 'password')
-    self.metadata.load_signing_key(key_object)
-
-    keyid = key_object['keyid']
-    self.assertEqual([keyid], self.metadata.signing_keys)
-
-
-
-
-
-  def test_add_verification_key(self):
-    # Add a verification key and verify that it was added via (role).keys.
-    key_path = os.path.join('repository_data', 'keystore', 'snapshot_key.pub')
-    key_object = repo_tool.import_ed25519_publickey_from_file(key_path)
-    self.metadata.add_verification_key(key_object)
-
-    keyid = key_object['keyid']
-    self.assertEqual([keyid], self.metadata.keys)
-
-    expiration = \
-      tuf.formats.unix_timestamp_to_datetime(int(time.time() + 86400))
-    expiration = expiration.isoformat() + 'Z'
-    roleinfo = {'keyids': [], 'signing_keyids': [], 'threshold': 1,
-                'signatures': [], 'version': 0,
-                'consistent_snapshot': False, 'expires': expiration,
-                'partial_loaded': False}
-
-    tuf.roledb.add_role('Root', roleinfo, 'test_repository')
-    tuf.roledb.add_role('Targets', roleinfo, 'test_repository')
-    tuf.roledb.add_role('Snapshot', roleinfo, 'test_repository')
-    tuf.roledb.add_role('Timestamp', roleinfo, 'test_repository')
-
-    # Test for different top-level role names.
-    self.metadata._rolename = 'Targets'
-    self.metadata.add_verification_key(key_object)
-    self.metadata._rolename = 'Snapshot'
-    self.metadata.add_verification_key(key_object)
-    self.metadata._rolename = 'Timestamp'
-    self.metadata.add_verification_key(key_object)
-
-    # Test for a given 'expires' argument.
-    expires = datetime.datetime(2030, 1, 1, 12, 0)
-    self.metadata.add_verification_key(key_object, expires)
-
-
-    # Test for an expired 'expires'.
-    expired = datetime.datetime(1984, 1, 1, 12, 0)
-    self.assertRaises(securesystemslib.exceptions.Error,
-        self.metadata.add_verification_key, key_object, expired)
-
-    # Test improperly formatted key argument.
-    self.assertRaises(securesystemslib.exceptions.FormatError, self.metadata.add_verification_key, 3)
-    self.assertRaises(securesystemslib.exceptions.FormatError, self.metadata.add_verification_key, key_object, 3)
-
-
-
-  def test_remove_verification_key(self):
-    # Add a verification key so that remove_verification_key() can be tested.
-    key_path = os.path.join('repository_data',
-        'keystore', 'snapshot_key.pub')
-    key_object = repo_tool.import_ed25519_publickey_from_file(key_path)
-    self.metadata.add_verification_key(key_object)
-
-    keyid = key_object['keyid']
-    self.assertEqual([keyid], self.metadata.keys)
-
-
-    # Test successful removal of the verification key added above.
-    self.metadata.remove_verification_key(key_object)
-    self.assertEqual(self.metadata.keys, [])
-
-
-    # Test improperly formatted argument.
-    self.assertRaises(securesystemslib.exceptions.FormatError, self.metadata.remove_verification_key, 3)
-
-
-    # Test non-existent public key argument.
-    key_path = os.path.join('repository_data',
-        'keystore', 'targets_key.pub')
-    unused_key_object = repo_tool.import_ed25519_publickey_from_file(key_path)
-
-    self.assertRaises(securesystemslib.exceptions.Error, self.metadata.remove_verification_key,
-        unused_key_object)
-
-
-
-  def test_load_signing_key(self):
-    # Test normal case.
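A brief orientation before the remaining key-handling tests: the Metadata API separates declaring who may sign from supplying the key that actually signs. A two-line sketch of the distinction, where `metadata`, `public_key`, and `private_key` are placeholders rather than fixture objects:

```python
# add_verification_key() records a public key in the role's 'keyids',
# i.e. who is trusted to sign; load_signing_key() supplies a private key
# that will actually produce a signature at write time.
metadata.add_verification_key(public_key)
metadata.load_signing_key(private_key)
```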
- key_path = os.path.join('repository_data', - 'keystore', 'snapshot_key') - key_object = repo_tool.import_ed25519_privatekey_from_file(key_path, 'password') - self.metadata.load_signing_key(key_object) - - keyid = key_object['keyid'] - self.assertEqual([keyid], self.metadata.signing_keys) - - - # Test improperly formatted arguments. - self.assertRaises(securesystemslib.exceptions.FormatError, self.metadata.load_signing_key, 3) - - - # Test non-private key. - key_path = os.path.join('repository_data', - 'keystore', 'snapshot_key.pub') - key_object = repo_tool.import_ed25519_publickey_from_file(key_path) - self.assertRaises(securesystemslib.exceptions.Error, self.metadata.load_signing_key, key_object) - - - - def test_unload_signing_key(self): - # Load a signing key so that unload_signing_key() can have a key to unload. - key_path = os.path.join('repository_data', - 'keystore', 'snapshot_key') - key_object = repo_tool.import_ed25519_privatekey_from_file(key_path, 'password') - self.metadata.load_signing_key(key_object) - - keyid = key_object['keyid'] - self.assertEqual([keyid], self.metadata.signing_keys) - - self.metadata.unload_signing_key(key_object) - - self.assertEqual(self.metadata.signing_keys, []) - - - # Test improperly formatted arguments. - self.assertRaises(securesystemslib.exceptions.FormatError, self.metadata.unload_signing_key, 3) - - - # Test non-existent key argument. - key_path = os.path.join('repository_data', - 'keystore', 'targets_key') - unused_key_object = repo_tool.import_ed25519_privatekey_from_file(key_path, - 'password') - - self.assertRaises(securesystemslib.exceptions.Error, self.metadata.unload_signing_key, - unused_key_object) - - - - def test_add_signature(self): - # Test normal case. - # Load signature list from any of pre-generated metadata; needed for - # testing. - metadata_directory = os.path.join('repository_data', - 'repository', 'metadata') - root_filepath = os.path.join(metadata_directory, 'root.json') - root_signable = securesystemslib.util.load_json_file(root_filepath) - signatures = root_signable['signatures'] - - # Add the first signature from the list, as only one is needed. - self.metadata.add_signature(signatures[0]) - self.assertEqual(signatures, self.metadata.signatures) - - # Verify that a signature is added if a 'signatures' entry is not present. - tuf.roledb.create_roledb_from_root_metadata(root_signable['signed'], repository_name='test_repository') - del tuf.roledb._roledb_dict['test_repository']['root']['signatures'] - self.metadata._rolename = 'root' - self.metadata.add_signature(signatures[0]) - - # Add a duplicate signature. - self.metadata.add_signature(signatures[0]) - - # Test improperly formatted signature argument. - self.assertRaises(securesystemslib.exceptions.FormatError, self.metadata.add_signature, 3) - self.assertRaises(securesystemslib.exceptions.FormatError, self.metadata.add_signature, signatures[0], 3) - - - - def test_remove_signature(self): - # Test normal case. - # Add a signature so remove_signature() has some signature to remove. - metadata_directory = os.path.join('repository_data', - 'repository', 'metadata') - root_filepath = os.path.join(metadata_directory, 'root.json') - root_signable = securesystemslib.util.load_json_file(root_filepath) - signatures = root_signable['signatures'] - self.metadata.add_signature(signatures[0]) - - self.metadata.remove_signature(signatures[0]) - self.assertEqual(self.metadata.signatures, []) - - - # Test improperly formatted signature argument. 
-    self.assertRaises(securesystemslib.exceptions.FormatError,
-        self.metadata.remove_signature, 3)
-
-    # Test invalid signature argument (i.e., a signature that has not been
-    # added).  Load an unused signature to be tested.
-    targets_filepath = os.path.join(metadata_directory, 'targets.json')
-    targets_signable = securesystemslib.util.load_json_file(targets_filepath)
-    signatures = targets_signable['signatures']
-
-    self.assertRaises(securesystemslib.exceptions.Error,
-        self.metadata.remove_signature, signatures[0])
-
-
-
-  def test_signatures(self):
-    # Test default case, where no signatures have been added yet.
-    self.assertEqual(self.metadata.signatures, [])
-
-
-    # Test getter after adding an example signature.
-    metadata_directory = os.path.join('repository_data',
-        'repository', 'metadata')
-    root_filepath = os.path.join(metadata_directory, 'root.json')
-    root_signable = securesystemslib.util.load_json_file(root_filepath)
-    signatures = root_signable['signatures']
-
-    # Add the first signature from the list, as only one is needed.
-    self.metadata.add_signature(signatures[0])
-    self.assertEqual(signatures, self.metadata.signatures)
-
-
-
-class TestRoot(unittest.TestCase):
-  def setUp(self):
-    tuf.roledb.create_roledb('test_repository')
-    tuf.keydb.create_keydb('test_repository')
-
-
-
-  def tearDown(self):
-    tuf.roledb.clear_roledb(clear_all=True)
-    tuf.keydb.clear_keydb(clear_all=True)
-
-
-
-  def test_init(self):
-
-    # Test normal case.
-    # Root() subclasses Metadata(), and creates a 'root' role in 'tuf.roledb'.
-    repository_name = 'test_repository'
-    root_object = repo_tool.Root(repository_name)
-    self.assertTrue(isinstance(root_object, repo_tool.Metadata))
-    self.assertTrue(tuf.roledb.role_exists('root', repository_name))
-
-
-
-class TestTimestamp(unittest.TestCase):
-  def setUp(self):
-    tuf.roledb.create_roledb('test_repository')
-    tuf.keydb.create_keydb('test_repository')
-
-
-
-  def tearDown(self):
-    tuf.roledb.clear_roledb(clear_all=True)
-    tuf.keydb.clear_keydb(clear_all=True)
-
-
-
-  def test_init(self):
-
-    # Test normal case.
-    # Timestamp() subclasses Metadata(), and creates a 'timestamp' role in
-    # 'tuf.roledb'.
-    timestamp_object = repo_tool.Timestamp('test_repository')
-    self.assertTrue(isinstance(timestamp_object, repo_tool.Metadata))
-    self.assertTrue(tuf.roledb.role_exists('timestamp', 'test_repository'))
-
-
-
-
-
-class TestSnapshot(unittest.TestCase):
-  def setUp(self):
-    tuf.roledb.create_roledb('test_repository')
-    tuf.keydb.create_keydb('test_repository')
-
-
-
-  def tearDown(self):
-    tuf.roledb.clear_roledb(clear_all=True)
-    tuf.keydb.clear_keydb(clear_all=True)
-
-
-
-  def test_init(self):
-
-    # Test normal case.
-    # Snapshot() subclasses Metadata(), and creates a 'snapshot' role in
-    # 'tuf.roledb'.
-    snapshot_object = repo_tool.Snapshot('test_repository')
-    self.assertTrue(isinstance(snapshot_object, repo_tool.Metadata))
-    self.assertTrue(tuf.roledb.role_exists('snapshot', 'test_repository'))
-
-
-
-
-
-class TestTargets(unittest.TestCase):
-  @classmethod
-  def setUpClass(cls):
-    # Create a temporary directory to store the repository, metadata, and
-    # target files.  'temporary_directory' must be deleted in tearDownClass()
-    # so that temporary files are always removed, even when exceptions occur.
-    cls.temporary_directory = tempfile.mkdtemp(dir=os.getcwd())
-
-
-
-  @classmethod
-  def tearDownClass(cls):
-    # Remove the temporary repository directory, which should contain all the
-    # metadata, targets, and key files generated for the test cases.
-    shutil.rmtree(cls.temporary_directory)
-
-
-
-  def setUp(self):
-    tuf.roledb.create_roledb('test_repository')
-    tuf.keydb.create_keydb('test_repository')
-    temporary_directory = tempfile.mkdtemp(dir=self.temporary_directory)
-    self.targets_directory = os.path.join(temporary_directory, 'repository',
-        'targets')
-    original_targets_directory = os.path.join('repository_data',
-        'repository', 'targets')
-    shutil.copytree(original_targets_directory, self.targets_directory)
-    self.targets_object = repo_tool.Targets(self.targets_directory,
-        repository_name='test_repository')
-
-
-
-  def tearDown(self):
-    tuf.roledb.clear_roledb(clear_all=True)
-    tuf.keydb.clear_keydb(clear_all=True)
-    self.targets_object = None
-
-
-
-  def test_init(self):
-
-    # Test normal case.
-    # Targets() subclasses Metadata(), and creates a 'targets' role in
-    # 'tuf.roledb'.
-    targets_object = repo_tool.Targets('targets_directory/')
-    self.assertTrue(isinstance(targets_object, repo_tool.Metadata))
-    self.assertTrue(tuf.roledb.role_exists('targets'))
-
-    # Custom Targets object rolename.
-    targets_object = repo_tool.Targets('targets_directory/', 'project')
-    self.assertTrue(isinstance(targets_object, repo_tool.Metadata))
-    self.assertTrue(tuf.roledb.role_exists('project'))
-
-    # Custom roleinfo object (i.e., tuf.formats.ROLEDB_SCHEMA).  'keyids' and
-    # 'threshold' are required, the rest are optional.
-    roleinfo = {'keyids':
-          ['66c4cb5fef5e4d62b7013ef1cab4b8a827a36c14056d5603c3a970e21eb30e6f'],
-        'threshold': 8}
-    self.assertTrue(tuf.formats.ROLEDB_SCHEMA.matches(roleinfo))
-
-    targets_object = repo_tool.Targets('targets_directory/', 'package', roleinfo)
-    self.assertTrue(isinstance(targets_object, repo_tool.Metadata))
-    self.assertTrue(tuf.roledb.role_exists('package'))
-
-
-    # Test improperly formatted arguments.
-    self.assertRaises(securesystemslib.exceptions.FormatError, repo_tool.Targets, 3)
-    self.assertRaises(securesystemslib.exceptions.FormatError, repo_tool.Targets, 'targets_directory/', 3)
-    self.assertRaises(securesystemslib.exceptions.FormatError, repo_tool.Targets, 'targets_directory/',
-        'targets', 3)
-
-
-
-  def test_call(self):
-    # Test normal case.
-    # Perform a delegation so that a delegated role can be accessed and tested
-    # through __call__().  Example: {targets_object}('role1').
-    keystore_directory = os.path.join('repository_data', 'keystore')
-    public_keypath = os.path.join(keystore_directory, 'snapshot_key.pub')
-    public_key = repo_tool.import_ed25519_publickey_from_file(public_keypath)
-
-    # Create a Targets() object to be tested.
-    targets_object = repo_tool.Targets(self.targets_directory)
-    targets_object.delegate('role1', [public_key], ['file1.txt'])
-
-    self.assertTrue(isinstance(targets_object('role1'), repo_tool.Targets))
-
-    # Test invalid (i.e., non-delegated) rolename argument.
-    self.assertRaises(tuf.exceptions.UnknownRoleError, targets_object, 'unknown_role')
-
-    # Test improperly formatted argument.
-    self.assertRaises(securesystemslib.exceptions.FormatError, targets_object, 1)
-
-
-
-  def test_get_delegated_rolenames(self):
-    # Test normal case.
-    # Perform two delegations so that get_delegated_rolenames() has roles to
-    # return.
-    keystore_directory = os.path.join('repository_data', 'keystore')
-    public_keypath = os.path.join(keystore_directory, 'snapshot_key.pub')
-    public_key = repo_tool.import_ed25519_publickey_from_file(public_keypath)
-
-    # Set needed arguments by delegate().
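Because these tests mostly call delegate() positionally, the arguments are easy to misread. Here is the same call with its keyword names spelled out; the parameter meanings are inferred from how the surrounding tests use them, and `targets_object` stands in for `self.targets_object`:

```python
targets_object.delegate(
    'tuf',                     # rolename of the delegated role
    public_keys,               # public keys trusted to verify 'tuf' metadata
    [],                        # paths: glob patterns delegated to 'tuf'
    1,                         # threshold of keys that must sign
    terminating=False,         # whether a match stops further searching
    list_of_targets=['file1.txt'],  # target files assigned to 'tuf'
    path_hash_prefixes=None)   # alternative to 'paths'; used by hashed bins
```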
-    public_keys = [public_key]
-    threshold = 1
-
-    self.targets_object.delegate('tuf', public_keys, [], threshold, False,
-        ['file1.txt'], path_hash_prefixes=None)
-
-    self.targets_object.delegate('warehouse', public_keys, [], threshold, False,
-        ['file2.txt'], path_hash_prefixes=None)
-
-    # Test that get_delegated_rolenames() returns the expected delegations.
-    expected_delegated_rolenames = ['tuf', 'warehouse']
-    for delegated_rolename in self.targets_object.get_delegated_rolenames():
-      self.assertIn(delegated_rolename, expected_delegated_rolenames)
-
-
-
-  def test_target_files(self):
-    # Test normal case.
-    # Verify the targets object initially contains zero target files.
-    self.assertEqual(self.targets_object.target_files, {})
-
-    target_filepath = 'file1.txt'
-    self.targets_object.add_target(target_filepath)
-
-    self.assertEqual(len(self.targets_object.target_files), 1)
-    self.assertTrue(target_filepath in self.targets_object.target_files)
-
-
-
-  def test_delegations(self):
-    # Test normal case.
-    # Perform a delegation so that delegations() has a Targets() object to
-    # return.
-    keystore_directory = os.path.join('repository_data', 'keystore')
-    public_keypath = os.path.join(keystore_directory, 'snapshot_key.pub')
-    public_key = repo_tool.import_ed25519_publickey_from_file(public_keypath)
-
-    # Set needed arguments by delegate().
-    public_keys = [public_key]
-    rolename = 'tuf'
-    paths = ['file1.txt']
-    threshold = 1
-
-    self.targets_object.delegate(rolename, public_keys, paths, threshold,
-        terminating=False, list_of_targets=None, path_hash_prefixes=None)
-
-    # Test that a valid Targets() object is returned by delegations().
-    for delegated_object in self.targets_object.delegations:
-      self.assertTrue(isinstance(delegated_object, repo_tool.Targets))
-
-    # For testing / coverage purposes, try to remove a delegated role with the
-    # remove_delegated_role() method.
-    self.targets_object.remove_delegated_role(rolename)
-
-
-
-  def test_add_delegated_role(self):
-    # Test for an invalid targets object.
-    self.assertRaises(securesystemslib.exceptions.FormatError,
-        self.targets_object.add_delegated_role, 'targets', 'bad_object')
-
-
-
-  def test_add_target(self):
-    # Test normal case.
-    # Verify the targets object initially contains zero target files.
-    self.assertEqual(self.targets_object.target_files, {})
-
-    target_filepath = 'file1.txt'
-    self.targets_object.add_target(target_filepath)
-
-    self.assertEqual(len(self.targets_object.target_files), 1)
-    self.assertTrue(target_filepath in self.targets_object.target_files)
-
-    # Test the 'custom' parameter of add_target(), where additional information
-    # may be specified for the target.
-    target2_filepath = 'file2.txt'
-    target2_fullpath = os.path.join(self.targets_directory, target2_filepath)
-
-    # The file permissions of the target (an octal number specifying file
-    # access for owner, group, and others; e.g., 0755).
-    octal_file_permissions = oct(os.stat(target2_fullpath).st_mode)[4:]
-    custom_file_permissions = {'file_permissions': octal_file_permissions}
-    self.targets_object.add_target(target2_filepath, custom_file_permissions)
-
-    self.assertEqual(len(self.targets_object.target_files), 2)
-    self.assertTrue(target2_filepath in self.targets_object.target_files)
-    self.assertEqual(self.targets_object.target_files['file2.txt']['custom'],
-        custom_file_permissions)
-
-    # Attempt to replace a target that has already been added.
-    octal_file_permissions2 = oct(os.stat(target2_fullpath).st_mode)[4:]
-    custom_file_permissions2 = {'file_permissions': octal_file_permissions2}
-    self.targets_object.add_target(target2_filepath, custom_file_permissions2)
-    self.assertEqual(self.targets_object.target_files[target2_filepath]['custom'],
-        custom_file_permissions2)
-
-    # Test improperly formatted arguments.
-    self.assertRaises(securesystemslib.exceptions.FormatError,
-        self.targets_object.add_target, 3)
-    self.assertRaises(securesystemslib.exceptions.FormatError,
-        self.targets_object.add_target, 3, custom_file_permissions)
-    self.assertRaises(securesystemslib.exceptions.FormatError,
-        self.targets_object.add_target, target_filepath, 3)
-
-    # A target path starting with a directory separator
-    self.assertRaises(tuf.exceptions.InvalidNameError,
-        self.targets_object.add_target, '/file1.txt')
-
-    # A target path using a backward slash as a separator
-    self.assertRaises(tuf.exceptions.InvalidNameError,
-        self.targets_object.add_target, 'subdir\\file1.txt')
-
-    # Should not access the file system to check for non-existent files
-    self.targets_object.add_target('non-existent')
-
-
-
-  def test_add_targets(self):
-    # Test normal case.
-    # Verify the targets object initially contains zero target files.
-    self.assertEqual(self.targets_object.target_files, {})
-
-    target1_filepath = 'file1.txt'
-    target2_filepath = 'file2.txt'
-    target3_filepath = 'file3.txt'
-
-    # Add a 'target1_filepath' duplicate for testing purposes
-    # ('target1_filepath' should not be added twice.)
-    target_files = \
-      [target1_filepath, target2_filepath, 'file3.txt', target1_filepath]
-    self.targets_object.add_targets(target_files)
-
-    self.assertEqual(len(self.targets_object.target_files), 3)
-    self.assertEqual(self.targets_object.target_files,
-        {target1_filepath: {}, target2_filepath: {}, target3_filepath: {}})
-
-    # Attempt to replace targets that have already been added.
-    self.targets_object.add_targets(target_files)
-
-    # Test improperly formatted arguments.
-    self.assertRaises(securesystemslib.exceptions.FormatError,
-        self.targets_object.add_targets, 3)
-
-    # A target path starting with a directory separator
-    self.assertRaises(tuf.exceptions.InvalidNameError,
-        self.targets_object.add_targets, ['/file1.txt'])
-
-    # A target path using a backward slash as a separator
-    self.assertRaises(tuf.exceptions.InvalidNameError,
-        self.targets_object.add_targets, ['subdir\\file1.txt'])
-
-    # Check that the addition of the whole list is rolled back in case of a
-    # wrong target path.
-    target_files = self.targets_object.target_files
-    self.assertRaises(tuf.exceptions.InvalidNameError,
-        self.targets_object.add_targets, ['file4.txt', '/file5.txt'])
-    self.assertEqual(self.targets_object.target_files, target_files)
-
-    # Should not access the file system to check for non-existent files
-    self.targets_object.add_targets(['non-existent'])
-
-
-  def test_remove_target(self):
-    # Test normal case.
-    # Verify the targets object initially contains zero target files.
-    self.assertEqual(self.targets_object.target_files, {})
-
-    # Add a target so that remove_target() has something to remove.
-    target_filepath = 'file1.txt'
-    self.targets_object.add_target(target_filepath)
-
-    # Test remove_target()'s behavior.
-    self.targets_object.remove_target(target_filepath)
-    self.assertEqual(self.targets_object.target_files, {})
-
-    # Test improperly formatted arguments.
- self.assertRaises(securesystemslib.exceptions.FormatError, - self.targets_object.remove_target, 3) - - # Test for filepath that hasn't been added yet. - target5_filepath = 'file5.txt' - self.assertRaises(securesystemslib.exceptions.Error, - self.targets_object.remove_target, - target5_filepath) - - - - def test_clear_targets(self): - # Test normal case. - # Verify the targets object initially contains zero target files. - self.assertEqual(self.targets_object.target_files, {}) - - # Add targets, to be tested by clear_targets(). - target1_filepath = 'file1.txt' - target2_filepath = 'file2.txt' - self.targets_object.add_targets([target1_filepath, target2_filepath]) - - self.targets_object.clear_targets() - self.assertEqual(self.targets_object.target_files, {}) - - - - def test_delegate(self): - # Test normal case. - # Need at least one public key and valid target paths required by - # delegate(). - keystore_directory = os.path.join('repository_data', 'keystore') - public_keypath = os.path.join(keystore_directory, 'snapshot_key.pub') - public_key = repo_tool.import_ed25519_publickey_from_file(public_keypath) - - # Set needed arguments by delegate(). - public_keys = [public_key] - rolename = 'tuf' - list_of_targets = ['file1.txt', 'file2.txt'] - threshold = 1 - paths = ['*'] - path_hash_prefixes = ['e3a3', '8fae', 'd543'] - - self.targets_object.delegate(rolename, public_keys, paths, - threshold, terminating=False, list_of_targets=list_of_targets, - path_hash_prefixes=path_hash_prefixes) - - self.assertEqual(self.targets_object.get_delegated_rolenames(), - ['tuf']) - - # Test for delegated paths that do not exist. - # An exception should not be raised for non-existent delegated paths, since - # these paths may not necessarily exist when the delegation is done, - # and also because the delegated paths can be glob patterns. - self.targets_object.delegate(rolename, public_keys, ['non-existent'], - threshold, terminating=False, list_of_targets=list_of_targets, - path_hash_prefixes=path_hash_prefixes) - - # Test for delegated targets that do not exist. - # An exception should not be raised for non-existent delegated targets, - # since at this point the file system should not be accessed yet - self.targets_object.delegate(rolename, public_keys, [], threshold, - terminating=False, list_of_targets=['non-existent.txt'], - path_hash_prefixes=path_hash_prefixes) - - # Test improperly formatted arguments. - self.assertRaises(securesystemslib.exceptions.FormatError, - self.targets_object.delegate, 3, public_keys, paths, threshold, - list_of_targets, path_hash_prefixes) - - self.assertRaises(securesystemslib.exceptions.FormatError, - self.targets_object.delegate, rolename, 3, paths, threshold, - list_of_targets, path_hash_prefixes) - - self.assertRaises(securesystemslib.exceptions.FormatError, - self.targets_object.delegate, rolename, public_keys, 3, threshold, - list_of_targets, path_hash_prefixes) - - self.assertRaises(securesystemslib.exceptions.FormatError, - self.targets_object.delegate, rolename, public_keys, paths, '3', - list_of_targets, path_hash_prefixes) - - self.assertRaises(securesystemslib.exceptions.FormatError, - self.targets_object.delegate, rolename, public_keys, paths, threshold, - 3, path_hash_prefixes) - - self.assertRaises(securesystemslib.exceptions.FormatError, - self.targets_object.delegate, rolename, public_keys, paths, threshold, - list_of_targets, 3) - - # Test invalid arguments (e.g., already delegated 'rolename', non-existent - # files, etc.). 
- # Test duplicate 'rolename' delegation, which should have been delegated - # in the normal case above. - self.assertRaises(securesystemslib.exceptions.Error, - self.targets_object.delegate, rolename, public_keys, paths, threshold, - list_of_targets, path_hash_prefixes) - - # A path or target starting with a directory separator - self.assertRaises(tuf.exceptions.InvalidNameError, - self.targets_object.delegate, rolename, public_keys, ['/*']) - self.assertRaises(tuf.exceptions.InvalidNameError, - self.targets_object.delegate, rolename, public_keys, [], - list_of_targets=['/file1.txt']) - - # A path or target using '\' as a directory separator - self.assertRaises(tuf.exceptions.InvalidNameError, - self.targets_object.delegate, rolename, public_keys, ['subpath\\*']) - self.assertRaises(tuf.exceptions.InvalidNameError, - self.targets_object.delegate, rolename, public_keys, [], - list_of_targets=['subpath\\file1.txt']) - - - - - def test_delegate_hashed_bins(self): - # Test normal case. - keystore_directory = os.path.join('repository_data', 'keystore') - public_keypath = os.path.join(keystore_directory, 'snapshot_key.pub') - public_key = repo_tool.import_ed25519_publickey_from_file(public_keypath) - - # Set needed arguments by delegate_hashed_bins(). - public_keys = [public_key] - list_of_targets = ['file1.txt'] - - - # A helper function to check that the range of prefixes the role is - # delegated for, specified in path_hash_prefixes, matches the range - # implied by the bin, or delegation role, name. - def check_prefixes_match_range(): - roleinfo = tuf.roledb.get_roleinfo(self.targets_object.rolename, - 'test_repository') - have_prefixes = False - - for delegated_role in roleinfo['delegations']['roles']: - if len(delegated_role['path_hash_prefixes']) > 0: - rolename = delegated_role['name'] - prefixes = delegated_role['path_hash_prefixes'] - have_prefixes = True - - if len(prefixes) > 1: - prefix_range = "{}-{}".format(prefixes[0], prefixes[-1]) - else: - prefix_range = prefixes[0] - - self.assertEqual(rolename, prefix_range) - - # We expect at least one delegation with some path_hash_prefixes - self.assertTrue(have_prefixes) - - - # Test delegate_hashed_bins() and verify that 16 hashed bins have - # been delegated in the parent's roleinfo. - self.targets_object.delegate_hashed_bins(list_of_targets, public_keys, - number_of_bins=16) - - # The expected child rolenames, since 'number_of_bins' = 16 - delegated_rolenames = ['0', '1', '2', '3', '4', '5', '6', '7', - '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'] - - self.assertEqual(sorted(self.targets_object.get_delegated_rolenames()), - sorted(delegated_rolenames)) - check_prefixes_match_range() - - # For testing / coverage purposes, try to create delegated bins that - # hold a range of hash prefixes (e.g., bin name: 000-003). - self.targets_object.delegate_hashed_bins(list_of_targets, public_keys, - number_of_bins=512) - check_prefixes_match_range() - - # Test improperly formatted arguments. - self.assertRaises(securesystemslib.exceptions.FormatError, - self.targets_object.delegate_hashed_bins, 3, public_keys, - number_of_bins=1) - self.assertRaises(securesystemslib.exceptions.FormatError, - self.targets_object.delegate_hashed_bins, - list_of_targets, 3, number_of_bins=1) - self.assertRaises(securesystemslib.exceptions.FormatError, - self.targets_object.delegate_hashed_bins, - list_of_targets, public_keys, '1') - - # Test invalid arguments. - # Invalid number of bins, which must be a power of 2. 
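Why must the bin count be a power of two? Each bin owns an equal, contiguous slice of the hexadecimal hash-prefix space, and that space only divides evenly for power-of-two counts, which is what the `check_prefixes_match_range()` helper above verifies. A hypothetical helper illustrating the prefix arithmetic (an illustration, not the library's implementation):

```python
import hashlib

def bin_prefix_for(target_path, number_of_bins=16):
    # The prefix length is however many hex digits are needed to name the
    # last bin (e.g. 1 digit for 16 bins, 3 digits for 512 bins); a target
    # lands in whichever bin owns that prefix of its SHA-256 path hash.
    prefix_length = len('{:x}'.format(number_of_bins - 1))
    digest = hashlib.sha256(target_path.encode('utf-8')).hexdigest()
    return digest[:prefix_length]
```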
-    self.assertRaises(securesystemslib.exceptions.Error,
-        self.targets_object.delegate_hashed_bins,
-        list_of_targets, public_keys, number_of_bins=3)
-
-    # Invalid 'list_of_targets'.
-    # A path or target starting with a directory separator
-    self.assertRaises(tuf.exceptions.InvalidNameError,
-        self.targets_object.delegate_hashed_bins,
-        ['/file1.txt'], public_keys,
-        number_of_bins=2)
-
-    # A path or target using '\' as a directory separator
-    self.assertRaises(tuf.exceptions.InvalidNameError,
-        self.targets_object.delegate_hashed_bins,
-        ['subpath\\file1.txt'], public_keys,
-        number_of_bins=2)
-
-
-  def test_add_target_to_bin(self):
-    # Test normal case.
-    # Delegate the hashed bins so that add_target_to_bin() can be tested.
-    repository_name = 'test_repository'
-    keystore_directory = os.path.join('repository_data', 'keystore')
-    public_keypath = os.path.join(keystore_directory, 'targets_key.pub')
-    public_key = repo_tool.import_ed25519_publickey_from_file(public_keypath)
-    target1_filepath = 'file1.txt'
-
-    # Set needed arguments by delegate_hashed_bins().
-    public_keys = [public_key]
-
-    # Delegate to hashed bins.  The target filepath to be tested is expected
-    # to contain a hash prefix of 'e', and should be available at:
-    # repository.targets('e').
-    self.targets_object.delegate_hashed_bins([], public_keys,
-        number_of_bins=16)
-
-    # Ensure each hashed bin initially contains zero targets.
-    for delegation in self.targets_object.delegations:
-      self.assertEqual(delegation.target_files, {})
-
-    # Add 'target1_filepath' and verify that the relative path of
-    # 'target1_filepath' is added to the correct bin.
-    rolename = self.targets_object.add_target_to_bin(target1_filepath, 16)
-
-    for delegation in self.targets_object.delegations:
-      if delegation.rolename == rolename:
-        self.assertTrue('file1.txt' in delegation.target_files)
-
-      else:
-        self.assertFalse('file1.txt' in delegation.target_files)
-
-    # Test for non-existent delegations and hashed bins.
-    empty_targets_role = repo_tool.Targets(self.targets_directory, 'empty',
-        repository_name=repository_name)
-
-    self.assertRaises(securesystemslib.exceptions.Error,
-        empty_targets_role.add_target_to_bin,
-        target1_filepath, 16)
-
-    # Test for a required hashed bin that does not exist.
-    self.targets_object.revoke(rolename)
-    self.assertRaises(securesystemslib.exceptions.Error,
-        self.targets_object.add_target_to_bin,
-        target1_filepath, 16)
-
-    # Test adding a target with fileinfo.
-    target2_hashes = {'sha256': '517c0ce943e7274a2431fa5751e17cfd5225accd23e479bfaad13007751e87ef'}
-    target2_fileinfo = tuf.formats.make_targets_fileinfo(37, target2_hashes)
-    target2_filepath = 'file2.txt'
-
-    rolename = self.targets_object.add_target_to_bin(target2_filepath, 16,
-        fileinfo=target2_fileinfo)
-
-    for delegation in self.targets_object.delegations:
-      if delegation.rolename == rolename:
-        self.assertTrue(target2_filepath in delegation.target_files)
-
-      else:
-        self.assertFalse(target2_filepath in delegation.target_files)
-
-    # Test improperly formatted argument.
-    self.assertRaises(securesystemslib.exceptions.FormatError,
-        self.targets_object.add_target_to_bin, 3, 'foo')
-
-
-
-  def test_remove_target_from_bin(self):
-    # Test normal case.
-    # Delegate the hashed bins so that remove_target_from_bin() can be tested.
- keystore_directory = os.path.join('repository_data', 'keystore') - public_keypath = os.path.join(keystore_directory, 'targets_key.pub') - public_key = repo_tool.import_ed25519_publickey_from_file(public_keypath) - target1_filepath = 'file1.txt' - - # Set needed arguments by delegate_hashed_bins(). - public_keys = [public_key] - - # Delegate to hashed bins. The target filepath to be tested is expected - # to contain a hash prefix of 'e', and can be accessed as: - # repository.targets('e'). - self.targets_object.delegate_hashed_bins([], public_keys, - number_of_bins=16) - - # Ensure each hashed bin initially contains zero targets. - for delegation in self.targets_object.delegations: - self.assertEqual(delegation.target_files, {}) - - # Add 'target1_filepath' and verify that the relative path of - # 'target1_filepath' is added to the correct bin. - added_rolename = self.targets_object.add_target_to_bin(target1_filepath, 16) - - for delegation in self.targets_object.delegations: - if delegation.rolename == added_rolename: - self.assertTrue('file1.txt' in delegation.target_files) - self.assertTrue(len(delegation.target_files) == 1) - else: - self.assertTrue('file1.txt' not in delegation.target_files) - - # Test the remove_target_from_bin() method. Verify that 'target1_filepath' - # has been removed. - removed_rolename = self.targets_object.remove_target_from_bin(target1_filepath, 16) - self.assertEqual(added_rolename, removed_rolename) - - for delegation in self.targets_object.delegations: - self.assertTrue(target1_filepath not in delegation.target_files) - - - # Test improperly formatted argument. - self.assertRaises(securesystemslib.exceptions.FormatError, - self.targets_object.remove_target_from_bin, 3, 'foo') - - # Invalid target file path argument. - self.assertRaises(securesystemslib.exceptions.Error, - self.targets_object.remove_target_from_bin, 'non-existent', 16) - - - - def test_default_bin_num(self): - # Test creating, adding to and removing from hashed bins with the default - # number of bins - keystore_directory = os.path.join('repository_data', 'keystore') - public_keypath = os.path.join(keystore_directory, 'snapshot_key.pub') - public_key = repo_tool.import_ed25519_publickey_from_file(public_keypath) - target1_filepath = os.path.join(self.targets_directory, 'file1.txt') - - # Set needed arguments by delegate_hashed_bins(). - public_keys = [public_key] - - # Test default parameters for number_of_bins - self.targets_object.delegate_hashed_bins([], public_keys) - - # Ensure each hashed bin initially contains zero targets. - for delegation in self.targets_object.delegations: - self.assertEqual(delegation.target_files, {}) - - # Add 'target1_filepath' and verify that the relative path of - # 'target1_filepath' is added to the correct bin. - added_rolename = self.targets_object.add_target_to_bin(os.path.basename(target1_filepath)) - - for delegation in self.targets_object.delegations: - if delegation.rolename == added_rolename: - self.assertTrue('file1.txt' in delegation.target_files) - - else: - self.assertFalse('file1.txt' in delegation.target_files) - - # Remove target1_filepath and verify that all bins are now empty - removed_rolename = self.targets_object.remove_target_from_bin( - os.path.basename(target1_filepath)) - self.assertEqual(added_rolename, removed_rolename) - - for delegation in self.targets_object.delegations: - self.assertEqual(delegation.target_files, {}) - - - def test_add_paths(self): - # Test normal case. 
- # Perform a delegation so that add_paths() has a child role to delegate a - # path to. - keystore_directory = os.path.join('repository_data', 'keystore') - public_keypath = os.path.join(keystore_directory, 'snapshot_key.pub') - public_key = repo_tool.import_ed25519_publickey_from_file(public_keypath) - - # Set needed arguments by delegate(). - public_keys = [public_key] - rolename = 'tuf' - threshold = 1 - - self.targets_object.delegate(rolename, public_keys, [], threshold, - list_of_targets=None, path_hash_prefixes=None) - - # Delegate an extra role for test coverage (i.e., to later verify that - # delegated paths are not added to a child role that was not requested). - self.targets_object.delegate('junk_role', public_keys, []) - - paths = ['tuf_files/*'] - self.targets_object.add_paths(paths, 'tuf') - - # Retrieve 'targets_object' roleinfo, and verify the roleinfo contains the - # expected delegated paths of the delegated role. - targets_object_roleinfo = tuf.roledb.get_roleinfo(self.targets_object.rolename, - 'test_repository') - - delegated_role = targets_object_roleinfo['delegations']['roles'][0] - self.assertEqual(['tuf_files/*'], delegated_role['paths']) - - # Try to add a delegated path that has already been set. - # add_paths() should simply log a message in this case. - self.targets_object.add_paths(paths, 'tuf') - - # Test improperly formatted arguments. - self.assertRaises(securesystemslib.exceptions.FormatError, - self.targets_object.add_paths, 3, 'tuf') - self.assertRaises(securesystemslib.exceptions.FormatError, - self.targets_object.add_paths, paths, 3) - - - # Test invalid arguments. - # A non-delegated child role. - self.assertRaises(securesystemslib.exceptions.Error, - self.targets_object.add_paths, paths, 'non_delegated_rolename') - - # A path starting with a directory separator - self.assertRaises(tuf.exceptions.InvalidNameError, - self.targets_object.add_paths, ['/tuf_files/*'], 'tuf') - - # A path using a backward slash as a separator - self.assertRaises(tuf.exceptions.InvalidNameError, - self.targets_object.add_paths, ['tuf_files\\*'], 'tuf') - - # add_paths() should not raise an exception for non-existent - # paths, which it previously did. - self.targets_object.add_paths(['non-existent'], 'tuf') - - - - - def test_revoke(self): - # Test normal case. - # Perform a delegation so that revoke() has a delegation to revoke. - keystore_directory = os.path.join('repository_data', 'keystore') - public_keypath = os.path.join(keystore_directory, 'snapshot_key.pub') - public_key = repo_tool.import_ed25519_publickey_from_file(public_keypath) - - # Set needed arguments by delegate(). - public_keys = [public_key] - rolename = 'tuf' - paths = ['file1.txt'] - threshold = 1 - - self.targets_object.delegate(rolename, public_keys, [], threshold, False, - paths, path_hash_prefixes=None) - - # Test revoke() - self.targets_object.revoke('tuf') - self.assertEqual(self.targets_object.get_delegated_rolenames(), []) - - - # Test improperly formatted rolename argument. 
- self.assertRaises(securesystemslib.exceptions.FormatError, self.targets_object.revoke, 3)
-
-
-
- def test_check_path(self):
- # Test that a correct path does not raise an exception: it uses '/' as a
- # separator and does not start with a directory separator.
- self.targets_object._check_path('file1.txt')
-
- # Test that a non-existent path does not raise an exception (_check_path
- # checks only the path string for compliance).
- self.targets_object._check_path('non-existent.txt')
- self.targets_object._check_path('subdir/non-existent')
-
- # Test improperly formatted pathname argument.
- self.assertRaises(securesystemslib.exceptions.FormatError,
- self.targets_object._check_path, 3)
-
- # Test invalid pathnames.
- # Starting with os separator
- self.assertRaises(tuf.exceptions.InvalidNameError,
- self.targets_object._check_path, '/file1.txt')
-
- # Starting with Windows-style separator
- self.assertRaises(tuf.exceptions.InvalidNameError,
- self.targets_object._check_path, '\\file1.txt')
-
- # Using Windows-style separator ('\')
- self.assertRaises(tuf.exceptions.InvalidNameError,
- self.targets_object._check_path, 'subdir\\non-existent')
-
-
-
-class TestRepositoryToolFunctions(unittest.TestCase):
- @classmethod
- def setUpClass(cls):
- # Create a temporary directory to store the repository, metadata, and target
- # files. 'temporary_directory' must be deleted in tearDownClass() so that
- # temporary files are always removed, even when exceptions occur.
- cls.temporary_directory = tempfile.mkdtemp(dir=os.getcwd())
-
-
-
- @classmethod
- def tearDownClass(cls):
- # Remove the temporary repository directory, which should contain all the
- # metadata, targets, and key files generated for the test cases.
- shutil.rmtree(cls.temporary_directory)
-
-
-
- def setUp(self):
- tuf.roledb.create_roledb('test_repository')
- tuf.keydb.create_keydb('test_repository')
-
-
- def tearDown(self):
- tuf.roledb.clear_roledb(clear_all=True)
- tuf.keydb.clear_keydb(clear_all=True)
-
-
-
- def test_create_new_repository(self):
- # Test normal case.
- # Set up the temporary repository directories needed by
- # create_new_repository().
- repository_name = 'test_repository'
- temporary_directory = tempfile.mkdtemp(dir=self.temporary_directory)
- repository_directory = os.path.join(temporary_directory, 'repository')
- metadata_directory = os.path.join(repository_directory,
- repo_tool.METADATA_STAGED_DIRECTORY_NAME)
- targets_directory = os.path.join(repository_directory,
- repo_tool.TARGETS_DIRECTORY_NAME)
-
- repository = repo_tool.create_new_repository(repository_directory,
- repository_name)
- self.assertTrue(isinstance(repository, repo_tool.Repository))
-
- # Verify that the 'repository/', 'repository/metadata', and
- # 'repository/targets' directories were created.
- self.assertTrue(os.path.exists(repository_directory))
- self.assertTrue(os.path.exists(metadata_directory))
- self.assertTrue(os.path.exists(targets_directory))
-
- # Test that the 'repository' directory is created (along with the other
- # sub-directories) when it does not exist yet. The repository tool creates
- # the non-existent directory.
- shutil.rmtree(repository_directory)
-
- repository = repo_tool.create_new_repository(repository_directory,
- repository_name)
- self.assertTrue(isinstance(repository, repo_tool.Repository))
-
- # Verify that the 'repository/', 'repository/metadata', and
- # 'repository/targets' directories were created.
- self.assertTrue(os.path.exists(repository_directory))
- self.assertTrue(os.path.exists(metadata_directory))
- self.assertTrue(os.path.exists(targets_directory))
-
- # Test passing custom arguments to control the computation
- # of length and hashes for timestamp and snapshot roles.
- repository = repo_tool.create_new_repository(repository_directory,
- repository_name, use_timestamp_length=True, use_timestamp_hashes=True,
- use_snapshot_length=True, use_snapshot_hashes=True)
-
- # Verify that the arguments for optional hashes and lengths for
- # snapshot and timestamp are properly set.
- self.assertTrue(repository._use_timestamp_length)
- self.assertTrue(repository._use_timestamp_hashes)
- self.assertTrue(repository._use_snapshot_length)
- self.assertTrue(repository._use_snapshot_hashes)
-
- # Test for a repository name that doesn't exist yet. Note:
- # The 'test_repository' repository name is created in setUp() before this
- # test case is run.
- repository = repo_tool.create_new_repository(repository_directory, 'my-repo')
-
- # Test improperly formatted arguments.
- self.assertRaises(securesystemslib.exceptions.FormatError,
- repo_tool.create_new_repository, 3, repository_name)
-
- # For testing purposes, try to create a repository directory that
- # fails due to an exception other than errno.EEXIST being raised.
- self.assertRaises(securesystemslib.exceptions.StorageError,
- repo_tool.create_new_repository, 'bad' * 2000, repository_name)
-
- # Reset the 'repository_directory' so that the metadata and targets
- # directories can be tested likewise.
- repository_directory = os.path.join(temporary_directory, 'repository')
-
- # The same test as before, but for the metadata and targets directories.
- original_metadata_staged_directory = \
- tuf.repository_tool.METADATA_STAGED_DIRECTORY_NAME
- tuf.repository_tool.METADATA_STAGED_DIRECTORY_NAME = 'bad' * 2000
-
- self.assertRaises(securesystemslib.exceptions.StorageError,
- repo_tool.create_new_repository, repository_directory, repository_name)
-
- # Reset the metadata staged directory so that the targets directory can be
- # tested likewise.
- tuf.repository_tool.METADATA_STAGED_DIRECTORY_NAME = \
- original_metadata_staged_directory
-
- original_targets_directory = tuf.repository_tool.TARGETS_DIRECTORY_NAME
- tuf.repository_tool.TARGETS_DIRECTORY_NAME = 'bad' * 2000
-
- self.assertRaises(securesystemslib.exceptions.StorageError,
- repo_tool.create_new_repository, repository_directory, repository_name)
-
- tuf.repository_tool.TARGETS_DIRECTORY_NAME = \
- original_targets_directory
-
-
-
- def test_load_repository(self):
- # Test normal case.
- temporary_directory = tempfile.mkdtemp(dir=self.temporary_directory)
- original_repository_directory = os.path.join('repository_data',
- 'repository')
-
- repository_directory = os.path.join(temporary_directory, 'repository')
- metadata_directory = os.path.join(repository_directory, 'metadata.staged')
- shutil.copytree(original_repository_directory, repository_directory)
-
- # For testing purposes, add a metadata file with an extension that is
- # not supported, and another with invalid JSON content.
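- # load_repository() is expected to skip both of these files rather than
- # raise an error.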
- invalid_metadata_file = os.path.join(metadata_directory, 'root.xml')
- root_file = os.path.join(metadata_directory, 'root.json')
- shutil.copyfile(root_file, invalid_metadata_file)
- bad_root_content = os.path.join(metadata_directory, 'root_bad.json')
-
- with open(bad_root_content, 'wb') as file_object:
- file_object.write(b'bad')
-
- repository = repo_tool.load_repository(repository_directory)
- self.assertTrue(isinstance(repository, repo_tool.Repository))
- self.assertTrue(isinstance(repository.targets('role1'),
- repo_tool.Targets))
- self.assertTrue(isinstance(repository.targets('role1')('role2'),
- repo_tool.Targets))
-
- # Verify the expected roles have been loaded. See
- # 'tuf/tests/repository_data/repository/'.
- expected_roles = \
- ['root', 'targets', 'snapshot', 'timestamp', 'role1', 'role2']
- for role in tuf.roledb.get_rolenames():
- self.assertTrue(role in expected_roles)
-
- self.assertTrue(len(repository.root.keys))
- self.assertTrue(len(repository.targets.keys))
- self.assertTrue(len(repository.snapshot.keys))
- self.assertTrue(len(repository.timestamp.keys))
- self.assertEqual(1, repository.targets('role1').version)
-
- # It is assumed that the targets role (see tuf/tests/repository_data/)
- # contains 'file1.txt' and 'file2.txt'.
- self.assertTrue('file1.txt' in repository.targets.target_files)
- self.assertTrue('file2.txt' in repository.targets.target_files)
- self.assertTrue('file3.txt' in repository.targets('role1').target_files)
-
- # Test whether targets fileinfo is loaded correctly: read the JSON metadata
- # files separately and then compare with the loaded repository data.
- targets_path = os.path.join(metadata_directory, 'targets.json')
- role1_path = os.path.join(metadata_directory, 'role1.json')
-
- targets_object = securesystemslib.util.load_json_file(targets_path)
- role1_object = securesystemslib.util.load_json_file(role1_path)
-
- targets_fileinfo = targets_object['signed']['targets']
- role1_fileinfo = role1_object['signed']['targets']
-
- repository = repo_tool.load_repository(repository_directory)
-
- self.assertEqual(targets_fileinfo, repository.targets.target_files)
- self.assertEqual(role1_fileinfo, repository.targets('role1').target_files)
-
- # Test for a non-default repository name.
- repository = repo_tool.load_repository(repository_directory, 'my-repo')
-
- # Test improperly formatted arguments.
- self.assertRaises(securesystemslib.exceptions.FormatError,
- repo_tool.load_repository, 3)
-
-
- # Test passing custom arguments to control the computation
- # of length and hashes for timestamp and snapshot roles.
- repository = repo_tool.load_repository(repository_directory,
- 'my-repo', use_timestamp_length=True, use_timestamp_hashes=True,
- use_snapshot_length=True, use_snapshot_hashes=True)
-
- # Verify that the arguments for optional hashes and lengths for
- # snapshot and timestamp are properly set.
- self.assertTrue(repository._use_timestamp_length)
- self.assertTrue(repository._use_timestamp_hashes)
- self.assertTrue(repository._use_snapshot_length)
- self.assertTrue(repository._use_snapshot_hashes)
-
- # Test for an invalid 'repository_directory' (i.e., it does not contain
- # the minimum required metadata).
- root_filepath = os.path.join(repository_directory,
- repo_tool.METADATA_STAGED_DIRECTORY_NAME, 'root.json')
- os.remove(root_filepath)
- self.assertRaises(tuf.exceptions.RepositoryError,
- repo_tool.load_repository, repository_directory)
-
-
-
- def test_dirty_roles(self):
- repository_name = 'test_repository'
- original_repository_directory = os.path.join('repository_data',
- 'repository')
- repository = repo_tool.load_repository(original_repository_directory,
- repository_name)
-
- # dirty_roles() only logs the list of dirty roles.
- repository.dirty_roles()
-
-
-
- def test_dump_signable_metadata(self):
- metadata_directory = os.path.join('repository_data',
- 'repository', 'metadata')
- targets_metadata_file = os.path.join(metadata_directory, 'targets.json')
-
- metadata_content = repo_tool.dump_signable_metadata(targets_metadata_file)
-
- # Test for an invalid targets metadata file.
- self.assertRaises(securesystemslib.exceptions.FormatError,
- repo_tool.dump_signable_metadata, 1)
- self.assertRaises(securesystemslib.exceptions.StorageError,
- repo_tool.dump_signable_metadata, 'bad file path')
-
-
-
- def test_append_signature(self):
- metadata_directory = os.path.join('repository_data',
- 'repository', 'metadata')
- targets_metadata_path = os.path.join(metadata_directory, 'targets.json')
-
- temporary_directory = tempfile.mkdtemp(dir=self.temporary_directory)
- tmp_targets_metadata_path = os.path.join(temporary_directory, 'targets.json')
- shutil.copyfile(targets_metadata_path, tmp_targets_metadata_path)
-
- # Test for normal case.
- targets_metadata = securesystemslib.util.load_json_file(tmp_targets_metadata_path)
- num_signatures = len(targets_metadata['signatures'])
- signature = targets_metadata['signatures'][0]
-
- repo_tool.append_signature(signature, tmp_targets_metadata_path)
-
- # append_signature() should have added exactly one more signature to the
- # written metadata.
- targets_metadata = securesystemslib.util.load_json_file(tmp_targets_metadata_path)
- self.assertEqual(num_signatures + 1, len(targets_metadata['signatures']))
-
- # Test for invalid arguments.
- self.assertRaises(securesystemslib.exceptions.FormatError,
- repo_tool.append_signature, 1, tmp_targets_metadata_path)
-
- self.assertRaises(securesystemslib.exceptions.FormatError,
- repo_tool.append_signature, signature, 1)
-
-
-# Run the test cases.
-if __name__ == '__main__':
- utils.configure_test_logging(sys.argv)
- unittest.main()
diff --git a/tests/test_roledb_old.py b/tests/test_roledb_old.py
deleted file mode 100755
index 04b76e9545..0000000000
--- a/tests/test_roledb_old.py
+++ /dev/null
@@ -1,787 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2012 - 2017, New York University and the TUF contributors
-# SPDX-License-Identifier: MIT OR Apache-2.0
-
-"""
-
- test_roledb_old.py
-
-
- Vladimir Diaz
-
-
- October 2012.
-
-
- See LICENSE-MIT OR LICENSE for licensing information.
-
-
- Unit test for 'roledb.py'.
-"""
-
-import unittest
-import logging
-import sys
-
-import tuf
-import tuf.formats
-import tuf.roledb
-import tuf.exceptions
-import tuf.log
-
-from tests import utils
-
-import securesystemslib
-import securesystemslib.keys
-
-logger = logging.getLogger(__name__)
-
-
-# Generate the three keys to use in our test cases.
-KEYS = []
-for junk in range(3):
- KEYS.append(securesystemslib.keys.generate_rsa_key(2048))
-
-
-
-class TestRoledb(unittest.TestCase):
- def setUp(self):
- tuf.roledb.clear_roledb(clear_all=True)
-
-
-
- def tearDown(self):
- tuf.roledb.clear_roledb(clear_all=True)
-
-
-
- def test_create_roledb(self):
- # Verify that a roledb is created for a named repository.
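- # setUp() calls clear_roledb(clear_all=True), which resets the roledb to
- # contain only the 'default' repository, so exactly one entry is expected
- # before create_roledb() is called.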
- self.assertTrue('default' in tuf.roledb._roledb_dict)
- self.assertEqual(1, len(tuf.roledb._roledb_dict))
-
- repository_name = 'example_repository'
- tuf.roledb.create_roledb(repository_name)
- self.assertEqual(2, len(tuf.roledb._roledb_dict))
- self.assertTrue(repository_name in tuf.roledb._roledb_dict)
-
- # Test for invalid and improperly formatted arguments.
- self.assertRaises(securesystemslib.exceptions.FormatError, tuf.roledb.create_roledb, 123)
- self.assertRaises(securesystemslib.exceptions.InvalidNameError, tuf.roledb.create_roledb, 'default')
-
- # Reset the roledb so that subsequent test functions have access to the
- # original, default roledb.
- tuf.roledb.remove_roledb(repository_name)
-
-
-
- def test_remove_roledb(self):
- # Verify that the named repository is removed from the roledb.
- repository_name = 'example_repository'
-
- rolename = 'targets'
- roleinfo = {'keyids': ['123'], 'threshold': 1}
-
- self.assertRaises(securesystemslib.exceptions.InvalidNameError, tuf.roledb.remove_roledb, 'default')
- tuf.roledb.create_roledb(repository_name)
-
- tuf.roledb.remove_roledb(repository_name)
-
- # remove_roledb() should not raise an exception if a non-existent
- # 'repository_name' is specified.
- tuf.roledb.remove_roledb(repository_name)
-
- # Ensure the roledb is reset to its original, default state. Subsequent
- # test functions expect only the 'default' repository to exist in the roledb.
- tuf.roledb.remove_roledb(repository_name)
-
-
-
- def test_clear_roledb(self):
- # Test for an empty roledb, a length of 1 after adding a role, and finally
- # an empty roledb after calling 'clear_roledb()'.
- self.assertEqual(0, len(tuf.roledb._roledb_dict['default']))
- tuf.roledb._roledb_dict['default']['Root'] = {'keyids': ['123'], 'threshold': 1}
- self.assertEqual(1, len(tuf.roledb._roledb_dict['default']))
- tuf.roledb.clear_roledb()
- self.assertEqual(0, len(tuf.roledb._roledb_dict['default']))
-
- # Verify that the roledb can be cleared for a non-default repository.
- rolename = 'targets'
- roleinfo = {'keyids': ['123'], 'threshold': 1}
-
- repository_name = 'example_repository'
- self.assertRaises(securesystemslib.exceptions.InvalidNameError, tuf.roledb.clear_roledb, repository_name)
- tuf.roledb.create_roledb(repository_name)
- tuf.roledb.add_role(rolename, roleinfo, repository_name)
- self.assertEqual(roleinfo['keyids'], tuf.roledb.get_role_keyids(rolename, repository_name))
- tuf.roledb.clear_roledb(repository_name)
- self.assertFalse(tuf.roledb.role_exists(rolename, repository_name))
-
- # Reset the roledb so that subsequent tests have access to the original,
- # default roledb.
- tuf.roledb.remove_roledb(repository_name)
-
- # Test conditions for invalid and unexpected arguments.
- self.assertRaises(TypeError, tuf.roledb.clear_roledb, 'default', False, 'unexpected_argument')
- self.assertRaises(securesystemslib.exceptions.FormatError, tuf.roledb.clear_roledb, 123)
-
-
-
- def test_add_role(self):
- # Test conditions where the arguments are valid.
- self.assertEqual(0, len(tuf.roledb._roledb_dict['default']))
- rolename = 'targets'
- roleinfo = {'keyids': ['123'], 'threshold': 1}
- rolename2 = 'role1'
- self.assertEqual(None, tuf.roledb.add_role(rolename, roleinfo))
- self.assertEqual(1, len(tuf.roledb._roledb_dict['default']))
- tuf.roledb.clear_roledb()
- self.assertEqual(None, tuf.roledb.add_role(rolename, roleinfo))
- self.assertEqual(1, len(tuf.roledb._roledb_dict['default']))
-
- # Verify that a role can be added to a non-default repository.
- repository_name = 'example_repository' - self.assertRaises(securesystemslib.exceptions.InvalidNameError, tuf.roledb.clear_roledb, - repository_name) - tuf.roledb.create_roledb(repository_name) - tuf.roledb.add_role(rolename, roleinfo, repository_name) - self.assertEqual(roleinfo['keyids'], tuf.roledb.get_role_keyids(rolename, - repository_name)) - - # Reset the roledb so that subsequent tests have access to a default - # roledb. - tuf.roledb.remove_roledb(repository_name) - - # Test conditions where the arguments are improperly formatted. - self.assertRaises(securesystemslib.exceptions.FormatError, tuf.roledb.add_role, None, roleinfo) - self.assertRaises(securesystemslib.exceptions.FormatError, tuf.roledb.add_role, 123, roleinfo) - self.assertRaises(securesystemslib.exceptions.FormatError, tuf.roledb.add_role, [''], roleinfo) - self.assertRaises(securesystemslib.exceptions.FormatError, tuf.roledb.add_role, rolename, None) - self.assertRaises(securesystemslib.exceptions.FormatError, tuf.roledb.add_role, rolename, 123) - self.assertRaises(securesystemslib.exceptions.FormatError, tuf.roledb.add_role, rolename, ['']) - self.assertRaises(securesystemslib.exceptions.FormatError, tuf.roledb.add_role, rolename, roleinfo, 123) - - - # Test condition where the rolename already exists in the role database. - self.assertRaises(tuf.exceptions.RoleAlreadyExistsError, tuf.roledb.add_role, - rolename, roleinfo) - - # Test where the repository name does not exist in the role database. - self.assertRaises(securesystemslib.exceptions.InvalidNameError, tuf.roledb.add_role, - 'new_role', roleinfo, 'non-existent') - - # Test conditions for invalid rolenames. - self.assertRaises(securesystemslib.exceptions.InvalidNameError, tuf.roledb.add_role, ' badrole ', - roleinfo) - self.assertRaises(securesystemslib.exceptions.InvalidNameError, tuf.roledb.add_role, '/badrole/', - roleinfo) - - - - - - def test_role_exists(self): - # Test conditions where the arguments are valid. - rolename = 'targets' - roleinfo = {'keyids': ['123'], 'threshold': 1} - rolename2 = 'role1' - - self.assertEqual(False, tuf.roledb.role_exists(rolename)) - tuf.roledb.add_role(rolename, roleinfo) - tuf.roledb.add_role(rolename2, roleinfo) - self.assertEqual(True, tuf.roledb.role_exists(rolename)) - self.assertEqual(True, tuf.roledb.role_exists(rolename2)) - - # Verify that a role can be queried for a non-default repository. - repository_name = 'example_repository' - self.assertRaises(securesystemslib.exceptions.InvalidNameError, tuf.roledb.clear_roledb, repository_name) - self.assertRaises(securesystemslib.exceptions.InvalidNameError, tuf.roledb.role_exists, rolename, repository_name) - - tuf.roledb.create_roledb(repository_name) - self.assertEqual(False, tuf.roledb.role_exists(rolename, repository_name)) - tuf.roledb.add_role(rolename, roleinfo, repository_name) - self.assertTrue(tuf.roledb.role_exists(rolename, repository_name)) - - # Reset the roledb so that subsequent tests have access to the original, - # default roledb. - tuf.roledb.remove_roledb(repository_name) - - # Test conditions where the arguments are improperly formatted. 
- self.assertRaises(securesystemslib.exceptions.FormatError, tuf.roledb.role_exists, None)
- self.assertRaises(securesystemslib.exceptions.FormatError, tuf.roledb.role_exists, 123)
- self.assertRaises(securesystemslib.exceptions.FormatError, tuf.roledb.role_exists, ['rolename'])
- self.assertRaises(securesystemslib.exceptions.FormatError, tuf.roledb.role_exists, rolename, 123)
-
- # Test conditions for invalid rolenames.
- self.assertRaises(securesystemslib.exceptions.InvalidNameError, tuf.roledb.role_exists, '')
- self.assertRaises(securesystemslib.exceptions.InvalidNameError, tuf.roledb.role_exists, ' badrole ')
- self.assertRaises(securesystemslib.exceptions.InvalidNameError, tuf.roledb.role_exists, '/badrole/')
-
-
-
-
-
- def test_remove_role(self):
- # Test conditions where the arguments are valid.
- rolename = 'targets'
- rolename2 = 'release'
- rolename3 = 'django'
- roleinfo = {'keyids': ['123'], 'threshold': 1}
- roleinfo2 = {'keyids': ['123'], 'threshold': 1, 'delegations':
- {'roles': [{'name': 'django', 'keyids': ['456'], 'threshold': 1}],
- 'keys': {'456': {'keytype': 'rsa', 'keyval': {'public': '456'}},
- }}}
-
- tuf.roledb.add_role(rolename, roleinfo)
- tuf.roledb.add_role(rolename2, roleinfo2)
- tuf.roledb.add_role(rolename3, roleinfo)
-
- self.assertEqual(None, tuf.roledb.remove_role(rolename))
- self.assertEqual(True, rolename not in tuf.roledb._roledb_dict['default'])
-
- # Verify that a role can be removed from a non-default repository.
- repository_name = 'example_repository'
- self.assertRaises(securesystemslib.exceptions.InvalidNameError, tuf.roledb.remove_role, rolename, repository_name)
- tuf.roledb.create_roledb(repository_name)
-
- tuf.roledb.add_role(rolename, roleinfo, repository_name)
- self.assertEqual(roleinfo['keyids'], tuf.roledb.get_role_keyids(rolename, repository_name))
- self.assertEqual(None, tuf.roledb.remove_role(rolename, repository_name))
-
- # Verify that a role cannot be removed from a non-existent repository name.
- self.assertRaises(securesystemslib.exceptions.InvalidNameError, tuf.roledb.remove_role, rolename, 'non-existent')
-
- # Reset the roledb so that subsequent tests have access to the original,
- # default roledb.
- tuf.roledb.remove_roledb(repository_name)
-
- # Test conditions where removing a role does not cause the removal of its
- # delegated roles. Only the 'django' role should now exist, after the
- # removal of 'targets' in the previous test condition and the removal
- # of 'release' in the remove_role() call below.
- self.assertEqual(None, tuf.roledb.remove_role(rolename2))
- self.assertEqual(1, len(tuf.roledb._roledb_dict['default']))
-
- # Test conditions where the arguments are improperly formatted,
- # contain invalid names, or haven't been added to the role database.
- self._test_rolename(tuf.roledb.remove_role)
- self.assertRaises(securesystemslib.exceptions.FormatError, tuf.roledb.remove_role, rolename, 123)
-
-
-
-
- def test_get_rolenames(self):
- # Test conditions where the arguments are valid.
- rolename = 'targets'
- rolename2 = 'role1'
- roleinfo = {'keyids': ['123'], 'threshold': 1}
- self.assertEqual([], tuf.roledb.get_rolenames())
- tuf.roledb.add_role(rolename, roleinfo)
- tuf.roledb.add_role(rolename2, roleinfo)
- self.assertEqual(set(['targets', 'role1']),
- set(tuf.roledb.get_rolenames()))
-
- # Verify that rolenames can be retrieved for a role in a non-default
- # repository.
- repository_name = 'example_repository'
- self.assertRaises(securesystemslib.exceptions.InvalidNameError, tuf.roledb.get_rolenames, repository_name)
- tuf.roledb.create_roledb(repository_name)
- tuf.roledb.add_role(rolename, roleinfo, repository_name)
- tuf.roledb.add_role(rolename2, roleinfo, repository_name)
-
- self.assertEqual(set(['targets', 'role1']),
- set(tuf.roledb.get_rolenames(repository_name)))
-
- # Reset the roledb so that subsequent tests have access to the original,
- # default roledb.
- tuf.roledb.remove_roledb(repository_name)
-
- # Test for invalid or improperly formatted arguments.
- self.assertRaises(securesystemslib.exceptions.FormatError, tuf.roledb.get_rolenames, 123)
-
-
-
- def test_get_role_info(self):
- # Test conditions where the arguments are valid.
- rolename = 'targets'
- rolename2 = 'role1'
- roleinfo = {'keyids': ['123'], 'threshold': 1}
- roleinfo2 = {'keyids': ['456', '789'], 'threshold': 2}
- self.assertRaises(tuf.exceptions.UnknownRoleError, tuf.roledb.get_roleinfo, rolename)
- tuf.roledb.add_role(rolename, roleinfo)
- tuf.roledb.add_role(rolename2, roleinfo2)
-
- self.assertEqual(roleinfo, tuf.roledb.get_roleinfo(rolename))
- self.assertEqual(roleinfo2, tuf.roledb.get_roleinfo(rolename2))
-
- # Verify that a roleinfo can be retrieved for a role in a non-default
- # repository.
- repository_name = 'example_repository'
- self.assertRaises(securesystemslib.exceptions.InvalidNameError, tuf.roledb.get_roleinfo,
- rolename, repository_name)
-
- tuf.roledb.create_roledb(repository_name)
- tuf.roledb.add_role(rolename, roleinfo, repository_name)
- self.assertEqual(roleinfo, tuf.roledb.get_roleinfo(rolename, repository_name))
-
- # Verify that a roleinfo cannot be retrieved for a non-existent repository
- # name.
- self.assertRaises(securesystemslib.exceptions.InvalidNameError, tuf.roledb.get_roleinfo, rolename,
- 'non-existent')
-
- # Reset the roledb so that subsequent tests have access to the original,
- # default roledb.
- tuf.roledb.remove_roledb(repository_name)
-
- # Test conditions where the arguments are improperly formatted, contain
- # invalid names, or haven't been added to the role database.
- self._test_rolename(tuf.roledb.get_roleinfo)
- self.assertRaises(securesystemslib.exceptions.FormatError, tuf.roledb.get_roleinfo, rolename, 123)
- self.assertRaises(securesystemslib.exceptions.FormatError, tuf.roledb.get_roleinfo, 123)
-
-
-
- def test_get_role_keyids(self):
- # Test conditions where the arguments are valid.
- rolename = 'targets'
- rolename2 = 'role1'
- roleinfo = {'keyids': ['123'], 'threshold': 1}
- roleinfo2 = {'keyids': ['456', '789'], 'threshold': 2}
- self.assertRaises(tuf.exceptions.UnknownRoleError, tuf.roledb.get_role_keyids, rolename)
- tuf.roledb.add_role(rolename, roleinfo)
- tuf.roledb.add_role(rolename2, roleinfo2)
-
- self.assertEqual(['123'], tuf.roledb.get_role_keyids(rolename))
- self.assertEqual(set(['456', '789']),
- set(tuf.roledb.get_role_keyids(rolename2)))
-
- # Verify that the role keyids can be retrieved for a role in a non-default
- # repository.
- repository_name = 'example_repository'
- self.assertRaises(securesystemslib.exceptions.InvalidNameError, tuf.roledb.get_role_keyids,
- rolename, repository_name)
- tuf.roledb.create_roledb(repository_name)
- tuf.roledb.add_role(rolename, roleinfo, repository_name)
- self.assertEqual(['123'], tuf.roledb.get_role_keyids(rolename, repository_name))
-
- # Verify that role keyids cannot be retrieved from a non-existent repository
- # name.
- self.assertRaises(securesystemslib.exceptions.InvalidNameError, tuf.roledb.get_role_keyids, rolename,
- 'non-existent')
-
- # Reset the roledb so that subsequent tests have access to the original,
- # default roledb.
- tuf.roledb.remove_roledb(repository_name)
-
- # Test conditions where the arguments are improperly formatted, contain
- # invalid names, or haven't been added to the role database.
- self._test_rolename(tuf.roledb.get_role_keyids)
- self.assertRaises(securesystemslib.exceptions.FormatError, tuf.roledb.get_role_keyids, rolename, 123)
-
-
-
- def test_get_role_threshold(self):
- # Test conditions where the arguments are valid.
- rolename = 'targets'
- rolename2 = 'role1'
- roleinfo = {'keyids': ['123'], 'threshold': 1}
- roleinfo2 = {'keyids': ['456', '789'], 'threshold': 2}
- self.assertRaises(tuf.exceptions.UnknownRoleError, tuf.roledb.get_role_threshold, rolename)
- tuf.roledb.add_role(rolename, roleinfo)
- tuf.roledb.add_role(rolename2, roleinfo2)
-
- self.assertEqual(1, tuf.roledb.get_role_threshold(rolename))
- self.assertEqual(2, tuf.roledb.get_role_threshold(rolename2))
-
- # Verify that the threshold can be retrieved for a role in a non-default
- # repository.
- repository_name = 'example_repository'
- self.assertRaises(securesystemslib.exceptions.InvalidNameError, tuf.roledb.get_role_threshold,
- rolename, repository_name)
- tuf.roledb.create_roledb(repository_name)
- tuf.roledb.add_role(rolename, roleinfo, repository_name)
- self.assertEqual(roleinfo['threshold'], tuf.roledb.get_role_threshold(rolename, repository_name))
-
- # Verify that a role's threshold cannot be retrieved from a non-existent
- # repository name.
- self.assertRaises(securesystemslib.exceptions.InvalidNameError, tuf.roledb.get_role_threshold,
- rolename, 'non-existent')
-
- # Reset the roledb so that subsequent tests have access to the original,
- # default roledb.
- tuf.roledb.remove_roledb(repository_name)
-
- # Test conditions where the arguments are improperly formatted,
- # contain invalid names, or haven't been added to the role database.
- self._test_rolename(tuf.roledb.get_role_threshold)
- self.assertRaises(securesystemslib.exceptions.FormatError, tuf.roledb.get_role_threshold, rolename, 123)
-
-
- def test_get_role_paths(self):
- # Test conditions where the arguments are valid.
- rolename = 'targets'
- rolename2 = 'role1'
- roleinfo = {'keyids': ['123'], 'threshold': 1}
- paths = ['a/b', 'c/d']
- roleinfo2 = {'keyids': ['456', '789'], 'threshold': 2, 'paths': paths}
- self.assertRaises(tuf.exceptions.UnknownRoleError, tuf.roledb.get_role_paths, rolename)
- tuf.roledb.add_role(rolename, roleinfo)
- tuf.roledb.add_role(rolename2, roleinfo2)
-
- self.assertEqual({}, tuf.roledb.get_role_paths(rolename))
- self.assertEqual(paths, tuf.roledb.get_role_paths(rolename2))
-
- # Verify that role paths can be queried for roles in non-default
- # repositories.
- repository_name = 'example_repository'
- self.assertRaises(securesystemslib.exceptions.InvalidNameError, tuf.roledb.get_role_paths,
- rolename, repository_name)
-
- tuf.roledb.create_roledb(repository_name)
- tuf.roledb.add_role(rolename2, roleinfo2, repository_name)
- self.assertEqual(roleinfo2['paths'], tuf.roledb.get_role_paths(rolename2,
- repository_name))
-
- # Reset the roledb so that subsequent tests have access to the original,
- # default roledb.
- tuf.roledb.remove_roledb(repository_name)
-
- # Test conditions where the arguments are improperly formatted,
- # contain invalid names, or haven't been added to the role database.
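- # (These shared checks are implemented by the _test_rolename() helper,
- # defined as the last method of this class.)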
- self._test_rolename(tuf.roledb.get_role_paths) - self.assertRaises(securesystemslib.exceptions.FormatError, tuf.roledb.get_role_paths, rolename, 123) - - - - def test_get_delegated_rolenames(self): - # Test conditions where the arguments are valid. - rolename = 'unclaimed' - rolename2 = 'django' - rolename3 = 'release' - rolename4 = 'tuf' - - # unclaimed's roleinfo. - roleinfo = {'keyids': ['123'], 'threshold': 1, 'delegations': - {'roles': [{'name': 'django', 'keyids': ['456'], 'threshold': 1}, - {'name': 'tuf', 'keyids': ['888'], 'threshold': 1}], - 'keys': {'456': {'keytype': 'rsa', 'keyval': {'public': '456'}}, - }}} - - # django's roleinfo. - roleinfo2 = {'keyids': ['456'], 'threshold': 1, 'delegations': - {'roles': [{'name': 'release', 'keyids': ['789'], 'threshold': 1}], - 'keys': {'789': {'keytype': 'rsa', 'keyval': {'public': '789'}}, - }}} - - # release's roleinfo. - roleinfo3 = {'keyids': ['789'], 'threshold': 1, 'delegations': - {'roles': [], - 'keys': {}}} - - # tuf's roleinfo. - roleinfo4 = {'keyids': ['888'], 'threshold': 1, 'delegations': - {'roles': [], - 'keys': {}}} - - self.assertRaises(tuf.exceptions.UnknownRoleError, tuf.roledb.get_delegated_rolenames, - rolename) - - tuf.roledb.add_role(rolename, roleinfo) - tuf.roledb.add_role(rolename2, roleinfo2) - tuf.roledb.add_role(rolename3, roleinfo3) - tuf.roledb.add_role(rolename4, roleinfo4) - - self.assertEqual(set(['django', 'tuf']), - set(tuf.roledb.get_delegated_rolenames(rolename))) - - self.assertEqual(set(['release']), - set(tuf.roledb.get_delegated_rolenames(rolename2))) - - self.assertEqual(set([]), - set(tuf.roledb.get_delegated_rolenames(rolename3))) - - self.assertEqual(set([]), - set(tuf.roledb.get_delegated_rolenames(rolename4))) - - # Verify that the delegated rolenames of a role in a non-default - # repository can be accessed. - repository_name = 'example_repository' - self.assertRaises(securesystemslib.exceptions.InvalidNameError, tuf.roledb.get_delegated_rolenames, - rolename, repository_name) - tuf.roledb.create_roledb(repository_name) - tuf.roledb.add_role(rolename, roleinfo, repository_name) - self.assertEqual(set(['django', 'tuf']), - set(tuf.roledb.get_delegated_rolenames(rolename, repository_name))) - - # Reset the roledb so that subsequent tests have access to the original, - # default roledb. - tuf.roledb.remove_roledb(repository_name) - - # Test conditions where the arguments are improperly formatted, - # contain invalid names, or haven't been added to the role database. - self._test_rolename(tuf.roledb.get_delegated_rolenames) - self.assertRaises(securesystemslib.exceptions.FormatError, tuf.roledb.get_delegated_rolenames, rolename, 123) - - - - def test_create_roledb_from_root_metadata(self): - # Test condition using a valid 'root_metadata' argument. 
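- # Build a minimal Root metadata object (keys plus top-level roles) with
- # build_dict_conforming_to_schema(), then verify that
- # create_roledb_from_root_metadata() populates the role database from it.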
- rsakey = KEYS[0] - keyid = KEYS[0]['keyid'] - rsakey2 = KEYS[1] - keyid2 = KEYS[1]['keyid'] - rsakey3 = KEYS[2] - keyid3 = KEYS[2]['keyid'] - keydict = {keyid: rsakey, keyid2: rsakey2} - roledict = {'root': {'keyids': [keyid], 'threshold': 1}, - 'targets': {'keyids': [keyid2], 'threshold': 1}} - version = 8 - consistent_snapshot = False - expires = '1985-10-21T01:21:00Z' - - root_metadata = tuf.formats.build_dict_conforming_to_schema( - tuf.formats.ROOT_SCHEMA, - _type='root', - spec_version='1.0.0', - version=version, - expires=expires, - keys=keydict, - roles=roledict, - consistent_snapshot=consistent_snapshot) - - self.assertEqual(None, - tuf.roledb.create_roledb_from_root_metadata(root_metadata)) - - # Ensure 'Root' and 'Targets' were added to the role database. - self.assertEqual([keyid], tuf.roledb.get_role_keyids('root')) - self.assertEqual([keyid2], tuf.roledb.get_role_keyids('targets')) - - # Test that a roledb is created for a non-default repository. - repository_name = 'example_repository' - self.assertRaises(securesystemslib.exceptions.InvalidNameError, tuf.roledb.clear_roledb, - repository_name) - tuf.roledb.create_roledb_from_root_metadata(root_metadata, repository_name) - self.assertEqual([keyid], tuf.roledb.get_role_keyids('root', repository_name)) - self.assertEqual([keyid2], tuf.roledb.get_role_keyids('targets', repository_name)) - - # Remove the example repository added to the roledb so that subsequent - # tests have access to an original, default roledb. - tuf.roledb.remove_roledb(repository_name) - - # Test conditions for arguments with invalid formats. - self.assertRaises(securesystemslib.exceptions.FormatError, - tuf.roledb.create_roledb_from_root_metadata, None) - self.assertRaises(securesystemslib.exceptions.FormatError, - tuf.roledb.create_roledb_from_root_metadata, '') - self.assertRaises(securesystemslib.exceptions.FormatError, - tuf.roledb.create_roledb_from_root_metadata, 123) - self.assertRaises(securesystemslib.exceptions.FormatError, - tuf.roledb.create_roledb_from_root_metadata, ['123']) - self.assertRaises(securesystemslib.exceptions.FormatError, - tuf.roledb.create_roledb_from_root_metadata, {'bad': '123'}) - self.assertRaises(securesystemslib.exceptions.FormatError, - tuf.roledb.create_roledb_from_root_metadata, root_metadata, 123) - - # Verify that the expected roles of a Root file are properly loaded. - tuf.roledb.clear_roledb() - roledict = {'root': {'keyids': [keyid], 'threshold': 1}, - 'release': {'keyids': [keyid3], 'threshold': 1}} - version = 8 - - # Add a third key for 'release'. - keydict[keyid3] = rsakey3 - - # Generate 'root_metadata' to verify that 'release' and 'root' are added - # to the role database. - - root_metadata = tuf.formats.build_dict_conforming_to_schema( - tuf.formats.ROOT_SCHEMA, - _type='root', - spec_version='1.0.0', - version=version, - expires=expires, - keys=keydict, - roles=roledict, - consistent_snapshot=consistent_snapshot) - - self.assertEqual(None, - tuf.roledb.create_roledb_from_root_metadata(root_metadata)) - - # Ensure only 'root' and 'release' were added to the role database. - self.assertEqual(2, len(tuf.roledb._roledb_dict['default'])) - self.assertEqual(True, tuf.roledb.role_exists('root')) - self.assertEqual(True, tuf.roledb.role_exists('release')) - - - - def test_update_roleinfo(self): - rolename = 'targets' - roleinfo = {'keyids': ['123'], 'threshold': 1} - tuf.roledb.add_role(rolename, roleinfo) - - # Test normal case. 
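- # update_roleinfo() replaces the stored roleinfo and, unless
- # mark_role_as_dirty is set to False, also marks the role as dirty.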
- tuf.roledb.update_roleinfo(rolename, roleinfo)
-
- # Verify that a roleinfo can be updated for a role in a non-default
- # repository.
- repository_name = 'example_repository'
- mark_role_as_dirty = True
- self.assertRaises(securesystemslib.exceptions.InvalidNameError, tuf.roledb.clear_roledb, repository_name)
- tuf.roledb.create_roledb(repository_name)
- tuf.roledb.add_role(rolename, roleinfo, repository_name)
- tuf.roledb.update_roleinfo(rolename, roleinfo, mark_role_as_dirty, repository_name)
- self.assertEqual(roleinfo['keyids'], tuf.roledb.get_role_keyids(rolename, repository_name))
-
- # Reset the roledb so that subsequent tests can access the default roledb.
- tuf.roledb.remove_roledb(repository_name)
-
- # Test for an unknown role.
- self.assertRaises(tuf.exceptions.UnknownRoleError, tuf.roledb.update_roleinfo,
- 'unknown_rolename', roleinfo)
-
- # Verify that a roleinfo cannot be updated for a non-existent repository
- # name.
- self.assertRaises(securesystemslib.exceptions.InvalidNameError, tuf.roledb.update_roleinfo,
- 'new_rolename', roleinfo, False, 'non-existent')
-
- # Test improperly formatted arguments.
- self.assertRaises(securesystemslib.exceptions.FormatError, tuf.roledb.update_roleinfo, 1, roleinfo)
- self.assertRaises(securesystemslib.exceptions.FormatError, tuf.roledb.update_roleinfo, rolename, 1)
-
- repository_name = 'example_repository'
- mark_role_as_dirty = True
- self.assertRaises(securesystemslib.exceptions.FormatError, tuf.roledb.update_roleinfo, rolename,
- roleinfo, 1, repository_name)
- self.assertRaises(securesystemslib.exceptions.FormatError, tuf.roledb.update_roleinfo,
- rolename, mark_role_as_dirty, 123)
-
-
-
- def test_get_dirty_roles(self):
- # Verify that the dirty roles of a repository are returned.
- rolename = 'targets'
- roleinfo1 = {'keyids': ['123'], 'threshold': 1}
- tuf.roledb.add_role(rolename, roleinfo1)
- roleinfo2 = {'keyids': ['123'], 'threshold': 2}
- mark_role_as_dirty = True
- tuf.roledb.update_roleinfo(rolename, roleinfo2, mark_role_as_dirty)
- # Note: The 'default' repository is searched if the repository name is
- # not given to get_dirty_roles().
- self.assertEqual([rolename], tuf.roledb.get_dirty_roles())
-
- # Verify that a list of dirty roles is returned for a non-default
- # repository.
- repository_name = 'example_repository'
- tuf.roledb.create_roledb(repository_name)
- tuf.roledb.add_role(rolename, roleinfo1, repository_name)
- tuf.roledb.update_roleinfo(rolename, roleinfo2, mark_role_as_dirty, repository_name)
- self.assertEqual([rolename], tuf.roledb.get_dirty_roles(repository_name))
-
- # Verify that dirty roles are not returned for a non-existent repository.
- self.assertRaises(securesystemslib.exceptions.InvalidNameError, tuf.roledb.get_dirty_roles, 'non-existent')
-
- # Reset the roledb so that subsequent tests have access to a default
- # roledb.
- tuf.roledb.remove_roledb(repository_name)
-
- # Test for improperly formatted argument.
- self.assertRaises(securesystemslib.exceptions.FormatError, tuf.roledb.get_dirty_roles, 123)
-
-
-
- def test_mark_dirty(self):
- # Add a dirty role to roledb.
- rolename = 'targets'
- roleinfo1 = {'keyids': ['123'], 'threshold': 1}
- tuf.roledb.add_role(rolename, roleinfo1)
- rolename2 = 'dirty_role'
- roleinfo2 = {'keyids': ['123'], 'threshold': 2}
- mark_role_as_dirty = True
- tuf.roledb.update_roleinfo(rolename, roleinfo1, mark_role_as_dirty)
- # Note: The 'default' repository is searched if the repository name is
- # not given to get_dirty_roles().
- self.assertEqual([rolename], tuf.roledb.get_dirty_roles())
-
- tuf.roledb.mark_dirty(['dirty_role'])
- self.assertEqual([rolename2, rolename], tuf.roledb.get_dirty_roles())
-
- # Verify that a role cannot be marked as dirty for a non-existent
- # repository.
- self.assertRaises(securesystemslib.exceptions.InvalidNameError, tuf.roledb.mark_dirty,
- ['dirty_role'], 'non-existent')
-
-
-
- def test_unmark_dirty(self):
- # Add a dirty role to roledb.
- rolename = 'targets'
- roleinfo1 = {'keyids': ['123'], 'threshold': 1}
- tuf.roledb.add_role(rolename, roleinfo1)
- rolename2 = 'dirty_role'
- roleinfo2 = {'keyids': ['123'], 'threshold': 2}
- tuf.roledb.add_role(rolename2, roleinfo2)
- mark_role_as_dirty = True
- tuf.roledb.update_roleinfo(rolename, roleinfo1, mark_role_as_dirty)
- # Note: The 'default' repository is searched if the repository name is
- # not given to get_dirty_roles().
- self.assertEqual([rolename], tuf.roledb.get_dirty_roles())
- tuf.roledb.update_roleinfo(rolename2, roleinfo2, mark_role_as_dirty)
-
- tuf.roledb.unmark_dirty(['dirty_role'])
- self.assertEqual([rolename], tuf.roledb.get_dirty_roles())
- tuf.roledb.unmark_dirty(['targets'])
- self.assertEqual([], tuf.roledb.get_dirty_roles())
-
- # What happens for a role that isn't dirty? unmark_dirty() should just
- # log a message.
- tuf.roledb.unmark_dirty(['unknown_role'])
-
- # Verify that a role cannot be unmarked as dirty for a non-existent
- # repository.
- self.assertRaises(securesystemslib.exceptions.InvalidNameError, tuf.roledb.unmark_dirty,
- ['dirty_role'], 'non-existent')
-
-
- def _test_rolename(self, test_function):
- # Private function that tests the 'rolename' argument of 'test_function'
- # for format, invalid name, and unknown role exceptions.
-
- # Test conditions where the arguments are improperly formatted.
- self.assertRaises(securesystemslib.exceptions.FormatError, test_function, None)
- self.assertRaises(securesystemslib.exceptions.FormatError, test_function, 123)
- self.assertRaises(securesystemslib.exceptions.FormatError, test_function, ['rolename'])
- self.assertRaises(securesystemslib.exceptions.FormatError, test_function, {'a': 'b'})
- self.assertRaises(securesystemslib.exceptions.FormatError, test_function, ('a', 'b'))
- self.assertRaises(securesystemslib.exceptions.FormatError, test_function, True)
-
- # Test condition where the 'rolename' has not been added to the role database.
- self.assertRaises(tuf.exceptions.UnknownRoleError, test_function, 'badrole')
-
- # Test conditions for invalid rolenames.
- self.assertRaises(securesystemslib.exceptions.InvalidNameError, test_function, '')
- self.assertRaises(securesystemslib.exceptions.InvalidNameError, test_function, ' badrole ')
- self.assertRaises(securesystemslib.exceptions.InvalidNameError, test_function, '/badrole/')
-
-
-
-def setUpModule():
- # setUpModule() is called before any test cases run.
- # Ensure the roledb has not been modified by a previous test, which may
- # affect assumptions (i.e., an empty roledb) made by the test cases in this
- # unit test.
- tuf.roledb.clear_roledb()
-
-def tearDownModule():
- # tearDownModule() is called after all the tests have run.
- # Ensure we clean up the roledb. Courtesy is contagious, and it begins with
- # test_roledb_old.py.
- tuf.roledb.clear_roledb()
-
-
-
-# Run the unit tests.
-if __name__ == '__main__': - utils.configure_test_logging(sys.argv) - unittest.main() diff --git a/tests/test_root_versioning_integration_old.py b/tests/test_root_versioning_integration_old.py deleted file mode 100755 index 251bdfe6c4..0000000000 --- a/tests/test_root_versioning_integration_old.py +++ /dev/null @@ -1,230 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2016 - 2017, New York University and the TUF contributors -# SPDX-License-Identifier: MIT OR Apache-2.0 - -""" - - test_root_versioning_integration_old.py - - - Evan Cordell. - - - July 21, 2016. - - - See LICENSE-MIT OR LICENSE for licensing information. - - - Test root versioning for efficient root key rotation. -""" - - -import os -import logging -import tempfile -import shutil -import unittest -import sys - -import tuf -import tuf.log -import tuf.formats -import tuf.exceptions -import tuf.roledb -import tuf.keydb -import tuf.repository_tool as repo_tool - -from tests import utils - -import securesystemslib -import securesystemslib.storage - -logger = logging.getLogger(__name__) - -repo_tool.disable_console_log_messages() - - -class TestRepository(unittest.TestCase): - - @classmethod - def setUpClass(cls): - cls.temporary_directory = tempfile.mkdtemp(dir=os.getcwd()) - - @classmethod - def tearDownClass(cls): - shutil.rmtree(cls.temporary_directory) - - def tearDown(self): - tuf.roledb.clear_roledb() - tuf.keydb.clear_keydb() - - def test_init(self): - # Test normal case. - storage_backend = securesystemslib.storage.FilesystemBackend() - repository = repo_tool.Repository('repository_directory/', - 'metadata_directory/', - 'targets_directory/', - storage_backend) - self.assertTrue(isinstance(repository.root, repo_tool.Root)) - self.assertTrue(isinstance(repository.snapshot, repo_tool.Snapshot)) - self.assertTrue(isinstance(repository.timestamp, repo_tool.Timestamp)) - self.assertTrue(isinstance(repository.targets, repo_tool.Targets)) - - # Test improperly formatted arguments. - self.assertRaises(securesystemslib.exceptions.FormatError, repo_tool.Repository, 3, - 'metadata_directory/', 'targets_directory', storage_backend) - self.assertRaises(securesystemslib.exceptions.FormatError, repo_tool.Repository, - 'repository_directory', 3, 'targets_directory', storage_backend) - self.assertRaises(securesystemslib.exceptions.FormatError, repo_tool.Repository, - 'repository_directory', 'metadata_directory', storage_backend, 3) - - - - def test_root_role_versioning(self): - # Test root role versioning - # - # 1. Import public and private keys. - # 2. Add verification keys. - # 3. Load signing keys. - # 4. Add target files. - # 5. Perform delegation. - # 6. writeall() - # - # Copy the target files from 'tuf/tests/repository_data' so that writeall() - # has target fileinfo to include in metadata. - temporary_directory = tempfile.mkdtemp(dir=self.temporary_directory) - targets_directory = os.path.join(temporary_directory, 'repository', - repo_tool.TARGETS_DIRECTORY_NAME) - original_targets_directory = os.path.join('repository_data', - 'repository', 'targets') - shutil.copytree(original_targets_directory, targets_directory) - - # In this case, create_new_repository() creates the 'repository/' - # sub-directory in 'temporary_directory' if it does not exist. 
- repository_directory = os.path.join(temporary_directory, 'repository') - metadata_directory = os.path.join(repository_directory, - repo_tool.METADATA_STAGED_DIRECTORY_NAME) - repository = repo_tool.create_new_repository(repository_directory) - - - - - # (1) Load the public and private keys of the top-level roles, and one - # delegated role. - keystore_directory = os.path.join('repository_data', 'keystore') - - # Load the public keys. - root_pubkey_path = os.path.join(keystore_directory, 'root_key.pub') - targets_pubkey_path = os.path.join(keystore_directory, 'targets_key.pub') - snapshot_pubkey_path = os.path.join(keystore_directory, 'snapshot_key.pub') - timestamp_pubkey_path = os.path.join(keystore_directory, 'timestamp_key.pub') - role1_pubkey_path = os.path.join(keystore_directory, 'delegation_key.pub') - - root_pubkey = repo_tool.import_rsa_publickey_from_file(root_pubkey_path) - targets_pubkey = repo_tool.import_ed25519_publickey_from_file(targets_pubkey_path) - snapshot_pubkey = \ - repo_tool.import_ed25519_publickey_from_file(snapshot_pubkey_path) - timestamp_pubkey = \ - repo_tool.import_ed25519_publickey_from_file(timestamp_pubkey_path) - role1_pubkey = repo_tool.import_ed25519_publickey_from_file(role1_pubkey_path) - - # Load the private keys. - root_privkey_path = os.path.join(keystore_directory, 'root_key') - targets_privkey_path = os.path.join(keystore_directory, 'targets_key') - snapshot_privkey_path = os.path.join(keystore_directory, 'snapshot_key') - timestamp_privkey_path = os.path.join(keystore_directory, 'timestamp_key') - role1_privkey_path = os.path.join(keystore_directory, 'delegation_key') - - root_privkey = \ - repo_tool.import_rsa_privatekey_from_file(root_privkey_path, 'password') - targets_privkey = \ - repo_tool.import_ed25519_privatekey_from_file(targets_privkey_path, 'password') - snapshot_privkey = \ - repo_tool.import_ed25519_privatekey_from_file(snapshot_privkey_path, - 'password') - timestamp_privkey = \ - repo_tool.import_ed25519_privatekey_from_file(timestamp_privkey_path, - 'password') - role1_privkey = \ - repo_tool.import_ed25519_privatekey_from_file(role1_privkey_path, - 'password') - - - # (2) Add top-level verification keys. - repository.root.add_verification_key(root_pubkey) - repository.targets.add_verification_key(targets_pubkey) - repository.snapshot.add_verification_key(snapshot_pubkey) - repository.timestamp.add_verification_key(timestamp_pubkey) - - - # (3) Load top-level signing keys. - repository.root.load_signing_key(root_privkey) - repository.targets.load_signing_key(targets_privkey) - repository.snapshot.load_signing_key(snapshot_privkey) - repository.timestamp.load_signing_key(timestamp_privkey) - - # (4) Add target files. - target1 = 'file1.txt' - target2 = 'file2.txt' - target3 = 'file3.txt' - repository.targets.add_target(target1) - repository.targets.add_target(target2) - - - # (5) Perform delegation. - repository.targets.delegate('role1', [role1_pubkey], [target3]) - repository.targets('role1').load_signing_key(role1_privkey) - - # (6) Write repository. - repository.writeall() - - self.assertTrue(os.path.exists(os.path.join(metadata_directory, 'root.json'))) - self.assertTrue(os.path.exists(os.path.join(metadata_directory, '1.root.json'))) - - - # Verify that the expected metadata is written. 
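- # Only version 1 of root has been written so far, so 'root.json' and
- # '1.root.json' are expected to have identical contents (verified below
- # after a second version is produced by the key change).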
- root_filepath = os.path.join(metadata_directory, 'root.json')
- root_1_filepath = os.path.join(metadata_directory, '1.root.json')
- root_2_filepath = os.path.join(metadata_directory, '2.root.json')
- old_root_signable = securesystemslib.util.load_json_file(root_filepath)
- root_1_signable = securesystemslib.util.load_json_file(root_1_filepath)
-
- # Make a change to the root keys.
- repository.root.add_verification_key(targets_pubkey)
- repository.root.load_signing_key(targets_privkey)
- repository.root.threshold = 2
- repository.writeall()
-
- new_root_signable = securesystemslib.util.load_json_file(root_filepath)
- root_2_signable = securesystemslib.util.load_json_file(root_2_filepath)
-
- for role_signable in [old_root_signable, new_root_signable, root_1_signable, root_2_signable]:
- # Raise 'securesystemslib.exceptions.FormatError' if 'role_signable' is an
- # invalid signable.
- tuf.formats.check_signable_object_format(role_signable)
-
- # Verify the contents of the versioned roots.
- self.assertEqual(old_root_signable, root_1_signable)
- self.assertEqual(new_root_signable, root_2_signable)
-
- self.assertEqual(root_1_signable['signed']['version'], 1)
- self.assertEqual(root_2_signable['signed']['version'], 2)
-
- repository.root.remove_verification_key(root_pubkey)
- repository.root.unload_signing_key(root_privkey)
- repository.root.threshold = 2
-
- # Error: not enough signing keys to satisfy the old threshold.
- self.assertRaises(tuf.exceptions.UnsignedMetadataError, repository.writeall)
-
- # No error: write() ignores root's threshold and allows it to be written
- # to disk partially signed.
- repository.write('root')
-
-
-
-if __name__ == '__main__':
- utils.configure_test_logging(sys.argv)
- unittest.main()
diff --git a/tests/test_sig_old.py b/tests/test_sig_old.py
deleted file mode 100755
index d93659dad0..0000000000
--- a/tests/test_sig_old.py
+++ /dev/null
@@ -1,546 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2012 - 2017, New York University and the TUF contributors
-# SPDX-License-Identifier: MIT OR Apache-2.0
-
-"""
-
- test_sig_old.py
-
-
- Geremy Condra
- Vladimir Diaz
-
-
- February 28, 2012. Based on a previous version of this module.
-
-
- See LICENSE-MIT OR LICENSE for licensing information.
-
-
- Test cases for sig.py.
-"""
-
-import unittest
-import logging
-import copy
-import sys
-
-import tuf
-import tuf.log
-import tuf.formats
-import tuf.keydb
-import tuf.roledb
-import tuf.sig
-import tuf.exceptions
-
-from tests import utils
-
-import securesystemslib
-import securesystemslib.keys
-
-logger = logging.getLogger(__name__)
-
-# Set up the keys to use in our test cases.
-KEYS = []
-for _ in range(3):
- KEYS.append(securesystemslib.keys.generate_rsa_key(2048))
-
-
-
-class TestSig(unittest.TestCase):
- def setUp(self):
- pass
-
- def tearDown(self):
- tuf.roledb.clear_roledb()
- tuf.keydb.clear_keydb()
-
-
- def test_get_signature_status_no_role(self):
- signable = {'signed': 'test', 'signatures': []}
-
- # A valid, but empty signature status.
- sig_status = tuf.sig.get_signature_status(signable)
- self.assertTrue(tuf.formats.SIGNATURESTATUS_SCHEMA.matches(sig_status))
-
- self.assertEqual(0, sig_status['threshold'])
- self.assertEqual([], sig_status['good_sigs'])
- self.assertEqual([], sig_status['bad_sigs'])
- self.assertEqual([], sig_status['unknown_sigs'])
- self.assertEqual([], sig_status['untrusted_sigs'])
- self.assertEqual([], sig_status['unknown_signing_schemes'])
-
- # A valid signable, but non-existent role argument.
- self.assertRaises(tuf.exceptions.UnknownRoleError, - tuf.sig.get_signature_status, signable, 'unknown_role') - - # Should verify we are not adding a duplicate signature - # when doing the following action. Here we know 'signable' - # has only one signature so it's okay. - signed = securesystemslib.formats.encode_canonical(signable['signed']).encode('utf-8') - signable['signatures'].append(securesystemslib.keys.create_signature( - KEYS[0], signed)) - - tuf.keydb.add_key(KEYS[0]) - - # Improperly formatted role. - self.assertRaises(securesystemslib.exceptions.FormatError, - tuf.sig.get_signature_status, signable, 1) - - # Not allowed to call verify() without having specified a role. - args = (signable, None) - self.assertRaises(securesystemslib.exceptions.Error, tuf.sig.verify, *args) - - # Done. Let's remove the added key(s) from the key database. - tuf.keydb.remove_key(KEYS[0]['keyid']) - - - def test_get_signature_status_bad_sig(self): - signable = {'signed' : 'test', 'signatures' : []} - signed = securesystemslib.formats.encode_canonical(signable['signed']).encode('utf-8') - - signable['signatures'].append(securesystemslib.keys.create_signature( - KEYS[0], signed)) - signable['signed'] += 'signature no longer matches signed data' - - tuf.keydb.add_key(KEYS[0]) - threshold = 1 - - roleinfo = tuf.formats.build_dict_conforming_to_schema( - tuf.formats.ROLE_SCHEMA, keyids=[KEYS[0]['keyid']], threshold=threshold) - - tuf.roledb.add_role('Root', roleinfo) - - sig_status = tuf.sig.get_signature_status(signable, 'Root') - - self.assertEqual(1, sig_status['threshold']) - self.assertEqual([], sig_status['good_sigs']) - self.assertEqual([KEYS[0]['keyid']], sig_status['bad_sigs']) - self.assertEqual([], sig_status['unknown_sigs']) - self.assertEqual([], sig_status['untrusted_sigs']) - self.assertEqual([], sig_status['unknown_signing_schemes']) - - self.assertFalse(tuf.sig.verify(signable, 'Root')) - - # Done. Let's remove the added key(s) from the key database. - tuf.keydb.remove_key(KEYS[0]['keyid']) - # Remove the role. - tuf.roledb.remove_role('Root') - - - def test_get_signature_status_unknown_signing_scheme(self): - signable = {'signed' : 'test', 'signatures' : []} - signed = securesystemslib.formats.encode_canonical(signable['signed']).encode('utf-8') - - signable['signatures'].append(securesystemslib.keys.create_signature( - KEYS[0], signed)) - - valid_scheme = KEYS[0]['scheme'] - KEYS[0]['scheme'] = 'unknown_signing_scheme' - tuf.keydb.add_key(KEYS[0]) - threshold = 1 - - roleinfo = tuf.formats.build_dict_conforming_to_schema( - tuf.formats.ROLE_SCHEMA, keyids=[KEYS[0]['keyid']], threshold=threshold) - - tuf.roledb.add_role('root', roleinfo) - - sig_status = tuf.sig.get_signature_status(signable, 'root') - - self.assertEqual(1, sig_status['threshold']) - self.assertEqual([], sig_status['good_sigs']) - self.assertEqual([], sig_status['bad_sigs']) - self.assertEqual([], sig_status['unknown_sigs']) - self.assertEqual([], sig_status['untrusted_sigs']) - self.assertEqual([KEYS[0]['keyid']], - sig_status['unknown_signing_schemes']) - - self.assertFalse(tuf.sig.verify(signable, 'root')) - - # Done. Let's remove the added key(s) from the key database. - KEYS[0]['scheme'] = valid_scheme - tuf.keydb.remove_key(KEYS[0]['keyid']) - # Remove the role. 
- tuf.roledb.remove_role('root') - - - def test_get_signature_status_single_key(self): - signable = {'signed' : 'test', 'signatures' : []} - signed = securesystemslib.formats.encode_canonical(signable['signed']).encode('utf-8') - - signable['signatures'].append(securesystemslib.keys.create_signature( - KEYS[0], signed)) - - threshold = 1 - - roleinfo = tuf.formats.build_dict_conforming_to_schema( - tuf.formats.ROLE_SCHEMA, keyids=[KEYS[0]['keyid']], threshold=threshold) - - tuf.roledb.add_role('Root', roleinfo) - tuf.keydb.add_key(KEYS[0]) - - sig_status = tuf.sig.get_signature_status(signable, 'Root') - - self.assertEqual(1, sig_status['threshold']) - self.assertEqual([KEYS[0]['keyid']], sig_status['good_sigs']) - self.assertEqual([], sig_status['bad_sigs']) - self.assertEqual([], sig_status['unknown_sigs']) - self.assertEqual([], sig_status['untrusted_sigs']) - self.assertEqual([], sig_status['unknown_signing_schemes']) - - self.assertTrue(tuf.sig.verify(signable, 'Root')) - - # Test for an unknown signature when 'role' is left unspecified. - sig_status = tuf.sig.get_signature_status(signable) - - self.assertEqual(0, sig_status['threshold']) - self.assertEqual([], sig_status['good_sigs']) - self.assertEqual([], sig_status['bad_sigs']) - self.assertEqual([KEYS[0]['keyid']], sig_status['unknown_sigs']) - self.assertEqual([], sig_status['untrusted_sigs']) - self.assertEqual([], sig_status['unknown_signing_schemes']) - - # Done. Let's remove the added key(s) from the key database. - tuf.keydb.remove_key(KEYS[0]['keyid']) - # Remove the role. - tuf.roledb.remove_role('Root') - - - def test_get_signature_status_below_threshold(self): - signable = {'signed' : 'test', 'signatures' : []} - signed = securesystemslib.formats.encode_canonical(signable['signed']).encode('utf-8') - - signable['signatures'].append(securesystemslib.keys.create_signature( - KEYS[0], signed)) - - tuf.keydb.add_key(KEYS[0]) - threshold = 2 - - roleinfo = tuf.formats.build_dict_conforming_to_schema( - tuf.formats.ROLE_SCHEMA, - keyids=[KEYS[0]['keyid'], KEYS[2]['keyid']], - threshold=threshold) - - tuf.roledb.add_role('Root', roleinfo) - - sig_status = tuf.sig.get_signature_status(signable, 'Root') - - self.assertEqual(2, sig_status['threshold']) - self.assertEqual([KEYS[0]['keyid']], sig_status['good_sigs']) - self.assertEqual([], sig_status['bad_sigs']) - self.assertEqual([], sig_status['unknown_sigs']) - self.assertEqual([], sig_status['untrusted_sigs']) - self.assertEqual([], sig_status['unknown_signing_schemes']) - - self.assertFalse(tuf.sig.verify(signable, 'Root')) - - # Done. Let's remove the added key(s) from the key database. - tuf.keydb.remove_key(KEYS[0]['keyid']) - - # Remove the role. - tuf.roledb.remove_role('Root') - - - def test_get_signature_status_below_threshold_unrecognized_sigs(self): - signable = {'signed' : 'test', 'signatures' : []} - signed = securesystemslib.formats.encode_canonical(signable['signed']).encode('utf-8') - - # Two keys sign it, but only one of them will be trusted. 
- signable['signatures'].append(securesystemslib.keys.create_signature( - KEYS[0], signed)) - signable['signatures'].append(securesystemslib.keys.create_signature( - KEYS[2], signed)) - - tuf.keydb.add_key(KEYS[0]) - tuf.keydb.add_key(KEYS[1]) - threshold = 2 - - roleinfo = tuf.formats.build_dict_conforming_to_schema( - tuf.formats.ROLE_SCHEMA, - keyids=[KEYS[0]['keyid'], KEYS[1]['keyid']], - threshold=threshold) - - tuf.roledb.add_role('Root', roleinfo) - - sig_status = tuf.sig.get_signature_status(signable, 'Root') - - self.assertEqual(2, sig_status['threshold']) - self.assertEqual([KEYS[0]['keyid']], sig_status['good_sigs']) - self.assertEqual([], sig_status['bad_sigs']) - self.assertEqual([KEYS[2]['keyid']], sig_status['unknown_sigs']) - self.assertEqual([], sig_status['untrusted_sigs']) - self.assertEqual([], sig_status['unknown_signing_schemes']) - - self.assertFalse(tuf.sig.verify(signable, 'Root')) - - # Done. Let's remove the added key(s) from the key database. - tuf.keydb.remove_key(KEYS[0]['keyid']) - tuf.keydb.remove_key(KEYS[1]['keyid']) - - # Remove the role. - tuf.roledb.remove_role('Root') - - - def test_get_signature_status_below_threshold_unauthorized_sigs(self): - signable = {'signed' : 'test', 'signatures' : []} - signed = securesystemslib.formats.encode_canonical(signable['signed']).encode('utf-8') - # Two keys sign it, but one of them is only trusted for a different - # role. - signable['signatures'].append(securesystemslib.keys.create_signature( - KEYS[0], signed)) - signable['signatures'].append(securesystemslib.keys.create_signature( - KEYS[1], signed)) - - tuf.keydb.add_key(KEYS[0]) - tuf.keydb.add_key(KEYS[1]) - threshold = 2 - - roleinfo = tuf.formats.build_dict_conforming_to_schema( - tuf.formats.ROLE_SCHEMA, - keyids=[KEYS[0]['keyid'], KEYS[2]['keyid']], - threshold=threshold) - - tuf.roledb.add_role('Root', roleinfo) - - roleinfo = tuf.formats.build_dict_conforming_to_schema( - tuf.formats.ROLE_SCHEMA, - keyids=[KEYS[1]['keyid'], KEYS[2]['keyid']], - threshold=threshold) - - tuf.roledb.add_role('Release', roleinfo) - - sig_status = tuf.sig.get_signature_status(signable, 'Root') - - self.assertEqual(2, sig_status['threshold']) - self.assertEqual([KEYS[0]['keyid']], sig_status['good_sigs']) - self.assertEqual([], sig_status['bad_sigs']) - self.assertEqual([], sig_status['unknown_sigs']) - self.assertEqual([KEYS[1]['keyid']], sig_status['untrusted_sigs']) - self.assertEqual([], sig_status['unknown_signing_schemes']) - - self.assertFalse(tuf.sig.verify(signable, 'Root')) - - self.assertRaises(tuf.exceptions.UnknownRoleError, - tuf.sig.get_signature_status, signable, 'unknown_role') - - # Done. Let's remove the added key(s) from the key database. - tuf.keydb.remove_key(KEYS[0]['keyid']) - tuf.keydb.remove_key(KEYS[1]['keyid']) - - # Remove the roles. - tuf.roledb.remove_role('Root') - tuf.roledb.remove_role('Release') - - - - def test_check_signatures_no_role(self): - signable = {'signed' : 'test', 'signatures' : []} - signed = securesystemslib.formats.encode_canonical(signable['signed']).encode('utf-8') - - signable['signatures'].append(securesystemslib.keys.create_signature( - KEYS[0], signed)) - - tuf.keydb.add_key(KEYS[0]) - - # No specific role we're considering. It's invalid to use the - # function tuf.sig.verify() without a role specified because - # tuf.sig.verify() is checking trust, as well. - args = (signable, None) - self.assertRaises(securesystemslib.exceptions.Error, tuf.sig.verify, *args) - - # Done. 
Let's remove the added key(s) from the key database. - tuf.keydb.remove_key(KEYS[0]['keyid']) - - - - def test_verify_single_key(self): - signable = {'signed' : 'test', 'signatures' : []} - signed = securesystemslib.formats.encode_canonical(signable['signed']).encode('utf-8') - - signable['signatures'].append(securesystemslib.keys.create_signature( - KEYS[0], signed)) - - tuf.keydb.add_key(KEYS[0]) - threshold = 1 - - roleinfo = tuf.formats.build_dict_conforming_to_schema( - tuf.formats.ROLE_SCHEMA, keyids=[KEYS[0]['keyid']], threshold=threshold) - - tuf.roledb.add_role('Root', roleinfo) - - # This will call verify() and return True if 'signable' is valid, - # False otherwise. - self.assertTrue(tuf.sig.verify(signable, 'Root')) - - # Done. Let's remove the added key(s) from the key database. - tuf.keydb.remove_key(KEYS[0]['keyid']) - - # Remove the roles. - tuf.roledb.remove_role('Root') - - - - def test_verify_must_not_count_duplicate_keyids_towards_threshold(self): - # Create and sign dummy metadata twice with same key - # Note that we use the non-deterministic rsassa-pss signing scheme, so - # creating the signature twice shows that we don't only detect duplicate - # signatures but also different signatures from the same key. - signable = {"signed" : "test", "signatures" : []} - signed = securesystemslib.formats.encode_canonical( - signable["signed"]).encode("utf-8") - signable["signatures"].append( - securesystemslib.keys.create_signature(KEYS[0], signed)) - signable["signatures"].append( - securesystemslib.keys.create_signature(KEYS[0], signed)) - - # 'get_signature_status' uses keys from keydb for verification - tuf.keydb.add_key(KEYS[0]) - - # Assert that 'get_signature_status' returns two good signatures ... - status = tuf.sig.get_signature_status( - signable, "root", keyids=[KEYS[0]["keyid"]], threshold=2) - self.assertTrue(len(status["good_sigs"]) == 2) - - # ... but only one counts towards the threshold - self.assertFalse( - tuf.sig.verify(signable, "root", keyids=[KEYS[0]["keyid"]], threshold=2)) - - # Clean-up keydb - tuf.keydb.remove_key(KEYS[0]["keyid"]) - - - - def test_verify_count_different_keyids_for_same_key_towards_threshold(self): - # Create and sign dummy metadata twice with same key but different keyids - signable = {"signed" : "test", "signatures" : []} - key_sha256 = copy.deepcopy(KEYS[0]) - key_sha256["keyid"] = "deadbeef256" - - key_sha512 = copy.deepcopy(KEYS[0]) - key_sha512["keyid"] = "deadbeef512" - - signed = securesystemslib.formats.encode_canonical( - signable["signed"]).encode("utf-8") - signable["signatures"].append( - securesystemslib.keys.create_signature(key_sha256, signed)) - signable["signatures"].append( - securesystemslib.keys.create_signature(key_sha512, signed)) - - # 'get_signature_status' uses keys from keydb for verification - tuf.keydb.add_key(key_sha256) - tuf.keydb.add_key(key_sha512) - - # Assert that the key only counts toward the threshold once - keyids = [key_sha256["keyid"], key_sha512["keyid"]] - self.assertFalse( - tuf.sig.verify(signable, "root", keyids=keyids, threshold=2)) - - # Clean-up keydb - tuf.keydb.remove_key(key_sha256["keyid"]) - tuf.keydb.remove_key(key_sha512["keyid"]) - - - - def test_verify_unrecognized_sig(self): - signable = {'signed' : 'test', 'signatures' : []} - signed = securesystemslib.formats.encode_canonical(signable['signed']).encode('utf-8') - - # Two keys sign it, but only one of them will be trusted. 
- signable['signatures'].append(securesystemslib.keys.create_signature( - KEYS[0], signed)) - signable['signatures'].append(securesystemslib.keys.create_signature( - KEYS[2], signed)) - - tuf.keydb.add_key(KEYS[0]) - tuf.keydb.add_key(KEYS[1]) - threshold = 2 - - roleinfo = tuf.formats.build_dict_conforming_to_schema( - tuf.formats.ROLE_SCHEMA, - keyids=[KEYS[0]['keyid'], KEYS[1]['keyid']], - threshold=threshold) - - tuf.roledb.add_role('Root', roleinfo) - - self.assertFalse(tuf.sig.verify(signable, 'Root')) - - # Done. Let's remove the added key(s) from the key database. - tuf.keydb.remove_key(KEYS[0]['keyid']) - tuf.keydb.remove_key(KEYS[1]['keyid']) - - # Remove the roles. - tuf.roledb.remove_role('Root') - - - - def test_generate_rsa_signature(self): - signable = {'signed' : 'test', 'signatures' : []} - signed = securesystemslib.formats.encode_canonical(signable['signed']).encode('utf-8') - - signable['signatures'].append(securesystemslib.keys.create_signature( - KEYS[0], signed)) - - self.assertEqual(1, len(signable['signatures'])) - signature = signable['signatures'][0] - self.assertEqual(KEYS[0]['keyid'], signature['keyid']) - - returned_signature = tuf.sig.generate_rsa_signature(signable['signed'], KEYS[0]) - self.assertTrue(securesystemslib.formats.SIGNATURE_SCHEMA.matches(returned_signature)) - - signable['signatures'].append(securesystemslib.keys.create_signature( - KEYS[1], signed)) - - self.assertEqual(2, len(signable['signatures'])) - signature = signable['signatures'][1] - self.assertEqual(KEYS[1]['keyid'], signature['keyid']) - - - - def test_may_need_new_keys(self): - # One untrusted key in 'signable'. - signable = {'signed' : 'test', 'signatures' : []} - signed = securesystemslib.formats.encode_canonical(signable['signed']).encode('utf-8') - - signable['signatures'].append(securesystemslib.keys.create_signature( - KEYS[0], signed)) - - tuf.keydb.add_key(KEYS[1]) - threshold = 1 - - roleinfo = tuf.formats.build_dict_conforming_to_schema( - tuf.formats.ROLE_SCHEMA, keyids=[KEYS[1]['keyid']], threshold=threshold) - - tuf.roledb.add_role('Root', roleinfo) - - sig_status = tuf.sig.get_signature_status(signable, 'Root') - - self.assertTrue(tuf.sig.may_need_new_keys(sig_status)) - - - # Done. Let's remove the added key(s) from the key database. - tuf.keydb.remove_key(KEYS[1]['keyid']) - - # Remove the roles. - tuf.roledb.remove_role('Root') - - - def test_signable_has_invalid_format(self): - # get_signature_status() and verify() validate 'signable' before continuing. - # 'signable' must be of the form: {'signed': , 'signatures': [{}]}. - # Object types are checked as well. - signable = {'not_signed' : 'test', 'signatures' : []} - args = (signable['not_signed'], KEYS[0]) - self.assertRaises(securesystemslib.exceptions.FormatError, tuf.sig.get_signature_status, *args) - - # 'signatures' value must be a list. Let's try a dict. - signable = {'signed' : 'test', 'signatures' : {}} - args = (signable['signed'], KEYS[0]) - self.assertRaises(securesystemslib.exceptions.FormatError, tuf.sig.get_signature_status, *args) - - - -# Run unit test. 
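Before the test runner below, a condensed recap of the API these cases exercise: `tuf.sig.get_signature_status()` tallies signatures against the current keydb/roledb state, and `tuf.sig.verify()` reports whether a role's threshold is met. A self-contained round-trip using only calls that appear above (a sketch, not part of the original module):

```python
# One trusted key, threshold 1, one good signature -> verify() returns True.
key = securesystemslib.keys.generate_rsa_key(2048)
signable = {'signed': 'test', 'signatures': []}
signed = securesystemslib.formats.encode_canonical(
    signable['signed']).encode('utf-8')
signable['signatures'].append(
    securesystemslib.keys.create_signature(key, signed))

tuf.keydb.add_key(key)
tuf.roledb.add_role('Root', tuf.formats.build_dict_conforming_to_schema(
    tuf.formats.ROLE_SCHEMA, keyids=[key['keyid']], threshold=1))

status = tuf.sig.get_signature_status(signable, 'Root')
assert status['good_sigs'] == [key['keyid']]
assert tuf.sig.verify(signable, 'Root')
```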
-if __name__ == '__main__':
- utils.configure_test_logging(sys.argv)
- unittest.main()
diff --git a/tests/test_slow_retrieval_attack_old.py b/tests/test_slow_retrieval_attack_old.py
deleted file mode 100755
index 9f22c88f36..0000000000
--- a/tests/test_slow_retrieval_attack_old.py
+++ /dev/null
@@ -1,216 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2012 - 2017, New York University and the TUF contributors
-# SPDX-License-Identifier: MIT OR Apache-2.0
-
-"""
-<Program Name>
- test_slow_retrieval_attack_old.py
-
-<Author>
- Konstantin Andrianov.
-
-<Started>
- March 13, 2012.
- April 5, 2014.
- Refactored to use the 'unittest' module (test conditions in code, rather
- than verifying text output), use pre-generated repository files, and
- discontinue use of the old repository tools. Expanded comments and modified
- previous setup. -vladimir.v.diaz
-
-<Copyright>
- See LICENSE-MIT OR LICENSE for licensing information.
-
-<Purpose>
- Simulate a slow retrieval attack, where an attacker is able to prevent clients
- from receiving updates by responding to client requests so slowly that updates
- never complete. Test cases included for two types of slow retrievals: data
- that slowly trickles in, and data that is only returned after a long time
- delay. TUF prevents slow retrieval attacks by ensuring the download rate
- does not fall below a required rate (tuf.settings.MIN_AVERAGE_DOWNLOAD_SPEED).
-
- Note: There is no difference between 'updates' and 'target' files.
-
- # TODO: Consider additional tests for slow metadata download. Tests here only
- use slow target download.
-"""
-
-import os
-import tempfile
-import shutil
-import logging
-import unittest
-import sys
-
-import tuf.log
-import tuf.client.updater as updater
-import tuf.unittest_toolbox as unittest_toolbox
-import tuf.repository_tool as repo_tool
-import tuf.roledb
-import tuf.keydb
-
-from tests import utils
-
-logger = logging.getLogger(__name__)
-repo_tool.disable_console_log_messages()
-
-
-
-class TestSlowRetrieval(unittest_toolbox.Modified_TestCase):
-
- def setUp(self):
- # Modified_TestCase can handle temp dir removal.
- unittest_toolbox.Modified_TestCase.setUp(self)
- self.temporary_directory = self.make_temp_directory(directory=os.getcwd())
-
- self.repository_name = 'test_repository1'
-
- # Copy the original repository files provided in the test folder so that
- # any modifications made to repository files are restricted to the copies.
- # The 'repository_data' directory is expected to exist in 'tuf/tests/'.
- original_repository_files = os.path.join(os.getcwd(), 'repository_data')
- temporary_repository_root = tempfile.mkdtemp(dir=self.temporary_directory)
-
- # The original repository, keystore, and client directories will be copied
- # for each test case.
- original_repository = os.path.join(original_repository_files, 'repository')
- original_client = os.path.join(original_repository_files, 'client')
- original_keystore = os.path.join(original_repository_files, 'keystore')
-
- # Save references to the often-needed client repository directories.
- # Test cases need these references to access metadata and target files.
- self.repository_directory = \
- os.path.join(temporary_repository_root, 'repository')
- self.client_directory = os.path.join(temporary_repository_root, 'client')
- self.keystore_directory = os.path.join(temporary_repository_root, 'keystore')
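For orientation, the directory layout the copies in setUp() produce (sketched from the assignments above and below; names as used by this test class):

```python
# <temporary_repository_root>/
#     repository/    metadata/, metadata.staged/, targets/   (served side)
#     client/        test_repository1/metadata/{current,previous}/
#     keystore/      root_key, targets_key, snapshot_key, timestamp_key, ...
```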
- # Copy the original 'repository', 'client', and 'keystore' directories
- # to the temporary repository the test cases can use.
- shutil.copytree(original_repository, self.repository_directory)
- shutil.copytree(original_client, self.client_directory)
- shutil.copytree(original_keystore, self.keystore_directory)
-
-
- # Produce a longer target file than exists in the other test repository
- # data, to provide for a long-duration slow attack. Then we'll write new
- # top-level metadata that includes a hash over that file, and provide that
- # metadata to the client as well.
-
- # The slow retrieval server, in mode 2 (1 byte per second), will only
- # sleep for a total of (target file size) seconds. Add a target file
- # that contains a sufficient number of bytes to trigger a slow retrieval
- # error. A transfer should not be permitted to take 1 second per byte
- # transferred. Because this test is currently expected to fail, I'm
- # limiting the size to 10 bytes (10 seconds) to avoid expected testing
- # delays... Consider increasing it again after the fix, to, e.g., 400.
- total_bytes = 10
-
- repository = repo_tool.load_repository(self.repository_directory)
- file1_filepath = os.path.join(self.repository_directory, 'targets',
- 'file1.txt')
- with open(file1_filepath, 'wb') as file_object:
- data = 'a' * int(round(total_bytes))
- file_object.write(data.encode('utf-8'))
-
- key_file = os.path.join(self.keystore_directory, 'timestamp_key')
- timestamp_private = repo_tool.import_ed25519_privatekey_from_file(key_file,
- 'password')
- key_file = os.path.join(self.keystore_directory, 'snapshot_key')
- snapshot_private = repo_tool.import_ed25519_privatekey_from_file(key_file,
- 'password')
- key_file = os.path.join(self.keystore_directory, 'targets_key')
- targets_private = repo_tool.import_ed25519_privatekey_from_file(key_file,
- 'password')
-
- repository.targets.load_signing_key(targets_private)
- repository.snapshot.load_signing_key(snapshot_private)
- repository.timestamp.load_signing_key(timestamp_private)
-
- repository.writeall()
-
- # Move the staged metadata to the "live" metadata.
- shutil.rmtree(os.path.join(self.repository_directory, 'metadata'))
- shutil.copytree(os.path.join(self.repository_directory, 'metadata.staged'),
- os.path.join(self.repository_directory, 'metadata'))
-
- # Since we've changed the repository metadata in this setup (by lengthening
- # a target file and then writing new metadata), we also have to update the
- # client metadata to get to the expected initial state, where the client
- # knows the right target info (and so expects the right, longer target
- # length).
- # We'll skip using updater.refresh since we don't have a server running,
- # and we'll update the metadata locally, manually.
- shutil.rmtree(os.path.join(
- self.client_directory, self.repository_name, 'metadata', 'current'))
- shutil.copytree(os.path.join(self.repository_directory, 'metadata'),
- os.path.join(self.client_directory, self.repository_name, 'metadata',
- 'current'))
-
- # Set the url prefix required by the 'tuf/client/updater.py' updater.
- # 'path/to/tmp/repository' -> 'localhost:8001/tmp/repository'.
- repository_basepath = self.repository_directory[len(os.getcwd()):]
-
- self.server_process_handler = utils.TestServerProcess(log=logger,
- server='slow_retrieval_server_old.py')
-
- logger.info('Slow Retrieval Server process started.')
-
- url_prefix = 'http://' + utils.TEST_HOST_ADDRESS + ':' \
- + str(self.server_process_handler.port) + repository_basepath
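The defense this setup arranges to trigger is the client-side download-rate check mentioned in the module docstring: the updater aborts any transfer whose average rate drops below `tuf.settings.MIN_AVERAGE_DOWNLOAD_SPEED`. A hedged sketch of the knob (the value is illustrative, not what the test uses):

```python
import tuf.settings

# With a server trickling 1 byte per second, any required average above
# that makes the client raise tuf.exceptions.SlowRetrievalError.
tuf.settings.MIN_AVERAGE_DOWNLOAD_SPEED = 50  # bytes per second (illustrative)
```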
- # Set 'tuf.settings.repositories_directory' to the temporary client
- # directory copied from the original repository files.
- tuf.settings.repositories_directory = self.client_directory
- self.repository_mirrors = {'mirror1': {'url_prefix': url_prefix,
- 'metadata_path': 'metadata',
- 'targets_path': 'targets'}}
-
- # Create the repository instance. The test cases will use this client
- # updater to refresh metadata, fetch target files, etc.
- self.repository_updater = updater.Updater(self.repository_name,
- self.repository_mirrors)
-
-
-
- def tearDown(self):
- tuf.roledb.clear_roledb(clear_all=True)
- tuf.keydb.clear_keydb(clear_all=True)
-
- # Clean up the resources and flush the logged lines (if any).
- self.server_process_handler.clean()
-
- # Remove the temporary directory.
- unittest_toolbox.Modified_TestCase.tearDown(self)
-
-
- def test_delay_before_send(self):
- # Simulate a slow retrieval attack.
- # When the download begins, the server blocks the download for a long
- # time by doing nothing before it sends the first byte of data.
-
- # Verify that the TUF client detects the slow retrieval and refuses to
- # continue the update process.
- try:
- file1_target = self.repository_updater.get_one_valid_targetinfo('file1.txt')
- self.repository_updater.download_target(file1_target, self.client_directory)
-
- # Verify that the specific 'tuf.exceptions.SlowRetrievalError' exception is raised by
- # each mirror.
- except tuf.exceptions.NoWorkingMirrorError as exception:
- for mirror_url, mirror_error in exception.mirror_errors.items():
- url_prefix = self.repository_mirrors['mirror1']['url_prefix']
- url_file = os.path.join(url_prefix, 'targets', 'file1.txt')
-
- # Verify that 'file1.txt' is the culprit.
- self.assertEqual(url_file.replace('\\', '/'), mirror_url)
- self.assertTrue(isinstance(mirror_error, tuf.exceptions.SlowRetrievalError))
-
- else:
- self.fail('TUF did not prevent a slow retrieval attack.')
-
-
-
-if __name__ == '__main__':
- utils.configure_test_logging(sys.argv)
- unittest.main()
diff --git a/tests/test_tutorial_old.py b/tests/test_tutorial_old.py
deleted file mode 100755
index ac33dec86a..0000000000
--- a/tests/test_tutorial_old.py
+++ /dev/null
@@ -1,407 +0,0 @@
-#!/usr/bin/env python
-
-"""
-<Program Name>
- test_tutorial_old.py
-
-<Copyright>
- See LICENSE-MIT OR LICENSE for licensing information.
-
-<Purpose>
- Regression test for the TUF tutorial as laid out in TUTORIAL.md.
- This essentially runs the tutorial and checks some results.
-
- There are a few deviations from the TUTORIAL.md instructions:
- - steps that involve user input (like passphrases) are modified slightly
- to not require user input
- - use of path separators '/' is replaced by join() calls. (We assume that
- when following the tutorial, users will correctly deal with path
- separators for their system if they happen to be using non-Linux systems.)
- - shell instructions are mimicked using Python commands - -""" - - -import unittest -import datetime # part of TUTORIAL.md -import os # part of TUTORIAL.md, but also needed separately -import shutil -import tempfile -import sys -import unittest.mock as mock - -from tuf.repository_tool import * # part of TUTORIAL.md - -from tests import utils - -import securesystemslib.exceptions - -from securesystemslib.formats import encode_canonical # part of TUTORIAL.md -from securesystemslib.keys import create_signature # part of TUTORIAL.md - - -class TestTutorial(unittest.TestCase): - def setUp(self): - self.working_dir = os.getcwd() - self.test_dir = os.path.realpath(tempfile.mkdtemp()) - os.chdir(self.test_dir) - - def tearDown(self): - os.chdir(self.working_dir) - shutil.rmtree(self.test_dir) - - def test_tutorial(self): - """ - Run the TUTORIAL.md tutorial. - Note that anywhere the tutorial provides a command that prompts for the - user to enter a passphrase/password, this test is changed to simply provide - that as an argument. It's not worth trying to arrange automated testing of - the interactive password entry process here. Anywhere user entry has been - skipped from the tutorial instructions, "# Skipping user entry of password" - is written, with the original line below it, starting with ##. - """ - - # ----- Tutorial Section: Keys - - generate_and_write_rsa_keypair(password='password', filepath='root_key', bits=2048) - - # Skipping user entry of password - ## generate_and_write_rsa_keypair_with_prompt('root_key2') - generate_and_write_rsa_keypair(password='password', filepath='root_key2') - - # Tutorial tells users to expect these files to exist: - # ['root_key', 'root_key.pub', 'root_key2', 'root_key2.pub'] - for fname in ['root_key', 'root_key.pub', 'root_key2', 'root_key2.pub']: - self.assertTrue(os.path.exists(fname)) - - # Generate key pair at /path/to/KEYID - fname = generate_and_write_rsa_keypair(password="password") - self.assertTrue(os.path.exists(fname)) - - - # ----- Tutorial Section: Import RSA Keys - - public_root_key = import_rsa_publickey_from_file('root_key.pub') - - # Skipping user entry of password - ## private_root_key = import_rsa_privatekey_from_file('root_key') - private_root_key = import_rsa_privatekey_from_file('root_key', 'password') - - # Skipping user entry of password - ## import_rsa_privatekey_from_file('root_key') - with self.assertRaises(securesystemslib.exceptions.CryptoError): - import_rsa_privatekey_from_file('root_key', 'not_the_real_pw') - - - - # ----- Tutorial Section: Create and Import Ed25519 Keys - - # Skipping user entry of password - ## generate_and_write_ed25519_keypair_with_prompt('ed25519_key') - generate_and_write_ed25519_keypair(password='password', filepath='ed25519_key') - - public_ed25519_key = import_ed25519_publickey_from_file('ed25519_key.pub') - - # Skipping user entry of password - ## private_ed25519_key = import_ed25519_privatekey_from_file('ed25519_key') - private_ed25519_key = import_ed25519_privatekey_from_file( - 'ed25519_key', 'password') - - - - # ----- Tutorial Section: Create Top-level Metadata - repository = create_new_repository('repository') - repository.root.add_verification_key(public_root_key) - self.assertTrue(repository.root.keys) - - public_root_key2 = import_rsa_publickey_from_file('root_key2.pub') - repository.root.add_verification_key(public_root_key2) - - repository.root.threshold = 2 - private_root_key2 = import_rsa_privatekey_from_file( - 'root_key2', password='password') - - 
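Since `repository.root.threshold` was set to 2 above, `root.json` cannot be finalized until both root keys have signed. A sketch of the failure mode (illustrative, not part of TUTORIAL.md; compare the `UnsignedMetadataError` check in the repository-tool test earlier in this patch):

```python
import tuf.exceptions

# Illustrative only: attempting a full write before the two root signing
# keys below are loaded fails the signature-count (threshold) check.
try:
    repository.writeall()
except tuf.exceptions.UnsignedMetadataError:
    pass  # fewer than 2 root signatures -> root.json not yet valid
```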
repository.root.load_signing_key(private_root_key) - repository.root.load_signing_key(private_root_key2) - - # NOTE: The tutorial does not call dirty_roles anymore due to #964 and - # #958. We still call it here to see if roles are dirty as expected. - with mock.patch("tuf.repository_tool.logger") as mock_logger: - repository.dirty_roles() - # Concat strings to avoid Python2/3 unicode prefix problems ('' vs. u'') - mock_logger.info.assert_called_with("Dirty roles: " + str(['root'])) - - # Patch logger to assert that it accurately logs the repo's status. Since - # the logger is called multiple times, we have to assert for the accurate - # sequence of calls or rather its call arguments. - with mock.patch("tuf.repository_lib.logger") as mock_logger: - repository.status() - # Concat strings to avoid Python2/3 unicode prefix problems ('' vs. u'') - self.assertListEqual([ - repr('targets') + " role contains 0 / 1 public keys.", - repr('snapshot') + " role contains 0 / 1 public keys.", - repr('timestamp') + " role contains 0 / 1 public keys.", - repr('root') + " role contains 2 / 2 signatures.", - repr('targets') + " role contains 0 / 1 signatures." - ], [args[0] for args, _ in mock_logger.info.call_args_list]) - - generate_and_write_rsa_keypair(password='password', filepath='targets_key') - generate_and_write_rsa_keypair(password='password', filepath='snapshot_key') - generate_and_write_rsa_keypair(password='password', filepath='timestamp_key') - - repository.targets.add_verification_key(import_rsa_publickey_from_file( - 'targets_key.pub')) - repository.snapshot.add_verification_key(import_rsa_publickey_from_file( - 'snapshot_key.pub')) - repository.timestamp.add_verification_key(import_rsa_publickey_from_file( - 'timestamp_key.pub')) - - # Skipping user entry of password - ## private_targets_key = import_rsa_privatekey_from_file('targets_key') - private_targets_key = import_rsa_privatekey_from_file( - 'targets_key', 'password') - - # Skipping user entry of password - ## private_snapshot_key = import_rsa_privatekey_from_file('snapshot_key') - private_snapshot_key = import_rsa_privatekey_from_file( - 'snapshot_key', 'password') - - # Skipping user entry of password - ## private_timestamp_key = import_rsa_privatekey_from_file('timestamp_key') - private_timestamp_key = import_rsa_privatekey_from_file( - 'timestamp_key', 'password') - - repository.targets.load_signing_key(private_targets_key) - repository.snapshot.load_signing_key(private_snapshot_key) - repository.timestamp.load_signing_key(private_timestamp_key) - - repository.timestamp.expiration = datetime.datetime(2080, 10, 28, 12, 8) - - # NOTE: The tutorial does not call dirty_roles anymore due to #964 and - # #958. We still call it here to see if roles are dirty as expected. - with mock.patch("tuf.repository_tool.logger") as mock_logger: - repository.dirty_roles() - # Concat strings to avoid Python2/3 unicode prefix problems ('' vs. u'') - mock_logger.info.assert_called_with("Dirty roles: " + - str(['root', 'snapshot', 'targets', 'timestamp'])) - - repository.writeall() - - - - # ----- Tutorial Section: Targets - # These next commands in the tutorial are shown as bash commands, so I'll - # just simulate this with some Python commands. 
- ## $ cd repository/targets/ - ## $ echo 'file1' > file1.txt - ## $ echo 'file2' > file2.txt - ## $ echo 'file3' > file3.txt - ## $ mkdir myproject; echo 'file4' > myproject/file4.txt - ## $ cd ../../ - - with open(os.path.join('repository', 'targets', 'file1.txt'), 'w') as fobj: - fobj.write('file1') - with open(os.path.join('repository', 'targets', 'file2.txt'), 'w') as fobj: - fobj.write('file2') - with open(os.path.join('repository', 'targets', 'file3.txt'), 'w') as fobj: - fobj.write('file3') - - os.mkdir(os.path.join('repository', 'targets', 'myproject')) - with open(os.path.join('repository', 'targets', 'myproject', 'file4.txt'), - 'w') as fobj: - fobj.write('file4') - - - repository = load_repository('repository') - - # TODO: replace the hard-coded list of targets with a helper - # method that returns a list of normalized relative target paths - list_of_targets = ['file1.txt', 'file2.txt', 'file3.txt'] - - repository.targets.add_targets(list_of_targets) - - self.assertTrue('file1.txt' in repository.targets.target_files) - self.assertTrue('file2.txt' in repository.targets.target_files) - self.assertTrue('file3.txt' in repository.targets.target_files) - - target4_filepath = 'myproject/file4.txt' - target4_abspath = os.path.abspath(os.path.join( - 'repository', 'targets', target4_filepath)) - octal_file_permissions = oct(os.stat(target4_abspath).st_mode)[4:] - custom_file_permissions = {'file_permissions': octal_file_permissions} - repository.targets.add_target(target4_filepath, custom_file_permissions) - # Note that target filepaths specified in the repo use '/' even on Windows. - # (This is important to make metadata platform-independent.) - self.assertTrue( - os.path.join(target4_filepath) in repository.targets.target_files) - - - # Skipping user entry of password - ## private_targets_key = import_rsa_privatekey_from_file('targets_key') - private_targets_key = import_rsa_privatekey_from_file( - 'targets_key', 'password') - repository.targets.load_signing_key(private_targets_key) - - # Skipping user entry of password - ## private_snapshot_key = import_rsa_privatekey_from_file('snapshot_key') - private_snapshot_key = import_rsa_privatekey_from_file( - 'snapshot_key', 'password') - repository.snapshot.load_signing_key(private_snapshot_key) - - # Skipping user entry of password - ## private_timestamp_key = import_rsa_privatekey_from_file('timestamp_key') - private_timestamp_key = import_rsa_privatekey_from_file( - 'timestamp_key', 'password') - repository.timestamp.load_signing_key(private_timestamp_key) - - # NOTE: The tutorial does not call dirty_roles anymore due to #964 and - # #958. We still call it here to see if roles are dirty as expected. - with mock.patch("tuf.repository_tool.logger") as mock_logger: - repository.dirty_roles() - # Concat strings to avoid Python2/3 unicode prefix problems ('' vs. u'') - mock_logger.info.assert_called_with( - "Dirty roles: " + str(['snapshot', 'targets', 'timestamp'])) - - repository.writeall() - - repository.targets.remove_target('myproject/file4.txt') - self.assertTrue(os.path.exists(os.path.join( - 'repository','targets', 'myproject', 'file4.txt'))) - - # NOTE: The tutorial does not call dirty_roles anymore due to #964 and - # #958. We still call it here to see if roles are dirty as expected. - with mock.patch("tuf.repository_tool.logger") as mock_logger: - repository.dirty_roles() - # Concat strings to avoid Python2/3 unicode prefix problems ('' vs. 
u'') - mock_logger.info.assert_called_with( - "Dirty roles: " + str(['targets'])) - - repository.mark_dirty(['snapshot', 'timestamp']) - repository.writeall() - - - # ----- Tutorial Section: Excursion: Dump Metadata and Append Signature - signable_content = dump_signable_metadata( - os.path.join('repository', 'metadata.staged', 'timestamp.json')) - - # Skipping user entry of password - ## private_ed25519_key = import_ed25519_privatekey_from_file('ed25519_key') - private_ed25519_key = import_ed25519_privatekey_from_file('ed25519_key', 'password') - signature = create_signature( - private_ed25519_key, encode_canonical(signable_content).encode()) - append_signature( - signature, - os.path.join('repository', 'metadata.staged', 'timestamp.json')) - - - - # ----- Tutorial Section: Delegations - generate_and_write_rsa_keypair( - password='password', filepath='unclaimed_key', bits=2048) - public_unclaimed_key = import_rsa_publickey_from_file('unclaimed_key.pub') - repository.targets.delegate( - 'unclaimed', [public_unclaimed_key], ['myproject/*.txt']) - - repository.targets("unclaimed").add_target("myproject/file4.txt") - - # Skipping user entry of password - ## private_unclaimed_key = import_rsa_privatekey_from_file('unclaimed_key') - private_unclaimed_key = import_rsa_privatekey_from_file( - 'unclaimed_key', 'password') - repository.targets("unclaimed").load_signing_key(private_unclaimed_key) - - # NOTE: The tutorial does not call dirty_roles anymore due to #964 and - # #958. We still call it here to see if roles are dirty as expected. - with mock.patch("tuf.repository_tool.logger") as mock_logger: - repository.dirty_roles() - # Concat strings to avoid Python2/3 unicode prefix problems ('' vs. u'') - mock_logger.info.assert_called_with( - "Dirty roles: " + str(['targets', 'unclaimed'])) - - repository.mark_dirty(["snapshot", "timestamp"]) - repository.writeall() - - - # Simulate the following shell command: - ## $ cp -r "repository/metadata.staged/" "repository/metadata/" - shutil.copytree( - os.path.join('repository', 'metadata.staged'), - os.path.join('repository', 'metadata')) - - - # ----- Tutorial Section: Delegate to Hashed Bins - repository.targets('unclaimed').remove_target("myproject/file4.txt") - - targets = ['myproject/file4.txt'] - - # Patch logger to assert that it accurately logs the output of hashed bin - # delegation. The logger is called multiple times, first with info level - # then with warning level. So we have to assert for the accurate sequence - # of calls or rather its call arguments. - with mock.patch("tuf.repository_tool.logger") as mock_logger: - repository.targets('unclaimed').delegate_hashed_bins( - targets, [public_unclaimed_key], 32) - - self.assertListEqual([ - "Creating hashed bin delegations.\n" - "1 total targets.\n" - "32 hashed bins.\n" - "256 total hash prefixes.\n" - "Each bin ranges over 8 hash prefixes." - ] + ["Adding a verification key that has already been used."] * 32, - [ - args[0] for args, _ in - mock_logger.info.call_args_list + mock_logger.warning.call_args_list - ]) - - - for delegation in repository.targets('unclaimed').delegations: - delegation.load_signing_key(private_unclaimed_key) - - # NOTE: The tutorial does not call dirty_roles anymore due to #964 and - # #958. We still call it here to see if roles are dirty as expected. - with mock.patch("tuf.repository_tool.logger") as mock_logger: - repository.dirty_roles() - # Concat strings to avoid Python2/3 unicode prefix problems ('' vs. 
u'') - mock_logger.info.assert_called_with( - "Dirty roles: " + str(['00-07', '08-0f', '10-17', '18-1f', '20-27', - '28-2f', '30-37', '38-3f', '40-47', '48-4f', '50-57', '58-5f', - '60-67', '68-6f', '70-77', '78-7f', '80-87', '88-8f', '90-97', - '98-9f', 'a0-a7', 'a8-af', 'b0-b7', 'b8-bf', 'c0-c7', 'c8-cf', - 'd0-d7', 'd8-df', 'e0-e7', 'e8-ef', 'f0-f7', 'f8-ff', 'unclaimed'])) - - repository.mark_dirty(["snapshot", "timestamp"]) - repository.writeall() - - # ----- Tutorial Section: How to Perform an Update - - # A separate tutorial is linked to for client use. That is not tested here. - create_tuf_client_directory("repository/", "client/tufrepo/") - - - - # ----- Tutorial Section: Test TUF Locally - - # TODO: Run subprocess to simulate the following bash instructions: - - # $ cd "repository/"; python3 -m http.server 8001 - # We next retrieve targets from the TUF repository and save them to client/. The client.py script is available to download metadata and files from a specified repository. In a different command-line prompt . . . - - # $ cd "client/" - # $ ls - # metadata/ - - # $ client.py --repo http://localhost:8001 file1.txt - # $ ls . targets/ - # .: - # metadata targets - - # targets/: - # file1.txt - - - -# Run unit test. -if __name__ == '__main__': - utils.configure_test_logging(sys.argv) - unittest.main() diff --git a/tests/test_unittest_toolbox_old.py b/tests/test_unittest_toolbox_old.py deleted file mode 100755 index d26d079286..0000000000 --- a/tests/test_unittest_toolbox_old.py +++ /dev/null @@ -1,54 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2017, New York University and the TUF contributors -# SPDX-License-Identifier: MIT OR Apache-2.0 - -""" - - test_unittest_toolbox_old.py - - - Vladimir Diaz - - - July 14, 2017. - - - See LICENSE-MIT OR LICENSE for licensing information. - - - Test cases for unittest_toolbox.py. -""" - -import unittest -import logging -import shutil -import sys - -import tuf.unittest_toolbox as unittest_toolbox - -from tests import utils - -logger = logging.getLogger(__name__) - - -class TestUnittestToolbox(unittest_toolbox.Modified_TestCase): - def setUp(self): - unittest_toolbox.Modified_TestCase.setUp(self) - - def tearDown(self): - unittest_toolbox.Modified_TestCase.tearDown(self) - - - def test_tear_down_already_deleted_dir(self): - temp_directory = self.make_temp_directory() - - # Delete the temp directory to make sure unittest_toolbox doesn't - # complain about the missing temp_directory. - shutil.rmtree(temp_directory) - - -# Run the unit tests. -if __name__ == '__main__': - utils.configure_test_logging(sys.argv) - unittest.main() diff --git a/tests/test_updater_ng.py b/tests/test_updater_ng.py index 2e0d41b21c..6c3da0f91d 100644 --- a/tests/test_updater_ng.py +++ b/tests/test_updater_ng.py @@ -19,7 +19,7 @@ from securesystemslib.signer import SSlibSigner from tests import utils -from tuf import ngclient, unittest_toolbox +from tuf import ngclient from tuf.api import exceptions from tuf.api.metadata import ( Metadata, @@ -33,20 +33,14 @@ logger = logging.getLogger(__name__) -class TestUpdater(unittest_toolbox.Modified_TestCase): +class TestUpdater(unittest.TestCase): """Test the Updater class from 'tuf/ngclient/updater.py'.""" - temporary_directory: ClassVar[str] + # pylint: disable=too-many-instance-attributes server_process_handler: ClassVar[utils.TestServerProcess] @classmethod def setUpClass(cls) -> None: - # Create a temporary directory to store the repository, metadata, and - # target files. 
'temporary_directory' must be deleted in - # TearDownModule() so that temporary files are always removed, even when - # exceptions occur. - cls.temporary_directory = tempfile.mkdtemp(dir=os.getcwd()) - # Needed because in some tests simple_server.py cannot be found. # The reason is that the current working directory # has been changed when executing a subprocess. @@ -65,21 +59,13 @@ def tearDownClass(cls) -> None: # Cleans the resources and flush the logged lines (if any). cls.server_process_handler.clean() - # Remove the temporary repository directory, which should contain all - # the metadata, targets, and key files generated for the test cases - shutil.rmtree(cls.temporary_directory) - def setUp(self) -> None: - # We are inheriting from custom class. - unittest_toolbox.Modified_TestCase.setUp(self) + self.tmp_test_dir = tempfile.mkdtemp(dir=os.getcwd()) # Copy the original repository files provided in the test folder so that # any modifications are restricted to the copies. # The 'repository_data' directory is expected to exist in 'tuf.tests/'. original_repository_files = os.path.join(os.getcwd(), "repository_data") - temporary_repository_root = self.make_temp_directory( - directory=self.temporary_directory - ) # The original repository, keystore, and client directories will be # copied for each test case. @@ -98,15 +84,11 @@ def setUp(self) -> None: # Save references to the often-needed client repository directories. # Test cases need these references to access metadata and target files. self.repository_directory = os.path.join( - temporary_repository_root, "repository" - ) - self.keystore_directory = os.path.join( - temporary_repository_root, "keystore" + self.tmp_test_dir, "repository" ) + self.keystore_directory = os.path.join(self.tmp_test_dir, "keystore") - self.client_directory = os.path.join( - temporary_repository_root, "client" - ) + self.client_directory = os.path.join(self.tmp_test_dir, "client") # Copy the original 'repository', 'client', and 'keystore' directories # to the temporary repository the test cases can use. @@ -126,7 +108,7 @@ def setUp(self) -> None: self.metadata_url = f"{url_prefix}/metadata/" self.targets_url = f"{url_prefix}/targets/" - self.dl_dir = self.make_temp_directory() + self.dl_dir = tempfile.mkdtemp(dir=self.tmp_test_dir) # Creating a repository instance. The test cases will use this client # updater to refresh metadata, fetch target files, etc. self.updater = ngclient.Updater( @@ -137,8 +119,7 @@ def setUp(self) -> None: ) def tearDown(self) -> None: - # We are inheriting from custom class. - unittest_toolbox.Modified_TestCase.tearDown(self) + shutil.rmtree(self.tmp_test_dir) # Logs stdout and stderr from the sever subprocess. self.server_process_handler.flush_log() diff --git a/tests/test_updater_old.py b/tests/test_updater_old.py deleted file mode 100755 index f2148855d7..0000000000 --- a/tests/test_updater_old.py +++ /dev/null @@ -1,2138 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2012 - 2017, New York University and the TUF contributors -# SPDX-License-Identifier: MIT OR Apache-2.0 - -""" - - test_updater_old.py - - - Konstantin Andrianov. - - - October 15, 2012. - - March 11, 2014. - Refactored to remove mocked modules and old repository tool dependence, use - exact repositories, and add realistic retrieval of files. -vladimir.v.diaz - - - See LICENSE-MIT OR LICENSE for licensing information. - - - 'test_updater.py_old' provides a collection of methods that test the public / - non-public methods and functions of 'tuf.client.updater.py'. 
- - The 'unittest_toolbox.py' module was created to provide additional testing - tools, such as automatically deleting temporary files created in test cases. - For more information, see 'tests/unittest_toolbox.py'. - - - Test cases here should follow a specific order (i.e., independent methods are - tested before dependent methods). More accurately, least dependent methods - are tested before most dependent methods. There is no reason to rewrite or - construct other methods that replicate already-tested methods solely for - testing purposes. This is possible because the 'unittest.TestCase' class - guarantees the order of unit tests. The 'test_something_A' method would - be tested before 'test_something_B'. To ensure the expected order of tests, - a number is placed after 'test' and before methods name like so: - 'test_1_check_directory'. The number is a measure of dependence, where 1 is - less dependent than 2. -""" - -import os -import time -import shutil -import copy -import tempfile -import logging -import errno -import sys -import unittest -import json -import unittest.mock as mock - -import tuf -import tuf.exceptions -import tuf.log -import tuf.formats -import tuf.keydb -import tuf.roledb -import tuf.repository_tool as repo_tool -import tuf.repository_lib as repo_lib -import tuf.unittest_toolbox as unittest_toolbox -import tuf.client.updater as updater - -from tests import utils - -import securesystemslib - -logger = logging.getLogger(__name__) -repo_tool.disable_console_log_messages() - - -class TestUpdater(unittest_toolbox.Modified_TestCase): - - @classmethod - def setUpClass(cls): - # Create a temporary directory to store the repository, metadata, and target - # files. 'temporary_directory' must be deleted in TearDownModule() so that - # temporary files are always removed, even when exceptions occur. - cls.temporary_directory = tempfile.mkdtemp(dir=os.getcwd()) - - # Needed because in some tests simple_server.py cannot be found. - # The reason is that the current working directory - # has been changed when executing a subprocess. - cls.SIMPLE_SERVER_PATH = os.path.join(os.getcwd(), 'simple_server.py') - - # Launch a SimpleHTTPServer (serves files in the current directory). - # Test cases will request metadata and target files that have been - # pre-generated in 'tuf/tests/repository_data', which will be served - # by the SimpleHTTPServer launched here. The test cases of 'test_updater_old.py' - # assume the pre-generated metadata files have a specific structure, such - # as a delegated role 'targets/role1', three target files, five key files, - # etc. - cls.server_process_handler = utils.TestServerProcess(log=logger, - server=cls.SIMPLE_SERVER_PATH) - - - - @classmethod - def tearDownClass(cls): - # Cleans the resources and flush the logged lines (if any). - cls.server_process_handler.clean() - - # Remove the temporary repository directory, which should contain all the - # metadata, targets, and key files generated for the test cases - shutil.rmtree(cls.temporary_directory) - - - - def setUp(self): - # We are inheriting from custom class. - unittest_toolbox.Modified_TestCase.setUp(self) - tuf.roledb.clear_roledb(clear_all=True) - tuf.keydb.clear_keydb(clear_all=True) - - self.repository_name = 'test_repository1' - - # Copy the original repository files provided in the test folder so that - # any modifications made to repository files are restricted to the copies. - # The 'repository_data' directory is expected to exist in 'tuf.tests/'. 
- original_repository_files = os.path.join(os.getcwd(), 'repository_data') - temporary_repository_root = \ - self.make_temp_directory(directory=self.temporary_directory) - - # The original repository, keystore, and client directories will be copied - # for each test case. - original_repository = os.path.join(original_repository_files, 'repository') - original_keystore = os.path.join(original_repository_files, 'keystore') - original_client = os.path.join(original_repository_files, 'client') - - # Save references to the often-needed client repository directories. - # Test cases need these references to access metadata and target files. - self.repository_directory = \ - os.path.join(temporary_repository_root, 'repository') - self.keystore_directory = \ - os.path.join(temporary_repository_root, 'keystore') - - self.client_directory = os.path.join(temporary_repository_root, - 'client') - self.client_metadata = os.path.join(self.client_directory, - self.repository_name, 'metadata') - self.client_metadata_current = os.path.join(self.client_metadata, - 'current') - self.client_metadata_previous = os.path.join(self.client_metadata, - 'previous') - - # Copy the original 'repository', 'client', and 'keystore' directories - # to the temporary repository the test cases can use. - shutil.copytree(original_repository, self.repository_directory) - shutil.copytree(original_client, self.client_directory) - shutil.copytree(original_keystore, self.keystore_directory) - - # 'path/to/tmp/repository' -> 'localhost:8001/tmp/repository'. - repository_basepath = self.repository_directory[len(os.getcwd()):] - url_prefix = 'http://' + utils.TEST_HOST_ADDRESS + ':' \ - + str(self.server_process_handler.port) + repository_basepath - - # Setting 'tuf.settings.repository_directory' with the temporary client - # directory copied from the original repository files. - tuf.settings.repositories_directory = self.client_directory - - self.repository_mirrors = {'mirror1': {'url_prefix': url_prefix, - 'metadata_path': 'metadata', - 'targets_path': 'targets'}} - - # Creating a repository instance. The test cases will use this client - # updater to refresh metadata, fetch target files, etc. - self.repository_updater = updater.Updater(self.repository_name, - self.repository_mirrors) - - # Metadata role keys are needed by the test cases to make changes to the - # repository (e.g., adding a new target file to 'targets.json' and then - # requesting a refresh()). - self.role_keys = _load_role_keys(self.keystore_directory) - - - - def tearDown(self): - tuf.roledb.clear_roledb(clear_all=True) - tuf.keydb.clear_keydb(clear_all=True) - - # Logs stdout and stderr from the sever subprocess. - self.server_process_handler.flush_log() - - # Remove temporary directory - unittest_toolbox.Modified_TestCase.tearDown(self) - - - # UNIT TESTS. - - def test_1__init__exceptions(self): - # The client's repository requires a metadata directory (and the 'current' - # and 'previous' sub-directories), and at least the 'root.json' file. - # setUp(), called before each test case, instantiates the required updater - # objects and keys. The needed objects/data is available in - # 'self.repository_updater', 'self.client_directory', etc. - - - # Test: Invalid arguments. - # Invalid 'updater_name' argument. String expected. - self.assertRaises(securesystemslib.exceptions.FormatError, updater.Updater, 8, - self.repository_mirrors) - - # Invalid 'repository_mirrors' argument. 'tuf.formats.MIRRORDICT_SCHEMA' - # expected. 
- self.assertRaises(securesystemslib.exceptions.FormatError, updater.Updater, updater.Updater, 8) - - - # 'tuf.client.updater.py' requires that the client's repositories directory - # be configured in 'tuf.settings.py'. - tuf.settings.repositories_directory = None - self.assertRaises(tuf.exceptions.RepositoryError, updater.Updater, 'test_repository1', - self.repository_mirrors) - # Restore 'tuf.settings.repositories_directory' to the original client - # directory. - tuf.settings.repositories_directory = self.client_directory - - # Test: repository does not exist - self.assertRaises(tuf.exceptions.MissingLocalRepositoryError, updater.Updater, - 'test_non_existing_repository', self.repository_mirrors) - - # Test: empty client repository (i.e., no metadata directory). - metadata_backup = self.client_metadata + '.backup' - shutil.move(self.client_metadata, metadata_backup) - self.assertRaises(tuf.exceptions.RepositoryError, updater.Updater, 'test_repository1', - self.repository_mirrors) - # Restore the client's metadata directory. - shutil.move(metadata_backup, self.client_metadata) - - - # Test: repository with only a '{repository_directory}/metadata' directory. - # (i.e., missing the required 'current' and 'previous' sub-directories). - current_backup = self.client_metadata_current + '.backup' - previous_backup = self.client_metadata_previous + '.backup' - - shutil.move(self.client_metadata_current, current_backup) - shutil.move(self.client_metadata_previous, previous_backup) - self.assertRaises(tuf.exceptions.RepositoryError, updater.Updater, 'test_repository1', - self.repository_mirrors) - - # Restore the client's previous directory. The required 'current' directory - # is still missing. - shutil.move(previous_backup, self.client_metadata_previous) - - # Test: repository with only a '{repository_directory}/metadata/previous' - # directory. - self.assertRaises(tuf.exceptions.RepositoryError, updater.Updater, 'test_repository1', - self.repository_mirrors) - # Restore the client's current directory. - shutil.move(current_backup, self.client_metadata_current) - - # Test: repository with a '{repository_directory}/metadata/current' - # directory, but the 'previous' directory is missing. - shutil.move(self.client_metadata_previous, previous_backup) - self.assertRaises(tuf.exceptions.RepositoryError, updater.Updater, 'test_repository1', - self.repository_mirrors) - shutil.move(previous_backup, self.client_metadata_previous) - - # Test: repository missing the required 'root.json' file. - client_root_file = os.path.join(self.client_metadata_current, 'root.json') - backup_root_file = client_root_file + '.backup' - shutil.move(client_root_file, backup_root_file) - self.assertRaises(tuf.exceptions.RepositoryError, updater.Updater, 'test_repository1', - self.repository_mirrors) - # Restore the client's 'root.json file. - shutil.move(backup_root_file, client_root_file) - - # Test: Normal 'tuf.client.updater.Updater' instantiation. - updater.Updater('test_repository1', self.repository_mirrors) - - - - - - def test_1__load_metadata_from_file(self): - - # Setup - # Get the 'role1.json' filepath. Manually load the role metadata, and - # compare it against the loaded metadata by '_load_metadata_from_file()'. 
- role1_filepath = \
- os.path.join(self.client_metadata_current, 'role1.json')
- role1_meta = securesystemslib.util.load_json_file(role1_filepath)
-
- # Load the 'role1.json' file with _load_metadata_from_file, which should
- # store the loaded metadata in the 'self.repository_updater.metadata'
- # store.
- self.assertEqual(len(self.repository_updater.metadata['current']), 4)
- self.repository_updater._load_metadata_from_file('current', 'role1')
-
- # Verify that the correct number of metadata objects has been loaded
- # (i.e., only the 'role1.json' file should have been loaded).
- self.assertEqual(len(self.repository_updater.metadata['current']), 5)
-
- # Verify that the content of the loaded 'role1' metadata is valid.
- self.assertEqual(self.repository_updater.metadata['current']['role1'],
- role1_meta['signed'])
-
- # Verify that _load_metadata_from_file() doesn't raise an exception for
- # improperly formatted metadata, and doesn't load the bad file.
- with open(role1_filepath, 'ab') as file_object:
- file_object.write(b'bad JSON data')
-
- self.repository_updater._load_metadata_from_file('current', 'role1')
- self.assertEqual(len(self.repository_updater.metadata['current']), 5)
-
- # Verify that we fail gracefully if we can't deserialize a metadata file.
- self.repository_updater._load_metadata_from_file('current', 'empty_file')
- self.assertFalse('empty_file' in self.repository_updater.metadata['current'])
-
- # Test an invalid metadata set argument (must be either
- # 'current' or 'previous').
- self.assertRaises(securesystemslib.exceptions.Error,
- self.repository_updater._load_metadata_from_file,
- 'bad_metadata_set', 'role1')
-
-
-
-
- def test_1__rebuild_key_and_role_db(self):
- # Setup
- root_roleinfo = tuf.roledb.get_roleinfo('root', self.repository_name)
- root_metadata = self.repository_updater.metadata['current']['root']
- root_threshold = root_metadata['roles']['root']['threshold']
- number_of_root_keys = len(root_metadata['keys'])
-
- self.assertEqual(root_roleinfo['threshold'], root_threshold)
-
- # The keydb is expected to hold one key more than 'root.json' lists: the
- # key delegated by 'targets.json'. The delegated roles of 'targets.json'
- # are also loaded when the repository object is instantiated.
-
- self.assertEqual(number_of_root_keys + 1, len(tuf.keydb._keydb_dict[self.repository_name]))
-
- # Test: normal case.
- self.repository_updater._rebuild_key_and_role_db()
-
- root_roleinfo = tuf.roledb.get_roleinfo('root', self.repository_name)
- self.assertEqual(root_roleinfo['threshold'], root_threshold)
-
- # _rebuild_key_and_role_db() will only rebuild the keys and roles specified
- # in the 'root.json' file, unlike __init__(). Instantiating an updater
- # object calls both _rebuild_key_and_role_db() and _import_delegations().
- self.assertEqual(number_of_root_keys, len(tuf.keydb._keydb_dict[self.repository_name]))
-
- # Test: properly updated roledb and keydb dicts if the Root role changes.
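- # (Bump the root threshold and drop one trusted key in the loaded root
- # metadata, then rebuild; the roledb and keydb should reflect exactly what
- # the modified root metadata now lists.)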
- root_metadata = self.repository_updater.metadata['current']['root']
- root_metadata['roles']['root']['threshold'] = 8
- root_metadata['keys'].popitem()
-
- self.repository_updater._rebuild_key_and_role_db()
-
- root_roleinfo = tuf.roledb.get_roleinfo('root', self.repository_name)
- self.assertEqual(root_roleinfo['threshold'], 8)
- self.assertEqual(number_of_root_keys - 1, len(tuf.keydb._keydb_dict[self.repository_name]))
-
-
-
-
- def test_1__update_versioninfo(self):
- # Tests
- # Verify that the 'self.versioninfo' dictionary is empty (it starts off
- # empty and is only populated if _update_versioninfo() is called).
- versioninfo_dict = self.repository_updater.versioninfo
- self.assertEqual(len(versioninfo_dict), 0)
-
- # Load the versioninfo of the top-level Targets role. This action
- # populates the 'self.versioninfo' dictionary.
- self.repository_updater._update_versioninfo('targets.json')
- self.assertEqual(len(versioninfo_dict), 1)
- self.assertTrue(tuf.formats.FILEINFODICT_SCHEMA.matches(versioninfo_dict))
-
- # The Snapshot role stores the version numbers of all the roles available
- # on the repository. Load Snapshot to extract the version number of
- # 'targets.json', and compare it against the one loaded by
- # 'self.repository_updater'.
- snapshot_filepath = os.path.join(self.client_metadata_current, 'snapshot.json')
- snapshot_signable = securesystemslib.util.load_json_file(snapshot_filepath)
- targets_versioninfo = snapshot_signable['signed']['meta']['targets.json']
-
- # Verify that the manually loaded version number of 'targets.json' matches
- # the one loaded by the updater object.
- self.assertTrue('targets.json' in versioninfo_dict)
- self.assertEqual(versioninfo_dict['targets.json'], targets_versioninfo)
-
- # Verify that 'self.versioninfo' is incremented if another role is updated.
- self.repository_updater._update_versioninfo('role1.json')
- self.assertEqual(len(versioninfo_dict), 2)
-
- # Verify that 'self.versioninfo' is incremented if a non-existent role is
- # requested, and has its versioninfo entry set to 'None'.
- self.repository_updater._update_versioninfo('bad_role.json')
- self.assertEqual(len(versioninfo_dict), 3)
- self.assertEqual(versioninfo_dict['bad_role.json'], None)
-
- # Verify that the versioninfo specified in Timestamp is used if the Snapshot
- # role hasn't been downloaded yet.
- del self.repository_updater.metadata['current']['snapshot']
- self.repository_updater._update_versioninfo('snapshot.json')
- self.assertEqual(versioninfo_dict['snapshot.json']['version'], 1)
-
-
-
- def test_1__refresh_must_not_count_duplicate_keyids_towards_threshold(self):
- # Update the root threshold on the server repository and sign twice with
- # one key.
- repository = repo_tool.load_repository(self.repository_directory)
- repository.root.threshold = 2
- repository.root.load_signing_key(self.role_keys['root']['private'])
-
- storage_backend = securesystemslib.storage.FilesystemBackend()
- # The client uses the threshold from the previous root file to verify the
- # new root. Thus we need to make two updates so that the threshold used for
- # verification becomes 2. I.e., we bump the version, sign twice with the
- # same key, and write '2.root.json' and '3.root.json' to disk.
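- # (The attack simulated below: each root version carries two signature
- # entries, but both were produced by the same key. A correct client must
- # count unique keyids, not raw signature entries, toward the threshold of
- # 2, and must therefore reject the new root.)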
- for version in [2, 3]: - repository.root.version = version - info = tuf.roledb.get_roleinfo("root") - metadata = repo_lib.generate_root_metadata( - info["version"], info["expires"], False) - signed_metadata = repo_lib.sign_metadata( - metadata, info["keyids"], "root.json", "default") - signed_metadata["signatures"].append(signed_metadata["signatures"][0]) - live_root_path = os.path.join( - self.repository_directory, "metadata", "root.json") - - # Bypass server side verification in 'write' or 'writeall', which would - # catch the unmet threshold. - # We also skip writing to 'metadata.staged' and copying to 'metadata' and - # instead write directly to 'metadata' - repo_lib.write_metadata_file(signed_metadata, live_root_path, - info["version"], True, storage_backend) - - - # Update from current '1.root.json' to '3.root.json' on client and assert - # raise of 'BadSignatureError' (caused by unmet signature threshold). - try: - self.repository_updater.refresh() - - except tuf.exceptions.NoWorkingMirrorError as e: - mirror_errors = list(e.mirror_errors.values()) - self.assertTrue(len(mirror_errors) == 1) - self.assertTrue( - isinstance(mirror_errors[0], - securesystemslib.exceptions.BadSignatureError)) - self.assertEqual( - str(mirror_errors[0]), - repr("root") + " metadata has bad signature.") - - else: - self.fail( - "Expected a NoWorkingMirrorError composed of one BadSignatureError") - - - def test_2__import_delegations(self): - # Setup. - # In order to test '_import_delegations' the parent of the delegation - # has to be in Repository.metadata['current'], but it has to be inserted - # there without using '_load_metadata_from_file()' since it calls - # '_import_delegations()'. - repository_name = self.repository_updater.repository_name - tuf.keydb.clear_keydb(repository_name) - tuf.roledb.clear_roledb(repository_name) - - self.assertEqual(len(tuf.roledb._roledb_dict[repository_name]), 0) - self.assertEqual(len(tuf.keydb._keydb_dict[repository_name]), 0) - - self.repository_updater._rebuild_key_and_role_db() - - self.assertEqual(len(tuf.roledb._roledb_dict[repository_name]), 4) - - # Take into account the number of keyids algorithms supported by default, - # which this test condition expects to be two (sha256 and sha512). - self.assertEqual(4, len(tuf.keydb._keydb_dict[repository_name])) - - # Test: pass a role without delegations. - self.repository_updater._import_delegations('root') - - # Verify that there was no change to the roledb and keydb dictionaries by - # checking the number of elements in the dictionaries. - self.assertEqual(len(tuf.roledb._roledb_dict[repository_name]), 4) - # Take into account the number of keyid hash algorithms, which this - # test condition expects to be one - self.assertEqual(len(tuf.keydb._keydb_dict[repository_name]), 4) - - # Test: normal case, first level delegation. - self.repository_updater._import_delegations('targets') - - self.assertEqual(len(tuf.roledb._roledb_dict[repository_name]), 5) - # The number of root keys (times the number of key hash algorithms) + - # delegation's key (+1 for its sha512 keyid). - self.assertEqual(len(tuf.keydb._keydb_dict[repository_name]), 4 + 1) - - # Verify that roledb dictionary was added. - self.assertTrue('role1' in tuf.roledb._roledb_dict[repository_name]) - - # Verify that keydb dictionary was updated. 
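- # (Every keyid that produced a signature on 'role1.json' should now be
- # retrievable from the keydb, since the delegation's keys were imported.)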
- role1_signable = \ - securesystemslib.util.load_json_file(os.path.join(self.client_metadata_current, - 'role1.json')) - keyids = [] - for signature in role1_signable['signatures']: - keyids.append(signature['keyid']) - - for keyid in keyids: - self.assertTrue(keyid in tuf.keydb._keydb_dict[repository_name]) - - # Verify that _import_delegations() ignores invalid keytypes in the 'keys' - # field of parent role's 'delegations'. - existing_keyid = keyids[0] - - self.repository_updater.metadata['current']['targets']\ - ['delegations']['keys'][existing_keyid]['keytype'] = 'bad_keytype' - self.repository_updater._import_delegations('targets') - - # Restore the keytype of 'existing_keyid'. - self.repository_updater.metadata['current']['targets']\ - ['delegations']['keys'][existing_keyid]['keytype'] = 'ed25519' - - # Verify that _import_delegations() raises an exception if one of the - # delegated keys is malformed. - valid_keyval = self.repository_updater.metadata['current']['targets']\ - ['delegations']['keys'][existing_keyid]['keyval'] - - self.repository_updater.metadata['current']['targets']\ - ['delegations']['keys'][existing_keyid]['keyval'] = 1 - self.assertRaises(securesystemslib.exceptions.FormatError, self.repository_updater._import_delegations, 'targets') - - self.repository_updater.metadata['current']['targets']\ - ['delegations']['keys'][existing_keyid]['keyval'] = valid_keyval - - # Verify that _import_delegations() raises an exception if one of the - # delegated roles is malformed. - self.repository_updater.metadata['current']['targets']\ - ['delegations']['roles'][0]['name'] = 1 - self.assertRaises(securesystemslib.exceptions.FormatError, self.repository_updater._import_delegations, 'targets') - - - - def test_2__versioninfo_has_been_updated(self): - # Verify that the method returns 'False' if a versioninfo was not changed. - snapshot_filepath = os.path.join(self.client_metadata_current, 'snapshot.json') - snapshot_signable = securesystemslib.util.load_json_file(snapshot_filepath) - targets_versioninfo = snapshot_signable['signed']['meta']['targets.json'] - - self.assertFalse(self.repository_updater._versioninfo_has_been_updated('targets.json', - targets_versioninfo)) - - # Verify that the method returns 'True' if Root's version number changes. - targets_versioninfo['version'] = 8 - self.assertTrue(self.repository_updater._versioninfo_has_been_updated('targets.json', - targets_versioninfo)) - - - - - - def test_2__move_current_to_previous(self): - # Test case will consist of removing a metadata file from client's - # '{client_repository}/metadata/previous' directory, executing the method - # and then verifying that the 'previous' directory contains the snapshot - # file. - previous_snapshot_filepath = os.path.join(self.client_metadata_previous, - 'snapshot.json') - os.remove(previous_snapshot_filepath) - self.assertFalse(os.path.exists(previous_snapshot_filepath)) - - # Verify that the current 'snapshot.json' is moved to the previous directory. 
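- # (_move_current_to_previous() is expected to move
- # 'metadata/current/<rolename>.json' to 'metadata/previous/<rolename>.json'
- # for the given role.)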
- self.repository_updater._move_current_to_previous('snapshot') - self.assertTrue(os.path.exists(previous_snapshot_filepath)) - - # assert that non-ascii alphanumeric role name "../ä" (that is url encoded - # in local filename) works - encoded_current = os.path.join( - self.client_metadata_current, '..%2F%C3%A4.json' - ) - encoded_previous = os.path.join( - self.client_metadata_previous, '..%2F%C3%A4.json' - ) - - with open(encoded_current, "w"): - pass - self.repository_updater._move_current_to_previous('../ä') - self.assertTrue(os.path.exists(encoded_previous)) - - - - - - def test_2__delete_metadata(self): - # This test will verify that 'root' metadata is never deleted. When a role - # is deleted verify that the file is not present in the - # 'self.repository_updater.metadata' dictionary. - self.repository_updater._delete_metadata('root') - self.assertTrue('root' in self.repository_updater.metadata['current']) - - self.repository_updater._delete_metadata('timestamp') - self.assertFalse('timestamp' in self.repository_updater.metadata['current']) - - - - - - def test_2__ensure_not_expired(self): - # This test condition will verify that nothing is raised when a metadata - # file has a future expiration date. - root_metadata = self.repository_updater.metadata['current']['root'] - self.repository_updater._ensure_not_expired(root_metadata, 'root') - - # Metadata with an expiration time in the future should, of course, not - # count as expired - expires = tuf.formats.unix_timestamp_to_datetime(int(time.time() + 10)) - expires = expires.isoformat() + 'Z' - root_metadata['expires'] = expires - self.assertTrue(tuf.formats.ROOT_SCHEMA.matches(root_metadata)) - self.repository_updater._ensure_not_expired(root_metadata, 'root') - - # Metadata that expires at the exact current time is considered expired - expire_time = int(time.time()) - expires = \ - tuf.formats.unix_timestamp_to_datetime(expire_time).isoformat()+'Z' - root_metadata['expires'] = expires - mock_time = mock.Mock() - mock_time.return_value = expire_time - self.assertTrue(tuf.formats.ROOT_SCHEMA.matches(root_metadata)) - with mock.patch('time.time', mock_time): - self.assertRaises(tuf.exceptions.ExpiredMetadataError, - self.repository_updater._ensure_not_expired, - root_metadata, 'root') - - # Metadata that expires in the past is considered expired - expires = tuf.formats.unix_timestamp_to_datetime(int(time.time() - 10)) - expires = expires.isoformat() + 'Z' - root_metadata['expires'] = expires - self.assertTrue(tuf.formats.ROOT_SCHEMA.matches(root_metadata)) - self.assertRaises(tuf.exceptions.ExpiredMetadataError, - self.repository_updater._ensure_not_expired, - root_metadata, 'root') - - - - - - def test_3__update_metadata(self): - # Setup - # _update_metadata() downloads, verifies, and installs the specified - # metadata role. Remove knowledge of currently installed metadata and - # verify that they are re-installed after calling _update_metadata(). - - # This is the default metadata that we would create for the timestamp role, - # because it has no signed metadata for itself. - DEFAULT_TIMESTAMP_FILELENGTH = tuf.settings.DEFAULT_TIMESTAMP_REQUIRED_LENGTH - - # This is the upper bound length for Targets metadata. - DEFAULT_TARGETS_FILELENGTH = tuf.settings.DEFAULT_TARGETS_REQUIRED_LENGTH - - # Save the versioninfo of 'targets.json,' needed later when re-installing - # with _update_metadata(). 
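- # (A versioninfo entry is a dict of the form {'version': <number>}, taken
- # here from the trusted snapshot's 'meta' listing.)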
- targets_versioninfo = \
- self.repository_updater.metadata['current']['snapshot']['meta']\
- ['targets.json']
-
- # Remove the currently installed metadata from the store and disk. Verify
- # that the metadata dictionary is re-populated after calling
- # _update_metadata().
- del self.repository_updater.metadata['current']['timestamp']
- del self.repository_updater.metadata['current']['targets']
-
- timestamp_filepath = \
- os.path.join(self.client_metadata_current, 'timestamp.json')
- targets_filepath = os.path.join(self.client_metadata_current, 'targets.json')
- root_filepath = os.path.join(self.client_metadata_current, 'root.json')
- os.remove(timestamp_filepath)
- os.remove(targets_filepath)
-
- # Test: normal case.
- # Verify 'timestamp.json' is properly installed.
- self.assertFalse('timestamp' in self.repository_updater.metadata['current'])
-
- logger.info('\nroleinfo: ' + repr(tuf.roledb.get_rolenames(self.repository_name)))
- self.repository_updater._update_metadata('timestamp',
- DEFAULT_TIMESTAMP_FILELENGTH)
- self.assertTrue('timestamp' in self.repository_updater.metadata['current'])
- self.assertTrue(os.path.exists(timestamp_filepath))
-
- # Verify 'targets.json' is properly installed.
- self.assertFalse('targets' in self.repository_updater.metadata['current'])
- self.repository_updater._update_metadata('targets',
- DEFAULT_TARGETS_FILELENGTH,
- targets_versioninfo['version'])
- self.assertTrue('targets' in self.repository_updater.metadata['current'])
-
- targets_signable = securesystemslib.util.load_json_file(targets_filepath)
- loaded_targets_version = targets_signable['signed']['version']
- self.assertEqual(targets_versioninfo['version'], loaded_targets_version)
-
- # Test: Invalid / untrusted version numbers.
- # Invalid version number for 'targets.json'.
- self.assertRaises(tuf.exceptions.NoWorkingMirrorError,
- self.repository_updater._update_metadata,
- 'targets', DEFAULT_TARGETS_FILELENGTH, 88)
-
- # Verify that the specific exception raised for the previous case is
- # correct. The version number is checked, so the specific error from
- # each mirror should be 'tuf.exceptions.BadVersionNumberError'.
- try:
- self.repository_updater._update_metadata('targets',
- DEFAULT_TARGETS_FILELENGTH, 88)
-
- except tuf.exceptions.NoWorkingMirrorError as e:
- for mirror_error in e.mirror_errors.values():
- assert isinstance(mirror_error, tuf.exceptions.BadVersionNumberError)
-
- else:
- self.fail(
- 'Expected a NoWorkingMirrorError composed of BadVersionNumberErrors')
-
-
-
-
-
- def test_3__get_metadata_file(self):
-
- '''
- This test focuses on making sure that the updater rejects unknown or
- badly-formatted TUF specification version numbers.
- '''
-
- # Make note of the correct supported TUF specification version.
- correct_specification_version = tuf.SPECIFICATION_VERSION
-
- # Change it temporarily, long enough to write new metadata with the
- # unsupported version.
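- # (The metadata written below will carry 'spec_version': '0.9.0'. Once the
- # constant is restored, the client should treat that version as
- # unsupported and reject the file during _get_metadata_file().)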
- tuf.SPECIFICATION_VERSION = '0.9.0' - - repository = repo_tool.load_repository(self.repository_directory) - repository.timestamp.load_signing_key(self.role_keys['timestamp']['private']) - repository.writeall() - - # Move the staged metadata to the "live" metadata. - shutil.rmtree(os.path.join(self.repository_directory, 'metadata')) - shutil.copytree(os.path.join(self.repository_directory, 'metadata.staged'), - os.path.join(self.repository_directory, 'metadata')) - - - # Change the supported TUF specification version back to what it should be - # so that we can parse the metadata and see that the spec version in the - # metadata does not match the code's expected spec version. - tuf.SPECIFICATION_VERSION = correct_specification_version - - upperbound_filelength = tuf.settings.DEFAULT_TIMESTAMP_REQUIRED_LENGTH - try: - self.repository_updater._get_metadata_file('timestamp', 'timestamp.json', - upperbound_filelength, 1) - - except tuf.exceptions.NoWorkingMirrorError as e: - # Note that this test provides a piece of metadata which would fail to - # be accepted -- with a different error -- if the specification version - # number were not a problem. - for mirror_error in e.mirror_errors.values(): - assert isinstance( - mirror_error, tuf.exceptions.UnsupportedSpecificationError) - - else: - self.fail( - 'Expected a failure to verify metadata when the metadata had a ' - 'specification version number that was unexpected. ' - 'No error was raised.') - - - - - - def test_3__update_metadata_if_changed(self): - # Setup. - # The client repository is initially loaded with only four top-level roles. - # Verify that the metadata store contains the metadata of only these four - # roles before updating the metadata of 'targets.json'. - self.assertEqual(len(self.repository_updater.metadata['current']), 4) - self.assertTrue('targets' in self.repository_updater.metadata['current']) - targets_path = os.path.join(self.client_metadata_current, 'targets.json') - self.assertTrue(os.path.exists(targets_path)) - self.assertEqual(self.repository_updater.metadata['current']['targets']['version'], 1) - - # Test: normal case. Update 'targets.json'. The version number should not - # change. - self.repository_updater._update_metadata_if_changed('targets') - - # Verify the current version of 'targets.json' has not changed. - self.assertEqual(self.repository_updater.metadata['current']['targets']['version'], 1) - - # Modify one target file on the remote repository. - repository = repo_tool.load_repository(self.repository_directory) - target3 = 'file3.txt' - - repository.targets.add_target(target3) - repository.root.version = repository.root.version + 1 - repository.root.load_signing_key(self.role_keys['root']['private']) - repository.targets.load_signing_key(self.role_keys['targets']['private']) - repository.snapshot.load_signing_key(self.role_keys['snapshot']['private']) - repository.timestamp.load_signing_key(self.role_keys['timestamp']['private']) - repository.writeall() - - # Move the staged metadata to the "live" metadata. - shutil.rmtree(os.path.join(self.repository_directory, 'metadata')) - shutil.copytree(os.path.join(self.repository_directory, 'metadata.staged'), - os.path.join(self.repository_directory, 'metadata')) - - # Update 'targets.json' and verify that the client's current 'targets.json' - # has been updated. 'timestamp' and 'snapshot' must be manually updated - # so that new 'targets' can be recognized. 
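- # (This mirrors the normal TUF update order: timestamp first, then the
- # snapshot referenced by timestamp, then the targets referenced by
- # snapshot.)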
- DEFAULT_TIMESTAMP_FILELENGTH = tuf.settings.DEFAULT_TIMESTAMP_REQUIRED_LENGTH - - self.repository_updater._update_metadata('timestamp', DEFAULT_TIMESTAMP_FILELENGTH) - self.repository_updater._update_metadata_if_changed('snapshot', 'timestamp') - self.repository_updater._update_metadata_if_changed('targets') - targets_path = os.path.join(self.client_metadata_current, 'targets.json') - self.assertTrue(os.path.exists(targets_path)) - self.assertTrue(self.repository_updater.metadata['current']['targets']) - self.assertEqual(self.repository_updater.metadata['current']['targets']['version'], 2) - - # Test for an invalid 'referenced_metadata' argument. - self.assertRaises(tuf.exceptions.RepositoryError, - self.repository_updater._update_metadata_if_changed, 'snapshot', 'bad_role') - - - - def test_3__targets_of_role(self): - # Setup. - # Extract the list of targets from 'targets.json', to be compared to what - # is returned by _targets_of_role('targets'). - targets_in_metadata = \ - self.repository_updater.metadata['current']['targets']['targets'] - - # Test: normal case. - targetinfos_list = self.repository_updater._targets_of_role('targets') - - # Verify that the list of targets was returned, and that it contains valid - # target files. - self.assertTrue(tuf.formats.TARGETINFOS_SCHEMA.matches(targetinfos_list)) - for targetinfo in targetinfos_list: - self.assertTrue((targetinfo['filepath'], targetinfo['fileinfo']) in targets_in_metadata.items()) - - - - - - def test_4_refresh(self): - # This unit test is based on adding an extra target file to the - # server and rebuilding all server-side metadata. All top-level metadata - # should be updated when the client calls refresh(). - - # First verify that an expired root metadata is updated. - expired_date = '1960-01-01T12:00:00Z' - self.repository_updater.metadata['current']['root']['expires'] = expired_date - self.repository_updater.refresh() - - # Second, verify that expired root metadata is not updated if - # 'unsafely_update_root_if_necessary' is explicitly set to 'False'. - expired_date = '1960-01-01T12:00:00Z' - self.repository_updater.metadata['current']['root']['expires'] = expired_date - self.assertRaises(tuf.exceptions.ExpiredMetadataError, - self.repository_updater.refresh, - unsafely_update_root_if_necessary=False) - - repository = repo_tool.load_repository(self.repository_directory) - target3 = 'file3.txt' - - repository.targets.add_target(target3) - repository.targets.load_signing_key(self.role_keys['targets']['private']) - repository.snapshot.load_signing_key(self.role_keys['snapshot']['private']) - repository.timestamp.load_signing_key(self.role_keys['timestamp']['private']) - repository.writeall() - - # Move the staged metadata to the "live" metadata. - shutil.rmtree(os.path.join(self.repository_directory, 'metadata')) - shutil.copytree(os.path.join(self.repository_directory, 'metadata.staged'), - os.path.join(self.repository_directory, 'metadata')) - - # Reference 'self.Repository.metadata['current']['targets']'. Ensure - # 'target3' is not already specified. - targets_metadata = self.repository_updater.metadata['current']['targets'] - self.assertFalse(target3 in targets_metadata['targets']) - - # Verify the expected version numbers of the roles to be modified. 
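- # (All three roles start at version 1 in the pre-generated client
- # metadata; the writeall() above produced version 2 of each on the
- # server.)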
- self.assertEqual(self.repository_updater.metadata['current']['targets']\
- ['version'], 1)
- self.assertEqual(self.repository_updater.metadata['current']['snapshot']\
- ['version'], 1)
- self.assertEqual(self.repository_updater.metadata['current']['timestamp']\
- ['version'], 1)
-
- # Test: normal case. 'targets.json' should now specify 'target3', and the
- # following top-level metadata should have also been updated:
- # 'snapshot.json' and 'timestamp.json'.
- self.repository_updater.refresh()
-
- # Verify that the client's metadata was updated.
- targets_metadata = self.repository_updater.metadata['current']['targets']
- self.assertTrue(target3 in targets_metadata['targets'])
-
- # Verify the expected version numbers of the updated roles.
- self.assertEqual(self.repository_updater.metadata['current']['targets']\
- ['version'], 2)
- self.assertEqual(self.repository_updater.metadata['current']['snapshot']\
- ['version'], 2)
- self.assertEqual(self.repository_updater.metadata['current']['timestamp']\
- ['version'], 2)
-
-
-
-
-
- def test_4__refresh_targets_metadata(self):
- # Setup.
- # It is assumed that the client repository has only loaded the top-level
- # metadata. Refresh the 'targets.json' metadata, including all delegated
- # roles (i.e., the client should add the missing 'role1.json' metadata).
- self.assertEqual(len(self.repository_updater.metadata['current']), 4)
-
- # Test: normal case.
- self.repository_updater._refresh_targets_metadata(refresh_all_delegated_roles=True)
-
- # Verify that the client's metadata files were refreshed successfully.
- self.assertEqual(len(self.repository_updater.metadata['current']), 6)
-
- # Test for a non-existing rolename.
- self.repository_updater._refresh_targets_metadata('bad_rolename',
- refresh_all_delegated_roles=False)
-
- # Test that non-json metadata in Snapshot is ignored.
- self.repository_updater.metadata['current']['snapshot']['meta']['bad_role.xml'] = {}
- self.repository_updater._refresh_targets_metadata(refresh_all_delegated_roles=True)
-
-
-
- def test_5_all_targets(self):
- # Setup
- # As with '_refresh_targets_metadata()', update top-level metadata before
- # calling one of the "targets" methods, as recommended by 'updater.py'.
- self.repository_updater.refresh()
-
- # Test: normal case.
- with utils.ignore_deprecation_warnings('tuf.client.updater'):
- all_targets = self.repository_updater.all_targets()
-
- # Verify the format of 'all_targets'; it should correspond to
- # 'TARGETINFOS_SCHEMA'.
- self.assertTrue(tuf.formats.TARGETINFOS_SCHEMA.matches(all_targets))
-
- # Verify that there is a correct number of records in the 'all_targets'
- # list, and that the expected filepaths are specified in the metadata. In
- # the targets directory of the repository, there should be 3 target files
- # (2 of which are specified by 'targets.json'). The delegated role 'role1'
- # specifies 1 target file. The expected total number of targets in
- # 'all_targets' should be 3.
- self.assertEqual(len(all_targets), 3)
-
- target_filepaths = []
- for target in all_targets:
- target_filepaths.append(target['filepath'])
-
- self.assertTrue('file1.txt' in target_filepaths)
- self.assertTrue('file2.txt' in target_filepaths)
- self.assertTrue('file3.txt' in target_filepaths)
-
-
-
-
-
- def test_5_targets_of_role(self):
- # Setup
- # Remove knowledge of 'targets.json' from the metadata store.
- del self.repository_updater.metadata['current']['targets']
-
- # Remove the metadata of the delegated roles.
- #shutil.rmtree(os.path.join(self.client_metadata, 'targets')) - os.remove(os.path.join(self.client_metadata_current, 'targets.json')) - - # Extract the target files specified by the delegated role, 'role1.json', - # as available on the server-side version of the role. - role1_filepath = os.path.join(self.repository_directory, 'metadata', - 'role1.json') - role1_signable = securesystemslib.util.load_json_file(role1_filepath) - expected_targets = role1_signable['signed']['targets'] - - - # Test: normal case. - with utils.ignore_deprecation_warnings('tuf.client.updater'): - targetinfos = self.repository_updater.targets_of_role('role1') - - # Verify that the expected role files were downloaded and installed. - os.path.exists(os.path.join(self.client_metadata_current, 'targets.json')) - os.path.exists(os.path.join(self.client_metadata_current, 'targets', - 'role1.json')) - self.assertTrue('targets' in self.repository_updater.metadata['current']) - self.assertTrue('role1' in self.repository_updater.metadata['current']) - - # Verify that list of targets was returned and that it contains valid - # target files. - self.assertTrue(tuf.formats.TARGETINFOS_SCHEMA.matches(targetinfos)) - for targetinfo in targetinfos: - self.assertTrue((targetinfo['filepath'], targetinfo['fileinfo']) in expected_targets.items()) - - # Test: Invalid arguments. - # targets_of_role() expected a string rolename. - with utils.ignore_deprecation_warnings('tuf.client.updater'): - self.assertRaises(securesystemslib.exceptions.FormatError, self.repository_updater.targets_of_role, - 8) - self.assertRaises(tuf.exceptions.UnknownRoleError, self.repository_updater.targets_of_role, - 'unknown_rolename') - - - - - - def test_6_get_one_valid_targetinfo(self): - # Setup - # Unlike some of the other tests, start up a fresh server here. - # The SimpleHTTPServer started in the setupclass has a tendency to - # timeout in Windows after a few tests. - - # Creates a subprocess running a server. - server_process_handler = utils.TestServerProcess(log=logger, - server=self.SIMPLE_SERVER_PATH) - - # 'path/to/tmp/repository' -> 'localhost:8001/tmp/repository'. - repository_basepath = self.repository_directory[len(os.getcwd()):] - url_prefix = 'http://' + utils.TEST_HOST_ADDRESS + ':' \ - + str(self.server_process_handler.port) + repository_basepath - - self.repository_mirrors = {'mirror1': {'url_prefix': url_prefix, - 'metadata_path': 'metadata', 'targets_path': 'targets'}} - - # Creating a repository instance. The test cases will use this client - # updater to refresh metadata, fetch target files, etc. - self.repository_updater = updater.Updater(self.repository_name, - self.repository_mirrors) - - # Extract the file information of the targets specified in 'targets.json'. - self.repository_updater.refresh() - targets_metadata = self.repository_updater.metadata['current']['targets'] - - target_files = targets_metadata['targets'] - # Extract random target from 'target_files', which will be compared to what - # is returned by get_one_valid_targetinfo(). Restore the popped target - # (dict value stored in the metadata store) so that it can be found later. - filepath, fileinfo = target_files.popitem() - target_files[filepath] = fileinfo - - target_targetinfo = self.repository_updater.get_one_valid_targetinfo(filepath) - self.assertTrue(tuf.formats.TARGETINFO_SCHEMA.matches(target_targetinfo)) - self.assertEqual(target_targetinfo['filepath'], filepath) - self.assertEqual(target_targetinfo['fileinfo'], fileinfo) - - # Test: invalid target path. 
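- # (A random path that no role signs for should cause the full delegation
- # walk to come up empty and raise UnknownTargetError.)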
- self.assertRaises(tuf.exceptions.UnknownTargetError, - self.repository_updater.get_one_valid_targetinfo, - self.random_path().lstrip(os.sep).lstrip('/')) - - # Test updater.get_one_valid_targetinfo() backtracking behavior (enabled by - # default.) - targets_directory = os.path.join(self.repository_directory, 'targets') - os.makedirs(os.path.join(targets_directory, 'foo')) - - foo_package = 'foo/foo1.1.tar.gz' - foo_pattern = 'foo/foo*.tar.gz' - - foo_fullpath = os.path.join(targets_directory, foo_package) - with open(foo_fullpath, 'wb') as file_object: - file_object.write(b'new release') - - # Modify delegations on the remote repository to test backtracking behavior. - repository = repo_tool.load_repository(self.repository_directory) - - - repository.targets.delegate('role3', [self.role_keys['targets']['public']], - [foo_pattern]) - - repository.targets.delegate('role4', [self.role_keys['targets']['public']], - [foo_pattern], list_of_targets=[foo_package]) - repository.targets('role4').add_target(foo_package) - - repository.targets.load_signing_key(self.role_keys['targets']['private']) - repository.targets('role3').load_signing_key(self.role_keys['targets']['private']) - repository.targets('role4').load_signing_key(self.role_keys['targets']['private']) - repository.snapshot.load_signing_key(self.role_keys['snapshot']['private']) - repository.timestamp.load_signing_key(self.role_keys['timestamp']['private']) - repository.writeall() - - # Move the staged metadata to the "live" metadata. - shutil.rmtree(os.path.join(self.repository_directory, 'metadata')) - shutil.copytree(os.path.join(self.repository_directory, 'metadata.staged'), - os.path.join(self.repository_directory, 'metadata')) - - - # updater.get_one_valid_targetinfo() should find 'foo1.1.tar.gz' by - # backtracking to 'role3'. 'role2' allows backtracking. - self.repository_updater.refresh() - self.repository_updater.get_one_valid_targetinfo('foo/foo1.1.tar.gz') - - # A leading path separator is disallowed. - self.assertRaises(tuf.exceptions.FormatError, - self.repository_updater.get_one_valid_targetinfo, '/foo/foo1.1.tar.gz') - - # Test when 'role2' does *not* allow backtracking. If 'foo/foo1.1.tar.gz' - # is not provided by the authoritative 'role2', - # updater.get_one_valid_targetinfo() should return a - # 'tuf.exceptions.UnknownTargetError' exception. - repository = repo_tool.load_repository(self.repository_directory) - - repository.targets.revoke('role3') - repository.targets.revoke('role4') - - # Ensure we delegate in trusted order (i.e., 'role2' has higher priority.) - repository.targets.delegate('role3', [self.role_keys['targets']['public']], - [foo_pattern], terminating=True, list_of_targets=[]) - - repository.targets.delegate('role4', [self.role_keys['targets']['public']], - [foo_pattern], list_of_targets=[foo_package]) - - repository.targets('role3').load_signing_key(self.role_keys['targets']['private']) - repository.targets('role4').load_signing_key(self.role_keys['targets']['private']) - repository.targets.load_signing_key(self.role_keys['targets']['private']) - repository.snapshot.load_signing_key(self.role_keys['snapshot']['private']) - repository.timestamp.load_signing_key(self.role_keys['timestamp']['private']) - repository.writeall() - - # Move the staged metadata to the "live" metadata. 
- shutil.rmtree(os.path.join(self.repository_directory, 'metadata'))
- shutil.copytree(os.path.join(self.repository_directory, 'metadata.staged'),
- os.path.join(self.repository_directory, 'metadata'))
-
- # Verify that 'tuf.exceptions.UnknownTargetError' is raised by
- # updater.get_one_valid_targetinfo().
- self.repository_updater.refresh()
- self.assertRaises(tuf.exceptions.UnknownTargetError,
- self.repository_updater.get_one_valid_targetinfo,
- 'foo/foo1.1.tar.gz')
-
- # Verify that a 'tuf.exceptions.FormatError' is raised for delegated paths
- # that contain a leading path separator.
- self.assertRaises(tuf.exceptions.FormatError,
- self.repository_updater.get_one_valid_targetinfo,
- '/foo/foo1.1.tar.gz')
-
- # Clean up the resources and flush the logged lines (if any).
- server_process_handler.clean()
-
-
-
-
- def test_6_download_target(self):
- # Create a temporary directory (destination directory of downloaded
- # targets) that will be passed as an argument to 'download_target()'.
- destination_directory = self.make_temp_directory()
- target_filepaths = \
- list(self.repository_updater.metadata['current']['targets']['targets'].keys())
-
- # Test: normal case.
- # Get the target info, which is an argument to 'download_target()'.
-
- # 'target_filepaths' is expected to have at least two targets. The first
- # target will be used to test against download_target(). The second
- # will be used to test against download_target() and a repository with
- # 'consistent_snapshot' set to True.
- target_filepath1 = target_filepaths.pop()
- targetinfo = self.repository_updater.get_one_valid_targetinfo(target_filepath1)
- self.repository_updater.download_target(targetinfo,
- destination_directory)
-
- download_filepath = \
- os.path.join(destination_directory, target_filepath1.lstrip('/'))
- self.assertTrue(os.path.exists(download_filepath))
- length, hashes = \
- securesystemslib.util.get_file_details(download_filepath,
- securesystemslib.settings.HASH_ALGORITHMS)
- download_targetfileinfo = tuf.formats.make_targets_fileinfo(length, hashes)
-
- # Add any 'custom' data from the repository's target fileinfo to the
- # 'download_targetfileinfo' object being tested.
- if 'custom' in targetinfo['fileinfo']:
- download_targetfileinfo['custom'] = targetinfo['fileinfo']['custom']
-
- self.assertEqual(targetinfo['fileinfo'], download_targetfileinfo)
-
- # Test when consistent snapshot is set. First, create a valid repository
- # with consistent snapshots enabled (root.json contains a
- # "consistent_snapshot" entry that the updater uses to correctly fetch
- # snapshots). The updater expects the existence of
- # '<version_number>.filename' metadata files if root.json sets
- # 'consistent_snapshot = True'.
-
- # The repository must be rewritten with 'consistent_snapshot' set.
- repository = repo_tool.load_repository(self.repository_directory)
-
- # Write metadata for all the top-level roles, since consistent snapshot
- # is now being set to true (i.e., the pre-generated repository isn't set
- # to support consistent snapshots). A new version of targets.json is
- # needed to ensure '<digest>.filename' target files are written to disk.
- repository.targets.load_signing_key(self.role_keys['targets']['private'])
- repository.root.load_signing_key(self.role_keys['root']['private'])
- repository.snapshot.load_signing_key(self.role_keys['snapshot']['private'])
- repository.timestamp.load_signing_key(self.role_keys['timestamp']['private'])
-
- repository.writeall(consistent_snapshot=True)
-
- # Move the staged metadata to the "live" metadata.
- shutil.rmtree(os.path.join(self.repository_directory, 'metadata'))
- shutil.copytree(os.path.join(self.repository_directory, 'metadata.staged'),
- os.path.join(self.repository_directory, 'metadata'))
-
- # And ensure the client has the latest top-level metadata.
- self.repository_updater.refresh()
-
- target_filepath2 = target_filepaths.pop()
- targetinfo2 = self.repository_updater.get_one_valid_targetinfo(target_filepath2)
- self.repository_updater.download_target(targetinfo2,
- destination_directory)
-
- # Check that the file has been successfully downloaded.
- download_filepath = os.path.join(destination_directory, target_filepath2)
- self.assertTrue(os.path.exists(download_filepath))
-
- # Remove the file so that it can be downloaded again in the next test.
- os.remove(download_filepath)
-
- # Test downloading with consistent snapshot enabled, but without adding
- # the hash of the file as a prefix to its name.
-
- file1_path = targetinfo2['filepath']
- file1_hashes = securesystemslib.util.get_file_hashes(
- os.path.join(self.repository_directory, 'targets', file1_path),
- hash_algorithms=['sha256', 'sha512'])
-
- # Currently, these three files exist in the repository's targets
- # directory: 'file1.txt', '<sha256_digest>.file1.txt', and
- # '<sha512_digest>.file1.txt', where the digests are of file1.txt.
- # Remove the files with the hash digest prefix to ensure that
- # the served target file is not prefixed.
- os.remove(os.path.join(self.repository_directory, 'targets',
- file1_hashes['sha256'] + '.' + file1_path))
- os.remove(os.path.join(self.repository_directory, 'targets',
- file1_hashes['sha512'] + '.' + file1_path))
-
-
- self.repository_updater.download_target(targetinfo2,
- destination_directory,
- prefix_filename_with_hash=False)
-
- # Check that the file has been successfully downloaded.
- self.assertTrue(os.path.exists(download_filepath))
-
- # Test for a destination that cannot be written to (apart from a target
- # file that already exists at the destination) and which raises an
- # exception.
- bad_destination_directory = 'bad' * 2000
-
- try:
- self.repository_updater.download_target(targetinfo, bad_destination_directory)
-
- except OSError as e:
- self.assertTrue(
- e.errno in [errno.ENAMETOOLONG, errno.ENOENT, errno.EINVAL],
- "wrong errno: " + str(e.errno))
-
- else:
- self.fail('No OSError raised')
-
-
- # Test: Invalid arguments.
- self.assertRaises(securesystemslib.exceptions.FormatError,
- self.repository_updater.download_target,
- 8, destination_directory)
-
- self.assertRaises(securesystemslib.exceptions.FormatError,
- self.repository_updater.download_target,
- targetinfo, 8)
-
- # Test:
- # Attempt a file download of a valid target; however, a download exception
- # occurs because the target is not within the mirror's confined target
- # directories. Adjust the mirrors dictionary so that the
- # 'confined_target_dirs' field contains at least one confined target and
- # excludes the needed target file.
- mirrors = self.repository_updater.mirrors
- for mirror_name, mirror_info in mirrors.items():
- mirrors[mirror_name]['confined_target_dirs'] = [self.random_path()]
-
- try:
- self.repository_updater.download_target(targetinfo,
- destination_directory)
-
- except tuf.exceptions.NoWorkingMirrorError as exception:
- # Ensure that no mirrors were found due to the mismatch in confined
- # target directories. get_list_of_mirrors() returns an empty list in
- # this case, which does not generate specific exception errors.
- self.assertEqual(len(exception.mirror_errors), 0) - - else: - self.fail( - 'Expected a NoWorkingMirrorError with zero mirror errors in it.') - - - - - - def test_7_updated_targets(self): - # Verify that the list of targets returned by updated_targets() contains - # all the files that need to be updated, these files include modified and - # new target files. Also, confirm that files that need not to be updated - # are absent from the list. - # Setup - - # Unlike some of the other tests, start up a fresh server here. - # The SimpleHTTPServer started in the setupclass has a tendency to - # timeout in Windows after a few tests. - - # Creates a subprocess running a server. - server_process_handler = utils.TestServerProcess(log=logger, - server=self.SIMPLE_SERVER_PATH) - - # 'path/to/tmp/repository' -> 'localhost:8001/tmp/repository'. - repository_basepath = self.repository_directory[len(os.getcwd()):] - url_prefix = 'http://' + utils.TEST_HOST_ADDRESS + ':' \ - + str(self.server_process_handler.port) + repository_basepath - - # Setting 'tuf.settings.repository_directory' with the temporary client - # directory copied from the original repository files. - tuf.settings.repositories_directory = self.client_directory - - self.repository_mirrors = {'mirror1': {'url_prefix': url_prefix, - 'metadata_path': 'metadata', 'targets_path': 'targets'}} - - # Creating a repository instance. The test cases will use this client - # updater to refresh metadata, fetch target files, etc. - self.repository_updater = updater.Updater(self.repository_name, - self.repository_mirrors) - - # Create temporary directory which will hold client's target files. - destination_directory = self.make_temp_directory() - - # Get the list of target files. It will be used as an argument to the - # 'updated_targets()' function. - with utils.ignore_deprecation_warnings('tuf.client.updater'): - all_targets = self.repository_updater.all_targets() - - # Test for duplicates and targets in the root directory of the repository. - additional_target = all_targets[0].copy() - all_targets.append(additional_target) - additional_target_in_root_directory = additional_target.copy() - additional_target_in_root_directory['filepath'] = 'file1.txt' - all_targets.append(additional_target_in_root_directory) - - # At this point client needs to update and download all targets. - # Test: normal cases. - updated_targets = \ - self.repository_updater.updated_targets(all_targets, destination_directory) - - with utils.ignore_deprecation_warnings('tuf.client.updater'): - all_targets = self.repository_updater.all_targets() - - # Assumed the pre-generated repository specifies two target files in - # 'targets.json' and one delegated target file in 'role1.json'. - self.assertEqual(len(updated_targets), 3) - - # Test: download one of the targets. - download_target = copy.deepcopy(updated_targets).pop() - self.repository_updater.download_target(download_target, - destination_directory) - - updated_targets = \ - self.repository_updater.updated_targets(all_targets, destination_directory) - - self.assertEqual(len(updated_targets), 2) - - # Test: download all the targets. - for download_target in all_targets: - self.repository_updater.download_target(download_target, - destination_directory) - updated_targets = \ - self.repository_updater.updated_targets(all_targets, destination_directory) - - self.assertEqual(len(updated_targets), 0) - - - # Test: Invalid arguments. 
- self.assertRaises(securesystemslib.exceptions.FormatError, - self.repository_updater.updated_targets, - 8, destination_directory) - - self.assertRaises(securesystemslib.exceptions.FormatError, - self.repository_updater.updated_targets, - all_targets, 8) - - # Modify one target file on the remote repository. - repository = repo_tool.load_repository(self.repository_directory) - - target1 = os.path.join(self.repository_directory, 'targets', 'file1.txt') - repository.targets.remove_target(os.path.basename(target1)) - - length, hashes = securesystemslib.util.get_file_details(target1) - - repository.targets.add_target(os.path.basename(target1)) - repository.targets.load_signing_key(self.role_keys['targets']['private']) - repository.snapshot.load_signing_key(self.role_keys['snapshot']['private']) - - with open(target1, 'ab') as file_object: - file_object.write(b'append extra text') - - length, hashes = securesystemslib.util.get_file_details(target1) - - repository.targets.add_target(os.path.basename(target1)) - repository.targets.load_signing_key(self.role_keys['targets']['private']) - repository.snapshot.load_signing_key(self.role_keys['snapshot']['private']) - repository.timestamp.load_signing_key(self.role_keys['timestamp']['private']) - repository.writeall() - - # Move the staged metadata to the "live" metadata. - shutil.rmtree(os.path.join(self.repository_directory, 'metadata')) - shutil.copytree(os.path.join(self.repository_directory, 'metadata.staged'), - os.path.join(self.repository_directory, 'metadata')) - - # Ensure the client has up-to-date metadata. - self.repository_updater.refresh() - - # Verify that the new target file is considered updated. - with utils.ignore_deprecation_warnings('tuf.client.updater'): - all_targets = self.repository_updater.all_targets() - updated_targets = \ - self.repository_updater.updated_targets(all_targets, destination_directory) - self.assertEqual(len(updated_targets), 1) - - # Cleans the resources and flush the logged lines (if any). - server_process_handler.clean() - - - - - def test_8_remove_obsolete_targets(self): - # Setup. - # Unlike some of the other tests, start up a fresh server here. - # The SimpleHTTPServer started in the setupclass has a tendency to - # timeout in Windows after a few tests. - - # Creates a subprocess running a server. - server_process_handler = utils.TestServerProcess(log=logger, - server=self.SIMPLE_SERVER_PATH) - - # 'path/to/tmp/repository' -> 'localhost:8001/tmp/repository'. - repository_basepath = self.repository_directory[len(os.getcwd()):] - url_prefix = 'http://' + utils.TEST_HOST_ADDRESS + ':' \ - + str(self.server_process_handler.port) + repository_basepath - - # Setting 'tuf.settings.repository_directory' with the temporary client - # directory copied from the original repository files. - tuf.settings.repositories_directory = self.client_directory - - self.repository_mirrors = {'mirror1': {'url_prefix': url_prefix, - 'metadata_path': 'metadata', 'targets_path': 'targets'}} - - # Creating a repository instance. The test cases will use this client - # updater to refresh metadata, fetch target files, etc. - self.repository_updater = updater.Updater(self.repository_name, - self.repository_mirrors) - - # Create temporary directory that will hold the client's target files. - destination_directory = self.make_temp_directory() - - # Populate 'destination_direction' with all target files. 
- with utils.ignore_deprecation_warnings('tuf.client.updater'):
- all_targets = self.repository_updater.all_targets()
-
- self.assertEqual(len(os.listdir(destination_directory)), 0)
-
- for target in all_targets:
- self.repository_updater.download_target(target, destination_directory)
-
- self.assertEqual(len(os.listdir(destination_directory)), 3)
-
- # Remove a target file from the server's repository.
- repository = repo_tool.load_repository(self.repository_directory)
- target1 = os.path.join(self.repository_directory, 'targets', 'file1.txt')
- repository.targets.remove_target(os.path.basename(target1))
-
- repository.targets.load_signing_key(self.role_keys['targets']['private'])
- repository.snapshot.load_signing_key(self.role_keys['snapshot']['private'])
- repository.timestamp.load_signing_key(self.role_keys['timestamp']['private'])
- repository.writeall()
-
- # Move the staged metadata to the "live" metadata.
- shutil.rmtree(os.path.join(self.repository_directory, 'metadata'))
- shutil.copytree(os.path.join(self.repository_directory, 'metadata.staged'),
- os.path.join(self.repository_directory, 'metadata'))
-
- # Update the client's metadata.
- self.repository_updater.refresh()
-
- # Test: normal case.
- # Verify the number of target files in 'destination_directory' (all 3
- # previously downloaded files should still be present), then call
- # 'remove_obsolete_targets()' and verify that the now-obsolete
- # 'file1.txt' is removed.
- with utils.ignore_deprecation_warnings('tuf.client.updater'):
- all_targets = self.repository_updater.all_targets()
-
- updated_targets = \
- self.repository_updater.updated_targets(all_targets,
- destination_directory)
-
- for updated_target in updated_targets:
- self.repository_updater.download_target(updated_target,
- destination_directory)
-
- self.assertEqual(len(os.listdir(destination_directory)), 3)
- self.repository_updater.remove_obsolete_targets(destination_directory)
- self.assertEqual(len(os.listdir(destination_directory)), 2)
-
- # Verify that, if there are no obsolete files, the number of files
- # in 'destination_directory' remains the same.
- self.repository_updater.remove_obsolete_targets(destination_directory)
- self.assertEqual(len(os.listdir(destination_directory)), 2)
-
- # Test coverage for a destination path that causes an exception not due
- # to an already removed target.
- bad_destination_directory = 'bad' * 2000
- self.repository_updater.remove_obsolete_targets(bad_destination_directory)
-
- # Test coverage for a target that is not specified in the current metadata.
- del self.repository_updater.metadata['current']['targets']['targets']['file2.txt']
- self.repository_updater.remove_obsolete_targets(destination_directory)
-
- # Test coverage for a role that doesn't exist in the previously trusted set
- # of metadata.
- del self.repository_updater.metadata['previous']['targets']
- self.repository_updater.remove_obsolete_targets(destination_directory)
-
- # Clean up the resources and flush the logged lines (if any).
- server_process_handler.clean()
-
-
-
- def test_9__get_target_hash(self):
- # Test: normal case.
- # Test target filepaths with ascii and non-ascii characters.
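- # (Per the TUF specification, the "target hash" used to match a
- # delegation's 'path_hash_prefixes' is the hex sha256 digest of the
- # target's relative path string, not of the file's content.)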
- expected_target_hashes = { - '/file1.txt': 'e3a3d89eb3b70ce3fbce6017d7b8c12d4abd5635427a0e8a238f53157df85b3d', - '/Jalape\xc3\xb1o': '78bfd5c314680545eb48ecad508aceb861f8d6e680f4fe1b791da45c298cda88' - } - for filepath, target_hash in expected_target_hashes.items(): - self.assertTrue(tuf.formats.RELPATH_SCHEMA.matches(filepath)) - self.assertTrue(securesystemslib.formats.HASH_SCHEMA.matches(target_hash)) - self.assertEqual(self.repository_updater._get_target_hash(filepath), target_hash) - - # Test for improperly formatted argument. - #self.assertRaises(securesystemslib.exceptions.FormatError, self.repository_updater._get_target_hash, 8) - - - - - def test_10__check_file_length(self): - # Test for exception if file object is not equal to trusted file length. - with tempfile.TemporaryFile() as temp_file_object: - temp_file_object.write(b'X') - temp_file_object.seek(0) - self.assertRaises(tuf.exceptions.DownloadLengthMismatchError, - self.repository_updater._check_file_length, - temp_file_object, 10) - - - - - - def test_10__targets_of_role(self): - # Test for non-existent role. - self.assertRaises(tuf.exceptions.UnknownRoleError, - self.repository_updater._targets_of_role, - 'non-existent-role') - - # Test for role that hasn't been loaded yet. - del self.repository_updater.metadata['current']['targets'] - self.assertEqual(len(self.repository_updater._targets_of_role('targets', - skip_refresh=True)), 0) - - # 'targets.json' tracks two targets. - self.assertEqual(len(self.repository_updater._targets_of_role('targets')), - 2) - - - - def test_10__preorder_depth_first_walk(self): - - # Test that infinite loop is prevented if the target file is not found and - # the max number of delegations is reached. - valid_max_number_of_delegations = tuf.settings.MAX_NUMBER_OF_DELEGATIONS - tuf.settings.MAX_NUMBER_OF_DELEGATIONS = 0 - self.assertEqual(None, self.repository_updater._preorder_depth_first_walk('unknown.txt')) - - # Reset the setting for max number of delegations so that subsequent unit - # tests reference the expected setting. - tuf.settings.MAX_NUMBER_OF_DELEGATIONS = valid_max_number_of_delegations - - # Attempt to create a circular delegation, where role1 performs a - # delegation to the top-level Targets role. The updater should ignore the - # delegation and not raise an exception. 
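- # (The pre-order depth-first walk keeps track of visited role names, so
- # revisiting 'targets' via role1's crafted delegation should be skipped
- # instead of recursing forever; MAX_NUMBER_OF_DELEGATIONS, exercised
- # above, is a second backstop.)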
- targets_path = os.path.join(self.client_metadata_current, 'targets.json') - targets_metadata = securesystemslib.util.load_json_file(targets_path) - targets_metadata['signed']['delegations']['roles'][0]['paths'] = ['/file8.txt'] - with open(targets_path, 'wb') as file_object: - file_object.write(repo_lib._get_written_metadata(targets_metadata)) - - role1_path = os.path.join(self.client_metadata_current, 'role1.json') - role1_metadata = securesystemslib.util.load_json_file(role1_path) - role1_metadata['signed']['delegations']['roles'][0]['name'] = 'targets' - role1_metadata['signed']['delegations']['roles'][0]['paths'] = ['/file8.txt'] - with open(role1_path, 'wb') as file_object: - file_object.write(repo_lib._get_written_metadata(role1_metadata)) - - role2_path = os.path.join(self.client_metadata_current, 'role2.json') - role2_metadata = securesystemslib.util.load_json_file(role2_path) - role2_metadata['signed']['delegations']['roles'] = role1_metadata['signed']['delegations']['roles'] - role2_metadata['signed']['delegations']['roles'][0]['paths'] = ['/file8.txt'] - with open(role2_path, 'wb') as file_object: - file_object.write(repo_lib._get_written_metadata(role2_metadata)) - - logger.debug('attempting circular delegation') - self.assertEqual(None, self.repository_updater._preorder_depth_first_walk('/file8.txt')) - - - - - - - def test_10__visit_child_role(self): - # Call _visit_child_role and test the dict keys: 'paths', - # 'path_hash_prefixes', and if both are missing. - - targets_role = self.repository_updater.metadata['current']['targets'] - targets_role['delegations']['roles'][0]['paths'] = ['/*.txt', '/target.exe'] - child_role = targets_role['delegations']['roles'][0] - - role1_path = os.path.join(self.client_metadata_current, 'role1.json') - role1_metadata = securesystemslib.util.load_json_file(role1_path) - role1_metadata['signed']['delegations']['roles'][0]['paths'] = ['/*.exe'] - with open(role1_path, 'wb') as file_object: - file_object.write(repo_lib._get_written_metadata(role1_metadata)) - - self.assertEqual(self.repository_updater._visit_child_role(child_role, - '/target.exe'), child_role['name']) - - # Test for a valid path hash prefix... - child_role['path_hash_prefixes'] = ['8baf'] - self.assertEqual(self.repository_updater._visit_child_role(child_role, - '/file3.txt'), child_role['name']) - - # ... and an invalid one, as well. - child_role['path_hash_prefixes'] = ['badd'] - self.assertEqual(self.repository_updater._visit_child_role(child_role, - '/file3.txt'), None) - - # Test for a forbidden target. - del child_role['path_hash_prefixes'] - self.repository_updater._visit_child_role(child_role, '/forbidden.tgz') - - # Verify that unequal path_hash_prefixes are skipped. - child_role['path_hash_prefixes'] = ['bad', 'bad'] - self.assertEqual(None, self.repository_updater._visit_child_role(child_role, - '/unknown.exe')) - - # Test if both 'path' and 'path_hash_prefixes' are missing. - del child_role['paths'] - del child_role['path_hash_prefixes'] - self.assertRaises(securesystemslib.exceptions.FormatError, self.repository_updater._visit_child_role, - child_role, child_role['name']) - - - - def test_11__verify_metadata_file(self): - # Test for invalid metadata content. 
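- # (A single junk byte is not valid JSON, so _verify_metadata_file() should
- # fail to deserialize it and report the failure as
- # InvalidMetadataJSONError.)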
-    with tempfile.TemporaryFile() as metadata_file_object:
-      metadata_file_object.write(b'X')
-      metadata_file_object.seek(0)
-
-      self.assertRaises(tuf.exceptions.InvalidMetadataJSONError,
-          self.repository_updater._verify_metadata_file,
-          metadata_file_object, 'root')
-
-
-  def test_13__targets_of_role(self):
-    # Test the case where a list of targets is given.  By default, the
-    # 'targets' parameter is None.
-    targets = [{'filepath': 'file1.txt', 'fileinfo': {'length': 1, 'hashes': {'sha256': 'abc'}}}]
-    self.repository_updater._targets_of_role('targets',
-        targets=targets, skip_refresh=False)
-
-
-
-
-class TestMultiRepoUpdater(unittest_toolbox.Modified_TestCase):
-
-  def setUp(self):
-    # Modified_TestCase can handle temp dir removal.
-    unittest_toolbox.Modified_TestCase.setUp(self)
-    self.temporary_directory = self.make_temp_directory(directory=os.getcwd())
-
-    # Copy the original repository files provided in the test folder so that
-    # any modifications made to repository files are restricted to the copies.
-    # The 'repository_data' directory is expected to exist in 'tuf/tests/'.
-    original_repository_files = os.path.join(os.getcwd(), 'repository_data')
-
-    self.temporary_repository_root = tempfile.mkdtemp(dir=self.temporary_directory)
-
-    # Needed because in some tests simple_server.py cannot be found, since the
-    # current working directory is changed when executing a subprocess.
-    self.SIMPLE_SERVER_PATH = os.path.join(os.getcwd(), 'simple_server.py')
-
-    # The original repository, keystore, and client directories will be copied
-    # for each test case.
-    original_repository = os.path.join(original_repository_files, 'repository')
-    original_client = os.path.join(original_repository_files, 'client', 'test_repository1')
-    original_keystore = os.path.join(original_repository_files, 'keystore')
-    original_map_file = os.path.join(original_repository_files, 'map.json')
-
-    # Save references to the often-needed client repository directories.
-    # Test cases need these references to access metadata and target files.
-    self.repository_directory = os.path.join(self.temporary_repository_root,
-        'repository_server1')
-    self.repository_directory2 = os.path.join(self.temporary_repository_root,
-        'repository_server2')
-
-    # Setting 'tuf.settings.repositories_directory' with the temporary client
-    # directory copied from the original repository files.
-    tuf.settings.repositories_directory = self.temporary_repository_root
-
-    repository_name = 'test_repository1'
-    repository_name2 = 'test_repository2'
-
-    self.client_directory = os.path.join(self.temporary_repository_root,
-        repository_name)
-    self.client_directory2 = os.path.join(self.temporary_repository_root,
-        repository_name2)
-
-    self.keystore_directory = os.path.join(self.temporary_repository_root,
-        'keystore')
-    self.map_file = os.path.join(self.client_directory, 'map.json')
-    self.map_file2 = os.path.join(self.client_directory2, 'map.json')
-
-    # Copy the original 'repository', 'client', and 'keystore' directories
-    # to the temporary repository the test cases can use.
-    shutil.copytree(original_repository, self.repository_directory)
-    shutil.copytree(original_repository, self.repository_directory2)
-    shutil.copytree(original_client, self.client_directory)
-    shutil.copytree(original_client, self.client_directory2)
-    shutil.copyfile(original_map_file, self.map_file)
-    shutil.copyfile(original_map_file, self.map_file2)
-    shutil.copytree(original_keystore, self.keystore_directory)
-
-    # Launch a SimpleHTTPServer (serves files in the current directory).
-    # Test cases will request metadata and target files that have been
-    # pre-generated in 'tuf/tests/repository_data', which will be served by the
-    # SimpleHTTPServer launched here.  The test cases of this unit test assume
-    # the pre-generated metadata files have a specific structure, such
-    # as a delegated role 'targets/role1', three target files, five key files,
-    # etc.
-
-    # Create a subprocess running a server.
-    self.server_process_handler = utils.TestServerProcess(log=logger,
-        server=self.SIMPLE_SERVER_PATH, popen_cwd=self.repository_directory)
-
-    logger.debug('Server process started.')
-
-    # Create a second subprocess running a server.
-    self.server_process_handler2 = utils.TestServerProcess(log=logger,
-        server=self.SIMPLE_SERVER_PATH, popen_cwd=self.repository_directory2)
-
-    logger.debug('Server process 2 started.')
-
-    url_prefix = \
-        'http://' + utils.TEST_HOST_ADDRESS + ':' + \
-        str(self.server_process_handler.port)
-    url_prefix2 = \
-        'http://' + utils.TEST_HOST_ADDRESS + ':' + \
-        str(self.server_process_handler2.port)
-
-    # map.json contains all of the necessary information for the two
-    # repository mirrors except the URL prefixes.  The server subprocesses
-    # request a free port from the OS and send it back to the parent process,
-    # so the ports are only known at runtime.  The URL prefixes are therefore
-    # added to map.json dynamically here.
-    self.map_file_path = os.path.join(self.client_directory, 'map.json')
-    data = securesystemslib.util.load_json_file(self.map_file_path)
-
-    data['repositories']['test_repository1'] = [url_prefix]
-    data['repositories']['test_repository2'] = [url_prefix2]
-    with open(self.map_file_path, 'w') as f:
-      json.dump(data, f)
-
-    self.repository_mirrors = {'mirror1': {'url_prefix': url_prefix,
-        'metadata_path': 'metadata', 'targets_path': 'targets'}}
-
-    self.repository_mirrors2 = {'mirror1': {'url_prefix': url_prefix2,
-        'metadata_path': 'metadata', 'targets_path': 'targets'}}
-
-    # Create the repository instances.  The test cases will use these client
-    # updaters to refresh metadata, fetch target files, etc.
-    self.repository_updater = updater.Updater(repository_name,
-        self.repository_mirrors)
-    self.repository_updater2 = updater.Updater(repository_name2,
-        self.repository_mirrors2)
-
-    # Create the multi-repo updater.  The test cases will use this client
-    # updater to refresh metadata, fetch target files, etc.
-    self.multi_repo_updater = updater.MultiRepoUpdater(self.map_file)
-
-    # Metadata role keys are needed by the test cases to make changes to the
-    # repository (e.g., adding a new target file to 'targets.json' and then
-    # requesting a refresh()).
-    self.role_keys = _load_role_keys(self.keystore_directory)
-
-
-
-  def tearDown(self):
-
-    # Clean up resources and flush the logged lines (if any).
-    self.server_process_handler.clean()
-    self.server_process_handler2.clean()
-
-    # updater.Updater() populates the roledb with the name "test_repository1".
-    tuf.roledb.clear_roledb(clear_all=True)
-    tuf.keydb.clear_keydb(clear_all=True)
-
-    # Remove the top-level temporary directory.
-    unittest_toolbox.Modified_TestCase.tearDown(self)
-
-
-
-  # UNIT TESTS.
-  def test__init__(self):
-    # The client's repository requires a metadata directory (with the
-    # 'current' and 'previous' sub-directories) and at least the 'root.json'
-    # file.  setUp(), called before each test case, instantiates the required
-    # updater objects and keys.  The needed objects/data are available via
-    # 'self.repository_updater', 'self.client_directory', etc.
-
-    # Test: Invalid arguments.
-    # Invalid 'updater_name' argument.  String expected.
-    self.assertRaises(securesystemslib.exceptions.FormatError,
-        updater.MultiRepoUpdater, 8)
-
-    # Restore 'tuf.settings.repositories_directory' to the original client
-    # directory.
-    tuf.settings.repositories_directory = self.client_directory
-
-    # Test for a non-existent map file.
-    self.assertRaises(tuf.exceptions.Error, updater.MultiRepoUpdater,
-        'non-existent.json')
-
-    # Test for a map file that doesn't contain the required fields.
-    root_filepath = os.path.join(
-        self.repository_directory, 'metadata', 'root.json')
-    self.assertRaises(securesystemslib.exceptions.FormatError,
-        updater.MultiRepoUpdater, root_filepath)
-
-    # Test for a valid instantiation.
-    multi_repo_updater = updater.MultiRepoUpdater(self.map_file_path)
-
-
-
-  def test__target_matches_path_pattern(self):
-    multi_repo_updater = updater.MultiRepoUpdater(self.map_file_path)
-    paths = ['foo*.tgz', 'bar*.tgz', 'file1.txt']
-    self.assertTrue(
-        multi_repo_updater._target_matches_path_pattern('bar-1.0.tgz', paths))
-    self.assertTrue(
-        multi_repo_updater._target_matches_path_pattern('file1.txt', paths))
-    self.assertFalse(
-        multi_repo_updater._target_matches_path_pattern('baz-1.0.tgz', paths))
-
-
-
-  def test_get_valid_targetinfo(self):
-    multi_repo_updater = updater.MultiRepoUpdater(self.map_file_path)
-
-    # Verify that the multi-repo updater refuses to return targetinfo if
-    # required local repositories are missing.
-    repo_dir = os.path.join(tuf.settings.repositories_directory,
-        'test_repository1')
-    backup_repo_dir = os.path.join(tuf.settings.repositories_directory,
-        'test_repository1.backup')
-    shutil.move(repo_dir, backup_repo_dir)
-    self.assertRaises(tuf.exceptions.Error,
-        multi_repo_updater.get_valid_targetinfo, 'file3.txt')
-
-    # Restore the client's repository directory.
-    shutil.move(backup_repo_dir, repo_dir)
-
-    # Verify that the Root file must exist.
-    root_filepath = os.path.join(repo_dir, 'metadata', 'current', 'root.json')
-    backup_root_filepath = root_filepath + '.backup'
-    shutil.move(root_filepath, backup_root_filepath)
-    self.assertRaises(tuf.exceptions.Error,
-        multi_repo_updater.get_valid_targetinfo, 'file3.txt')
-
-    # Restore the Root file.
-    shutil.move(backup_root_filepath, root_filepath)
-
-    # Test that the first mapping is skipped if it's irrelevant to the target
-    # file.
-    self.assertRaises(tuf.exceptions.UnknownTargetError,
-        multi_repo_updater.get_valid_targetinfo, 'non-existent.txt')
-
-    # Verify that a targetinfo is not returned for a non-existent target.
-    multi_repo_updater.map_file['mapping'][1]['terminating'] = False
-    self.assertRaises(tuf.exceptions.UnknownTargetError,
-        multi_repo_updater.get_valid_targetinfo, 'non-existent.txt')
-    multi_repo_updater.map_file['mapping'][1]['terminating'] = True
-
-    # Test for a mapping that sets terminating = True, and that appears before
-    # the final mapping.
-    multi_repo_updater.map_file['mapping'][0]['terminating'] = True
-    self.assertRaises(tuf.exceptions.UnknownTargetError,
-        multi_repo_updater.get_valid_targetinfo, 'bad3.txt')
-    multi_repo_updater.map_file['mapping'][0]['terminating'] = False
-
-    # Test for the case where multiple repos sign for the same target.
-    valid_targetinfo = multi_repo_updater.get_valid_targetinfo('file1.txt')
-
-    multi_repo_updater.map_file['mapping'][0]['threshold'] = 2
-    valid_targetinfo = multi_repo_updater.get_valid_targetinfo('file1.txt')
-
-    # Verify that valid targetinfo is matched for two repositories that
-    # provide different custom fields.  Make sure to set the
-    # 'match_custom_field' argument to 'False' when calling
-    # get_valid_targetinfo().
-    repository = repo_tool.load_repository(self.repository_directory2)
-
-    target1 = os.path.join(self.repository_directory2, 'targets', 'file1.txt')
-    repository.targets.remove_target(os.path.basename(target1))
-
-    custom_field = {"custom": "my_custom_data"}
-    repository.targets.add_target(os.path.basename(target1), custom_field)
-    repository.targets.load_signing_key(self.role_keys['targets']['private'])
-    repository.snapshot.load_signing_key(self.role_keys['snapshot']['private'])
-    repository.timestamp.load_signing_key(self.role_keys['timestamp']['private'])
-    repository.writeall()
-
-    # Move the staged metadata to the "live" metadata.
-    shutil.rmtree(os.path.join(self.repository_directory2, 'metadata'))
-    shutil.copytree(os.path.join(self.repository_directory2, 'metadata.staged'),
-        os.path.join(self.repository_directory2, 'metadata'))
-
-    # Do we get the expected match for the two targetinfo that differ only in
-    # the custom field?
-    valid_targetinfo = multi_repo_updater.get_valid_targetinfo(
-        'file1.txt', match_custom_field=False)
-
-    # Verify the case where two repositories provide different targetinfo.
-    # Modify file1.txt so that different lengths and hashes are reported by
-    # the two repositories.
-    repository = repo_tool.load_repository(self.repository_directory2)
-    target1 = os.path.join(self.repository_directory2, 'targets', 'file1.txt')
-    with open(target1, 'ab') as file_object:
-      file_object.write(b'append extra text')
-
-    repository.targets.remove_target(os.path.basename(target1))
-
-    repository.targets.add_target(os.path.basename(target1))
-    repository.targets.load_signing_key(self.role_keys['targets']['private'])
-    repository.snapshot.load_signing_key(self.role_keys['snapshot']['private'])
-    repository.timestamp.load_signing_key(self.role_keys['timestamp']['private'])
-    repository.writeall()
-
-    # Move the staged metadata to the "live" metadata.
-    shutil.rmtree(os.path.join(self.repository_directory2, 'metadata'))
-    shutil.copytree(os.path.join(self.repository_directory2, 'metadata.staged'),
-        os.path.join(self.repository_directory2, 'metadata'))
-
-    # Raise the threshold to 2 (it is assumed to be 1, by default) and verify
-    # that get_valid_targetinfo() raises an UnknownTargetError despite both
-    # repos signing for file1.txt.
- multi_repo_updater.map_file['mapping'][0]['threshold'] = 2 - self.assertRaises(tuf.exceptions.UnknownTargetError, - multi_repo_updater.get_valid_targetinfo, 'file1.txt') - - - - - - def test_get_updater(self): - multi_repo_updater = updater.MultiRepoUpdater(self.map_file_path) - - # Test for a non-existent repository name. - self.assertEqual(None, multi_repo_updater.get_updater('bad_repo_name')) - - # Test get_updater indirectly via the "private" _update_from_repository(). - self.assertRaises(tuf.exceptions.Error, multi_repo_updater._update_from_repository, 'bad_repo_name', 'file3.txt') - - # Test for a repository that doesn't exist. - multi_repo_updater.map_file['repositories']['bad_repo_name'] = ['https://bogus:30002'] - self.assertEqual(None, multi_repo_updater.get_updater('bad_repo_name')) - - -class TestUpdaterRolenames(unittest_toolbox.Modified_TestCase): - def setUp(self): - unittest_toolbox.Modified_TestCase.setUp(self) - - repo_dir = os.path.join(os.getcwd(), 'repository_data', 'fishy_rolenames') - - self.client_dir = self.make_temp_directory() - os.makedirs(os.path.join(self.client_dir, "fishy_rolenames", "metadata", "current")) - os.makedirs(os.path.join(self.client_dir, "fishy_rolenames", "metadata", "previous")) - shutil.copy( - os.path.join(repo_dir, 'metadata', '1.root.json'), - os.path.join(self.client_dir, "fishy_rolenames", "metadata", "current", "root.json") - ) - - simple_server_path = os.path.join(os.getcwd(), 'simple_server.py') - self.server_process_handler = utils.TestServerProcess(log=logger, - server=simple_server_path) - - url_prefix = 'http://' + utils.TEST_HOST_ADDRESS + ':' \ - + str(self.server_process_handler.port) + "/repository_data/fishy_rolenames" - - tuf.settings.repositories_directory = self.client_dir - mirrors = {'mirror1': { - 'url_prefix': url_prefix, - 'metadata_path': 'metadata/', - 'targets_path': '' - }} - self.updater = updater.Updater("fishy_rolenames", mirrors) - - def tearDown(self): - tuf.roledb.clear_roledb(clear_all=True) - tuf.keydb.clear_keydb(clear_all=True) - self.server_process_handler.flush_log() - self.server_process_handler.clean() - unittest_toolbox.Modified_TestCase.tearDown(self) - - def test_unusual_rolenames(self): - """Test rolenames that may be tricky to handle as filenames - - The test data in repository_data/fishy_rolenames has been produced - semi-manually using RepositorySimulator: using the RepositorySimulator - in these tests directly (like test_updater_with_simulator.py does for - ngclient) might make more sense... but would require some integration work - """ - - # Make a target search that fetches the delegated targets - self.updater.refresh() - with self.assertRaises(tuf.exceptions.UnknownTargetError): - self.updater.get_one_valid_targetinfo("anything") - - # Assert that the metadata files are in the client metadata directory - metadata_dir = os.path.join( - self.client_dir, "fishy_rolenames", "metadata", "current" - ) - local_metadata = os.listdir(metadata_dir) - for fname in ['%C3%B6.json', '..%2Fa.json', '..json']: - self.assertTrue(fname in local_metadata) - - -def _load_role_keys(keystore_directory): - - # Populating 'self.role_keys' by importing the required public and private - # keys of 'tuf/tests/repository_data/'. The role keys are needed when - # modifying the remote repository used by the test cases in this unit test. - - # The pre-generated key files in 'repository_data/keystore' are all encrypted with - # a 'password' passphrase. 
-  EXPECTED_KEYFILE_PASSWORD = 'password'
-
-  # Store and return the cryptography keys of the top-level roles, including 1
-  # delegated role.
-  role_keys = {}
-
-  root_key_file = os.path.join(keystore_directory, 'root_key')
-  targets_key_file = os.path.join(keystore_directory, 'targets_key')
-  snapshot_key_file = os.path.join(keystore_directory, 'snapshot_key')
-  timestamp_key_file = os.path.join(keystore_directory, 'timestamp_key')
-  delegation_key_file = os.path.join(keystore_directory, 'delegation_key')
-
-  role_keys = {'root': {}, 'targets': {}, 'snapshot': {}, 'timestamp': {},
-               'role1': {}}
-
-  # Import the top-level and delegated role public keys.
-  role_keys['root']['public'] = \
-    repo_tool.import_rsa_publickey_from_file(root_key_file+'.pub')
-  role_keys['targets']['public'] = \
-    repo_tool.import_ed25519_publickey_from_file(targets_key_file+'.pub')
-  role_keys['snapshot']['public'] = \
-    repo_tool.import_ed25519_publickey_from_file(snapshot_key_file+'.pub')
-  role_keys['timestamp']['public'] = \
-    repo_tool.import_ed25519_publickey_from_file(timestamp_key_file+'.pub')
-  role_keys['role1']['public'] = \
-    repo_tool.import_ed25519_publickey_from_file(delegation_key_file+'.pub')
-
-  # Import the private keys of the top-level and delegated roles.
-  role_keys['root']['private'] = \
-    repo_tool.import_rsa_privatekey_from_file(root_key_file,
-        EXPECTED_KEYFILE_PASSWORD)
-  role_keys['targets']['private'] = \
-    repo_tool.import_ed25519_privatekey_from_file(targets_key_file,
-        EXPECTED_KEYFILE_PASSWORD)
-  role_keys['snapshot']['private'] = \
-    repo_tool.import_ed25519_privatekey_from_file(snapshot_key_file,
-        EXPECTED_KEYFILE_PASSWORD)
-  role_keys['timestamp']['private'] = \
-    repo_tool.import_ed25519_privatekey_from_file(timestamp_key_file,
-        EXPECTED_KEYFILE_PASSWORD)
-  role_keys['role1']['private'] = \
-    repo_tool.import_ed25519_privatekey_from_file(delegation_key_file,
-        EXPECTED_KEYFILE_PASSWORD)
-
-  return role_keys
-
-
-if __name__ == '__main__':
-  utils.configure_test_logging(sys.argv)
-  unittest.main()
diff --git a/tests/test_updater_root_rotation_integration_old.py b/tests/test_updater_root_rotation_integration_old.py
deleted file mode 100755
index b8f93043ba..0000000000
--- a/tests/test_updater_root_rotation_integration_old.py
+++ /dev/null
@@ -1,685 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2016 - 2017, New York University and the TUF contributors
-# SPDX-License-Identifier: MIT OR Apache-2.0
-
-"""
-<Program Name>
-  test_updater_root_rotation_integration_old.py
-
-<Author>
-  Evan Cordell.
-
-<Started>
-  August 8, 2016.
-
-<Copyright>
-  See LICENSE-MIT OR LICENSE for licensing information.
-
-<Purpose>
-  'test_updater_root_rotation.py' provides a collection of methods that test
-  root key rotation in the example client.
-
-  Test cases here should follow a specific order (i.e., independent methods
-  are tested before dependent methods).  More accurately, least dependent
-  methods are tested before most dependent methods.  There is no reason to
-  rewrite or construct other methods that replicate already-tested methods
-  solely for testing purposes.  This is possible because the
-  'unittest.TestCase' class guarantees the order of unit tests.  The
-  'test_something_A' method would be tested before 'test_something_B'.  To
-  ensure the expected order of tests, a number is placed after 'test' and
-  before the method's name like so: 'test_1_check_directory'.  The number is
-  a measure of dependence, where 1 is less dependent than 2.
-""" - -import os -import shutil -import tempfile -import logging -import unittest -import filecmp -import sys - -import tuf -import tuf.log -import tuf.keydb -import tuf.roledb -import tuf.exceptions -import tuf.repository_tool as repo_tool -import tuf.unittest_toolbox as unittest_toolbox -import tuf.client.updater as updater -import tuf.settings - -from tests import utils - -import securesystemslib - -logger = logging.getLogger(__name__) -repo_tool.disable_console_log_messages() - - -class TestUpdater(unittest_toolbox.Modified_TestCase): - - @classmethod - def setUpClass(cls): - # Create a temporary directory to store the repository, metadata, and target - # files. 'temporary_directory' must be deleted in TearDownModule() so that - # temporary files are always removed, even when exceptions occur. - cls.temporary_directory = tempfile.mkdtemp(dir=os.getcwd()) - - # Launch a SimpleHTTPServer (serves files in the current directory). Test - # cases will request metadata and target files that have been pre-generated - # in 'tuf/tests/repository_data', which will be served by the - # SimpleHTTPServer launched here. The test cases of - # 'test_updater_root_rotation_integration_old.py' assume the - # pre-generated metadata files have a specific structure, such - # as a delegated role 'targets/role1', three target files, five key files, - # etc. - cls.server_process_handler = utils.TestServerProcess(log=logger) - - - - - @classmethod - def tearDownClass(cls): - # Cleans the resources and flush the logged lines (if any). - cls.server_process_handler.clean() - - # Remove the temporary repository directory, which should contain all the - # metadata, targets, and key files generated for the test cases. - shutil.rmtree(cls.temporary_directory) - - - - - def setUp(self): - # We are inheriting from custom class. - unittest_toolbox.Modified_TestCase.setUp(self) - - self.repository_name = 'test_repository1' - - # Copy the original repository files provided in the test folder so that - # any modifications made to repository files are restricted to the copies. - # The 'repository_data' directory is expected to exist in 'tuf.tests/'. - original_repository_files = os.path.join(os.getcwd(), 'repository_data') - temporary_repository_root = \ - self.make_temp_directory(directory=self.temporary_directory) - - # The original repository, keystore, and client directories will be copied - # for each test case. - original_repository = os.path.join(original_repository_files, 'repository') - original_keystore = os.path.join(original_repository_files, 'keystore') - original_client = os.path.join(original_repository_files, 'client') - - # Save references to the often-needed client repository directories. - # Test cases need these references to access metadata and target files. - self.repository_directory = \ - os.path.join(temporary_repository_root, 'repository') - self.keystore_directory = \ - os.path.join(temporary_repository_root, 'keystore') - self.client_directory = os.path.join(temporary_repository_root, 'client') - self.client_metadata = os.path.join(self.client_directory, - self.repository_name, 'metadata') - self.client_metadata_current = os.path.join(self.client_metadata, 'current') - self.client_metadata_previous = os.path.join(self.client_metadata, 'previous') - - # Copy the original 'repository', 'client', and 'keystore' directories - # to the temporary repository the test cases can use. 
-    shutil.copytree(original_repository, self.repository_directory)
-    shutil.copytree(original_client, self.client_directory)
-    shutil.copytree(original_keystore, self.keystore_directory)
-
-    # 'path/to/tmp/repository' -> 'localhost:8001/tmp/repository'.
-    repository_basepath = self.repository_directory[len(os.getcwd()):]
-    url_prefix = 'http://' + utils.TEST_HOST_ADDRESS + ':' \
-        + str(self.server_process_handler.port) + repository_basepath
-
-    # Setting 'tuf.settings.repositories_directory' with the temporary client
-    # directory copied from the original repository files.
-    tuf.settings.repositories_directory = self.client_directory
-
-    self.repository_mirrors = {'mirror1': {'url_prefix': url_prefix,
-                                           'metadata_path': 'metadata',
-                                           'targets_path': 'targets'}}
-
-    # Creating a repository instance.  The test cases will use this client
-    # updater to refresh metadata, fetch target files, etc.
-    self.repository_updater = updater.Updater(self.repository_name,
-        self.repository_mirrors)
-
-    # Metadata role keys are needed by the test cases to make changes to the
-    # repository (e.g., adding a new target file to 'targets.json' and then
-    # requesting a refresh()).
-    self.role_keys = _load_role_keys(self.keystore_directory)
-
-
-
-  def tearDown(self):
-    tuf.roledb.clear_roledb(clear_all=True)
-    tuf.keydb.clear_keydb(clear_all=True)
-
-    # Log stdout and stderr from the server subprocess.
-    self.server_process_handler.flush_log()
-
-    # Remove the temporary directory.
-    unittest_toolbox.Modified_TestCase.tearDown(self)
-
-
-  # UNIT TESTS.
-  def test_root_rotation(self):
-    repository = repo_tool.load_repository(self.repository_directory)
-    repository.root.threshold = 2
-
-    repository.snapshot.load_signing_key(self.role_keys['snapshot']['private'])
-    repository.timestamp.load_signing_key(self.role_keys['timestamp']['private'])
-
-    # Errors, not enough signing keys to satisfy root's threshold.
-    self.assertRaises(tuf.exceptions.UnsignedMetadataError, repository.writeall)
-
-    repository.root.add_verification_key(self.role_keys['role1']['public'])
-    repository.root.load_signing_key(self.role_keys['root']['private'])
-    repository.root.load_signing_key(self.role_keys['role1']['private'])
-    repository.writeall()
-
-    repository.root.add_verification_key(self.role_keys['snapshot']['public'])
-    repository.root.load_signing_key(self.role_keys['snapshot']['private'])
-    repository.root.threshold = 3
-    repository.writeall()
-
-    # Move the staged metadata to the "live" metadata.
-    shutil.rmtree(os.path.join(self.repository_directory, 'metadata'))
-    shutil.copytree(os.path.join(self.repository_directory, 'metadata.staged'),
-        os.path.join(self.repository_directory, 'metadata'))
-    self.repository_updater.refresh()
-
-
-
-  def test_verify_root_with_current_keyids_and_threshold(self):
-    """
-    Each root file is signed by the current root threshold of keys as well
-    as the previous root threshold of keys.
-    Test that a root file which is not 'self-signed' with the current root
-    threshold of keys causes the update to fail.
-    """
-    # Load repository with root.json == 1.root.json (available on client)
-    # Signing key: "root", Threshold: 1
-    repository = repo_tool.load_repository(self.repository_directory)
-
-    # Rotate keys and update root: 1.root.json --> 2.root.json
-    # Signing key: "root" (previous) and "root2" (current)
-    # Threshold (for both): 1
-    repository.root.load_signing_key(self.role_keys['root']['private'])
-    repository.root.add_verification_key(self.role_keys['root2']['public'])
-    repository.root.load_signing_key(self.role_keys['root2']['private'])
-    # Remove the previous "root" key from the list of current
-    # verification keys.
-    repository.root.remove_verification_key(self.role_keys['root']['public'])
-    repository.writeall()
-
-    # Move staged metadata to "live" metadata.
-    shutil.rmtree(os.path.join(self.repository_directory, 'metadata'))
-    shutil.copytree(os.path.join(self.repository_directory, 'metadata.staged'),
-        os.path.join(self.repository_directory, 'metadata'))
-
-    # Intercept 2.root.json and tamper with the "root2" (current) key
-    # signature.
-    root2_path_live = os.path.join(
-        self.repository_directory, 'metadata', '2.root.json')
-    root2 = securesystemslib.util.load_json_file(root2_path_live)
-
-    for idx, sig in enumerate(root2['signatures']):
-      if sig['keyid'] == self.role_keys['root2']['public']['keyid']:
-        sig_len = len(root2['signatures'][idx]['sig'])
-        root2['signatures'][idx]['sig'] = "deadbeef".ljust(sig_len, '0')
-
-    root2_fobj = tempfile.TemporaryFile()
-    root2_fobj.write(tuf.repository_lib._get_written_metadata(root2))
-    securesystemslib.util.persist_temp_file(root2_fobj, root2_path_live)
-
-    # Update 1.root.json -> 2.root.json
-    # Signature verification with the current keys should fail because we
-    # replaced the "root2" signature with garbage.
-    with self.assertRaises(tuf.exceptions.NoWorkingMirrorError) as cm:
-      self.repository_updater.refresh()
-
-    for mirror_url, mirror_error in cm.exception.mirror_errors.items():
-      self.assertTrue(mirror_url.endswith('/2.root.json'))
-      self.assertTrue(isinstance(mirror_error,
-          securesystemslib.exceptions.BadSignatureError))
-
-    # Assert that the current 'root.json' on the client side is the verified
-    # one.
-    self.assertTrue(filecmp.cmp(
-        os.path.join(self.repository_directory, 'metadata', '1.root.json'),
-        os.path.join(self.client_metadata_current, 'root.json')))
-
-
-
-
-
-  def test_verify_root_with_duplicate_current_keyids(self):
-    """
-    Each root file is signed by the current root threshold of keys as well
-    as the previous root threshold of keys.  In each case, a keyid must only
-    count once towards the threshold.  Test that the signature verification
-    implemented in _verify_root_self_signed() only counts one signature per
-    keyid towards the threshold.
- """ - # Load repository with root.json == 1.root.json (available on client) - # Signing key: "root", Threshold: 1 - repository = repo_tool.load_repository(self.repository_directory) - - # Add an additional signing key and bump the threshold to 2 - repository.root.load_signing_key(self.role_keys['root']['private']) - repository.root.add_verification_key(self.role_keys['root2']['public']) - repository.root.load_signing_key(self.role_keys['root2']['private']) - repository.root.threshold = 2 - repository.writeall() - - # Move staged metadata to "live" metadata - shutil.rmtree(os.path.join(self.repository_directory, 'metadata')) - shutil.copytree(os.path.join(self.repository_directory, 'metadata.staged'), - os.path.join(self.repository_directory, 'metadata')) - - # Modify 2.root.json and list two signatures with the same keyid - root2_path_live = os.path.join( - self.repository_directory, 'metadata', '2.root.json') - root2 = securesystemslib.util.load_json_file(root2_path_live) - - signatures = [] - signatures.append(root2['signatures'][0]) - signatures.append(root2['signatures'][0]) - - root2['signatures'] = signatures - - root2_fobj = tempfile.TemporaryFile() - root2_fobj.write(tuf.repository_lib._get_written_metadata(root2)) - securesystemslib.util.persist_temp_file(root2_fobj, root2_path_live) - - # Update 1.root.json -> 2.root.json - # Signature verification with new keys should fail because the threshold - # can only be met by two signatures with the same keyid - with self.assertRaises(tuf.exceptions.NoWorkingMirrorError) as cm: - self.repository_updater.refresh() - - for mirror_url, mirror_error in cm.exception.mirror_errors.items(): - self.assertTrue(mirror_url.endswith('/2.root.json')) - self.assertTrue(isinstance(mirror_error, - securesystemslib.exceptions.BadSignatureError)) - - # Assert that the current 'root.json' on the client side is the verified one - self.assertTrue(filecmp.cmp( - os.path.join(self.repository_directory, 'metadata', '1.root.json'), - os.path.join(self.client_metadata_current, 'root.json'))) - - - - - - def test_root_rotation_full(self): - """Test that a client whose root is outdated by multiple versions and who - has none of the latest nor next-to-latest root keys can still update and - does so by incrementally verifying all roots until the most recent one. """ - # Load initial repository with 1.root.json == root.json, signed by "root" - # key. This is the root.json that is already on the client. 
- repository = repo_tool.load_repository(self.repository_directory) - - # 1st rotation: 1.root.json --> 2.root.json - # 2.root.json will be signed by previous "root" key and by new "root2" key - repository.root.load_signing_key(self.role_keys['root']['private']) - repository.root.add_verification_key(self.role_keys['root2']['public']) - repository.root.load_signing_key(self.role_keys['root2']['private']) - repository.writeall() - - # 2nd rotation: 2.root.json --> 3.root.json - # 3.root.json will be signed by previous "root2" key and by new "root3" key - repository.root.unload_signing_key(self.role_keys['root']['private']) - repository.root.remove_verification_key(self.role_keys['root']['public']) - repository.root.add_verification_key(self.role_keys['root3']['public']) - repository.root.load_signing_key(self.role_keys['root3']['private']) - repository.writeall() - - # Move staged metadata to "live" metadata - shutil.rmtree(os.path.join(self.repository_directory, 'metadata')) - shutil.copytree(os.path.join(self.repository_directory, 'metadata.staged'), - os.path.join(self.repository_directory, 'metadata')) - - # Update on client 1.root.json --> 2.root.json --> 3.root.json - self.repository_updater.refresh() - - # Assert that client updated to the latest root from the repository - self.assertTrue(filecmp.cmp( - os.path.join(self.repository_directory, 'metadata', '3.root.json'), - os.path.join(self.client_metadata_current, 'root.json'))) - - - - def test_root_rotation_max(self): - """Test that client does not rotate beyond a configured upper bound, i.e. - `current_version + MAX_NUMBER_ROOT_ROTATIONS`. """ - # NOTE: The nature of below root changes is irrelevant. Here we only want - # the client to update but not beyond a configured upper bound. - - # 1.root.json --> 2.root.json (add root2 and root3 keys) - repository = repo_tool.load_repository(self.repository_directory) - repository.root.load_signing_key(self.role_keys['root']['private']) - repository.root.add_verification_key(self.role_keys['root2']['public']) - repository.root.load_signing_key(self.role_keys['root2']['private']) - repository.root.add_verification_key(self.role_keys['root3']['public']) - repository.root.load_signing_key(self.role_keys['root3']['private']) - repository.writeall() - - # 2.root.json --> 3.root.json (change threshold) - repository.root.threshold = 2 - repository.writeall() - - # 3.root.json --> 4.root.json (change threshold again) - repository.root.threshold = 3 - repository.writeall() - - # Move staged metadata to "live" metadata - shutil.rmtree(os.path.join(self.repository_directory, 'metadata')) - shutil.copytree(os.path.join(self.repository_directory, 'metadata.staged'), - os.path.join(self.repository_directory, 'metadata')) - - # Assert that repo indeed has "4.root.json" and that it's the latest root - self.assertTrue(filecmp.cmp( - os.path.join(self.repository_directory, 'metadata', '4.root.json'), - os.path.join(self.repository_directory, 'metadata', 'root.json'))) - - # Lower max root rotation cap so that client stops updating early - max_rotation_backup = tuf.settings.MAX_NUMBER_ROOT_ROTATIONS - tuf.settings.MAX_NUMBER_ROOT_ROTATIONS = 2 - - # Update on client 1.root.json --> 2.root.json --> 3.root.json, - # but stop before updating to 4.root.json - self.repository_updater.refresh() - - # Assert that the client indeed only updated until 3.root.json - self.assertTrue(filecmp.cmp( - os.path.join(self.repository_directory, 'metadata', '3.root.json'), - os.path.join(self.client_metadata_current, 
        'root.json')))
-
-    # Reset the max root rotation cap.
-    tuf.settings.MAX_NUMBER_ROOT_ROTATIONS = max_rotation_backup
-
-
-
-  def test_root_rotation_missing_keys(self):
-    repository = repo_tool.load_repository(self.repository_directory)
-
-    # A partially written root.json (threshold = 2, but signed with only 1
-    # key) causes an invalid root chain later.
-    repository.root.threshold = 2
-    repository.root.load_signing_key(self.role_keys['root']['private'])
-    repository.snapshot.load_signing_key(self.role_keys['snapshot']['private'])
-    repository.timestamp.load_signing_key(self.role_keys['timestamp']['private'])
-
-    repository.write('root')
-    repository.write('snapshot')
-    repository.write('timestamp')
-
-    # Move the staged metadata to the "live" metadata.
-    shutil.rmtree(os.path.join(self.repository_directory, 'metadata'))
-    shutil.copytree(os.path.join(self.repository_directory, 'metadata.staged'),
-        os.path.join(self.repository_directory, 'metadata'))
-
-    # Create a new root.json.  It is still not valid, because it is not
-    # signed by a threshold of 2 previous keys.
-    repository.root.add_verification_key(self.role_keys['role1']['public'])
-    repository.root.load_signing_key(self.role_keys['role1']['private'])
-
-    repository.writeall()
-
-    repository.root.add_verification_key(self.role_keys['snapshot']['public'])
-    repository.root.load_signing_key(self.role_keys['snapshot']['private'])
-    repository.root.threshold = 3
-    repository.writeall()
-
-    # Move the staged metadata to the "live" metadata.
-    shutil.rmtree(os.path.join(self.repository_directory, 'metadata'))
-    shutil.copytree(os.path.join(self.repository_directory, 'metadata.staged'),
-        os.path.join(self.repository_directory, 'metadata'))
-
-    with self.assertRaises(tuf.exceptions.NoWorkingMirrorError) as cm:
-      self.repository_updater.refresh()
-
-    for mirror_url, mirror_error in cm.exception.mirror_errors.items():
-      self.assertTrue(mirror_url.endswith('/2.root.json'))
-      self.assertTrue(isinstance(mirror_error,
-          securesystemslib.exceptions.BadSignatureError))
-
-    # Assert that the current 'root.json' on the client side is the verified
-    # one.
-    self.assertTrue(filecmp.cmp(
-        os.path.join(self.repository_directory, 'metadata', '1.root.json'),
-        os.path.join(self.client_metadata_current, 'root.json')))
-
-
-
-
-  def test_root_rotation_unmet_last_version_threshold(self):
-    """Test that the client detects a root.json version that is not signed
-    by a previous threshold of signatures."""
-
-    repository = repo_tool.load_repository(self.repository_directory)
-
-    # Add verification keys.
-    repository.root.add_verification_key(self.role_keys['root']['public'])
-    repository.root.add_verification_key(self.role_keys['role1']['public'])
-
-    repository.targets.add_verification_key(self.role_keys['targets']['public'])
-    repository.snapshot.add_verification_key(self.role_keys['snapshot']['public'])
-    repository.timestamp.add_verification_key(self.role_keys['timestamp']['public'])
-
-    repository.snapshot.load_signing_key(self.role_keys['snapshot']['private'])
-    repository.timestamp.load_signing_key(self.role_keys['timestamp']['private'])
-
-    # Add signing keys.
-    repository.root.load_signing_key(self.role_keys['root']['private'])
-    repository.root.load_signing_key(self.role_keys['role1']['private'])
-
-    # Set the root threshold.
-    repository.root.threshold = 2
-    repository.writeall()
-
-    # Unload Root's previous signing keys to ensure that these keys are not
-    # used by mistake.
- repository.root.unload_signing_key(self.role_keys['role1']['private']) - repository.root.unload_signing_key(self.role_keys['root']['private']) - - # Add new verification key - repository.root.add_verification_key(self.role_keys['snapshot']['public']) - - # Remove one of the original signing keys - repository.root.remove_verification_key(self.role_keys['role1']['public']) - - # Set the threshold for the new Root file, but note that the previous - # threshold of 2 must still be met. - repository.root.threshold = 1 - - repository.root.load_signing_key(self.role_keys['role1']['private']) - repository.root.load_signing_key(self.role_keys['snapshot']['private']) - - repository.snapshot.load_signing_key(self.role_keys['snapshot']['private']) - repository.timestamp.load_signing_key(self.role_keys['timestamp']['private']) - - # We use write() rather than writeall() because the latter should fail due - # to the missing self.role_keys['root'] signature. - repository.write('root', increment_version_number=True) - repository.write('snapshot', increment_version_number=True) - repository.write('timestamp', increment_version_number=True) - - # Move the staged metadata to the "live" metadata. - shutil.rmtree(os.path.join(self.repository_directory, 'metadata')) - shutil.copytree(os.path.join(self.repository_directory, 'metadata.staged'), - os.path.join(self.repository_directory, 'metadata')) - - # The following refresh should fail because root must be signed by the - # previous self.role_keys['root'] key, which wasn't loaded. - with self.assertRaises(tuf.exceptions.NoWorkingMirrorError) as cm: - self.repository_updater.refresh() - - for mirror_url, mirror_error in cm.exception.mirror_errors.items(): - self.assertTrue(mirror_url.endswith('/3.root.json')) - self.assertTrue(isinstance(mirror_error, - securesystemslib.exceptions.BadSignatureError)) - - # Assert that the current 'root.json' on the client side is the verified one - self.assertTrue(filecmp.cmp( - os.path.join(self.repository_directory, 'metadata', '2.root.json'), - os.path.join(self.client_metadata_current, 'root.json'))) - - - - def test_root_rotation_unmet_new_threshold(self): - """Test that client detects a root.json version that is not signed - by a current threshold of signatures """ - repository = repo_tool.load_repository(self.repository_directory) - - # Create a new, valid root.json. - repository.root.threshold = 2 - repository.root.load_signing_key(self.role_keys['root']['private']) - repository.root.add_verification_key(self.role_keys['root2']['public']) - repository.root.load_signing_key(self.role_keys['root2']['private']) - - repository.writeall() - - # Increase the threshold and add a new verification key without - # actually loading the signing key - repository.root.threshold = 3 - repository.root.add_verification_key(self.role_keys['root3']['public']) - - # writeall fails as expected since the third signature is missing - self.assertRaises(tuf.exceptions.UnsignedMetadataError, repository.writeall) - # write an invalid '3.root.json' as partially signed - repository.write('root') - - # Move the staged metadata to the "live" metadata. - shutil.rmtree(os.path.join(self.repository_directory, 'metadata')) - shutil.copytree(os.path.join(self.repository_directory, 'metadata.staged'), - os.path.join(self.repository_directory, 'metadata')) - - - # The following refresh should fail because root must be signed by the - # current self.role_keys['root3'] key, which wasn't loaded. 
-    with self.assertRaises(tuf.exceptions.NoWorkingMirrorError) as cm:
-      self.repository_updater.refresh()
-
-    for mirror_url, mirror_error in cm.exception.mirror_errors.items():
-      self.assertTrue(mirror_url.endswith('/3.root.json'))
-      self.assertTrue(isinstance(mirror_error,
-          securesystemslib.exceptions.BadSignatureError))
-
-    # Assert that the current 'root.json' on the client side is the verified
-    # one.
-    self.assertTrue(filecmp.cmp(
-        os.path.join(self.repository_directory, 'metadata', '2.root.json'),
-        os.path.join(self.client_metadata_current, 'root.json')))
-
-
-
-  def test_root_rotation_discard_untrusted_version(self):
-    """Test that the client discards a root.json version that failed
-    signature verification."""
-    repository = repo_tool.load_repository(self.repository_directory)
-
-    # Rotate the root key without signing with the previous version's "root"
-    # key.
-    repository.root.remove_verification_key(self.role_keys['root']['public'])
-    repository.root.add_verification_key(self.role_keys['root2']['public'])
-    repository.root.load_signing_key(self.role_keys['root2']['private'])
-
-    # 2.root.json
-    repository.writeall()
-
-    # Move the staged metadata to the "live" metadata.
-    shutil.rmtree(os.path.join(self.repository_directory, 'metadata'))
-    shutil.copytree(os.path.join(self.repository_directory, 'metadata.staged'),
-        os.path.join(self.repository_directory, 'metadata'))
-
-    # Refresh on the client side should fail because 2.root.json is not
-    # signed by a threshold of previous keys.
-    with self.assertRaises(tuf.exceptions.NoWorkingMirrorError) as cm:
-      self.repository_updater.refresh()
-
-    for mirror_url, mirror_error in cm.exception.mirror_errors.items():
-      self.assertTrue(mirror_url.endswith('/2.root.json'))
-      self.assertTrue(isinstance(mirror_error,
-          securesystemslib.exceptions.BadSignatureError))
-
-    # Assert that the current 'root.json' on the client side is the trusted
-    # one and that 2.root.json is discarded.
-    self.assertTrue(filecmp.cmp(
-        os.path.join(self.repository_directory, 'metadata', '1.root.json'),
-        os.path.join(self.client_metadata_current, 'root.json')))
-
-
-
-
-def _load_role_keys(keystore_directory):
-
-  # Populate 'role_keys' by importing the required public and private keys of
-  # 'tuf/tests/repository_data/'.  The role keys are needed when modifying the
-  # remote repository used by the test cases in this unit test.
-
-  # The pre-generated key files in 'repository_data/keystore' are all
-  # encrypted with a 'password' passphrase.
-  EXPECTED_KEYFILE_PASSWORD = 'password'
-
-  # Store and return the cryptography keys of the top-level roles, including 1
-  # delegated role.
-  role_keys = {}
-
-  root_key_file = os.path.join(keystore_directory, 'root_key')
-  root2_key_file = os.path.join(keystore_directory, 'root_key2')
-  root3_key_file = os.path.join(keystore_directory, 'root_key3')
-  targets_key_file = os.path.join(keystore_directory, 'targets_key')
-  snapshot_key_file = os.path.join(keystore_directory, 'snapshot_key')
-  timestamp_key_file = os.path.join(keystore_directory, 'timestamp_key')
-  delegation_key_file = os.path.join(keystore_directory, 'delegation_key')
-
-  role_keys = {'root': {}, 'root2': {}, 'root3': {}, 'targets': {}, 'snapshot':
-      {}, 'timestamp': {}, 'role1': {}}
-
-  # Import the top-level and delegated role public keys.
- role_keys['root']['public'] = \ - repo_tool.import_rsa_publickey_from_file(root_key_file+'.pub') - role_keys['root2']['public'] = \ - repo_tool.import_ed25519_publickey_from_file(root2_key_file+'.pub') - role_keys['root3']['public'] = \ - repo_tool.import_ecdsa_publickey_from_file(root3_key_file+'.pub') - role_keys['targets']['public'] = \ - repo_tool.import_ed25519_publickey_from_file(targets_key_file+'.pub') - role_keys['snapshot']['public'] = \ - repo_tool.import_ed25519_publickey_from_file(snapshot_key_file+'.pub') - role_keys['timestamp']['public'] = \ - repo_tool.import_ed25519_publickey_from_file(timestamp_key_file+'.pub') - role_keys['role1']['public'] = \ - repo_tool.import_ed25519_publickey_from_file(delegation_key_file+'.pub') - - # Import the private keys of the top-level and delegated roles. - role_keys['root']['private'] = \ - repo_tool.import_rsa_privatekey_from_file(root_key_file, - EXPECTED_KEYFILE_PASSWORD) - role_keys['root2']['private'] = \ - repo_tool.import_ed25519_privatekey_from_file(root2_key_file, - EXPECTED_KEYFILE_PASSWORD) - role_keys['root3']['private'] = \ - repo_tool.import_ecdsa_privatekey_from_file(root3_key_file, - EXPECTED_KEYFILE_PASSWORD) - role_keys['targets']['private'] = \ - repo_tool.import_ed25519_privatekey_from_file(targets_key_file, - EXPECTED_KEYFILE_PASSWORD) - role_keys['snapshot']['private'] = \ - repo_tool.import_ed25519_privatekey_from_file(snapshot_key_file, - EXPECTED_KEYFILE_PASSWORD) - role_keys['timestamp']['private'] = \ - repo_tool.import_ed25519_privatekey_from_file(timestamp_key_file, - EXPECTED_KEYFILE_PASSWORD) - role_keys['role1']['private'] = \ - repo_tool.import_ed25519_privatekey_from_file(delegation_key_file, - EXPECTED_KEYFILE_PASSWORD) - - return role_keys - - -if __name__ == '__main__': - utils.configure_test_logging(sys.argv) - unittest.main() diff --git a/tests/test_utils.py b/tests/test_utils.py index 6ad06a0fbb..2fefeedbdc 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -21,13 +21,11 @@ """ import logging -import os import socket import sys import unittest from tests import utils -from tuf import unittest_toolbox logger = logging.getLogger(__name__) @@ -47,7 +45,7 @@ def can_connect(port: int) -> bool: sock.close() -class TestServerProcess(unittest_toolbox.Modified_TestCase): +class TestServerProcess(unittest.TestCase): """Test functionality provided in TestServerProcess from tests/utils.py.""" def test_simple_server_startup(self) -> None: @@ -58,50 +56,6 @@ def test_simple_server_startup(self) -> None: self.assertTrue(can_connect(server_process_handler.port)) server_process_handler.clean() - def test_simple_https_server_startup(self) -> None: - # Test normal case - good_cert_path = os.path.join("ssl_certs", "ssl_cert.crt") - server_process_handler = utils.TestServerProcess( - log=logger, - server="simple_https_server_old.py", - extra_cmd_args=[good_cert_path], - ) - - # Make sure we can connect to the server - self.assertTrue(can_connect(server_process_handler.port)) - server_process_handler.clean() - - # Test when no cert file is provided - server_process_handler = utils.TestServerProcess( - log=logger, server="simple_https_server_old.py" - ) - - # Make sure we can connect to the server - self.assertTrue(can_connect(server_process_handler.port)) - server_process_handler.clean() - - # Test with a non existing cert file. 
- non_existing_cert_path = os.path.join("ssl_certs", "non_existing.crt") - server_process_handler = utils.TestServerProcess( - log=logger, - server="simple_https_server_old.py", - extra_cmd_args=[non_existing_cert_path], - ) - - # Make sure we can connect to the server - self.assertTrue(can_connect(server_process_handler.port)) - server_process_handler.clean() - - def test_slow_retrieval_server_startup(self) -> None: - # Test normal case - server_process_handler = utils.TestServerProcess( - log=logger, server="slow_retrieval_server_old.py" - ) - - # Make sure we can connect to the server - self.assertTrue(can_connect(server_process_handler.port)) - server_process_handler.clean() - def test_cleanup(self) -> None: # Test normal case server_process_handler = utils.TestServerProcess( diff --git a/tests/utils.py b/tests/utils.py index e8060d6cf3..7ba7516d1e 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -35,8 +35,6 @@ from contextlib import contextmanager from typing import IO, Any, Callable, Dict, Iterator, List, Optional -import tuf.log - logger = logging.getLogger(__name__) # Used when forming URLs on the client side @@ -152,7 +150,6 @@ def configure_test_logging(argv: List[str]) -> None: loglevel = logging.DEBUG logging.basicConfig(level=loglevel) - tuf.log.set_log_level(loglevel) def cleanup_dir(path: str) -> None: diff --git a/tox.ini b/tox.ini index 6cddcc82ba..2f975bc7ec 100644 --- a/tox.ini +++ b/tox.ini @@ -45,10 +45,6 @@ commands = isort --check --diff {[testenv:lint]lint_dirs} pylint -j 0 --rcfile=pyproject.toml {[testenv:lint]lint_dirs} - # NOTE: Contrary to what the pylint docs suggest, ignoring full paths does - # work, unfortunately each subdirectory has to be ignored explicitly. - pylint -j 0 tuf --ignore=tuf/api,tuf/api/serialization,tuf/ngclient,tuf/ngclient/_internal - mypy {[testenv:lint]lint_dirs} bandit -r tuf diff --git a/tuf/ATTACKS.md b/tuf/ATTACKS.md deleted file mode 100644 index 416e164ddb..0000000000 --- a/tuf/ATTACKS.md +++ /dev/null @@ -1,323 +0,0 @@ -# Demonstrate protection against malicious updates - -## Table of Contents ## -- [Blocking Malicious Updates](#blocking-malicious-updates) - - [Arbitrary Package Attack](#arbitrary-package-attack) - - [Rollback Attack](#rollback-attack) - - [Indefinite Freeze Attack](#indefinite-freeze-attack) - - [Endless Data Attack](#endless-data-attack) - - [Compromised Key Attack](#compromised-key-attack) - - [Slow Retrieval Attack](#slow-retrieval-attack) -- [Conclusion](#conclusion) - -## Blocking Malicious Updates ## -TUF protects against a number of attacks, some of which include rollback, -arbitrary package, and mix and match attacks. We begin this document on -blocking malicious updates by demonstrating how the client rejects a target -file downloaded from the software repository that doesn't match what is listed -in TUF metadata. - -The following demonstration requires and operates on the repository created in -the [repository management -tutorial](https://github.com/theupdateframework/python-tuf/blob/develop/tuf/README.md). - -### Arbitrary Package Attack ### -In an arbitrary package attack, an attacker installs anything they want on the -client system. That is, an attacker can provide arbitrary files in response to -download requests and the files will not be detected as illegitimate. We -simulate an arbitrary package attack by creating a "malicious" target file -that our client attempts to fetch. 
-
-```Bash
-$ mv 'repository/targets/file2.txt' 'repository/targets/file2.txt.backup'
-$ echo 'bad_target' > 'repository/targets/file2.txt'
-```
-
-We next remove our local timestamp (so that a new update is prompted) and the
-target files previously downloaded by the client.
-```Bash
-$ rm -rf "client/targets/" "client/metadata/current/timestamp.json"
-```
-
-The client now performs an update and should detect the invalid target file...
-Note: The following command should be executed in the "client/" directory.
-```Bash
-$ python3 basic_client.py --repo http://localhost:8001
-Error: No working mirror was found:
-  localhost:8001: BadHashError()
-```
-
-The log file (tuf.log) saved to the current working directory contains more
-information on the update procedure and the cause of the BadHashError.
-
-```Bash
-...
-
-BadHashError: Observed
-hash ('f569179171c86aa9ed5e8b1d6c94dfd516123189568d239ed57d818946aaabe7') !=
-expected hash (u'67ee5478eaadb034ba59944eb977797b49ca6aa8d3574587f36ebcbeeb65f70e')
-[2016-10-20 19:45:16,079 UTC] [tuf.client.updater] [ERROR] [_get_file:1415@updater.py]
-Failed to update /file2.txt from all mirrors: {u'http://localhost:8001/targets/file2.txt': BadHashError()}
-```
-
-Note: The "malicious" target file should be removed and the original file2.txt
-restored, otherwise the following examples will fail with BadHashError
-exceptions:
-
-```Bash
-$ mv 'repository/targets/file2.txt.backup' 'repository/targets/file2.txt'
-```
-
-### Indefinite Freeze Attack ###
-In an indefinite freeze attack, an attacker continues to present a software
-update system with the same files the client has already seen.  The result is
-that the client does not know that new files are available.  Although the
-client would be unable to prevent an attacker or compromised repository from
-feeding it stale metadata, it can at least detect when an attacker is doing so
-indefinitely.  The signed metadata used by TUF contains an "expires" field that
-indicates when metadata should no longer be trusted.
-
-In the following simulation, the client first tries to perform an update.
-
-```Bash
-$ python3 basic_client.py --repo http://localhost:8001
-```
-
-According to the logger (`tuf.log` file in the current working directory),
-everything appears to be up-to-date.  The remote server should also show that
-the client retrieved only the timestamp.json file.  Let's suppose now that an
-attacker continues to feed our client the same stale metadata.  If we were to
-move the time to a future date that would cause metadata to expire, the TUF
-framework should raise an exception or error to indicate that the metadata
-should no longer be trusted.
-
-```Bash
-$ sudo date -s '2080-12-25 12:34:56'
-Wed Dec 25 12:34:56 EST 2080
-
-$ python3 basic_client.py --repo http://localhost:8001
-Error: No working mirror was found:
-  u'localhost:8001': ExpiredMetadataError(u"Metadata u'root' expired on Tue Jan 1 00:00:00 2030 (UTC).",)
-```
-
-Note: Reset the date to continue with the rest of the attacks.
-
-
-### Rollback Attack ###
-In a rollback attack, an attacker presents a software update system with older
-files than those the client has already seen, causing the client to use files
-older than those the client knows about.  We begin this example by saving the
-current version of the Timestamp file available on the repository.  This saved
-file will later be served to the client to see if it is rejected.  The client
-should not accept metadata versions older than those previously trusted.
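-
-This check is a simple version-number comparison.  The following sketch is
-illustrative only; the helper and exception are hypothetical, not the
-updater's actual API:
-
-```python
-class ReplayedMetadata(Exception):
-  """Hypothetical stand-in for the framework's ReplayedMetadataError."""
-
-def reject_rollback(role, trusted_version, new_version):
-  # Reject any metadata whose version number is lower than the version
-  # already trusted for this role.
-  if new_version < trusted_version:
-    raise ReplayedMetadata('%s: version %d < trusted version %d'
-        % (role, new_version, trusted_version))
-```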
-
-Navigate to the directory containing the server's files and save the current
-timestamp.json to a temporary location:
-```Bash
-$ cp repository/metadata/timestamp.json /tmp
-```
-
-We should next generate a new Timestamp file on the repository side.
-```Bash
-$ python3
->>> from tuf.repository_tool import *
->>> repository = load_repository('repository')
->>> repository.timestamp.version
-1
->>> repository.timestamp.version = 2
->>> repository.dirty_roles()
-Dirty roles: [u'timestamp']
->>> private_timestamp_key = import_rsa_privatekey_from_file("keystore/timestamp_key")
-Enter a password for the encrypted RSA file (/path/to/keystore/timestamp_key):
->>> repository.timestamp.load_signing_key(private_timestamp_key)
->>> repository.write('timestamp')
-
-$ cp repository/metadata.staged/* repository/metadata
-```
-
-Now start the HTTP server from the directory containing the 'repository'
-subdirectory.
-```Bash
-$ python3 -m http.server 8001
-```
-
-And perform an update so that the client retrieves the updated timestamp.json.
-```Bash
-$ python3 basic_client.py --repo http://localhost:8001
-```
-
-Finally, move the previous timestamp.json file to the current live repository
-and have the client try to download the outdated version.  The client should
-reject it!
-```Bash
-$ cp /tmp/timestamp.json repository/metadata/
-$ cd repository; python3 -m http.server 8001
-```
-
-On the client side, perform an update...
-```Bash
-$ python3 basic_client.py --repo http://localhost:8001
-Error: No working mirror was found:
-  u'localhost:8001': ReplayedMetadataError()
-```
-
-The tuf.log file contains more information about the ReplayedMetadataError
-exception and update process.  Please reset timestamp.json to the latest
-version, which can be found in the 'repository/metadata.staged' subdirectory.
-
-```Bash
-$ cp repository/metadata.staged/timestamp.json repository/metadata
-```
-
-
-### Endless Data Attack ###
-In an endless data attack, an attacker responds to a file download request with
-an endless stream of data, causing harm to clients (e.g., a disk partition
-filling up or memory exhaustion).  In this simulated attack, we append extra
-data to one of the target files available on the software repository.  The
-client should only download the exact number of bytes it expects for a
-requested target file (according to what is listed in trusted TUF metadata).
-
-```Bash
-$ cp repository/targets/file1.txt /tmp
-$ python3 -c "print('a' * 1000)" >> repository/targets/file1.txt
-```
-
-Now delete the local metadata and target files on the client side so
-that remote metadata and target files are downloaded again.
-```Bash
-$ rm -rf client/targets/
-$ rm client/metadata/current/snapshot.json* client/metadata/current/timestamp.json*
-```
-
-Lastly, perform an update to verify that file1.txt is downloaded up to the
-expected size, and no more.  The target file available on the software
-repository does contain more data than expected, though.
-
-```Bash
-$ python3 basic_client.py --repo http://localhost:8001
-```
-
-At this point, only part of the "file1.txt" file should have been fetched.
-That is, the expected 31 bytes of it should have been downloaded, and the rest
-of the maliciously appended data ignored, as the sketch below illustrates.
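-
-The length check itself is simple; the following is a minimal sketch under
-stated assumptions (hypothetical helper name; the trusted length comes from
-previously verified metadata):
-
-```python
-def bounded_read(file_object, trusted_length, chunk_size=8192):
-  # Read at most 'trusted_length' bytes, ignoring anything a malicious
-  # server appends beyond the length listed in trusted metadata.
-  data = b''
-  while len(data) < trusted_length:
-    chunk = file_object.read(min(chunk_size, trusted_length - len(data)))
-    if not chunk:
-      break
-    data += chunk
-  return data
-```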
-If we inspect the logger, we'd discover the following:
-
-```Bash
-[2016-10-06 21:37:39,092 UTC] [tuf.download] [INFO] [_download_file:235@download.py]
-Downloading: u'http://localhost:8001/targets/file1.txt'
-
-[2016-10-06 21:37:39,145 UTC] [tuf.download] [INFO] [_check_downloaded_length:610@download.py]
-Downloaded 31 bytes out of the expected 31 bytes.
-
-[2016-10-06 21:37:39,145 UTC] [tuf.client.updater] [INFO] [_get_file:1372@updater.py]
-Not decompressing http://localhost:8001/targets/file1.txt
-
-[2016-10-06 21:37:39,145 UTC] [tuf.client.updater] [INFO] [_check_hashes:778@updater.py]
-The file's sha256 hash is correct: 65b8c67f51c993d898250f40aa57a317d854900b3a04895464313e48785440da
-```
-
-Indeed, the sha256 hash of the first 31 bytes of the "file1.txt" available
-on the repository matches what is listed in the trusted metadata. The client
-did not download the appended data.
-
-Note: Restore file1.txt:
-
-```Bash
-$ cp /tmp/file1.txt repository/targets/
-```
-
-
-### Compromised Key Attack ###
-An attacker who compromises fewer than a given threshold of a role's keys is
-limited in what they can do. This is in contrast to systems that rely on a
-single online key (such as one protected only by SSL) or a single offline key
-(as most software update systems use to sign files). In this example, we
-attempt to sign a role file with fewer than the threshold number of keys. A
-single key (suppose it is a compromised key) is used to demonstrate that
-roles must be signed with at least the threshold number of keys required for
-the role. In order to compromise a role, an attacker would have to compromise
-a threshold of its keys. This requirement of a threshold number of signatures
-provides compromise resilience.
-
-Let's attempt to sign a new snapshot file with fewer keys than its threshold
-requires. The client should reject the partially signed snapshot file served
-by the repository (or imagine that it is a compromised software repository).
-
-```Bash
-$ python3
->>> from tuf.repository_tool import *
->>> repository = load_repository('repository')
->>> version = repository.root.version
->>> repository.root.version = version + 1
->>> private_root_key = import_rsa_privatekey_from_file("keystore/root_key", password="password")
->>> repository.root.load_signing_key(private_root_key)
->>> private_root_key2 = import_rsa_privatekey_from_file("keystore/root_key2", password="password")
->>> repository.root.load_signing_key(private_root_key2)
-
->>> repository.snapshot.version = 8
->>> repository.snapshot.threshold = 2
->>> private_snapshot_key = import_rsa_privatekey_from_file("keystore/snapshot_key", password="password")
->>> repository.snapshot.load_signing_key(private_snapshot_key)
-
->>> repository.timestamp.version = 8
->>> private_timestamp_key = import_rsa_privatekey_from_file("keystore/timestamp_key", password="password")
->>> repository.timestamp.load_signing_key(private_timestamp_key)
-
->>> repository.write('root')
->>> repository.write('snapshot')
->>> repository.write('timestamp')
-
-$ cp repository/metadata.staged/* repository/metadata
-```
-
-The client now attempts to refresh the top-level metadata and the
-partially signed snapshot.json, which should be rejected.
-
-```Bash
-$ python3 basic_client.py --repo http://localhost:8001
-Error: No working mirror was found:
-  u'localhost:8001': BadSignatureError()
-```
-
-
-### Slow Retrieval Attack ###
-In a slow retrieval attack, an attacker responds to clients with a very slow
-stream of data that essentially results in the client never completing the
-update process.
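-
-TUF's countermeasure is to require a minimum average download rate: if the
-observed rate drops below a configured floor, the download is aborted. A
-rough, hypothetical sketch of the idea (not TUF's actual code, which is
-driven by the MIN_AVERAGE_DOWNLOAD_SPEED setting used below):
-
-```Python
-import time
-
-# Hypothetical sketch: abort a download whose average rate (bytes per
-# second) falls below a configured minimum, defeating slow retrieval.
-def read_with_rate_check(stream, required_length, min_average_speed,
-                         chunk_size=1):
-  data = b''
-  start_time = time.time()
-  while len(data) < required_length:
-    chunk = stream.read(chunk_size)
-    if not chunk:
-      break
-    data += chunk
-    seconds_elapsed = time.time() - start_time
-    if seconds_elapsed >= 1 and len(data) / seconds_elapsed < min_average_speed:
-      raise IOError('Slow retrieval detected: average rate is too low.')
-  return data
-```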
-In this example, we simulate a slow retrieval attack by spawning a server
-that serves data to our update client at a very slow rate. TUF should not be
-vulnerable to this attack, and the framework should raise an exception or
-error when it detects that a malicious server is serving it data too slowly.
-
-We first spawn the server that slowly streams data to the client. The
-'slow_retrieval_server_old.py' module (found in the tests/ directory of the
-source code) should be copied over to the server's 'repository/' directory,
-from which it is launched.
-
-```Bash
-# Before launching the slow retrieval server, copy 'slow_retrieval_server_old.py'
-# to the 'repository/' directory and run it from that directory as follows:
-$ python3 slow_retrieval_server_old.py 8002 mode_2
-```
-
-The client may now make a request to the slow retrieval server on port 8002.
-However, before doing so, we'll reduce (for the purposes of this demo) the
-minimum average download rate allowed and the download chunk size. Open the
-'settings.py' module and set MIN_AVERAGE_DOWNLOAD_SPEED = 5 and CHUNK_SIZE = 1.
-With these settings, the client should detect the slow retrieval server's
-delayed streaming.
-
-```Bash
-$ python3 basic_client.py --verbose 1 --repo http://localhost:8002
-Error: No working mirror was found:
-  u'localhost:8002': SlowRetrievalError()
-```
-
-The framework should detect the slow retrieval attack and raise a
-SlowRetrievalError exception to the client application.
-
-
-## Conclusion ##
-These are just some of the attacks that TUF provides protection against. For
-more attacks and updater weaknesses, please see the
-[Security](https://theupdateframework.io/security/)
-page.
diff --git a/tuf/README-developer-tools.md b/tuf/README-developer-tools.md
deleted file mode 100644
index 1b593400a5..0000000000
--- a/tuf/README-developer-tools.md
+++ /dev/null
@@ -1,342 +0,0 @@
-# The Update Framework Developer Tool: How to Update your Project Securely on a TUF Repository
-
-## Table of Contents
-- [Overview](#overview)
-- [Creating a Simple Project](#creating_a_simple_project)
-  - [Generating a Key](#generating_a_key)
-  - [The Project Class](#the_project_class)
-  - [Signing and Writing the Metadata](#signing_and_writing_the_metadata)
-- [Loading an Existing Project](#loading_an_existing_project)
-- [Delegations](#delegations)
-- [Managing Keys](#managing_keys)
-- [Managing Targets](#managing_targets)
-
-
-## Overview
-The Update Framework (TUF) is a Python-based security system for software
-updates. In order to prevent your users from downloading vulnerable or malicious
-code disguised as updates to your software, TUF requires that each update you
-release include certain metadata verifying your authorship of the files.
-
-The TUF developer tools are a Python library that enables you to create and
-maintain the required metadata for files hosted on a TUF Repository. (We call
-these files “targets,” to distinguish them from the metadata associated with
-them. Both of these together comprise a complete “project”.) You will use these
-tools to generate the keys and metadata you need to claim and secure your files
-on the repository, and to update the metadata and sign it with those keys
-whenever you upload a new version of those files.
-
-This document will teach you how to use these tools in two parts. The first
-part walks through the creation of a minimal-complexity TUF project, which is
-all you need to get started, and can be expanded later. The second part details
-the full functionality of the tools, which offer a finer degree of control in
-securing your project.
-
-
-## Creating a Simple Project
-This section walks through the creation of a small example project with just
-one target. Once created, this project will be fully functional, and can be
-modified as needed.
-
-
-### Generating a Key
-First, we will need to generate a key to sign the metadata. Keys are generated
-in pairs: one public and the other private. The private key is
-password-protected and is used to sign metadata. The public key can be shared
-freely, and is used to verify signatures made by the private key. You will need
-to share your public key with the repository hosting your project so they can
-verify your metadata is signed by the right person.
-
-The generate\_and\_write\_rsa\_keypair\_with\_prompt function will create two
-key files: "path/to/key.pub", which is the public key, and "path/to/key",
-which is the private key.
-
-```
->>> from tuf.developer_tool import *
->>> generate_and_write_rsa_keypair_with_prompt(filepath="path/to/key")
-enter password to encrypt private key file 'path/to/key'
-(leave empty if key should not be encrypted):
-Confirm:
->>>
-```
-
-We can also use the bits parameter to set a different key length (the default
-is 3072), or call `generate_and_write_rsa_keypair` with a `password`
-parameter if a prompt is not desired.
-
-In this example we use RSA keys, but Ed25519 keys are also supported.
-
-Now that we have a key for our project, we can proceed to create the project
-itself.
-
-
-### The Project Class
-The TUF developer tool is built around the Project class, which is used to
-organize groups of targets associated with a single set of metadata. A single
-Project instance is used to keep track of all the target files and metadata
-files in one project. The Project also keeps track of the keys and signatures,
-so that it can update all the metadata with the correct changes and signatures
-in a single command.
-
-Before creating a project, you must know where it will be located in the TUF
-Repository. In the following example, we will create a project to be hosted as
-"repo/unclaimed/example_project" within the repository, and store a local copy
-of the metadata at "local/path/to/metadata". The project will comprise a single
-target file, "local/path/to/example\_project/target\_1" locally, and we will
-secure it with the key generated above.
-
-First, we must import the generated keys. We can do that by issuing the
-following command:
-
-```
->>> public_key = import_rsa_publickey_from_file("path/to/key.pub")
-```
-
-After importing the key, we can generate a new project with the following
-command:
-
-```
->>> project = create_new_project(project_name="example_project",
-... metadata_directory="local/path/to/metadata/",
-... targets_directory="local/path/to/example_project",
-... location_in_repository="repo/unclaimed", key=public_key)
-```
-
-Let's list the arguments and make sense out of this rather long function call:
-
-- create a project named example_project: the name of the metadata file will
-match this name
-- the metadata will be located in "local/path/to/metadata"; this means all of
-the generated metadata files for this project will be located here
-- the targets are located in "local/path/to/example_project". If your targets
-are located somewhere else, you can point the targets directory there; files
-must reside under the path "local/path/to/example_project" or else it won't be
-possible to add them.
-- location\_in\_repository points to "repo/unclaimed"; this is prepended to
-the paths in the generated metadata so that the signatures all match.
-
-Now the project is in memory and we can do different operations on it such as
-adding and removing targets, delegating files, changing signatures and keys,
-and so on. For the moment, we are interested in adding our one and only target
-to the project.
-
-To add a target, we call the following method:
-
-```
->>> project.add_target("local/path/to/example_project/target_1")
-```
-
-Note that the file "target\_1" should be located in
-"local/path/to/example\_project", or this method will raise an
-error.
-
-At this point, the metadata is not valid. We have assigned a key to the
-project, but we have not *signed* it with that key. Signing is the process of
-generating a signature with our private key so it can be verified with the
-public key by the server (upon uploading) and by the clients (when updating).
-
-
-### Signing and Writing the Metadata ###
-In order to sign the metadata, we need to import the private key corresponding
-to the public key we added to the project. Once the key is loaded into the
-project, it will automatically be used to sign the metadata whenever the
-metadata is written.
-
-```
->>> private_key = import_rsa_privatekey_from_file("path/to/key")
-Enter password for the RSA key:
->>> project.load_signing_key(private_key)
->>> project.write()
-```
-
-When all changes to the project have been written, the metadata is ready to be
-uploaded to the repository, and it is safe to exit the Python interpreter, or
-to delete the Project instance.
-
-The project can be loaded later to make further changes. The metadata
-contains checksums that have to match the actual target files, or else it won't
-be accepted by the upstream repository.
-
-At this point, if you have followed all the steps in this document so far
-(substituting appropriate names and filepaths), you will have created a basic
-TUF project, which can be expanded as needed. The simplest way to get your
-project secured is to add all your files using add\_target() (or see [Managing
-Targets](#managing_targets) on how to add whole directories). If your project
-has several contributors, you may want to consider adding
-[delegations](#delegations) to your project.
-
-
-## Loading an Existing Project
-To make changes to existing metadata, we will need the Project again. We can
-restore it with the load_project() function.
-
-```
->>> from tuf.developer_tool import *
->>> project = load_project("local/path/to/metadata")
-```
-Each time the project is loaded anew, the necessary private keys must also be
-loaded in order to sign metadata.
-
-```
->>> private_key = import_rsa_privatekey_from_file("path/to/key")
-Enter a password for the RSA key:
->>> project.load_signing_key(private_key)
->>> project.write()
-```
-
-If your project does not use any delegations, the five commands above are all
-you need to update your project's metadata.
-
-
-## Delegations
-
-The project we created above is secured entirely by one key. If you want to
-allow someone else to update part of your project independently, you will need
-to delegate a new role for them. For example, we can do the following:
-
-```
->>> other_key = import_rsa_publickey_from_file("another_public_key.pub")
->>> targets = ['local/path/to/newtarget']
->>> project.delegate("newrole", [other_key], targets)
-```
-
-The new role is now an attribute of the Project instance, and contains the
-same methods as Project.
-For example, we can add targets in the same way as before:
-
-```
->>> project("newrole").add_target("delegated_1")
-```
-
-Recall that we input the other person's key as part of a list. That list can
-contain any number of public keys. We can also add keys to the role after
-creating it using the [add\_verification\_key()](#adding_a_key_to_a_delegation)
-method.
-
-### Delegated Paths
-
-By default, a delegated role is permitted to add and modify targets anywhere in
-the Project's targets directory. We can limit this permission by delegating
-trust of specific paths to the role.
-
-```
->>> project.add_paths(["delegated/filepath"], "newrole")
-```
-
-This will prevent the delegated role from signing targets whose local filepaths
-do not begin with "delegated/filepath". We can delegate several filepaths to a
-role by adding them to the list in the first parameter, or by invoking the
-method again. A role with multiple delegated paths can add targets to any of
-them.
-
-Note that this method is invoked from the parent role (in this case, the Project)
-and takes the delegated role name as an argument.
-
-### Nested Delegations
-
-It is possible for a delegated role to have delegations of its own. We can do
-this by calling delegate() on a delegated role:
-
-```
->>> project("newrole").delegate("nestedrole", [key], targets)
-```
-
-Nested delegations function no differently than first-order delegations. To
-demonstrate, adding a target to a nested delegation looks like this:
-
-```
->>> project("newrole")("nestedrole").add_target("foo")
-```
-
-### Revoking Delegations
-Delegations can be revoked, removing the delegated role from the project.
-
-```
->>> project.revoke("newrole")
-```
-
-
-## Managing Keys
-This section describes the key-related functions and parameters not covered in
-the [Creating a Simple Project](#creating_a_simple_project) section.
-
-### Additional Parameters for Key Generation
-When generating keys, it is possible to specify the length of the key in bits
-and its password as parameters:
-
-```
->>> generate_and_write_rsa_keypair(password="pw", filepath="path/to/key", bits=2048)
-```
-The bits parameter defaults to 3072, and values below 2048 will raise an error.
-The password parameter is only intended to be used in scripts.
-
-
-### Adding a Key to a Delegation
-New verification keys can be added to an existing delegation using
-add\_verification\_key():
-
-```
->>> project("rolename").add_verification_key(pubkey)
-```
-
-A delegation can have several verification keys at once. By default, a
-delegated role with multiple keys can be written using any one of their
-corresponding signing keys. To modify this behavior, you can change the
-delegated role's [threshold](#delegation_thresholds).
-
-### Removing a Key from a Delegation
-Verification keys can also be removed, like this:
-
-```
->>> project("rolename").remove_verification_key(pubkey)
-```
-
-Note that, unlike delegated roles, the top-level Project can only have one
-verification key, so adding a key to the Project will raise an error if one is
-already assigned. To replace the Project's key, first remove the existing one
-and then add the new one. It is also possible to omit the key parameter in the
-create\_new\_project() function, and add the key later.
-
-### Changing the Project Key
-Each Project instance can only have one verification key. This key can be
-replaced by removing it and adding a new key, in that order.
-
-```
->>> project.remove_verification_key(oldkey)
->>> project.add_verification_key(newkey)
-```
-
-
-### Delegation Thresholds
-
-Every delegated role has a threshold, which determines how many of its signing
-keys need to be loaded to write the role. The threshold defaults to 1, and
-should not exceed the number of verification keys assigned to the role. The
-threshold can be read and set as a property of a delegated role.
-
-```
->>> project("rolename").threshold = 2
-```
-
-The above line will set the "rolename" role's threshold to 2.
-
-
-## Managing Targets
-There are supporting functions in the library to make project maintenance
-easier. These functions are described in this section.
-
-### Adding Targets by Directory
-The get\_filepaths\_in\_directory function is especially useful when creating a
-new project, to add all the files contained in the targets directory at once.
-The following code block illustrates its usage:
-
-```
->>> list_of_targets = \
-... project.get_filepaths_in_directory("path/within/targets/folder",
-... recursive_walk=False, follow_links=False)
->>> project.add_targets(list_of_targets)
-```
-
-### Deleting Targets from a Project
-We may also want to delete existing targets from our project. To stop the
-developer tool from tracking a file, we can issue the following command:
-
-```
->>> project.remove_target("target_1")
-```
-
-Now the target file won't be part of the metadata.
diff --git a/tuf/README.md b/tuf/README.md
deleted file mode 100644
index dbc53b61b5..0000000000
--- a/tuf/README.md
+++ /dev/null
@@ -1,5 +0,0 @@
-[Quickstart](../docs/QUICKSTART.md)
-
-[CLI](../docs/CLI.md)
-
-[Tutorial](../docs/TUTORIAL.md)
diff --git a/tuf/client/README.md b/tuf/client/README.md
deleted file mode 100644
index 29b838bc4d..0000000000
--- a/tuf/client/README.md
+++ /dev/null
@@ -1,151 +0,0 @@
-# updater.py
-**updater.py** is intended as the only TUF module that software update
-systems need to utilize for a low-level integration. It provides a single
-class representing an updater that includes methods to download, install, and
-verify metadata or target files in a secure manner. Importing
-**tuf.client.updater** and instantiating its main class is all that is
-required by the client prior to a TUF update request. The importation and
-instantiation steps allow TUF to load all of the required metadata files
-and set the repository mirror information.
-
-The **tuf.repository_tool** module can be used to create a TUF repository. See
-[tuf/README](../README.md) for more information on creating TUF repositories.
-
-
-## Overview of the Update Process
-
-1. The software update system instructs TUF to check for updates.
-
-2. TUF downloads and verifies timestamp.json.
-
-3. If timestamp.json indicates that snapshot.json has changed, TUF downloads and
-verifies snapshot.json.
-
-4. TUF determines which metadata files listed in snapshot.json differ from those
-described in the last snapshot.json that TUF has seen. If root.json has changed,
-the update process starts over using the new root.json.
-
-5. TUF provides the software update system with a list of available files
-according to targets.json.
-
-6. The software update system instructs TUF to download a specific target
-file.
-
-7. TUF downloads and verifies the file and then makes the file available to
-the software update system.
-
-
-If at any point in the above procedure there is a problem (i.e., if unexpired,
-signed, valid metadata cannot be retrieved from the repository), the Root file
-is downloaded and the process is retried once more (and only once, to avoid an
-infinite loop). Optionally, the software update system using the framework
-can decide how to proceed rather than automatically downloading a new Root
-file.
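-
-This retry-once behavior can be pictured as follows. A brief, hypothetical
-sketch (the helper names are placeholders, not part of the tuf.client.updater
-API):
-
-```Python
-# Hypothetical sketch of the retry-once logic described above.
-def update_with_root_retry(check_for_updates, update_root):
-  try:
-    return check_for_updates()
-  except Exception:
-    update_root()               # Fetch a fresh root.json first.
-    return check_for_updates()  # Retry once; a second failure propagates.
-```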
-
-
-## Example Client
-### Refresh TUF Metadata
-```Python
-# The client first imports the 'updater.py' module, the only module the
-# client is required to import. The client will utilize a single class
-# from this module.
-import tuf.client.updater
-import tuf.settings
-
-# The only other module the client interacts with is 'settings'. The
-# client accesses this module solely to set the repository directory.
-# This directory will hold the files downloaded from a remote repository.
-tuf.settings.repositories_directory = 'path/to/local_repository'
-
-# Next, the client creates a dictionary object containing the repository
-# mirrors. The client may download content from any one of these mirrors.
-# In the example below, a single mirror named 'mirror1' is defined. The
-# mirror is located at 'http://localhost:8001', and all of the metadata
-# and targets files can be found in the 'metadata' and 'targets' directory,
-# respectively. If the client wishes to only download target files from
-# specific directories on the mirror, the 'confined_target_dirs' field
-# should be set. In this example, the client hasn't set confined_target_dirs,
-# which is interpreted as no confinement. In other words, the client can
-# download targets from any directory or subdirectories. If the client had
-# chosen 'targets1/', they would have been confined to the
-# '/targets/targets1/' directory on the 'http://localhost:8001' mirror.
-repository_mirrors = {'mirror1': {'url_prefix': 'http://localhost:8001',
-                                  'metadata_path': 'metadata',
-                                  'targets_path': 'targets'}}
-
-# The updater may now be instantiated. The Updater class of 'updater.py'
-# is called with two arguments. The first argument assigns a name to this
-# particular updater and the second argument the repository mirrors defined
-# above.
-updater = tuf.client.updater.Updater('updater', repository_mirrors)
-
-# The client calls the refresh() method to ensure it has the latest
-# copies of the top-level metadata files (i.e., Root, Targets, Snapshot,
-# Timestamp).
-updater.refresh()
-```
-
-
-### Download Specific Target File
-```Python
-# Example demonstrating an update that downloads a specific target.
-
-# Refresh the metadata of the top-level roles (i.e., Root, Targets, Snapshot,
-# Timestamp).
-updater.refresh()
-
-# get_one_valid_targetinfo() updates role metadata when required. In other
-# words, if the client doesn't possess the metadata that lists 'LICENSE.txt',
-# get_one_valid_targetinfo() will try to fetch / update it.
-target = updater.get_one_valid_targetinfo('LICENSE.txt')
-
-# Targets are saved to (and compared against) this local directory.
-destination_directory = '.'
-updated_target = updater.updated_targets([target], destination_directory)
-
-for target in updated_target:
-  updater.download_target(target, destination_directory)
-  # Client code here may also reference target information (including 'custom')
-  # by directly accessing the dictionary entries of the target. The 'custom'
-  # entry is additional file information explicitly set by the remote repository.
- target_path = target['filepath'] - target_length = target['fileinfo']['length'] - target_hashes = target['fileinfo']['hashes'] - target_custom_data = target['fileinfo']['custom'] - - # Remove any files from the destination directory that are no longer being - # tracked. For example, a target file from a previous snapshot that has since - # been removed on the remote repository. - updater.remove_obsolete_targets(destination_directory) -``` - -### A Simple Integration Example with client.py -``` Bash -# Assume a simple TUF repository has been setup with 'repo.py'. -$ client.py --repo http://localhost:8001 - -# Metadata and target files are silently updated. An exception is only raised if an error, -# or attack, is detected. Inspect 'tuf.log' for the outcome of the update process. - -$ cat tuf.log -[2013-12-16 16:17:05,267 UTC] [tuf.download] [INFO][_download_file:726@download.py] -Downloading: http://localhost:8001/metadata/timestamp.json - -[2013-12-16 16:17:05,269 UTC] [tuf.download] [WARNING][_check_content_length:589@download.py] -reported_length (545) < required_length (2048) - -[2013-12-16 16:17:05,269 UTC] [tuf.download] [WARNING][_check_downloaded_length:656@download.py] -Downloaded 545 bytes, but expected 2048 bytes. There is a difference of 1503 bytes! - -[2013-12-16 16:17:05,611 UTC] [tuf.download] [INFO][_download_file:726@download.py] -Downloading: http://localhost:8001/metadata/snapshot.json - -[2013-12-16 16:17:05,612 UTC] [tuf.client.updater] [INFO][_check_hashes:636@updater.py] -The file's sha256 hash is correct: 782675fadd650eeb2926d33c401b5896caacf4fd6766498baf2bce2f3b739db4 - -[2013-12-16 16:17:05,951 UTC] [tuf.download] [INFO][_download_file:726@download.py] -Downloading: http://localhost:8001/metadata/targets.json - -[2013-12-16 16:17:05,952 UTC] [tuf.client.updater] [INFO][_check_hashes:636@updater.py] -The file's sha256 hash is correct: a5019c28a1595c43a14cad2b6252c4d1db472dd6412a9204181ad6d61b1dd69a - -[2013-12-16 16:17:06,299 UTC] [tuf.download] [INFO][_download_file:726@download.py] -Downloading: http://localhost:8001/targets/file1.txt - -[2013-12-16 16:17:06,303 UTC] [tuf.client.updater] [INFO][_check_hashes:636@updater.py] -The file's sha256 hash is correct: ecdc5536f73bdae8816f0ea40726ef5e9b810d914493075903bb90623d97b1d8 diff --git a/tuf/client/__init__.py b/tuf/client/__init__.py deleted file mode 100755 index e69de29bb2..0000000000 diff --git a/tuf/client/fetcher.py b/tuf/client/fetcher.py deleted file mode 100644 index 8768bdd4b9..0000000000 --- a/tuf/client/fetcher.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright 2021, New York University and the TUF contributors -# SPDX-License-Identifier: MIT OR Apache-2.0 - -"""Provides an interface for network IO abstraction. -""" - -# Imports -import abc - -# Classes -class FetcherInterface(): - """Defines an interface for abstract network download. - - By providing a concrete implementation of the abstract interface, - users of the framework can plug-in their preferred/customized - network stack. - """ - - __metaclass__ = abc.ABCMeta - - @abc.abstractmethod - def fetch(self, url, required_length): - """Fetches the contents of HTTP/HTTPS url from a remote server. - - Ensures the length of the downloaded data is up to 'required_length'. - - Arguments: - url: A URL string that represents a file location. - required_length: An integer value representing the file length in bytes. - - Raises: - tuf.exceptions.SlowRetrievalError: A timeout occurs while receiving data. 
- tuf.exceptions.FetcherHTTPError: An HTTP error code is received. - - Returns: - A bytes iterator - """ - raise NotImplementedError # pragma: no cover diff --git a/tuf/client/updater.py b/tuf/client/updater.py deleted file mode 100755 index 9d08e4d020..0000000000 --- a/tuf/client/updater.py +++ /dev/null @@ -1,3071 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2012 - 2017, New York University and the TUF contributors -# SPDX-License-Identifier: MIT OR Apache-2.0 - -""" - - updater.py - - - Geremy Condra - Vladimir Diaz - - - July 2012. Based on a previous version of this module. (VLAD) - - - See LICENSE-MIT OR LICENSE for licensing information. - - - 'updater.py' is intended to be the only TUF module that software update - systems need to utilize. It provides a single class representing an - updater that includes methods to download, install, and verify - metadata/target files in a secure manner. Importing 'updater.py' and - instantiating its main class is all that is required by the client prior - to a TUF update request. The importation and instantiation steps allow - TUF to load all of the required metadata files and set the repository mirror - information. - - An overview of the update process: - - 1. The software update system instructs TUF to check for updates. - - 2. TUF downloads and verifies timestamp.json. - - 3. If timestamp.json indicates that snapshot.json has changed, TUF downloads - and verifies snapshot.json. - - 4. TUF determines which metadata files listed in snapshot.json differ from - those described in the last snapshot.json that TUF has seen. If root.json - has changed, the update process starts over using the new root.json. - - 5. TUF provides the software update system with a list of available files - according to targets.json. - - 6. The software update system instructs TUF to download a specific target - file. - - 7. TUF downloads and verifies the file and then makes the file available to - the software update system. - - - - # The client first imports the 'updater.py' module, the only module the - # client is required to import. The client will utilize a single class - # from this module. - from tuf.client.updater import Updater - - # The only other module the client interacts with is 'tuf.settings'. The - # client accesses this module solely to set the repository directory. - # This directory will hold the files downloaded from a remote repository. - from tuf import settings - settings.repositories_directory = 'local-repository' - - # Next, the client creates a dictionary object containing the repository - # mirrors. The client may download content from any one of these mirrors. - # In the example below, a single mirror named 'mirror1' is defined. The - # mirror is located at 'http://localhost:8001', and all of the metadata - # and targets files can be found in the 'metadata' and 'targets' directory, - # respectively. If the client wishes to only download target files from - # specific directories on the mirror, the 'confined_target_dirs' field - # should be set. In this example, the client hasn't set confined_target_dirs, - # which is interpreted as no confinement. - # In other words, the client can download - # targets from any directory or subdirectories. If the client had chosen - # 'targets1/', they would have been confined to the '/targets/targets1/' - # directory on the 'http://localhost:8001' mirror. 
-  repository_mirrors = {'mirror1': {'url_prefix': 'http://localhost:8001',
-                                    'metadata_path': 'metadata',
-                                    'targets_path': 'targets'}}
-
-  # The updater may now be instantiated.  The Updater class of 'updater.py'
-  # is called with two arguments.  The first argument assigns a name to this
-  # particular updater and the second argument the repository mirrors defined
-  # above.
-  updater = Updater('updater', repository_mirrors)
-
-  # The client next calls the refresh() method to ensure it has the latest
-  # copies of the metadata files.
-  updater.refresh()
-
-  # get_one_valid_targetinfo() updates role metadata when required.  In other
-  # words, if the client doesn't possess the metadata that lists 'LICENSE.txt',
-  # get_one_valid_targetinfo() will try to fetch / update it.
-  target = updater.get_one_valid_targetinfo('LICENSE.txt')
-
-  # Determine if 'target' has changed since the client's last refresh().  A
-  # target is considered updated if it does not exist in
-  # 'destination_directory' (current directory) or the target located there has
-  # changed.
-  destination_directory = '.'
-  updated_target = updater.updated_targets([target], destination_directory)
-
-  for target in updated_target:
-    updater.download_target(target, destination_directory)
-    # Client code here may also reference target information (including
-    # 'custom') by directly accessing the dictionary entries of the target.
-    # The 'custom' entry is additional file information explicitly set by the
-    # remote repository.
-    target_path = target['filepath']
-    target_length = target['fileinfo']['length']
-    target_hashes = target['fileinfo']['hashes']
-    target_custom_data = target['fileinfo']['custom']
-"""
-
-import errno
-import logging
-import os
-import shutil
-import time
-import fnmatch
-import copy
-import warnings
-import io
-from urllib import parse
-
-from securesystemslib import exceptions as sslib_exceptions
-from securesystemslib import formats as sslib_formats
-from securesystemslib import hash as sslib_hash
-from securesystemslib import keys as sslib_keys
-from securesystemslib import util as sslib_util
-
-import tuf
-from tuf import download
-from tuf import exceptions
-from tuf import formats
-from tuf import keydb
-from tuf import log  # pylint: disable=unused-import
-from tuf import mirrors
-from tuf import roledb
-from tuf import settings
-from tuf import sig
-from tuf import requests_fetcher
-
-# The Timestamp role does not have signed metadata about it; otherwise we
-# would need an infinite regress of metadata.  Therefore, we use some
-# default, but sane, upper file length for its metadata.
-DEFAULT_TIMESTAMP_UPPERLENGTH = settings.DEFAULT_TIMESTAMP_REQUIRED_LENGTH
-
-# The Root role may be updated without knowing its version number if
-# top-level metadata cannot be safely downloaded (e.g., keys may have been
-# revoked, thus requiring a new Root file that includes the updated keys)
-# and 'unsafely_update_root_if_necessary' is True.
-# We use some default, but sane, upper file length for its metadata.
-DEFAULT_ROOT_UPPERLENGTH = settings.DEFAULT_ROOT_REQUIRED_LENGTH
-
-# See 'log.py' to learn how logging is handled in TUF.
-logger = logging.getLogger(__name__)
-
-
-class MultiRepoUpdater(object):
-  """
-
-    Provide a way for clients to request a target file from multiple
-    repositories.  Which repositories to query is determined by the map
-    file (i.e., map.json).
-
-    See TAP 4 for more information on the map file and how to request updates
-    from multiple repositories.
TAP 4 describes how users may specify that a - particular threshold of repositories be used for some targets, while a - different threshold of repositories be used for others. - - - map_file: - The path of the map file. The map file is needed to determine which - repositories to query given a target file. - - - securesystemslib.exceptions.FormatError, if the map file is improperly - formatted. - - tuf.exceptions.Error, if the map file cannot be loaded. - - - None. - - - None. - """ - - def __init__(self, map_file): - # Is 'map_file' a path? If not, raise - # 'securesystemslib.exceptions.FormatError'. The actual content of the map - # file is validated later on in this method. - sslib_formats.PATH_SCHEMA.check_match(map_file) - - # A dictionary mapping repositories to TUF updaters. - self.repository_names_to_updaters = {} - - try: - # The map file dictionary that associates targets with repositories. - self.map_file = sslib_util.load_json_file(map_file) - - except (sslib_exceptions.Error) as e: - raise exceptions.Error('Cannot load the map file: ' + str(e)) - - # Raise securesystemslib.exceptions.FormatError if the map file is - # improperly formatted. - formats.MAPFILE_SCHEMA.check_match(self.map_file) - - # Save the "repositories" entry of the map file, with the following - # example format: - # - # "repositories": { - # "Django": ["https://djangoproject.com/"], - # "PyPI": ["https://pypi.python.org/"] - # } - self.repository_names_to_mirrors = self.map_file['repositories'] - - - - def get_valid_targetinfo(self, target_filename, match_custom_field=True): - """ - - Get valid targetinfo, if any, for the given 'target_filename'. The map - file controls the targetinfo returned (see TAP 4). Return a dict of the - form {updater1: targetinfo, updater2: targetinfo, ...}, where the dict - keys are updater objects, and the dict values the matching targetinfo for - 'target_filename'. - - - target_filename: - The relative path of the target file to update. - - match_custom_field: - Boolean that indicates whether the optional custom field in targetinfo - should match across the targetinfo provided by the threshold of - repositories. - - - tuf.exceptions.FormatError, if the argument is improperly formatted. - - tuf.exceptions.Error, if the required local metadata directory or the - Root file does not exist. - - tuf.exceptions.UnknownTargetError, if the repositories in the map file do - not agree on the target, or none of them have signed for the target. - - - None. - - - A dict of the form: {updater1: targetinfo, updater2: targetinfo, ...}. - The targetinfo (conformant with tuf.formats.TARGETINFO_SCHEMA) is for - 'target_filename'. - """ - - # Is the argument properly formatted? If not, raise - # 'tuf.exceptions.FormatError'. - formats.RELPATH_SCHEMA.check_match(target_filename) - - # TAP 4 requires that the following attributes be present in mappings: - # "paths", "repositories", "terminating", and "threshold". - formats.MAPPING_SCHEMA.check_match(self.map_file['mapping']) - - # Set the top-level directory containing the metadata for each repository. - repositories_directory = settings.repositories_directory - - # Verify that the required local directories exist for each repository. - self._verify_metadata_directories(repositories_directory) - - # Iterate mappings. - # [{"paths": [], "repositories": [], "terminating": Boolean, "threshold": - # NUM}, ...] - for mapping in self.map_file['mapping']: - - logger.debug('Interrogating mappings..' 
+ repr(mapping)) - if not self._target_matches_path_pattern( - target_filename, mapping['paths']): - # The mapping is irrelevant to the target file. Try the next one, if - # any. - continue - - # The mapping is relevant to the target... - else: - # Do the repositories in the mapping provide a threshold of matching - # targetinfo? - valid_targetinfo = self._matching_targetinfo(target_filename, - mapping, match_custom_field) - - if valid_targetinfo: - return valid_targetinfo - - else: - # If we are here, it means either (1) the mapping is irrelevant to - # the target, (2) the targets were missing from all repositories in - # this mapping, or (3) the targets on all repositories did not match. - # Whatever the case may be, are we allowed to continue to the next - # mapping? Let's check the terminating entry! - if not mapping['terminating']: - logger.debug('The mapping was irrelevant to the target, and' - ' "terminating" was set to False. Trying the next mapping...') - continue - - else: - raise exceptions.UnknownTargetError('The repositories in the' - ' mapping do not agree on the target, or none of them have' - ' signed for the target, and "terminating" was set to True.') - - # If we are here, it means either there were no mappings, or none of the - # mappings provided the target. - logger.debug('Did not find valid targetinfo for ' + repr(target_filename)) - raise exceptions.UnknownTargetError('The repositories in the map' - ' file do not agree on the target, or none of them have signed' - ' for the target.') - - - - - - def _verify_metadata_directories(self, repositories_directory): - # Iterate 'self.repository_names_to_mirrors' and verify that the expected - # local files and directories exist. TAP 4 requires a separate local - # directory for each repository. - for repository_name in self.repository_names_to_mirrors: - - logger.debug('Interrogating repository: ' + repr(repository_name)) - # Each repository must cache its metadata in a separate location. - repository_directory = os.path.join(repositories_directory, - repository_name) - - if not os.path.isdir(repository_directory): - raise exceptions.Error('The metadata directory' - ' for ' + repr(repository_name) + ' must exist' - ' at ' + repr(repository_directory)) - - else: - logger.debug('Found local directory for ' + repr(repository_name)) - - # The latest known root metadata file must also exist on disk. - root_file = os.path.join( - repository_directory, 'metadata', 'current', 'root.json') - - if not os.path.isfile(root_file): - raise exceptions.Error( - 'The Root file must exist at ' + repr(root_file)) - - else: - logger.debug('Found local Root file at ' + repr(root_file)) - - - - - - def _matching_targetinfo( - self, target_filename, mapping, match_custom_field=True): - valid_targetinfo = {} - - # Retrieve the targetinfo from each repository using the underlying - # Updater() instance. - for repository_name in mapping['repositories']: - logger.debug('Retrieving targetinfo for ' + repr(target_filename) + - ' from repository...') - - try: - targetinfo, updater = self._update_from_repository( - repository_name, target_filename) - - except (exceptions.UnknownTargetError, exceptions.Error): - continue - - valid_targetinfo[updater] = targetinfo - - matching_targetinfo = {} - logger.debug('Verifying that a threshold of targetinfo are equal...') - - # Iterate 'valid_targetinfo', looking for a threshold number of matches - # for 'targetinfo'. The first targetinfo to reach the required threshold - # is returned. 
For example, suppose the following list of targetinfo and - # a threshold of 2: - # [A, B, C, B, A, C] - # In this case, targetinfo B is returned. - for valid_updater, compared_targetinfo in valid_targetinfo.items(): - - if not self._targetinfo_match( - targetinfo, compared_targetinfo, match_custom_field): - continue - - else: - - matching_targetinfo[valid_updater] = targetinfo - - if not len(matching_targetinfo) >= mapping['threshold']: - continue - - else: - logger.debug('Found a threshold of matching targetinfo!') - # We now have a targetinfo (that matches across a threshold of - # repositories as instructed by the map file), along with the - # updaters that sign for it. - logger.debug( - 'Returning updaters for targetinfo: ' + repr(targetinfo)) - - return matching_targetinfo - - return None - - - - - - def _targetinfo_match(self, targetinfo1, targetinfo2, match_custom_field=True): - if match_custom_field: - return (targetinfo1 == targetinfo2) - - else: - targetinfo1_without_custom = copy.deepcopy(targetinfo1) - targetinfo2_without_custom = copy.deepcopy(targetinfo2) - targetinfo1_without_custom['fileinfo'].pop('custom', None) - targetinfo2_without_custom['fileinfo'].pop('custom', None) - - return (targetinfo1_without_custom == targetinfo2_without_custom) - - - - - - def _target_matches_path_pattern(self, target_filename, path_patterns): - for path_pattern in path_patterns: - logger.debug('Interrogating pattern ' + repr(path_pattern) + 'for' - ' target: ' + repr(target_filename)) - - # Example: "foo.tgz" should match with "/*.tgz". Make sure to strip any - # leading path separators so that a match is made if a repo maintainer - # uses a leading separator with a delegated glob pattern, but a client - # doesn't include one when a target file is requested. - if fnmatch.fnmatch(target_filename.lstrip(os.sep), path_pattern.lstrip(os.sep)): - logger.debug('Found a match for ' + repr(target_filename)) - return True - - else: - logger.debug('Continue searching for relevant paths.') - continue - - # If we are here, then none of the paths are relevant to the target. - logger.debug('None of the paths are relevant.') - return False - - - - - - - def get_updater(self, repository_name): - """ - - Get the updater instance corresponding to 'repository_name'. - - - repository_name: - The name of the repository as it appears in the map file. For example, - "Django" and "PyPI" in the "repositories" entry of the map file. - - "repositories": { - "Django": ["https://djangoproject.com/"], - "PyPI": ["https://pypi.python.org/"] - } - - - tuf.exceptions.FormatError, if any of the arguments are improperly - formatted. - - - None. - - - Returns the Updater() instance for 'repository_name'. If the instance - does not exist, return None. - """ - - # Are the arguments properly formatted? If not, raise - # 'tuf.exceptions.FormatError'. - formats.NAME_SCHEMA.check_match(repository_name) - - updater = self.repository_names_to_updaters.get(repository_name) - - if not updater: - - if repository_name not in self.repository_names_to_mirrors: - return None - - else: - # Create repository mirrors object needed by the - # tuf.client.updater.Updater(). Each 'repository_name' can have more - # than one mirror. - repo_mirrors = {} - - for url in self.repository_names_to_mirrors[repository_name]: - repo_mirrors[url] = { - 'url_prefix': url, - 'metadata_path': 'metadata', - 'targets_path': 'targets'} - - try: - # NOTE: State (e.g., keys) should NOT be shared across different - # updater instances. 
-          logger.debug('Adding updater for ' + repr(repository_name))
-          updater = Updater(repository_name, repo_mirrors)
-
-        except Exception:
-          return None
-
-        else:
-          self.repository_names_to_updaters[repository_name] = updater
-
-    else:
-      logger.debug('Found an updater for ' + repr(repository_name))
-
-    # Ensure the updater's metadata is the latest before returning it.
-    updater.refresh()
-    return updater
-
-
-
-
-
-  def _update_from_repository(self, repository_name, target_filename):
-
-    updater = self.get_updater(repository_name)
-
-    if not updater:
-      raise exceptions.Error(
-          'Cannot load updater for ' + repr(repository_name))
-
-    else:
-      # Get one valid target info from the Updater object.
-      # 'tuf.exceptions.UnknownTargetError' raised by get_one_valid_targetinfo
-      # if a valid target cannot be found.
-      return updater.get_one_valid_targetinfo(target_filename), updater
-
-
-
-
-
-class Updater(object):
-  """
-
-    Provide a class that can download target files securely.  The updater
-    keeps track of currently and previously trusted metadata, target files
-    available to the client, target file attributes such as file size and
-    hashes, key and role information, metadata signatures, and the ability
-    to determine when the download of a file should be permitted.
-
-
-    self.metadata:
-      Dictionary holding the currently and previously trusted metadata.
-
-      Example: {'current': {'root': ROOT_SCHEMA,
-                            'targets':TARGETS_SCHEMA, ...},
-                'previous': {'root': ROOT_SCHEMA,
-                             'targets':TARGETS_SCHEMA, ...}}
-
-    self.metadata_directory:
-      The directory where trusted metadata is stored.
-
-    self.versioninfo:
-      A cache of version numbers for the roles available on the repository.
-
-      Example: {'targets.json': {'version': 128}, ...}
-
-    self.mirrors:
-      The repository mirrors from which metadata and targets are available.
-      Conformant to 'tuf.formats.MIRRORDICT_SCHEMA'.
-
-    self.repository_name:
-      The name of the updater instance.
-
-
-    refresh():
-      This method downloads, verifies, and loads metadata for the top-level
-      roles in a specific order (i.e., root -> timestamp -> snapshot ->
-      targets).  The expiration time for downloaded metadata is also verified.
-
-      The metadata for delegated roles are not refreshed by this method, but
-      by the method that returns targetinfo (i.e., get_one_valid_targetinfo()).
-      The refresh() method should be called by the client before any target
-      requests.
-
-    get_one_valid_targetinfo(file_path):
-      Returns the target information for a specific file identified by its
-      file path.  This target method also downloads the metadata of updated
-      targets.
-
-    updated_targets(targets, destination_directory):
-      After the client has retrieved the target information for those targets
-      they are interested in updating, they would call this method to
-      determine which targets have changed from those saved locally on disk.
-      All the targets that have changed are returned in a list.  From this
-      list, they can request a download by calling 'download_target()'.
-
-    download_target(target, destination_directory):
-      This method performs the actual download of the specified target.  The
-      file is saved to the 'destination_directory' argument.
-
-    remove_obsolete_targets(destination_directory):
-      Any files located in 'destination_directory' that were previously
-      served by the repository but have since been removed, can be deleted
-      from disk by the client by calling this method.
-
-    Note: The methods listed above are public and intended for the software
-    updater integrating TUF with this module.
-    All other methods that may begin with a single leading underscore are
-    non-public and only used internally.  updater.py is not subclassed in
-    TUF, nor is it designed to be subclassed, so double leading underscores
-    are not used.
-    http://www.python.org/dev/peps/pep-0008/#method-names-and-instance-variables
-  """
-
-  def __init__(self, repository_name, repository_mirrors, fetcher=None):
-    """
-
-      Constructor.  Instantiating an updater object causes all the metadata
-      files for the top-level roles to be read from disk, including the key
-      and role information for the delegated targets of 'targets'.  The
-      actual metadata for delegated roles is not loaded in __init__.  The
-      metadata for these delegated roles, including nested delegated roles,
-      are loaded, updated, and saved to the 'self.metadata' store, as needed,
-      by get_one_valid_targetinfo().
-
-      The initial set of metadata files are provided by the software update
-      system utilizing TUF.
-
-      In order to use an updater, the following directories must already
-      exist locally:
-
-      {tuf.settings.repositories_directory}/{repository_name}/metadata/current
-      {tuf.settings.repositories_directory}/{repository_name}/metadata/previous
-
-      and, at a minimum, the root metadata file must exist:
-
-      {tuf.settings.repositories_directory}/{repository_name}/metadata/current/root.json
-
-
-      repository_name:
-        The name of the repository.
-
-      repository_mirrors:
-        A dictionary holding repository mirror information, conformant to
-        'tuf.formats.MIRRORDICT_SCHEMA'.  This dictionary holds information
-        such as the directory containing the metadata and target files, the
-        server's URL prefix, and the target content directories the client
-        should be confined to.
-
-        repository_mirrors = {'mirror1': {'url_prefix': 'http://localhost:8001',
-                                          'metadata_path': 'metadata',
-                                          'targets_path': 'targets',
-                                          'confined_target_dirs': ['']}}
-
-      fetcher:
-        A concrete 'FetcherInterface' implementation.  Performs the network
-        related download operations.  If an external implementation is not
-        provided, tuf.fetcher.RequestsFetcher is used.
-
-
-      securesystemslib.exceptions.FormatError:
-        If the arguments are improperly formatted.
-
-      tuf.exceptions.RepositoryError:
-        If there is an error with the updater's repository files, such
-        as a missing 'root.json' file.
-
-
-      The metadata files (e.g., 'root.json', 'targets.json') for the
-      top-level roles are read from disk and stored in dictionaries.  In
-      addition, the key and roledb modules are populated with
-      'repository_name' entries.
-
-
-      None.
-    """
-
-    # Do the arguments have the correct format?
-    # These checks ensure the arguments have the appropriate
-    # number of objects and object types and that all dict
-    # keys are properly named.
-    # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
-    sslib_formats.NAME_SCHEMA.check_match(repository_name)
-    formats.MIRRORDICT_SCHEMA.check_match(repository_mirrors)
-
-    # Save the validated arguments.
-    self.repository_name = repository_name
-    self.mirrors = repository_mirrors
-
-    # Initialize Updater with an externally provided 'fetcher' implementing
-    # the network download.  By default tuf.fetcher.RequestsFetcher is used.
-    if fetcher is None:
-      self.fetcher = requests_fetcher.RequestsFetcher()
-    else:
-      self.fetcher = fetcher
-
-    # Store the trusted metadata read from disk.
-    self.metadata = {}
-
-    # Store the currently trusted/verified metadata.
-    self.metadata['current'] = {}
-
-    # Store the previously trusted/verified metadata.
- self.metadata['previous'] = {} - - # Store the version numbers of roles available on the repository. The dict - # keys are paths, and the dict values versioninfo data. This information - # can help determine whether a metadata file has changed and needs to be - # re-downloaded. - self.versioninfo = {} - - # Store the file information of the root and snapshot roles. The dict keys - # are paths, the dict values fileinfo data. This information can help - # determine whether a metadata file has changed and so needs to be - # re-downloaded. - self.fileinfo = {} - - # Store the location of the client's metadata directory. - self.metadata_directory = {} - - # Store the 'consistent_snapshot' of the Root role. This setting - # determines if metadata and target files downloaded from remote - # repositories include the digest. - self.consistent_snapshot = False - - # Ensure the repository metadata directory has been set. - if settings.repositories_directory is None: - raise exceptions.RepositoryError('The TUF update client' - ' module must specify the directory containing the local repository' - ' files. "tuf.settings.repositories_directory" MUST be set.') - - # Set the path for the current set of metadata files. - repositories_directory = settings.repositories_directory - repository_directory = os.path.join(repositories_directory, self.repository_name) - - # raise MissingLocalRepository if the repo does not exist at all. - if not os.path.exists(repository_directory): - raise exceptions.MissingLocalRepositoryError('Local repository ' + - repr(repository_directory) + ' does not exist.') - - current_path = os.path.join(repository_directory, 'metadata', 'current') - - # Ensure the current path is valid/exists before saving it. - if not os.path.exists(current_path): - raise exceptions.RepositoryError('Missing' - ' ' + repr(current_path) + '. This path must exist and, at a minimum,' - ' contain the Root metadata file.') - - self.metadata_directory['current'] = current_path - - # Set the path for the previous set of metadata files. - previous_path = os.path.join(repository_directory, 'metadata', 'previous') - - # Ensure the previous path is valid/exists. - if not os.path.exists(previous_path): - raise exceptions.RepositoryError('Missing ' + repr(previous_path) + '.' - ' This path MUST exist.') - - self.metadata_directory['previous'] = previous_path - - # Load current and previous metadata. - for metadata_set in ['current', 'previous']: - for metadata_role in roledb.TOP_LEVEL_ROLES: - self._load_metadata_from_file(metadata_set, metadata_role) - - # Raise an exception if the repository is missing the required 'root' - # metadata. - if 'root' not in self.metadata['current']: - raise exceptions.RepositoryError('No root of trust!' - ' Could not find the "root.json" file.') - - - - - - def __str__(self): - """ - The string representation of an Updater object. - """ - - return self.repository_name - - - @staticmethod - def _get_local_filename(rolename: str) -> str: - """Return safe local filename for roles metadata - - Use URL encoding to prevent issues with path separators and - with forbidden characters in Windows filesystems""" - return parse.quote(rolename, '') + '.json' - - - def _load_metadata_from_file(self, metadata_set, metadata_role): - """ - - Non-public method that loads current or previous metadata if there is a - local file. If the expected file belonging to 'metadata_role' (e.g., - 'root.json') cannot be loaded, raise an exception. 
-      The extracted metadata object loaded from file is saved to the metadata
-      store (i.e., self.metadata).
-
-
-      metadata_set:
-        The string 'current' or 'previous', depending on whether one wants to
-        load the currently or previously trusted metadata file.
-
-      metadata_role:
-        The name of the metadata.  This is a role name and should
-        not end in '.json'.  Examples: 'root', 'targets', 'unclaimed'.
-
-
-      securesystemslib.exceptions.FormatError:
-        If the role object loaded for 'metadata_role' is improperly formatted.
-
-      securesystemslib.exceptions.Error:
-        If there was an error importing a delegated role of 'metadata_role'
-        or the 'metadata_set' is not one currently supported.
-
-
-      If the metadata is loaded successfully, it is saved to the metadata
-      store.  If 'metadata_role' is 'root', the role and key databases
-      are reloaded.  If 'metadata_role' is a target metadata, all its
-      delegated roles are refreshed.
-
-
-      None.
-    """
-
-    # Ensure we have a valid metadata set.
-    if metadata_set not in ['current', 'previous']:
-      raise sslib_exceptions.Error(
-          'Invalid metadata set: ' + repr(metadata_set))
-
-    # Save and construct the full metadata path.
-    metadata_directory = self.metadata_directory[metadata_set]
-    metadata_filename = self._get_local_filename(metadata_role)
-    metadata_filepath = os.path.join(metadata_directory, metadata_filename)
-
-    # Ensure the metadata path is valid/exists, else ignore the call.
-    if os.path.exists(metadata_filepath):
-      # Load the file.  The loaded object should conform to
-      # 'tuf.formats.SIGNABLE_SCHEMA'.
-      try:
-        metadata_signable = sslib_util.load_json_file(
-            metadata_filepath)
-
-      # Although the metadata file may exist locally, it may not
-      # be a valid json file.  On the next refresh cycle, it will be
-      # updated as required.  If Root cannot be loaded from disk
-      # successfully, an exception should be raised by the caller.
-      except sslib_exceptions.Error:
-        return
-
-      formats.check_signable_object_format(metadata_signable)
-
-      # Extract the 'signed' role object from 'metadata_signable'.
-      metadata_object = metadata_signable['signed']
-
-      # Save the metadata object to the metadata store.
-      self.metadata[metadata_set][metadata_role] = metadata_object
-
-      # If 'metadata_role' is 'root' or targets metadata, the key and role
-      # databases must be rebuilt.  If 'root', ensure
-      # self.consistent_snapshot is updated.
-      if metadata_set == 'current':
-        if metadata_role == 'root':
-          self._rebuild_key_and_role_db()
-          self.consistent_snapshot = metadata_object['consistent_snapshot']
-
-        elif metadata_object['_type'] == 'targets':
-          # TODO: Should we also remove the keys of the delegated roles?
-          self._import_delegations(metadata_role)
-
-
-
-
-
-  def _rebuild_key_and_role_db(self):
-    """
-
-      Non-public method that rebuilds the key and role databases from the
-      currently trusted 'root' metadata object extracted from 'root.json'.
-      This private method is called when a new/updated 'root' metadata file
-      is loaded or when updater.refresh() is called.  This method will only
-      store the role information of the top-level roles (i.e., 'root',
-      'targets', 'snapshot', 'timestamp').
-
-
-      None.
-
-
-      securesystemslib.exceptions.FormatError:
-        If the 'root' metadata is improperly formatted.
-
-      securesystemslib.exceptions.Error:
-        If there is an error loading a role contained in the 'root'
-        metadata.
-
-
-      The key and role databases are reloaded for the top-level roles.
-
-
-      None.
- """ - - # Clobbering this means all delegated metadata files are rendered outdated - # and will need to be reloaded. However, reloading the delegated metadata - # files is avoided here because fetching target information with - # get_one_valid_targetinfo() always causes a refresh of these files. The - # metadata files for delegated roles are also not loaded when the - # repository is first instantiated. Due to this setup, reloading delegated - # roles is not required here. - keydb.create_keydb_from_root_metadata(self.metadata['current']['root'], - self.repository_name) - - roledb.create_roledb_from_root_metadata(self.metadata['current']['root'], - self.repository_name) - - - - - - def _import_delegations(self, parent_role): - """ - - Non-public method that imports all the roles delegated by 'parent_role'. - - - parent_role: - The role whose delegations will be imported. - - - securesystemslib.exceptions.FormatError: - If a key attribute of a delegated role's signing key is - improperly formatted. - - securesystemslib.exceptions.Error: - If the signing key of a delegated role cannot not be loaded. - - - The key and role databases are modified to include the newly loaded roles - delegated by 'parent_role'. - - - None. - """ - - current_parent_metadata = self.metadata['current'][parent_role] - - if 'delegations' not in current_parent_metadata: - return - - # This could be quite slow with a large number of delegations. - keys_info = current_parent_metadata['delegations'].get('keys', {}) - roles_info = current_parent_metadata['delegations'].get('roles', []) - - logger.debug('Adding roles delegated from ' + repr(parent_role) + '.') - - # Iterate the keys of the delegated roles of 'parent_role' and load them. - for keyid, keyinfo in keys_info.items(): - if keyinfo['keytype'] in ['rsa', 'ed25519', 'ecdsa', 'ecdsa-sha2-nistp256']: - - # We specify the keyid to ensure that it's the correct keyid - # for the key. - try: - key, _ = sslib_keys.format_metadata_to_key(keyinfo, keyid) - - keydb.add_key(key, repository_name=self.repository_name) - - except exceptions.KeyAlreadyExistsError: - pass - - except (sslib_exceptions.FormatError, sslib_exceptions.Error): - logger.warning('Invalid key: ' + repr(keyid) + '. Aborting role ' + - 'delegation for parent role \'' + parent_role + '\'.') - raise - - else: - logger.warning('Invalid key type for ' + repr(keyid) + '.') - continue - - # Add the roles to the role database. - for roleinfo in roles_info: - try: - # NOTE: roledb.add_role will take care of the case where rolename - # is None. - rolename = roleinfo.get('name') - logger.debug('Adding delegated role: ' + str(rolename) + '.') - roledb.add_role(rolename, roleinfo, self.repository_name) - - except exceptions.RoleAlreadyExistsError: - logger.warning('Role already exists: ' + rolename) - - except Exception: - logger.warning('Failed to add delegated role: ' + repr(rolename) + '.') - raise - - - - - - def refresh(self, unsafely_update_root_if_necessary=True): - """ - - Update the latest copies of the metadata for the top-level roles. The - update request process follows a specific order to ensure the metadata - files are securely updated: - root (if necessary) -> timestamp -> snapshot -> targets. - - Delegated metadata is not refreshed by this method. After this method is - called, the use of get_one_valid_targetinfo() will update delegated - metadata, when required. Calling refresh() ensures that top-level - metadata is up-to-date, so that the target methods can refer to the - latest available content. 
- Thus, refresh() should always be called by the client before any
- requests of target file information.
-
- The expiration time for downloaded metadata is also verified, including
- local metadata that the repository claims is up to date.
-
- If the refresh fails for any reason, then, if
- 'unsafely_update_root_if_necessary' is set, refresh will be retried once
- after first attempting to update the root metadata file. Only after this
- attempt will the exceptions listed here potentially be raised.
-
-
- unsafely_update_root_if_necessary:
- Boolean that indicates whether to unsafely update the Root metadata if
- any of the top-level metadata cannot be downloaded successfully. The
- Root role is unsafely updated if its current version number is unknown.
-
-
- tuf.exceptions.NoWorkingMirrorError:
- If the metadata for any of the top-level roles cannot be updated.
-
- tuf.exceptions.ExpiredMetadataError:
- If any of the top-level metadata is expired and no new version was
- found.
-
-
- Updates the metadata files of the top-level roles with the latest
- information.
-
-
- None.
- """
-
- # Do the arguments have the correct format?
- # This check ensures the arguments have the appropriate
- # number of objects and object types, and that all dict
- # keys are properly named.
- # Raise 'securesystemslib.exceptions.FormatError' if the check fails.
- sslib_formats.BOOLEAN_SCHEMA.check_match(
- unsafely_update_root_if_necessary)
-
- # Update the top-level metadata. The _update_metadata_if_changed() and
- # _update_metadata() calls below do NOT perform an update if there
- # are insufficient trusted signatures for the specified metadata.
- # Raise 'tuf.exceptions.NoWorkingMirrorError' if an update fails.
- root_metadata = self.metadata['current']['root']
-
- try:
- self._ensure_not_expired(root_metadata, 'root')
-
- except exceptions.ExpiredMetadataError:
- # Raise 'tuf.exceptions.NoWorkingMirrorError' if a valid 'root.json'
- # (i.e., not expired and properly signed) cannot be installed.
- if unsafely_update_root_if_necessary:
- logger.info('Expired Root metadata was loaded from disk.'
- ' Trying to update it now.')
-
- # The caller explicitly requested not to unsafely fetch an expired Root.
- else:
- logger.info('An expired Root metadata was loaded and must be updated.')
- raise
-
- # Update the root metadata and verify it by building a chain of trusted
- # root keys from the current trusted root metadata file.
- self._update_root_metadata(root_metadata)
-
- # Ensure that the role and key information of the top-level roles is the
- # latest. We do this whether or not Root needed to be updated, in order to
- # ensure that, e.g., the entries in roledb for top-level roles are
- # populated with expected keyid info so that roles can be validated. In
- # certain circumstances, top-level metadata might be missing because it was
- # marked obsolete and deleted after a failed attempt, and thus we should
- # refresh them here as a protective measure. See Issue #736.
- self._rebuild_key_and_role_db()
- self.consistent_snapshot = \
- self.metadata['current']['root']['consistent_snapshot']
-
- # Use a default, but sane, upper length for timestamp metadata, and do
- # not require strict checks on its length.
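- #
- # NOTE (editor): timestamp.json is the one role that no other metadata
- # vouches for, so it is fetched "unsafely": the client can only cap how
- # many bytes it is willing to download, not pin an exact length or
- # version. DEFAULT_TIMESTAMP_UPPERLENGTH below is that cap. A minimal
- # illustrative tweak, assuming the cap is derived from tuf.settings (the
- # value shown is an example, not necessarily the shipped default):
- #
- #   settings.DEFAULT_TIMESTAMP_REQUIRED_LENGTH = 16384  # bytes
- #
- # Signature and expiration checks still apply to the capped download.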
- self._update_metadata('timestamp', DEFAULT_TIMESTAMP_UPPERLENGTH) - - self._update_metadata_if_changed('snapshot', - referenced_metadata='timestamp') - self._update_metadata_if_changed('targets') - - - - def _update_root_metadata(self, current_root_metadata): - """ - - The root file must be signed by the current root threshold and keys as - well as the previous root threshold and keys. The update process for root - files means that each intermediate root file must be downloaded, to build - a chain of trusted root keys from keys already trusted by the client: - - 1.root -> 2.root -> 3.root - - 3.root must be signed by the threshold and keys of 2.root, and 2.root - must be signed by the threshold and keys of 1.root. - - - current_root_metadata: - The currently held version of root. - - - Updates the root metadata files with the latest information. - - - None. - """ - - def neither_403_nor_404(mirror_error): - if isinstance(mirror_error, tuf.exceptions.FetcherHTTPError): - if mirror_error.status_code in {403, 404}: - return False - return True - - # Temporarily set consistent snapshot. Will be updated to whatever is set - # in the latest root.json after running through the intermediates with - # _update_metadata(). - self.consistent_snapshot = True - - # Following the spec, try downloading the N+1th root for a certain maximum - # number of times. - lower_bound = current_root_metadata['version'] + 1 - upper_bound = lower_bound + settings.MAX_NUMBER_ROOT_ROTATIONS - - # Try downloading the next root. - for next_version in range(lower_bound, upper_bound): - try: - # Thoroughly verify it. - self._update_metadata('root', DEFAULT_ROOT_UPPERLENGTH, - version=next_version) - # When we run into HTTP 403/404 error from ALL mirrors, break out of - # loop, because the next root metadata file is most likely missing. - except exceptions.NoWorkingMirrorError as exception: - for mirror_error in exception.mirror_errors.values(): - # Otherwise, reraise the error, because it is not a simple HTTP - # error. - if neither_403_nor_404(mirror_error): - logger.info('Misc error for root version ' + str(next_version)) - raise - else: - logger.debug('HTTP error for root version ' + str(next_version)) - # If we are here, then we ran into only 403 / 404 errors, which are - # good reasons to suspect that the next root metadata file does not - # exist. - break - - # Ensure that the role and key information of the top-level roles is the - # latest. We do this whether or not Root needed to be updated, in order - # to ensure that, e.g., the entries in roledb for top-level roles are - # populated with expected keyid info so that roles can be validated. In - # certain circumstances, top-level metadata might be missing because it - # was marked obsolete and deleted after a failed attempt, and thus we - # should refresh them here as a protective measure. See Issue #736. - self._rebuild_key_and_role_db() - - # Set our consistent snapshot property to what the latest root has said. - self.consistent_snapshot = \ - self.metadata['current']['root']['consistent_snapshot'] - - - - def _check_hashes(self, file_object, trusted_hashes): - """ - - Non-public method that verifies multiple secure hashes of 'file_object'. - - - file_object: - A file object. - - trusted_hashes: - A dictionary with hash-algorithm names as keys and hashes as dict values. - The hashes should be in the hexdigest format. Should be Conformant to - 'securesystemslib.formats.HASHDICT_SCHEMA'. - - - securesystemslib.exceptions.BadHashError, if the hashes don't match. 
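-
- Example (editor's illustrative addition; the hash value is a placeholder
- and 'temp_file_object' is a hypothetical open file object):
-
- trusted_hashes = {'sha256': 'dbfac345..'}
- self._check_hashes(temp_file_object, trusted_hashes)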
- - - Hash digest object is created using the 'securesystemslib.hash' module. - Position within file_object is changed. - - - None. - """ - - # Verify each hash, raise an exception if any hash fails to verify - for algorithm, trusted_hash in trusted_hashes.items(): - digest_object = sslib_hash.digest_fileobject(file_object, - algorithm) - computed_hash = digest_object.hexdigest() - - if trusted_hash != computed_hash: - raise sslib_exceptions.BadHashError(trusted_hash, - computed_hash) - - else: - logger.info('Verified ' + algorithm + ' hash: ' + trusted_hash) - - - - - - def _check_file_length(self, file_object, trusted_file_length): - """ - - Non-public method that ensures the length of 'file_object' is strictly - equal to 'trusted_file_length'. This is a deliberately redundant - implementation designed to complement - download._check_downloaded_length(). - - - file_object: - A file object. - - trusted_file_length: - A non-negative integer that is the trusted length of the file. - - - tuf.exceptions.DownloadLengthMismatchError, if the lengths do not match. - - - Reads the contents of 'file_object' and logs a message if 'file_object' - matches the trusted length. - Position within file_object is changed. - - - None. - """ - - file_object.seek(0, io.SEEK_END) - observed_length = file_object.tell() - - # Return and log a message if the length 'file_object' is equal to - # 'trusted_file_length', otherwise raise an exception. A hard check - # ensures that a downloaded file strictly matches a known, or trusted, - # file length. - if observed_length != trusted_file_length: - raise exceptions.DownloadLengthMismatchError(trusted_file_length, - observed_length) - - else: - logger.debug('Observed length (' + str(observed_length) +\ - ') == trusted length (' + str(trusted_file_length) + ')') - - - - - - def _get_target_file(self, target_filepath, file_length, file_hashes, - prefix_filename_with_hash): - """ - - Non-public method that safely (i.e., the file length and hash are - strictly equal to the trusted) downloads a target file up to a certain - length, and checks its hashes thereafter. - - - target_filepath: - The target filepath (relative to the repository targets directory) - obtained from TUF targets metadata. - - file_length: - The expected compressed length of the target file. If the file is not - compressed, then it will simply be its uncompressed length. - - file_hashes: - The expected hashes of the target file. - - prefix_filename_with_hash: - Whether to prefix the targets file names with their hash when using - consistent snapshot. - This should be set to False when the served target filenames are not - prefixed with hashes (in this case the server uses other means - to ensure snapshot consistency). - - - tuf.exceptions.NoWorkingMirrorError: - The target could not be fetched. This is raised only when all known - mirrors failed to provide a valid copy of the desired target file. - - - The target file is downloaded from all known repository mirrors in the - worst case. If a valid copy of the target file is found, it is stored in - a temporary file and returned. - - - A file object containing the target. - """ - - if self.consistent_snapshot and prefix_filename_with_hash: - # Note: values() does not return a list in Python 3. Use list() - # on values() for Python 2+3 compatibility. - target_digest = list(file_hashes.values()).pop() - dirname, basename = os.path.split(target_filepath) - target_filepath = os.path.join(dirname, target_digest + '.' 
+ basename) - - file_mirrors = mirrors.get_list_of_mirrors('target', target_filepath, - self.mirrors) - - # file_mirror (URL): error (Exception) - file_mirror_errors = {} - file_object = None - - for file_mirror in file_mirrors: - try: - file_object = download.safe_download(file_mirror, - file_length, self.fetcher) - - # Verify 'file_object' against the expected length and hashes. - self._check_file_length(file_object, file_length) - self._check_hashes(file_object, file_hashes) - # If the file verifies, we don't need to try more mirrors - return file_object - - except Exception as exception: - # Remember the error from this mirror, close tempfile if one was opened - logger.debug('Update failed from ' + file_mirror + '.') - file_mirror_errors[file_mirror] = exception - if file_object is not None: - file_object.close() - file_object = None - - logger.debug('Failed to update ' + repr(target_filepath) + ' from' - ' all mirrors: ' + repr(file_mirror_errors)) - raise exceptions.NoWorkingMirrorError(file_mirror_errors) - - - - - - def _verify_root_self_signed(self, signable): - """ - Verify the root metadata in signable is signed by a threshold of keys, - where the threshold and valid keys are defined by itself - """ - threshold = signable['signed']['roles']['root']['threshold'] - keyids = signable['signed']['roles']['root']['keyids'] - keys = signable['signed']['keys'] - signatures = signable['signatures'] - signed = sslib_formats.encode_canonical( - signable['signed']).encode('utf-8') - verified_sig_keyids = set() - - for signature in signatures: - keyid = signature['keyid'] - - # At this point we are verifying that the root metadata is signed by a - # threshold of keys listed in the current root role, therefore skip - # keys with a keyid that is not listed in the current root role. - if keyid not in keyids: - continue - - key = keys[keyid] - # The ANYKEY_SCHEMA check in verify_signature expects the keydict to - # include a keyid - key['keyid'] = keyid - valid_sig = sslib_keys.verify_signature(key, signature, signed) - - if valid_sig: - verified_sig_keyids.add(keyid) - - if len(verified_sig_keyids) >= threshold: - return True - return False - - - - - - def _verify_metadata_file(self, metadata_file_object, - metadata_role): - """ - - Non-public method that verifies a metadata file. An exception is - raised if 'metadata_file_object is invalid. There is no - return value. - - - metadata_file_object: - A file object containing the metadata file. - - metadata_role: - The role name of the metadata (e.g., 'root', 'targets', - 'unclaimed'). - - - securesystemslib.exceptions.FormatError: - In case the metadata file is valid JSON, but not valid TUF metadata. - - tuf.exceptions.InvalidMetadataJSONError: - In case the metadata file is not valid JSON. - - tuf.exceptions.ReplayedMetadataError: - In case the downloaded metadata file is older than the current one. - - tuf.exceptions.RepositoryError: - In case the repository is somehow inconsistent; e.g. a parent has not - delegated to a child (contrary to expectations). - - tuf.SignatureError: - In case the metadata file does not have a valid signature. - - - The content of 'metadata_file_object' is read and loaded, the current - position within the file is changed. - - - None. 
- """ - - metadata_file_object.seek(0) - metadata = metadata_file_object.read().decode('utf-8') - - try: - metadata_signable = sslib_util.load_json_string(metadata) - - except Exception as exception: - raise exceptions.InvalidMetadataJSONError(exception) - - else: - # Ensure the loaded 'metadata_signable' is properly formatted. Raise - # 'securesystemslib.exceptions.FormatError' if not. - formats.check_signable_object_format(metadata_signable) - - # Is 'metadata_signable' expired? - self._ensure_not_expired(metadata_signable['signed'], metadata_role) - - # We previously verified version numbers in this function, but have since - # moved version number verification to the functions that retrieve - # metadata. - - # Verify the signature on the downloaded metadata object. - valid = sig.verify(metadata_signable, metadata_role, - self.repository_name) - - if not valid: - raise sslib_exceptions.BadSignatureError(metadata_role) - - # For root metadata, verify the downloaded root metadata object with the - # new threshold of new signatures contained within the downloaded root - # metadata object - # NOTE: we perform the checks on root metadata here because this enables - # us to perform the check before the tempfile is persisted. Furthermore, - # by checking here we can easily perform the check for each download - # mirror. Whereas if we check after _verify_metadata_file we may be - # persisting invalid files and we cannot try copies of the file from other - # mirrors. - if valid and metadata_role == 'root': - valid = self._verify_root_self_signed(metadata_signable) - if not valid: - raise sslib_exceptions.BadSignatureError(metadata_role) - - - - - - def _get_metadata_file(self, metadata_role, remote_filename, - upperbound_filelength, expected_version): - """ - - Non-public method that tries downloading, up to a certain length, a - metadata file from a list of known mirrors. As soon as the first valid - copy of the file is found, the downloaded file is returned and the - remaining mirrors are skipped. - - - metadata_role: - The role name of the metadata (e.g., 'root', 'targets', 'unclaimed'). - - remote_filename: - The relative file path (on the remove repository) of 'metadata_role'. - - upperbound_filelength: - The expected length, or upper bound, of the metadata file to be - downloaded. - - expected_version: - The expected and required version number of the 'metadata_role' file - downloaded. 'expected_version' is an integer. - - - tuf.exceptions.NoWorkingMirrorError: - The metadata could not be fetched. This is raised only when all known - mirrors failed to provide a valid copy of the desired metadata file. - - - The file is downloaded from all known repository mirrors in the worst - case. If a valid copy of the file is found, it is stored in a temporary - file and returned. - - - A file object containing the metadata. - """ - - file_mirrors = mirrors.get_list_of_mirrors('meta', remote_filename, - self.mirrors) - - # file_mirror (URL): error (Exception) - file_mirror_errors = {} - file_object = None - - for file_mirror in file_mirrors: - try: - file_object = download.unsafe_download(file_mirror, - upperbound_filelength, self.fetcher) - file_object.seek(0) - - # Verify 'file_object' according to the callable function. - # 'file_object' is also verified if decompressed above (i.e., the - # uncompressed version). - metadata_signable = \ - sslib_util.load_json_string(file_object.read().decode('utf-8')) - - # Determine if the specification version number is supported. 
It is - # assumed that "spec_version" is in (major.minor.fix) format, (for - # example: "1.4.3") and that releases with the same major version - # number maintain backwards compatibility. Consequently, if the major - # version number of new metadata equals our expected major version - # number, the new metadata is safe to parse. - try: - metadata_spec_version = metadata_signable['signed']['spec_version'] - metadata_spec_version_split = metadata_spec_version.split('.') - metadata_spec_major_version = int(metadata_spec_version_split[0]) - metadata_spec_minor_version = int(metadata_spec_version_split[1]) - - code_spec_version_split = tuf.SPECIFICATION_VERSION.split('.') - code_spec_major_version = int(code_spec_version_split[0]) - code_spec_minor_version = int(code_spec_version_split[1]) - - if metadata_spec_major_version != code_spec_major_version: - raise exceptions.UnsupportedSpecificationError( - 'Downloaded metadata that specifies an unsupported ' - 'spec_version. This code supports major version number: ' + - repr(code_spec_major_version) + '; however, the obtained ' - 'metadata lists version number: ' + str(metadata_spec_version)) - - #report to user if minor versions do not match, continue with update - if metadata_spec_minor_version != code_spec_minor_version: - logger.info("Downloaded metadata that specifies a different minor " + - "spec_version. This code has version " + - str(tuf.SPECIFICATION_VERSION) + - " and the metadata lists version number " + - str(metadata_spec_version) + - ". The update will continue as the major versions match.") - - except (ValueError, TypeError) as error: - raise sslib_exceptions.FormatError('Improperly' - ' formatted spec_version, which must be in major.minor.fix format') from error - - # If the version number is unspecified, ensure that the version number - # downloaded is greater than the currently trusted version number for - # 'metadata_role'. - version_downloaded = metadata_signable['signed']['version'] - - if expected_version is not None: - # Verify that the downloaded version matches the version expected by - # the caller. - if version_downloaded != expected_version: - raise exceptions.BadVersionNumberError('Downloaded' - ' version number: ' + repr(version_downloaded) + '. Version' - ' number MUST be: ' + repr(expected_version)) - - # The caller does not know which version to download. Verify that the - # downloaded version is at least greater than the one locally - # available. - else: - # Verify that the version number of the locally stored - # 'timestamp.json', if available, is less than what was downloaded. - # Otherwise, accept the new timestamp with version number - # 'version_downloaded'. - - try: - current_version = \ - self.metadata['current'][metadata_role]['version'] - - if version_downloaded < current_version: - raise exceptions.ReplayedMetadataError(metadata_role, - version_downloaded, current_version) - - except KeyError: - logger.info(metadata_role + ' not available locally.') - - self._verify_metadata_file(file_object, metadata_role) - - except Exception as exception: - # Remember the error from this mirror, and "reset" the target file. 
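- #
- # NOTE (editor): every failed mirror is recorded here and, if no mirror
- # succeeds, the whole dict is raised below as one aggregate exception.
- # A caller can inspect the per-mirror causes roughly like this
- # (illustrative sketch; 'updater' is a hypothetical Updater instance):
- #
- #   try:
- #     updater.refresh()
- #   except tuf.exceptions.NoWorkingMirrorError as error:
- #     for mirror_url, mirror_error in error.mirror_errors.items():
- #       print(mirror_url, repr(mirror_error))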
- logger.debug('Update failed from ' + file_mirror + '.')
- file_mirror_errors[file_mirror] = exception
- if file_object:
- file_object.close()
- file_object = None
-
- else:
- break
-
- if file_object:
- return file_object
-
- else:
- logger.debug('Failed to update ' + repr(remote_filename) + ' from all'
- ' mirrors: ' + repr(file_mirror_errors))
- raise exceptions.NoWorkingMirrorError(file_mirror_errors)
-
-
-
-
- def _update_metadata(self, metadata_role, upperbound_filelength, version=None):
- """
-
- Non-public method that downloads, verifies, and 'installs' the metadata
- belonging to 'metadata_role'. Calling this method implies that the
- 'metadata_role' on the repository is newer than the client's, and thus
- needs to be re-downloaded. The current and previous metadata stores are
- updated if the new metadata is successfully downloaded and verified.
- This method also assumes that the store of top-level metadata is the
- latest and exists.
-
-
- metadata_role:
- The name of the metadata. This is a role name and should not end
- in '.json'. Examples: 'root', 'targets', 'targets/linux/x86'.
-
- upperbound_filelength:
- The expected length, or upper bound, of the metadata file to be
- downloaded.
-
- version:
- The expected and required version number of the 'metadata_role' file
- downloaded. 'version' is an integer.
-
-
- tuf.exceptions.NoWorkingMirrorError:
- The metadata cannot be updated. This is not specific to a single
- failure but rather indicates that all possible ways to update the
- metadata have been tried and failed.
-
-
- The metadata file belonging to 'metadata_role' is downloaded from a
- repository mirror. If the metadata is valid, it is stored in the
- metadata store.
-
-
- None.
- """
-
- # Attempt a file download from each mirror until the file is downloaded and
- # verified. If the signature of the downloaded file is valid, proceed,
- # otherwise log a warning and try the next mirror. 'metadata_file_object'
- # is the file-like object returned by 'download.py'. 'metadata_signable'
- # is the object extracted from 'metadata_file_object'. Metadata saved to
- # files are regarded as 'signable' objects, conformant to
- # 'tuf.formats.SIGNABLE_SCHEMA'.
- #
- # Some metadata (presently timestamp) will be downloaded "unsafely", in the
- # sense that we can only estimate its true length and know nothing about
- # its version. This is because not all metadata will have other metadata
- # for it; otherwise we will have an infinite regress of metadata signing
- # for each other. In this case, we will download the metadata up to the
- # best length we can get for it, not request a specific version, but
- # perform the rest of the checks (e.g., signature verification).
-
- # Construct the metadata filename as expected by the download/mirror
- # modules. The local filename is quoted to protect against names like
- # "../file".
-
- remote_filename = metadata_role + '.json'
- local_filename = self._get_local_filename(metadata_role)
- filename_version = ''
-
- if self.consistent_snapshot and version:
- filename_version = version
- dirname, basename = os.path.split(remote_filename)
- remote_filename = os.path.join(
- dirname, str(filename_version) + '.' + basename)
-
- metadata_file_object = \
- self._get_metadata_file(metadata_role, remote_filename,
- upperbound_filelength, version)
-
- # The metadata has been verified. Move the metadata file into place.
- # First, move the 'current' metadata file to the 'previous' directory
- # if it exists.
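- #
- # NOTE (editor): the client keeps two metadata trees side by side, e.g.
- # (layout illustrative, rooted at tuf.settings.repositories_directory):
- #
- #   <repositories_directory>/<repository_name>/metadata/current/root.json
- #   <repositories_directory>/<repository_name>/metadata/previous/root.json
- #
- # so the previously trusted copy survives one update cycle and can be
- # compared against 'current', e.g. by remove_obsolete_targets().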
- current_filepath = os.path.join(self.metadata_directory['current'], - local_filename) - current_filepath = os.path.abspath(current_filepath) - sslib_util.ensure_parent_dir(current_filepath) - - previous_filepath = os.path.join(self.metadata_directory['previous'], - local_filename) - previous_filepath = os.path.abspath(previous_filepath) - - if os.path.exists(current_filepath): - # Previous metadata might not exist, say when delegations are added. - sslib_util.ensure_parent_dir(previous_filepath) - shutil.move(current_filepath, previous_filepath) - - # Next, move the verified updated metadata file to the 'current' directory. - metadata_file_object.seek(0) - metadata_signable = \ - sslib_util.load_json_string(metadata_file_object.read().decode('utf-8')) - - sslib_util.persist_temp_file(metadata_file_object, current_filepath) - - # Extract the metadata object so we can store it to the metadata store. - # 'current_metadata_object' set to 'None' if there is not an object - # stored for 'metadata_role'. - updated_metadata_object = metadata_signable['signed'] - current_metadata_object = self.metadata['current'].get(metadata_role) - - # Finally, update the metadata and fileinfo stores, and rebuild the - # key and role info for the top-level roles if 'metadata_role' is root. - # Rebuilding the key and role info is required if the newly-installed - # root metadata has revoked keys or updated any top-level role information. - logger.debug('Updated ' + repr(current_filepath) + '.') - self.metadata['previous'][metadata_role] = current_metadata_object - self.metadata['current'][metadata_role] = updated_metadata_object - self._update_versioninfo(remote_filename) - - - - - - def _update_metadata_if_changed(self, metadata_role, - referenced_metadata='snapshot'): - """ - - Non-public method that updates the metadata for 'metadata_role' if it has - changed. All top-level roles other than the 'timestamp' and 'root' - roles are updated by this method. The 'timestamp' role is always - downloaded from a mirror without first checking if it has been updated; - it is updated in refresh() by calling _update_metadata('timestamp'). - The 'root' role is always updated first and verified based on the trusted - root metadata file the client already has a copy of; it is updated in - refresh() by calling _update_root_metadata(). - This method is also called for delegated role metadata, which are - referenced by 'snapshot'. - - If the metadata needs to be updated but an update cannot be obtained, - this method will delete the file. - - Due to the way in which metadata files are updated, it is expected that - 'referenced_metadata' is not out of date and trusted. The refresh() - method updates the top-level roles in 'root -> timestamp -> snapshot -> - targets' order. For delegated metadata, the parent role is - updated before the delegated role. Taking into account that - 'referenced_metadata' is updated and verified before 'metadata_role', - this method determines if 'metadata_role' has changed by checking - the 'meta' field of the newly updated 'referenced_metadata'. - - - metadata_role: - The name of the metadata. This is a role name and should not end - in '.json'. Examples: 'root', 'targets', 'unclaimed'. - - referenced_metadata: - This is the metadata that provides the role information for - 'metadata_role'. For the top-level roles, the 'snapshot' role - is the referenced metadata for the 'root', and 'targets' roles. - The 'timestamp' metadata is always downloaded regardless. 
In - other words, it is updated by calling _update_metadata('timestamp') - and not by this method. The referenced metadata for 'snapshot' - is 'timestamp'. See refresh(). - - - tuf.exceptions.ExpiredMetadataError: - If local metadata is expired and newer metadata is not available. - - tuf.exceptions.NoWorkingMirrorError: - If 'metadata_role' could not be downloaded after determining that it - had changed. - - tuf.exceptions.RepositoryError: - If the referenced metadata is missing. - - - If it is determined that 'metadata_role' has been updated, the metadata - store (i.e., self.metadata) is updated with the new metadata and the - affected stores modified (i.e., the previous metadata store is updated). - If the metadata is 'targets' or a delegated targets role, the role - database is updated with the new information, including its delegated - roles. - - - None. - """ - - metadata_filename = metadata_role + '.json' - expected_versioninfo = None - - # Ensure the referenced metadata has been loaded. The 'root' role may be - # updated without having 'snapshot' available. - if referenced_metadata not in self.metadata['current']: - raise exceptions.RepositoryError('Cannot update' - ' ' + repr(metadata_role) + ' because ' + referenced_metadata + ' is' - ' missing.') - - # The referenced metadata has been loaded. Extract the new versioninfo for - # 'metadata_role' from it. - else: - logger.debug(repr(metadata_role) + ' referenced in ' + - repr(referenced_metadata)+ '. ' + repr(metadata_role) + - ' may be updated.') - - # Simply return if the metadata for 'metadata_role' has not been updated, - # according to the uncompressed metadata provided by the referenced - # metadata. The metadata is considered updated if its version number is - # strictly greater than its currently trusted version number. - expected_versioninfo = self.metadata['current'][referenced_metadata] \ - ['meta'][metadata_filename] - - if not self._versioninfo_has_been_updated(metadata_filename, - expected_versioninfo): - logger.info(repr(metadata_filename) + ' up-to-date.') - - # Since we have not downloaded a new version of this metadata, we should - # check to see if our local version is stale and notify the user if so. - # This raises tuf.exceptions.ExpiredMetadataError if the metadata we have - # is expired. Resolves issue #322. - self._ensure_not_expired(self.metadata['current'][metadata_role], - metadata_role) - - # TODO: If metadata role is snapshot, we should verify that snapshot's - # hash matches what's listed in timestamp.json per step 3.1 of the - # detailed workflows in the specification - - return - - logger.debug('Metadata ' + repr(metadata_filename) + ' has changed.') - - # The file lengths of metadata are unknown, only their version numbers are - # known. Set an upper limit for the length of the downloaded file for each - # expected role. Note: The Timestamp role is not updated via this - # function. - if metadata_role == 'snapshot': - upperbound_filelength = settings.DEFAULT_SNAPSHOT_REQUIRED_LENGTH - - # The metadata is considered Targets (or delegated Targets metadata). - else: - upperbound_filelength = settings.DEFAULT_TARGETS_REQUIRED_LENGTH - - try: - self._update_metadata(metadata_role, upperbound_filelength, - expected_versioninfo['version']) - - except Exception: - # The current metadata we have is not current but we couldn't get new - # metadata. We shouldn't use the old metadata anymore. 
- # This will get rid of in-memory knowledge of the role and delegated
- # roles, but will leave delegated metadata files as current files on
- # disk.
- #
- # TODO: Should we get rid of the delegated metadata files? We shouldn't
- # need to, but we need to check the trust implications of the current
- # implementation.
- self._delete_metadata(metadata_role)
- logger.warning('Metadata for ' + repr(metadata_role) + ' cannot'
- ' be updated.')
- raise
-
- else:
- # We need to import the delegated roles of 'metadata_role', since its
- # list of delegations might have changed from what was previously
- # loaded.
- # TODO: Should we remove the keys of the delegated roles?
- self._import_delegations(metadata_role)
-
-
-
-
- def _versioninfo_has_been_updated(self, metadata_filename, new_versioninfo):
- """
-
- Non-public method that determines whether the current versioninfo of
- 'metadata_filename' is less than 'new_versioninfo' (i.e., the version
- number has been incremented). The 'new_versioninfo' argument should be
- extracted from the latest copy of the metadata that references
- 'metadata_filename'. Example: 'root.json' would be referenced by
- 'snapshot.json'.
-
- 'new_versioninfo' should only be 'None' if this is for updating
- 'root.json' without having 'snapshot.json' available.
-
-
- metadata_filename:
- The metadata filename for the role. For the 'root' role,
- 'metadata_filename' would be 'root.json'.
-
- new_versioninfo:
- A dict object representing the new file information for
- 'metadata_filename'. 'new_versioninfo' may be 'None' when
- updating 'root' without having 'snapshot' available. This
- dict conforms to 'tuf.formats.VERSIONINFO_SCHEMA' and has
- the form:
-
- {'version': 288}
-
-
- None.
-
-
- If there is no versioninfo currently loaded for 'metadata_filename', try
- to load it.
-
-
- Boolean. True if the versioninfo has changed, False otherwise.
- """
-
- # If there is no versioninfo currently stored for 'metadata_filename',
- # try to load the file, calculate the versioninfo, and store it.
- if metadata_filename not in self.versioninfo:
- self._update_versioninfo(metadata_filename)
-
- # Return True if there is no versioninfo for 'metadata_filename'.
- # 'metadata_filename' is not in the 'self.versioninfo' store
- # and it doesn't exist in the 'current' metadata location.
- if self.versioninfo[metadata_filename] is None:
- return True
-
- current_versioninfo = self.versioninfo[metadata_filename]
-
- logger.debug('New version for ' + repr(metadata_filename) +
- ': ' + repr(new_versioninfo['version']) + '. Old version: ' +
- repr(current_versioninfo['version']))
-
- if new_versioninfo['version'] > current_versioninfo['version']:
- return True
-
- else:
- return False
-
-
-
-
- def _update_versioninfo(self, metadata_filename):
- """
-
- Non-public method that updates the 'self.versioninfo' entry for the
- metadata belonging to 'metadata_filename'. If the current metadata for
- 'metadata_filename' cannot be loaded, set its 'versioninfo' to 'None' to
- signal that it is not in 'self.versioninfo' AND it also doesn't exist
- locally.
-
-
- metadata_filename:
- The metadata filename for the role. For the 'root' role,
- 'metadata_filename' would be 'root.json'.
-
-
- None.
-
-
- The version number of 'metadata_filename' is calculated and stored in its
- corresponding entry in 'self.versioninfo'.
-
-
- None.
- """
-
- # In case we delayed loading the metadata and didn't do it in
- # __init__ (such as with delegated metadata), then get the version
- # info now.
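-
- # NOTE (editor): entries in 'self.versioninfo' are keyed by the remote
- # metadata filename and conform to 'tuf.formats.VERSIONINFO_SCHEMA'.
- # Illustrative contents (values are examples only):
- #
- #   self.versioninfo['snapshot.json'] = {'version': 288}
- #   self.versioninfo['unclaimed.json'] = None   # nothing loadable locally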
- - # 'metadata_filename' is the key from meta dictionary: build the - # corresponding local filepath like _get_local_filename() - local_filename = parse.quote(metadata_filename, "") - current_filepath = os.path.join(self.metadata_directory['current'], - local_filename) - - # If the path is invalid, simply return and leave versioninfo unset. - if not os.path.exists(current_filepath): - self.versioninfo[metadata_filename] = None - return - - # Extract the version information from the trusted snapshot role and save - # it to the 'self.versioninfo' store. - if metadata_filename == 'timestamp.json': - trusted_versioninfo = \ - self.metadata['current']['timestamp']['version'] - - # When updating snapshot.json, the client either (1) has a copy of - # snapshot.json, or (2) is in the process of obtaining it by first - # downloading timestamp.json. Note: Clients are allowed to have only - # root.json initially, and perform a refresh of top-level metadata to - # obtain the remaining roles. - elif metadata_filename == 'snapshot.json': - - # Verify the version number of the currently trusted snapshot.json in - # snapshot.json itself. Checking the version number specified in - # timestamp.json may be greater than the version specified in the - # client's copy of snapshot.json. - try: - timestamp_version_number = self.metadata['current']['snapshot']['version'] - trusted_versioninfo = formats.make_versioninfo( - timestamp_version_number) - - except KeyError: - trusted_versioninfo = \ - self.metadata['current']['timestamp']['meta']['snapshot.json'] - - else: - - try: - # The metadata file names in 'self.metadata' exclude the role - # extension. Strip the '.json' extension when checking if - # 'metadata_filename' currently exists. - targets_version_number = \ - self.metadata['current'][metadata_filename[:-len('.json')]]['version'] - trusted_versioninfo = \ - formats.make_versioninfo(targets_version_number) - - except KeyError: - trusted_versioninfo = \ - self.metadata['current']['snapshot']['meta'][metadata_filename] - - self.versioninfo[metadata_filename] = trusted_versioninfo - - - - - def _move_current_to_previous(self, metadata_role): - """ - - Non-public method that moves the current metadata file for 'metadata_role' - to the previous directory. - - - metadata_role: - The name of the metadata. This is a role name and should not end - in '.json'. Examples: 'root', 'targets', 'targets/linux/x86'. - - - None. - - - The metadata file for 'metadata_role' is removed from 'current' - and moved to the 'previous' directory. - - - None. - """ - - # Get the 'current' and 'previous' full file paths for 'metadata_role' - metadata_filepath = self._get_local_filename(metadata_role) - previous_filepath = os.path.join(self.metadata_directory['previous'], - metadata_filepath) - current_filepath = os.path.join(self.metadata_directory['current'], - metadata_filepath) - - # Remove the previous path if it exists. - if os.path.exists(previous_filepath): - os.remove(previous_filepath) - - # Move the current path to the previous path. - if os.path.exists(current_filepath): - sslib_util.ensure_parent_dir(previous_filepath) - os.rename(current_filepath, previous_filepath) - - - - - - def _delete_metadata(self, metadata_role): - """ - - Non-public method that removes all (current) knowledge of 'metadata_role'. - The metadata belonging to 'metadata_role' is removed from the current - 'self.metadata' store and from the role database. The 'root.json' role - file is never removed. - - - metadata_role: - The name of the metadata. 
- This is a role name and should not end
- in '.json'. Examples: 'root', 'targets', 'targets/linux/x86'.
-
-
- None.
-
-
- The role database is modified and the metadata for 'metadata_role'
- removed from the 'self.metadata' store.
-
-
- None.
- """
-
- # The root metadata role is never deleted without a replacement.
- if metadata_role == 'root':
- return
-
- # Get rid of the current metadata file.
- self._move_current_to_previous(metadata_role)
-
- # Remove knowledge of the role.
- if metadata_role in self.metadata['current']:
- del self.metadata['current'][metadata_role]
- roledb.remove_role(metadata_role, self.repository_name)
-
-
-
-
- def _ensure_not_expired(self, metadata_object, metadata_rolename):
- """
-
- Non-public method that raises an exception if the current specified
- metadata has expired.
-
-
- metadata_object:
- The metadata to be checked for expiration, a
- 'tuf.formats.ANYROLE_SCHEMA' object.
-
- metadata_rolename:
- The name of the metadata. This is a role name and should not end
- in '.json'. Examples: 'root', 'targets', 'targets/linux/x86'.
-
-
- tuf.exceptions.ExpiredMetadataError:
- If 'metadata_rolename' has expired.
-
- securesystemslib.exceptions.FormatError:
- If the expiration timestamp cannot be parsed correctly.
-
- None.
-
-
- None.
- """
-
- # Extract the expiration time. Convert it to a unix timestamp and compare
- # it against the current time.time() (also in Unix/POSIX time format,
- # although with microseconds attached).
- expires_datetime = formats.expiry_string_to_datetime(
- metadata_object['expires'])
- expires_timestamp = formats.datetime_to_unix_timestamp(expires_datetime)
-
- current_time = int(time.time())
- if expires_timestamp <= current_time:
- message = 'Metadata '+repr(metadata_rolename)+' expired on ' + \
- expires_datetime.ctime() + ' (UTC).'
- raise exceptions.ExpiredMetadataError(message)
-
-
-
-
- def all_targets(self):
- """
-
-
- NOTE: This function is deprecated. Its behavior with regard to which
- delegating Targets roles are trusted to determine how to validate a
- delegated Targets role is NOT WELL DEFINED. Please transition to use of
- get_one_valid_targetinfo()!
-
- Get a list of the target information for all the trusted targets on the
- repository. This list also includes all the targets of delegated roles.
- Targets of the list returned are ordered according to the trusted order
- of the delegated roles, where parent roles come before children. The
- list conforms to 'tuf.formats.TARGETINFOS_SCHEMA' and has the form:
-
- [{'filepath': 'a/b/c.txt',
- 'fileinfo': {'length': 13323,
- 'hashes': {'sha256': dbfac345..}}
- ...]
-
-
- None.
-
-
- tuf.exceptions.RepositoryError:
- If the metadata for the 'targets' role is missing from
- the 'snapshot' metadata.
-
- tuf.exceptions.UnknownRoleError:
- If one of the roles could not be found in the role database.
-
-
- The metadata for target roles is updated and stored.
-
-
- A list of targets, conformant to
- 'tuf.formats.TARGETINFOS_SCHEMA'.
- """
-
- warnings.warn(
- 'Support for all_targets() will be removed in a future release.'
- ' get_one_valid_targetinfo() should be used instead.',
- DeprecationWarning)
-
- # Load the most up-to-date targets of the 'targets' role and all
- # delegated roles.
- self._refresh_targets_metadata(refresh_all_delegated_roles=True)
-
- # Fetch the targets for the 'targets' role.
- all_targets = self._targets_of_role('targets', skip_refresh=True)
-
- # Fetch the targets of the delegated roles. get_rolenames returns
- # all roles available on the repository.
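- #
- # NOTE (editor): per the deprecation warning above, new code should do a
- # per-target query instead of enumerating everything. An illustrative
- # replacement ('updater' is a hypothetical Updater instance; the target
- # path is an example):
- #
- #   targetinfo = updater.get_one_valid_targetinfo('a/b/c.txt')
- #   # targetinfo conforms to tuf.formats.TARGETINFO_SCHEMA, e.g.
- #   # {'filepath': 'a/b/c.txt', 'fileinfo': {'length': 13323, ...}}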
- delegated_targets = [] - for role in roledb.get_rolenames(self.repository_name): - if role in roledb.TOP_LEVEL_ROLES: - continue - - else: - delegated_targets.extend(self._targets_of_role(role, skip_refresh=True)) - - all_targets.extend(delegated_targets) - - return all_targets - - - - - - def _refresh_targets_metadata(self, rolename='targets', - refresh_all_delegated_roles=False): - """ - - Non-public method that refreshes the targets metadata of 'rolename'. If - 'refresh_all_delegated_roles' is True, include all the delegations that - follow 'rolename'. The metadata for the 'targets' role is updated in - refresh() by the _update_metadata_if_changed('targets') call, not here. - Delegated roles are not loaded when the repository is first initialized. - They are loaded from disk, updated if they have changed, and stored to - the 'self.metadata' store by this method. This method is called by - get_one_valid_targetinfo(). - - - rolename: - This is a delegated role name and should not end in '.json'. Example: - 'unclaimed'. - - refresh_all_delegated_roles: - Boolean indicating if all the delegated roles available in the - repository (via snapshot.json) should be refreshed. - - - tuf.exceptions.ExpiredMetadataError: - If local metadata is expired and newer metadata is not available. - - tuf.exceptions.RepositoryError: - If the metadata file for the 'targets' role is missing from the - 'snapshot' metadata. - - - The metadata for the delegated roles are loaded and updated if they - have changed. Delegated metadata is removed from the role database if - it has expired. - - - None. - """ - - roles_to_update = [] - - if rolename + '.json' in self.metadata['current']['snapshot']['meta']: - roles_to_update.append(rolename) - - if refresh_all_delegated_roles: - - for role in self.metadata['current']['snapshot']['meta'].keys(): - # snapshot.json keeps track of root.json, targets.json, and delegated - # roles (e.g., django.json, unclaimed.json). Remove the 'targets' role - # because it gets updated when the targets.json file is updated in - # _update_metadata_if_changed('targets') and root. - if role.endswith('.json'): - role = role[:-len('.json')] - if role not in ['root', 'targets', rolename]: - roles_to_update.append(role) - - else: - continue - - # If there is nothing to refresh, we are done. - if not roles_to_update: - return - - logger.debug('Roles to update: ' + repr(roles_to_update) + '.') - - # Iterate 'roles_to_update', and load and update its metadata file if it - # has changed. - for rolename in roles_to_update: - self._load_metadata_from_file('previous', rolename) - self._load_metadata_from_file('current', rolename) - - self._update_metadata_if_changed(rolename) - - - - - - def _targets_of_role(self, rolename, targets=None, skip_refresh=False): - """ - - Non-public method that returns the target information of all the targets - of 'rolename'. The returned information is a list conformant to - 'tuf.formats.TARGETINFOS_SCHEMA', and has the form: - - [{'filepath': 'a/b/c.txt', - 'fileinfo': {'length': 13323, - 'hashes': {'sha256': dbfac345..}} - ...] - - - rolename: - This is a role name and should not end in '.json'. Examples: 'targets', - 'unclaimed'. - - targets: - A list of targets containing target information, conformant to - 'tuf.formats.TARGETINFOS_SCHEMA'. - - skip_refresh: - A boolean indicating if the target metadata for 'rolename' - should be refreshed. - - - tuf.exceptions.UnknownRoleError: - If 'rolename' is not found in the role database. 
-
-
- The metadata for 'rolename' is refreshed if 'skip_refresh' is False.
-
-
- A list of dict objects containing the target information of all the
- targets of 'rolename'. Conformant to
- 'tuf.formats.TARGETINFOS_SCHEMA'.
- """
-
- if targets is None:
- targets = []
-
- targets_of_role = list(targets)
- logger.debug('Getting targets of role: ' + repr(rolename) + '.')
-
- if not roledb.role_exists(rolename, self.repository_name):
- raise exceptions.UnknownRoleError(rolename)
-
- # We do not need to worry about the target paths being trusted because
- # this is enforced before any new metadata is accepted.
- if not skip_refresh:
- self._refresh_targets_metadata(rolename)
-
- # Do we have metadata for 'rolename'?
- if rolename not in self.metadata['current']:
- logger.debug('No metadata for ' + repr(rolename) + '.'
- ' Unable to determine targets.')
- return []
-
- # Get the targets specified by the role itself.
- for filepath, fileinfo in self.metadata['current'][rolename].get('targets', {}).items():
- new_target = {}
- new_target['filepath'] = filepath
- new_target['fileinfo'] = fileinfo
-
- targets_of_role.append(new_target)
-
- return targets_of_role
-
-
-
-
- def targets_of_role(self, rolename='targets'):
- """
-
-
- NOTE: This function is deprecated. Use with rolename 'targets' is secure
- and the behavior well-defined, but use with any delegated targets role is
- not. Please transition use for delegated targets roles to
- get_one_valid_targetinfo(). More information is below.
-
- Return a list of trusted targets directly specified by 'rolename'.
- The returned information is a list conformant to
- 'tuf.formats.TARGETINFOS_SCHEMA', and has the form:
-
- [{'filepath': 'a/b/c.txt',
- 'fileinfo': {'length': 13323,
- 'hashes': {'sha256': dbfac345..}}
- ...]
-
- The metadata of 'rolename' is updated if out of date, including the
- metadata of its parent roles (i.e., the minimum roles needed to set the
- chain of trust).
-
-
- rolename:
- The name of the role whose list of targets is wanted.
- The name of the role should start with 'targets'.
-
-
- securesystemslib.exceptions.FormatError:
- If 'rolename' is improperly formatted.
-
- tuf.exceptions.RepositoryError:
- If the metadata of 'rolename' cannot be updated.
-
- tuf.exceptions.UnknownRoleError:
- If 'rolename' is not found in the role database.
-
-
- The metadata of updated delegated roles is downloaded and stored.
-
-
- A list of targets, conformant to
- 'tuf.formats.TARGETINFOS_SCHEMA'.
- """
-
- warnings.warn(
- 'Support for targets_of_role() will be removed in a future release.'
- ' get_one_valid_targetinfo() should be used instead.',
- DeprecationWarning)
-
- # Does 'rolename' have the correct format?
- # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
- formats.RELPATH_SCHEMA.check_match(rolename)
-
- # If we've been given a delegated targets role, we don't know how to
- # validate it without knowing what the delegating role is -- there could
- # be several roles that delegate to the given role. Behavior of this
- # function for roles other than Targets is not well defined as a result.
- # This function is deprecated, but:
- # - Usage of this function or a future successor makes sense when the
- # role of interest is Targets, since we always know exactly how to
- # validate Targets (we use root).
- # - Until it's removed (hopefully soon), we'll try to provide what it has
- # always provided. To do this, we fetch and "validate" all delegated
- # roles listed by snapshot.
For delegated roles only, the order of the - # validation impacts the security of the validation -- the most- - # recently-validated role delegating to a role you are currently - # validating determines the expected keyids and threshold of the role - # you are currently validating. That is NOT GOOD. Again, please switch - # to get_one_valid_targetinfo, which is well-defined and secure. - if rolename != 'targets': - self._refresh_targets_metadata(refresh_all_delegated_roles=True) - - - if not roledb.role_exists(rolename, self.repository_name): - raise exceptions.UnknownRoleError(rolename) - - return self._targets_of_role(rolename, skip_refresh=True) - - - - - - def get_one_valid_targetinfo(self, target_filepath): - """ - - Return the target information for 'target_filepath', and update its - corresponding metadata, if necessary. 'target_filepath' must match - exactly as it appears in metadata, and should not contain URL encoding - escapes. - - - target_filepath: - The path to the target file on the repository. This will be relative to - the 'targets' (or equivalent) directory on a given mirror. - - - tuf.exceptions.ExpiredMetadataError: - If local metadata is expired and newer metadata is not available. - - securesystemslib.exceptions.FormatError: - If 'target_filepath' is improperly formatted. - - tuf.exceptions.UnknownTargetError: - If 'target_filepath' was not found. - - Any other unforeseen runtime exception. - - - The metadata for updated delegated roles are downloaded and stored. - - - The target information for 'target_filepath', conformant to - 'tuf.formats.TARGETINFO_SCHEMA'. - """ - - # Does 'target_filepath' have the correct format? - # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch. - formats.RELPATH_SCHEMA.check_match(target_filepath) - - target_filepath = target_filepath.replace('\\', '/') - - if target_filepath.startswith('/'): - raise exceptions.FormatError('The requested target file cannot' - ' contain a leading path separator: ' + repr(target_filepath)) - - # Get target by looking at roles in order of priority tags. - target = self._preorder_depth_first_walk(target_filepath) - - # Raise an exception if the target information could not be retrieved. - if target is None: - raise exceptions.UnknownTargetError(repr(target_filepath) + ' not' - ' found.') - - # Otherwise, return the found target. - else: - return target - - - - - - def _preorder_depth_first_walk(self, target_filepath): - """ - - Non-public method that interrogates the tree of target delegations in - order of appearance (which implicitly order trustworthiness), and returns - the matching target found in the most trusted role. - - - target_filepath: - The path to the target file on the repository. This will be relative to - the 'targets' (or equivalent) directory on a given mirror. - - - tuf.exceptions.ExpiredMetadataError: - If local metadata is expired and newer metadata is not available. - - securesystemslib.exceptions.FormatError: - If 'target_filepath' is improperly formatted. - - tuf.exceptions.RepositoryError: - If 'target_filepath' is not found. - - - The metadata for updated delegated roles are downloaded and stored. - - - The target information for 'target_filepath', conformant to - 'tuf.formats.TARGETINFO_SCHEMA'. - """ - - target = None - current_metadata = self.metadata['current'] - role_names = ['targets'] - visited_role_names = set() - number_of_delegations = settings.MAX_NUMBER_OF_DELEGATIONS - - # Ensure the client has the most up-to-date version of 'targets.json'. 
- # Raise 'tuf.exceptions.NoWorkingMirrorError' if the changed metadata - # cannot be successfully downloaded and 'tuf.exceptions.RepositoryError' if - # the referenced metadata is missing. Target methods such as this one are - # called after the top-level metadata have been refreshed (i.e., - # updater.refresh()). - self._update_metadata_if_changed('targets') - - # Preorder depth-first traversal of the graph of target delegations. - while target is None and number_of_delegations > 0 and len(role_names) > 0: - - # Pop the role name from the top of the stack. - role_name = role_names.pop(-1) - - # Skip any visited current role to prevent cycles. - if role_name in visited_role_names: - logger.debug('Skipping visited current role ' + repr(role_name)) - continue - - # The metadata for 'role_name' must be downloaded/updated before its - # targets, delegations, and child roles can be inspected. - # self.metadata['current'][role_name] is currently missing. - # _refresh_targets_metadata() does not refresh 'targets.json', it - # expects _update_metadata_if_changed() to have already refreshed it, - # which this function has checked above. - self._refresh_targets_metadata(role_name, - refresh_all_delegated_roles=False) - - role_metadata = current_metadata[role_name] - targets = role_metadata['targets'] - delegations = role_metadata.get('delegations', {}) - child_roles = delegations.get('roles', []) - target = self._get_target_from_targets_role(role_name, targets, - target_filepath) - # After preorder check, add current role to set of visited roles. - visited_role_names.add(role_name) - - # And also decrement number of visited roles. - number_of_delegations -= 1 - - if target is None: - - child_roles_to_visit = [] - # NOTE: This may be a slow operation if there are many delegated roles. - for child_role in child_roles: - child_role_name = self._visit_child_role(child_role, target_filepath) - if child_role['terminating'] and child_role_name is not None: - logger.debug('Adding child role ' + repr(child_role_name)) - logger.debug('Not backtracking to other roles.') - role_names = [] - child_roles_to_visit.append(child_role_name) - break - - elif child_role_name is None: - logger.debug('Skipping child role ' + repr(child_role_name)) - - else: - logger.debug('Adding child role ' + repr(child_role_name)) - child_roles_to_visit.append(child_role_name) - - # Push 'child_roles_to_visit' in reverse order of appearance onto - # 'role_names'. Roles are popped from the end of the 'role_names' - # list. - child_roles_to_visit.reverse() - role_names.extend(child_roles_to_visit) - - else: - logger.debug('Found target in current role ' + repr(role_name)) - - if target is None and number_of_delegations == 0 and len(role_names) > 0: - logger.debug(repr(len(role_names)) + ' roles left to visit, ' + - 'but allowed to visit at most ' + - repr(settings.MAX_NUMBER_OF_DELEGATIONS) + ' delegations.') - - return target - - - - - - def _get_target_from_targets_role(self, role_name, targets, target_filepath): - """ - - Non-public method that determines whether the targets role with the given - 'role_name' has the target with the name 'target_filepath'. - - - role_name: - The name of the targets role that we are inspecting. - - targets: - The targets of the Targets role with the name 'role_name'. - - target_filepath: - The path to the target file on the repository. This will be relative to - the 'targets' (or equivalent) directory on a given mirror. - - - None. - - - None. 
- - - The target information for 'target_filepath', conformant to - 'tuf.formats.TARGETINFO_SCHEMA'. - """ - - # Does the current role name have our target? - logger.debug('Asking role ' + repr(role_name) + ' about' - ' target ' + repr(target_filepath)) - - target = targets.get(target_filepath) - - if target: - logger.debug('Found target ' + target_filepath + ' in role ' + role_name) - return {'filepath': target_filepath, 'fileinfo': target} - - else: - logger.debug( - 'Target file ' + target_filepath + ' not found in role ' + role_name) - return None - - - - - - def _visit_child_role(self, child_role, target_filepath): - """ - - Non-public method that determines whether the given 'target_filepath' - is an allowed path of 'child_role'. - - Ensure that we explore only delegated roles trusted with the target. The - metadata for 'child_role' should have been refreshed prior to this point, - however, the paths/targets that 'child_role' signs for have not been - verified (as intended). The paths/targets that 'child_role' is allowed - to specify in its metadata depends on the delegating role, and thus is - left to the caller to verify. We verify here that 'target_filepath' - is an allowed path according to the delegated 'child_role'. - - TODO: Should the TUF spec restrict the repository to one particular - algorithm? Should we allow the repository to specify in the role - dictionary the algorithm used for these generated hashed paths? - - - child_role: - The delegation targets role object of 'child_role', containing its - paths, path_hash_prefixes, keys, and so on. - - target_filepath: - The path to the target file on the repository. This will be relative to - the 'targets' (or equivalent) directory on a given mirror. - - - None. - - - None. - - - If 'child_role' has been delegated the target with the name - 'target_filepath', then we return the role name of 'child_role'. - - Otherwise, we return None. - """ - - child_role_name = child_role['name'] - child_role_paths = child_role.get('paths') - child_role_path_hash_prefixes = child_role.get('path_hash_prefixes') - - if child_role_path_hash_prefixes is not None: - target_filepath_hash = self._get_target_hash(target_filepath) - for child_role_path_hash_prefix in child_role_path_hash_prefixes: - if target_filepath_hash.startswith(child_role_path_hash_prefix): - return child_role_name - - else: - continue - - elif child_role_paths is not None: - # Is 'child_role_name' allowed to sign for 'target_filepath'? - for child_role_path in child_role_paths: - # A child role path may be an explicit path or glob pattern (Unix - # shell-style wildcards). The child role 'child_role_name' is returned - # if 'target_filepath' is equal to or matches 'child_role_path'. - # Explicit filepaths are also considered matches. A repo maintainer - # might delegate a glob pattern with a leading path separator, while - # the client requests a matching target without a leading path - # separator - make sure to strip any leading path separators so that a - # match is made. Example: "foo.tgz" should match with "/*.tgz". 
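The "foo.tgz" versus "/*.tgz" case walked through in the comment above can be checked directly with the standard library; a minimal sketch, independent of TUF, of the match performed just below:

```python
import fnmatch

# Mirrors the check below: strip any leading separator from both the
# requested path and the delegated pattern before matching.
print(fnmatch.fnmatch('foo.tgz'.lstrip('/'), '/*.tgz'.lstrip('/')))  # True
print(fnmatch.fnmatch('bar/foo.tgz', '*.tgz'))  # True: fnmatch's '*' also matches '/'
```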
- if fnmatch.fnmatch(target_filepath.lstrip(os.sep), child_role_path.lstrip(os.sep)): - logger.debug('Child role ' + repr(child_role_name) + ' is allowed to' - ' sign for ' + repr(target_filepath)) - - return child_role_name - - else: - logger.debug( - 'The given target path ' + repr(target_filepath) + ' does not' - ' match the trusted path or glob pattern: ' + repr(child_role_path)) - continue - - else: - # 'role_name' should have been validated when it was downloaded. - # The 'paths' or 'path_hash_prefixes' fields should not be missing, - # so we raise a format error here in case they are both missing. - raise sslib_exceptions.FormatError(repr(child_role_name) + ' ' - 'has neither a "paths" nor a "path_hash_prefixes" attribute. At least' - ' one of these attributes must be present.') - - return None - - - - def _get_target_hash(self, target_filepath, hash_function='sha256'): - """ - - Non-public method that computes the hash of 'target_filepath'. This is - useful in conjunction with the "path_hash_prefixes" attribute in a - delegated targets role, which tells us which paths it is implicitly - responsible for. - - - target_filepath: - The path to the target file on the repository. This will be relative to - the 'targets' (or equivalent) directory on a given mirror. - - hash_function: - The algorithm used by the repository to generate the hashes of the - target filepaths. The repository may optionally organize targets into - hashed bins to ease target delegations and role metadata management. - The use of consistent hashing allows for a uniform distribution of - targets into bins. - - - None. - - - None. - - - The hash of 'target_filepath'. - """ - - # Calculate the hash of the filepath to determine the bin in which to find - # the target. The client currently assumes the repository (i.e., repository - # tool) uses 'hash_function' to generate hashes of UTF-8 encoded target - # filepaths. - digest_object = sslib_hash.digest(hash_function) - encoded_target_filepath = target_filepath.encode('utf-8') - digest_object.update(encoded_target_filepath) - target_filepath_hash = digest_object.hexdigest() - - return target_filepath_hash - - - - - - def remove_obsolete_targets(self, destination_directory): - """ - - Remove any files that are in 'previous' but not 'current'. This makes it - so if you remove a file from a repository, it actually goes away. The - targets for the 'targets' role and all delegated roles are checked. - - - destination_directory: - The directory containing the target files tracked by TUF. - - - securesystemslib.exceptions.FormatError: - If 'destination_directory' is improperly formatted. - - tuf.exceptions.RepositoryError: - If an error occurred removing any files. - - - Target files are removed from disk. - - - None. - """ - - # Does 'destination_directory' have the correct format? - # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch. - sslib_formats.PATH_SCHEMA.check_match(destination_directory) - - # Iterate the rolenames and verify whether the 'previous' directory - # contains a target no longer found in 'current'. - for role in roledb.get_rolenames(self.repository_name): - if role.startswith('targets'): - if role in self.metadata['previous'] and self.metadata['previous'][role] is not None: - for target in self.metadata['previous'][role]['targets']: - if target not in self.metadata['current'][role]['targets']: - # 'target' is only in 'previous', so remove it. - logger.warning('Removing obsolete file: ' + repr(target) + '.') - - # Remove the file if it hasn't been removed already.
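The hashed-bin lookup that _get_target_hash() implements can be reproduced with hashlib alone. A sketch, assuming the repository's sha256 default and an illustrative target path (the client itself goes through securesystemslib's hash wrapper):

```python
import hashlib

def target_hash(target_filepath, hash_function='sha256'):
    # Same computation as _get_target_hash(): hash the UTF-8 encoded,
    # repository-relative target path.
    digest_object = hashlib.new(hash_function)
    digest_object.update(target_filepath.encode('utf-8'))
    return digest_object.hexdigest()

# A delegation whose 'path_hash_prefixes' includes the leading characters of
# this digest (e.g., a two-character bin name) signs for the target.
print(target_hash('packages/foo-1.0.tgz')[:2])
```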
- destination = \ - os.path.join(destination_directory, target.lstrip(os.sep)) - try: - os.remove(destination) - - except OSError as e: - # If 'filename' already removed, just log it. - if e.errno == errno.ENOENT: - logger.info('File ' + repr(destination) + ' was already' - ' removed.') - - else: - logger.warning('Failed to remove obsolete target: ' + str(e)) - - else: - logger.debug('Skipping: ' + repr(target) + '. It is still' - ' a current target.') - else: - logger.debug('Skipping: ' + repr(role) + '. Not in the previous' - ' metadata.') - - - - - - def updated_targets(self, targets, destination_directory): - """ - - Checks files in the provided directory against the provided file metadata. - - Filters the provided target info, returning a subset: only the metadata - for targets for which the target file either does not exist in the - provided directory, or for which the target file in the provided directory - does not match the provided metadata. - - A principal use of this function is to determine which target files need - to be downloaded. If the caller first uses get_one_valid_targetinfo() - calls to obtain up-to-date, valid metadata for targets, the caller can - then call updated_targets() to determine if that metadata does not match - what exists already on disk (in the provided directory). The returned - values can then be used in download_target() calls to update the files that - didn't exist or didn't match. - - The returned information is a list conformant to - 'tuf.formats.TARGETINFOS_SCHEMA' and has the form: - - [{'filepath': 'a/b/c.txt', - 'fileinfo': {'length': 13323, - 'hashes': {'sha256': dbfac345..}} - ...] - - - targets: - Metadata about the expected state of target files, against which local - files will be checked. This should be a list of target info - dictionaries; i.e. 'targets' must be conformant to - tuf.formats.TARGETINFOS_SCHEMA. - - destination_directory: - The directory containing the target files. - - - securesystemslib.exceptions.FormatError: - If the arguments are improperly formatted. - - - The files in 'targets' are read and their hashes computed. - - - A list of target info dictionaries. The list conforms to - 'tuf.formats.TARGETINFOS_SCHEMA'. - This is a strict subset of the argument 'targets'. - """ - - # Do the arguments have the correct format? - # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch. - formats.TARGETINFOS_SCHEMA.check_match(targets) - sslib_formats.PATH_SCHEMA.check_match(destination_directory) - - # Keep track of the target objects and filepaths of updated targets. - # Return 'updated_targets' and use 'updated_targetpaths' to avoid - # duplicates. - updated_targets = [] - updated_targetpaths = [] - - for target in targets: - # Prepend 'destination_directory' to the target's relative filepath (as - # stored in metadata). Verify the hash of 'target_filepath' against - # each hash listed for its fileinfo. Note: join() discards - # 'destination_directory' if 'filepath' contains a leading path separator - # (i.e., is treated as an absolute path). - filepath = target['filepath'] - if filepath[0] == '/': - filepath = filepath[1:] - target_filepath = os.path.join(destination_directory, filepath) - - if target_filepath in updated_targetpaths: - continue - - # Check each algorithm/digest pair for a mismatch; we break as soon as - # one is found.
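The per-target check described above boils down to "download unless every trusted digest matches". A standalone sketch of that predicate, using hashlib in place of securesystemslib's wrappers (an assumed equivalence for the common algorithms):

```python
import hashlib
import os

def file_matches(target_filepath, trusted_hashes):
    # False (i.e., needs downloading) if the file is absent or any digest
    # disagrees with the trusted metadata.
    if not os.path.exists(target_filepath):
        return False
    with open(target_filepath, 'rb') as file_object:
        data = file_object.read()
    return all(hashlib.new(algorithm, data).hexdigest() == digest
               for algorithm, digest in trusted_hashes.items())
```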
- for algorithm, digest in target['fileinfo']['hashes'].items(): - digest_object = None - try: - digest_object = sslib_hash.digest_filename(target_filepath, - algorithm=algorithm) - - # This exception would occur if the target does not exist locally. - except sslib_exceptions.StorageError: - updated_targets.append(target) - updated_targetpaths.append(target_filepath) - break - - # The file does exist locally, so check whether its hash differs. - if digest_object.hexdigest() != digest: - updated_targets.append(target) - updated_targetpaths.append(target_filepath) - break - - return updated_targets - - - - - - def download_target(self, target, destination_directory, - prefix_filename_with_hash=True): - """ - - Download 'target' and verify it is trusted. - - This will only store the file at 'destination_directory' if the - downloaded file matches the description of the file in the trusted - metadata. - - - target: - The target to be downloaded. Conformant to - 'tuf.formats.TARGETINFO_SCHEMA'. - - destination_directory: - The directory to save the downloaded target file. - - prefix_filename_with_hash: - Whether to prefix the target file names with their hash when using - consistent snapshot. - This should be set to False when the served target filenames are not - prefixed with hashes (in this case the server uses other means - to ensure snapshot consistency). - Default is True. - - - securesystemslib.exceptions.FormatError: - If 'target' is not properly formatted. - - tuf.exceptions.NoWorkingMirrorError: - If a target could not be downloaded from any of the mirrors. - - Although expected to be rare, there might be OSError exceptions (except - errno.EEXIST) raised when creating the destination directory (if it - doesn't exist). - - - A target file is saved to the local system. - - - None. - """ - - # Do the arguments have the correct format? - # This check ensures the arguments have the appropriate - # number of objects and object types, and that all dict - # keys are properly named. - # Raise 'securesystemslib.exceptions.FormatError' if the check fails. - formats.TARGETINFO_SCHEMA.check_match(target) - sslib_formats.PATH_SCHEMA.check_match(destination_directory) - - # Extract the target file information. - target_filepath = target['filepath'] - trusted_length = target['fileinfo']['length'] - trusted_hashes = target['fileinfo']['hashes'] - - # Build absolute 'destination' file path. - # Note: join() discards 'destination_directory' if 'target_filepath' contains - # a leading path separator (i.e., is treated as an absolute path). - destination = os.path.join(destination_directory, - target_filepath.lstrip(os.sep)) - destination = os.path.abspath(destination) - target_dirpath = os.path.dirname(destination) - - # When attempting to create the leaf directory of 'target_dirpath', ignore - # the exception raised if the directory already exists. All other - # exceptions potentially thrown by os.makedirs() are re-raised. - # Note: os.makedirs can raise OSError if the leaf directory already exists - # or cannot be created. - try: - os.makedirs(target_dirpath) - - except OSError as e: - if e.errno == errno.EEXIST: - pass - - else: - raise - - # '_get_target_file()' checks every mirror and returns the first target - # that passes verification.
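Taken together, refresh(), get_one_valid_targetinfo(), updated_targets(), and download_target() compose into the usual client session. A hypothetical sketch of that flow follows; the repository name, mirror URL, and directory names are illustrative assumptions, and depending on the tuf version the mirror dictionary may also require a 'confined_target_dirs' entry:

```python
import tuf.client.updater
import tuf.settings

# Local metadata is expected under <repositories_directory>/<repo name>/metadata/.
tuf.settings.repositories_directory = 'tufclient'

mirrors = {'mirror1': {'url_prefix': 'http://localhost:8001',
                       'metadata_path': 'metadata',
                       'targets_path': 'targets'}}

updater = tuf.client.updater.Updater('tufrepo', mirrors)
updater.refresh()  # top-level metadata must be current before target queries

targetinfo = updater.get_one_valid_targetinfo('README.txt')
for target in updater.updated_targets([targetinfo], './tuftargets'):
    updater.download_target(target, './tuftargets')
```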
- target_file_object = self._get_target_file(target_filepath, trusted_length, - trusted_hashes, prefix_filename_with_hash) - - sslib_util.persist_temp_file(target_file_object, destination) diff --git a/tuf/developer_tool.py b/tuf/developer_tool.py deleted file mode 100755 index 82d936c072..0000000000 --- a/tuf/developer_tool.py +++ /dev/null @@ -1,1023 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2014 - 2017, New York University and the TUF contributors -# SPDX-License-Identifier: MIT OR Apache-2.0 - -""" - - developer_tool.py - - - Santiago Torres - Zane Fisher - - Based on the work done for 'repository_tool.py' by Vladimir Diaz. - - - January 22, 2014. - - - See LICENCE-MIT OR LICENCE for licensing information. - - - See 'tuf/README-developer-tools.md' for a complete guide on using - 'developer_tool.py'. -""" - -import os -import errno -import logging -import shutil -import tempfile -import json - - -import securesystemslib # pylint: disable=unused-import -from securesystemslib import exceptions as sslib_exceptions -from securesystemslib import formats as sslib_formats -from securesystemslib import storage as sslib_storage -from securesystemslib import util as sslib_util - -from tuf import exceptions -from tuf import formats -from tuf import keydb -from tuf import log # pylint: disable=unused-import -from tuf import repository_lib as repo_lib -from tuf import roledb -from tuf import sig - -from tuf.repository_tool import Targets -from tuf.repository_lib import _check_role_keys -from tuf.repository_lib import _metadata_is_partially_loaded - - -# Copy API -# pylint: disable=unused-import - -# Copy generic repository API functions to be used via `developer_tool` -from tuf.repository_lib import ( - generate_targets_metadata, - create_tuf_client_directory, - disable_console_log_messages) - -# Copy key-related API functions to be used via `developer_tool` -from tuf.repository_lib import ( - import_rsa_privatekey_from_file) - -from securesystemslib.keys import ( - format_keyval_to_metadata, - format_metadata_to_key) - -from securesystemslib.interface import ( - generate_and_write_rsa_keypair, - generate_and_write_rsa_keypair_with_prompt, - generate_and_write_unencrypted_rsa_keypair, - generate_and_write_ecdsa_keypair, - generate_and_write_ecdsa_keypair_with_prompt, - generate_and_write_unencrypted_ecdsa_keypair, - generate_and_write_ed25519_keypair, - generate_and_write_ed25519_keypair_with_prompt, - generate_and_write_unencrypted_ed25519_keypair, - import_rsa_publickey_from_file, - import_ed25519_publickey_from_file, - import_ed25519_privatekey_from_file) - - -# See 'log.py' to learn how logging is handled in TUF. -logger = logging.getLogger(__name__) - -# The extension of TUF metadata. -from tuf.repository_lib import METADATA_EXTENSION as METADATA_EXTENSION - -# Project configuration filename. This file is intended to hold all of the -# supporting information about the project that's not contained in a usual -# TUF metadata file. 'project.cfg' consists of the following fields: -# -# targets_location: the location of the targets folder. -# -# prefix: the directory location to prepend to the metadata so it -# matches the metadata signed in the repository. -# -# metadata_location: the location of the metadata files. -# -# threshold: the threshold for this project object, it is fixed to -# one in the current version. -# -# public_keys: a list of the public keys used to verify the metadata -# in this project. 
-# -# layout_type: a field describing the directory layout: -# -# repo-like: matches the layout of the repository tool. -# the targets and metadata folders are -# located under a common directory for the -# project. -# -# flat: the targets directory and the -# metadata directory are located in different -# paths. -# -# project_name: The name of the current project, this value is used to -# match the resulting filename with the one in upstream. -PROJECT_FILENAME = 'project.cfg' - -# The targets and metadata directory names. Metadata files are written -# to the staged metadata directory instead of the "live" one. -from tuf.repository_tool import METADATA_DIRECTORY_NAME -from tuf.repository_tool import TARGETS_DIRECTORY_NAME - - -class Project(Targets): - """ - - Simplify the publishing process of third-party projects by handling all of - the bookkeeping, signature handling, and integrity checks of delegated TUF - metadata. 'repository_tool.py' is responsible for publishing and - maintaining metadata of the top-level roles, and 'developer_tool.py' is - used by projects that have been delegated responsibility for a delegated - projects role. Metadata created by this module may then be added to other - metadata available in a TUF repository. - - Project() is the representation of a project's metadata file(s), with the - ability to modify this data in an OOP manner. Project owners do not have to - manually verify that metadata files are properly formatted or that they - contain valid data. - - - project_name: - The name of the metadata file as it should be named in the upstream - repository. - - metadata_directory: - The metadata sub-directory contains the metadata file(s) of this project, - including any of its delegated roles. - - targets_directory: - The targets sub-directory contains the project's target files that are - downloaded by clients and are referenced in its metadata. The hashes and - file lengths are listed in Metadata files so that they are securely - downloaded. Metadata files are similarly referenced in the top-level - metadata. - - file_prefix: - The path string that will be prepended to the generated metadata - (e.g., targets/foo -> targets/prefix/foo) so that it matches the actual - targets location in the upstream repository. - - repository_name: - The name of the repository. If not supplied, 'rolename' is added to the - 'default' repository. - - - securesystemslib.exceptions.FormatError, if the arguments are improperly - formatted. - - - Creates a project Targets role object, with the same object attributes of - the top-level targets role. - - - None. - """ - - def __init__(self, project_name, metadata_directory, targets_directory, - file_prefix, repository_name='default'): - - # Do the arguments have the correct format? - # Ensure the arguments have the appropriate number of objects and object - # types, and that all dict keys are properly named. Raise - # 'securesystemslib.exceptions.FormatError' if any are improperly - # formatted. 
- sslib_formats.NAME_SCHEMA.check_match(project_name) - sslib_formats.PATH_SCHEMA.check_match(metadata_directory) - sslib_formats.PATH_SCHEMA.check_match(targets_directory) - sslib_formats.ANY_STRING_SCHEMA.check_match(file_prefix) - sslib_formats.NAME_SCHEMA.check_match(repository_name) - - self.metadata_directory = metadata_directory - self.targets_directory = targets_directory - self.project_name = project_name - self.prefix = file_prefix - self.repository_name = repository_name - - # Layout type defaults to "flat" unless explicitly specified in - # create_new_project(). - self.layout_type = 'flat' - - # Set the top-level Targets object. Set the rolename to be the project's - # name. - super(Project, self).__init__(self.targets_directory, project_name) - - - - - - def write(self, write_partial=False): - """ - - Write all the JSON Metadata objects to their corresponding files. - write() raises an exception if any of the role metadata to be written to - disk is invalid, such as an insufficient threshold of signatures, missing - private keys, etc. - - - write_partial: - A boolean indicating whether partial metadata should be written to - disk. Partial metadata may be written to allow multiple maintainers - to independently sign and update role metadata. write() raises an - exception if a metadata role cannot be written due to not having enough - signatures. - - - securesystemslib.exceptions.Error, if any of the project roles do not - have a minimum threshold of signatures. - - - Creates metadata files in the project's metadata directory. - - - None. - """ - - # Does 'write_partial' have the correct format? - # Ensure the arguments have the appropriate number of objects and object - # types, and that all dict keys are properly named. - # Raise 'securesystemslib.exceptions.FormatError' if any are improperly formatted. - sslib_formats.BOOLEAN_SCHEMA.check_match(write_partial) - - # At this point the keydb and roledb stores must be fully - # populated, otherwise write() throws a 'tuf.Repository' exception if - # any of the project roles are missing signatures, keys, etc. - - # Write the metadata files of all the delegated roles of the project. - delegated_rolenames = roledb.get_delegated_rolenames(self.project_name, - self.repository_name) - - for delegated_rolename in delegated_rolenames: - delegated_filename = os.path.join(self.metadata_directory, - delegated_rolename + METADATA_EXTENSION) - - # Ensure the parent directories of 'delegated_filename' exist, otherwise an - # IO exception is raised if 'delegated_filename' is written to a - # sub-directory. - sslib_util.ensure_parent_dir(delegated_filename) - - _generate_and_write_metadata(delegated_rolename, delegated_filename, - write_partial, self.targets_directory, prefix=self.prefix, - repository_name=self.repository_name) - - - # Generate the 'project_name' metadata file.
- targets_filename = self.project_name + METADATA_EXTENSION - targets_filename = os.path.join(self.metadata_directory, targets_filename) - junk, targets_filename = _generate_and_write_metadata(self.project_name, - targets_filename, write_partial, self.targets_directory, - prefix=self.prefix, repository_name=self.repository_name) - - # Save configuration information that is not stored in the project's - # metadata. - _save_project_configuration(self.metadata_directory, - self.targets_directory, self.keys, self.prefix, self.threshold, - self.layout_type, self.project_name) - - - - - - def add_verification_key(self, key, expires=None): - """ - - Functions as a thin wrapper around the project._targets method of the - same name. This wrapper exists only for usability purposes. - - - key: - The role key to be added, conformant to - 'securesystemslib.formats.ANYKEY_SCHEMA'. Adding a public key to a - role means that its corresponding private key must generate and add - its signature to the role. - - - securesystemslib.exceptions.FormatError, if the 'key' argument is - improperly formatted. - - securesystemslib.exceptions.Error, if the project already contains a key. - - - The role's entries in 'keydb' and 'roledb' are updated. - - - None. - """ - - # Verify that this role does not already contain a key. The parent project - # role is restricted to one key. Any of its delegated roles may have - # more than one key. - # TODO: Add condition check for the requirement stated above. - if len(self.keys) > 0: - raise sslib_exceptions.Error("This project already contains a key.") - - super(Project, self).add_verification_key(key, expires) - - - - - - def status(self): - """ - - Determine the status of the project, including its delegated roles. - status() checks whether each role provides sufficient public keys and - signatures, and whether a valid metadata file would be generated if - write() were called. Metadata files are temporarily written to check - that proper metadata files are written, where file hashes and lengths - are calculated and referenced by the project. status() does not do a - simple check for number of threshold keys and signatures. - - - None. - - - securesystemslib.exceptions.Error, if the project, or any of its - delegated roles, do not have a minimum threshold of signatures. - - - Generates and writes temporary metadata files. - - - None. - """ - - temp_project_directory = None - - try: - temp_project_directory = tempfile.mkdtemp() - - metadata_directory = os.path.join(temp_project_directory, 'metadata') - targets_directory = self.targets_directory - - os.makedirs(metadata_directory) - - # TODO: We should do the schema check. - filenames = {} - filenames['targets'] = os.path.join(metadata_directory, self.project_name) - - # Delegated roles.
- delegated_roles = roledb.get_delegated_rolenames(self.project_name, - self.repository_name) - insufficient_keys = [] - insufficient_signatures = [] - - for delegated_role in delegated_roles: - try: - _check_role_keys(delegated_role, self.repository_name) - - except exceptions.InsufficientKeysError: - insufficient_keys.append(delegated_role) - continue - - try: - signable = _generate_and_write_metadata(delegated_role, - filenames['targets'], False, targets_directory, False, - repository_name=self.repository_name) - self._log_status(delegated_role, signable[0], self.repository_name) - - except sslib_exceptions.Error: - insufficient_signatures.append(delegated_role) - - if len(insufficient_keys): - message = 'Delegated roles with insufficient keys: ' +\ - repr(insufficient_keys) - logger.info(message) - return - - if len(insufficient_signatures): - message = 'Delegated roles with insufficient signatures: ' +\ - repr(insufficient_signatures) - logger.info(message) - return - - # Targets role. - try: - _check_role_keys(self.rolename, self.repository_name) - - except exceptions.InsufficientKeysError as e: - logger.info(str(e)) - return - - try: - signable, junk = _generate_and_write_metadata(self.project_name, - filenames['targets'], False, targets_directory, metadata_directory, - self.repository_name) - self._log_status(self.project_name, signable, self.repository_name) - - except exceptions.UnsignedMetadataError as e: - # This error is raised if the metadata has insufficient signatures to - # meet the threshold. - self._log_status(self.project_name, e.signable, self.repository_name) - return - - finally: - shutil.rmtree(temp_project_directory, ignore_errors=True) - - - - - - def _log_status(self, rolename, signable, repository_name): - """ - Non-public function prints the number of (good/threshold) signatures of - 'rolename'. - """ - - status = sig.get_signature_status(signable, rolename, repository_name) - - message = repr(rolename) + ' role contains ' +\ - repr(len(status['good_sigs'])) + ' / ' + repr(status['threshold']) +\ - ' signatures.' - logger.info(message) - - - - - -def _generate_and_write_metadata(rolename, metadata_filename, write_partial, - targets_directory, prefix='', repository_name='default'): - """ - Non-public function that can generate and write the metadata of the - specified 'rolename'. It also increments version numbers if: - - 1. write_partial==True and the metadata is the first to be written. - - 2. write_partial=False (i.e., write()), the metadata was not loaded as - partially written, and a write_partial is not needed. - """ - - metadata = None - - # Retrieve the roleinfo of 'rolename' to extract the needed metadata - # attributes, such as version number, expiration, etc. - roleinfo = roledb.get_roleinfo(rolename, repository_name) - - metadata = generate_targets_metadata(targets_directory, roleinfo['paths'], - roleinfo['version'], roleinfo['expires'], roleinfo['delegations'], - False) - - # Prepend the prefix to the project's filepath to avoid signature errors in - # upstream. 
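The re-keying performed by the loop below keeps only the basename of each target path and re-roots it under the upstream prefix. In isolation, the transformation looks like this (the prefix and target entry are illustrative values):

```python
import os

prefix = 'targets/unclaimed'  # illustrative; normally comes from project.cfg
targets = {'django/setup.py': {'length': 1234, 'hashes': {}}}

# Keep only the basename, then re-root it under the upstream prefix.
prefixed = {os.path.join(prefix, os.path.basename(path)): fileinfo
            for path, fileinfo in targets.items()}
print(list(prefixed))  # ['targets/unclaimed/setup.py']
```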
- for element in list(metadata['targets']): - junk, relative_target = os.path.split(element) - prefixed_path = os.path.join(prefix, relative_target) - metadata['targets'][prefixed_path] = metadata['targets'][element] - if prefix != '': - del metadata['targets'][element] - - signable = repo_lib.sign_metadata(metadata, roleinfo['signing_keyids'], - metadata_filename, repository_name) - - # Check if the version number of 'rolename' may be automatically incremented, - # depending on whether partial metadata is loaded or the metadata is - # written with write() / write_partial(). - # Increment the version number if this is the first partial write. - if write_partial: - temp_signable = repo_lib.sign_metadata(metadata, [], metadata_filename, - repository_name) - temp_signable['signatures'].extend(roleinfo['signatures']) - status = sig.get_signature_status(temp_signable, rolename, - repository_name) - if len(status['good_sigs']) == 0: - metadata['version'] = metadata['version'] + 1 - signable = repo_lib.sign_metadata(metadata, roleinfo['signing_keyids'], - metadata_filename, repository_name) - - # non-partial write() - else: - if sig.verify(signable, rolename, repository_name): - metadata['version'] = metadata['version'] + 1 - signable = repo_lib.sign_metadata(metadata, roleinfo['signing_keyids'], - metadata_filename, repository_name) - - # Write the metadata to file if it contains a threshold of signatures. - signable['signatures'].extend(roleinfo['signatures']) - - if sig.verify(signable, rolename, repository_name) or write_partial: - repo_lib._remove_invalid_and_duplicate_signatures(signable, repository_name) - storage_backend = sslib_storage.FilesystemBackend() - filename = repo_lib.write_metadata_file(signable, metadata_filename, - metadata['version'], False, storage_backend) - - # 'signable' contains an invalid threshold of signatures. - else: - message = 'Not enough signatures for ' + repr(metadata_filename) - raise sslib_exceptions.Error(message, signable) - - return signable, filename - - - - -def create_new_project(project_name, metadata_directory, - location_in_repository = '', targets_directory=None, key=None, - repository_name='default'): - """ - - Create a new project object, instantiate barebones metadata for the - targets, and return a blank project object. On disk, create_new_project() - only creates the directories needed to hold the metadata and targets files. - The project object returned can be directly modified to meet the designer's - criteria and then written using the method project.write(). - - The project name provided is the one that will be added to the resulting - metadata file as it should be named in upstream. - - - project_name: - The name of the project as it should be called in upstream. For example, - targets/unclaimed/django should have its project_name set to "django". - - metadata_directory: - The directory that will eventually hold the metadata and target files of - the project. - - location_in_repository: - An optional argument to hold the "prefix" or the expected location for - the project files in the "upstream" repository. This value is only - used to sign metadata in a way that matches the future location - of the files. - - For example, targets/unclaimed/django should have its - location_in_repository set to "targets/unclaimed". - - targets_directory: - An optional argument to point the targets directory somewhere other than - the metadata directory if, for example, a project structure already - exists and the user does not want to move it.
- - key: - The public key to verify the project's metadata. Projects can only - handle one key with a threshold of one. If a project were to modify its - key, it should be removed and updated. - - repository_name: - The name of the repository. If not supplied, 'rolename' is added to the - 'default' repository. - - - securesystemslib.exceptions.FormatError, if the arguments are improperly - formatted or if the public key is not a valid one (if it is not None). - - OSError, if the filepaths provided do not have write permissions. - - - The 'metadata_directory' and 'targets_directory' directories are created - if they do not exist. - - - A 'tuf.developer_tool.Project' object. - """ - - # Does 'metadata_directory' have the correct format? - # Ensure the arguments have the appropriate number of objects and object - # types, and that all dict keys are properly named. - # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch. - sslib_formats.PATH_SCHEMA.check_match(metadata_directory) - - # Do the same for the location in the repo and the project name; we must - # ensure they are valid pathnames. - sslib_formats.NAME_SCHEMA.check_match(project_name) - sslib_formats.ANY_STRING_SCHEMA.check_match(location_in_repository) - sslib_formats.NAME_SCHEMA.check_match(repository_name) - - # For the targets directory we do the same, but first determine which - # layout the user needs. 'layout_type' defaults to "flat" (i.e., the cfg - # file lives where the metadata folder is located); with "repo-like", the - # cfg file goes into the "metadata" folder, and a new metadata folder is - # created inside the tree to separate targets and metadata. - layout_type = 'flat' - if targets_directory is None: - targets_directory = os.path.join(metadata_directory, TARGETS_DIRECTORY_NAME) - metadata_directory = \ - os.path.join(metadata_directory, METADATA_DIRECTORY_NAME) - layout_type = 'repo-like' - - if targets_directory is not None: - sslib_formats.PATH_SCHEMA.check_match(targets_directory) - - if key is not None: - sslib_formats.KEY_SCHEMA.check_match(key) - - # Set the metadata and targets directories. These directories - # are created if they do not exist. - metadata_directory = os.path.abspath(metadata_directory) - targets_directory = os.path.abspath(targets_directory) - - # Try to create the metadata directory that will hold all of the metadata - # files, such as 'root.txt' and 'release.txt'. - try: - message = 'Creating ' + repr(metadata_directory) - logger.info(message) - os.makedirs(metadata_directory) - - # 'OSError' raised if the leaf directory already exists or cannot be created. - # Check for case where 'repository_directory' has already been created. - except OSError as e: - if e.errno == errno.EEXIST: - # Should check if we have write permissions here. - pass - - # Testing of non-errno.EEXIST exceptions has been verified on all - # supported OSs. An unexpected exception (the '/' directory exists, - # rather than being a disallowed path) is possible on Travis, so the - # '#pragma: no branch' below is included to prevent coverage failure. - else: #pragma: no branch - raise - - # Try to create the targets directory that will hold all of the target files.
- try: - message = 'Creating ' + repr(targets_directory) - logger.info(message) - os.mkdir(targets_directory) - - except OSError as e: - if e.errno == errno.EEXIST: - pass - - else: - raise - - # Create the bare bones project object, where the project role contains - # default values (e.g., threshold of 1, expires 1 year into the future, etc.). - project = Project(project_name, metadata_directory, targets_directory, - location_in_repository, repository_name) - - # Add 'key' to the project. - # TODO: Add check for expected number of keys for the project (must be 1) and - # its delegated roles (may be greater than one). - if key is not None: - project.add_verification_key(key) - - # Save the layout information. - project.layout_type = layout_type - - return project - - - - - - -def _save_project_configuration(metadata_directory, targets_directory, - public_keys, prefix, threshold, layout_type, project_name): - """ - - Persist the project's information to a file. The saved project information - can later be loaded with load_project(). - - - metadata_directory: - Where the project's metadata is located. - - targets_directory: - The location of the target files for this project. - - public_keys: - A list containing the public keys for the project role. - - prefix: - The project's prefix (if any). - - threshold: - The threshold value for the project role. - - layout_type: - The layout type being used by the project; "flat" stands for separate - targets and metadata directories, while "repo-like" emulates the layout - used by the repository tools. - - project_name: - The name given to the project; this sets the metadata filename so it - matches the one stored in upstream. - - - securesystemslib.exceptions.FormatError, if any of the arguments are - malformed. - - OSError may be raised if the metadata_directory/project.cfg file exists - and is non-writable. - - - A 'project.cfg' configuration file is created or overwritten. - - - None. - """ - - # Schema check for the arguments. - sslib_formats.PATH_SCHEMA.check_match(metadata_directory) - sslib_formats.PATH_SCHEMA.check_match(prefix) - sslib_formats.PATH_SCHEMA.check_match(targets_directory) - formats.RELPATH_SCHEMA.check_match(project_name) - - cfg_file_directory = metadata_directory - - # Check whether the layout type is 'repo-like'. - # If it is, the .cfg file should be saved in the parent directory. - if layout_type == 'repo-like': - cfg_file_directory = os.path.dirname(metadata_directory) - junk, targets_directory = os.path.split(targets_directory) - - junk, metadata_directory = os.path.split(metadata_directory) - - # Build the path of the configuration file. - project_filename = os.path.join(cfg_file_directory, PROJECT_FILENAME) - - # Build the fields of the configuration file. - project_config = {} - project_config['prefix'] = prefix - project_config['public_keys'] = {} - project_config['metadata_location'] = metadata_directory - project_config['targets_location'] = targets_directory - project_config['threshold'] = threshold - project_config['layout_type'] = layout_type - project_config['project_name'] = project_name - - # Build a dictionary containing the actual keys. - for key in public_keys: - key_info = keydb.get_key(key) - key_metadata = format_keyval_to_metadata(key_info['keytype'], - key_info['scheme'], key_info['keyval']) - project_config['public_keys'][key] = key_metadata - - # Save the actual file.
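A hypothetical end-to-end session with the developer tool, under the assumptions that an Ed25519 keypair named 'project_key' already exists on disk and that add_target() takes a path relative to the project's targets directory:

```python
from tuf import developer_tool as dt

public_key = dt.import_ed25519_publickey_from_file('project_key.pub')

# Creates <project-dir>/metadata and <project-dir>/targets ('repo-like' layout).
project = dt.create_new_project('django', 'project-dir',
    location_in_repository='targets/unclaimed', key=public_key)

project.add_target('setup.py')  # assumed relative to project-dir/targets

private_key = dt.import_ed25519_privatekey_from_file('project_key', password='pw')
project.load_signing_key(private_key)
project.write()  # writes the role metadata and project.cfg described above
```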
- with open(project_filename, 'wt', encoding='utf8') as fp: - json.dump(project_config, fp) - - - - - -def load_project(project_directory, prefix='', new_targets_location=None, - repository_name='default'): - """ - - Return a Project object initialized with the contents of the metadata - files loaded from 'project_directory'. - - - project_directory: - The path to the project's metadata and configuration file. - - prefix: - The prefix for the metadata, if defined. It will replace the prefix - currently saved in the project configuration. - - new_targets_location: - For flat project configurations, the project owner might want to reload - the project with a new location for the target files. This overrides the - previous path used to search for the target files. - - repository_name: - The name of the repository. If not supplied, 'rolename' is added to the - 'default' repository. - - - securesystemslib.exceptions.FormatError, if 'project_directory' or any of - the metadata files are improperly formatted. - - - All the metadata files found in the project are loaded and their contents - stored in a Project object. - - - A tuf.developer_tool.Project object. - """ - - # Does 'project_directory' have the correct format? - # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch. - sslib_formats.PATH_SCHEMA.check_match(project_directory) - sslib_formats.NAME_SCHEMA.check_match(repository_name) - - # Do the same for the prefix. - sslib_formats.ANY_STRING_SCHEMA.check_match(prefix) - - # Clear the role and key databases since we are loading in a new project. - roledb.clear_roledb(clear_all=True) - keydb.clear_keydb(clear_all=True) - - # Locate metadata filepaths and targets filepath. - project_directory = os.path.abspath(project_directory) - - # Load the cfg file and the project. - config_filename = os.path.join(project_directory, PROJECT_FILENAME) - - project_configuration = sslib_util.load_json_file(config_filename) - formats.PROJECT_CFG_SCHEMA.check_match(project_configuration) - - targets_directory = os.path.join(project_directory, - project_configuration['targets_location']) - - if project_configuration['layout_type'] == 'flat': - project_directory, junk = os.path.split(project_directory) - targets_directory = project_configuration['targets_location'] - - if new_targets_location is not None: - targets_directory = new_targets_location - - metadata_directory = os.path.join(project_directory, - project_configuration['metadata_location']) - - new_prefix = None - - if prefix != '': - new_prefix = prefix - - prefix = project_configuration['prefix'] - - # Load the project's filename. - project_name = project_configuration['project_name'] - project_filename = project_name + METADATA_EXTENSION - - # Create a blank project on the target directory. - project = Project(project_name, metadata_directory, targets_directory, prefix, - repository_name) - - project.threshold = project_configuration['threshold'] - project.prefix = project_configuration['prefix'] - project.layout_type = project_configuration['layout_type'] - - # Traverse the public keys and add them to the project. - keydict = project_configuration['public_keys'] - - for keyid in keydict: - key, junk = format_metadata_to_key(keydict[keyid]) - project.add_verification_key(key) - - # Load the project's metadata.
- targets_metadata_path = os.path.join(project_directory, metadata_directory, - project_filename) - signable = sslib_util.load_json_file(targets_metadata_path) - try: - formats.check_signable_object_format(signable) - except exceptions.UnsignedMetadataError: - # Downgrade the error to a warning because a use case exists where - # metadata may be generated unsigned on one machine and signed on another. - logger.warning('Unsigned metadata object: ' + repr(signable)) - targets_metadata = signable['signed'] - - # Remove the prefix from the metadata. - targets_metadata = _strip_prefix_from_targets_metadata(targets_metadata, - prefix) - for signature in signable['signatures']: - project.add_signature(signature) - - # Update roledb.py containing the loaded project attributes. - roleinfo = roledb.get_roleinfo(project_name, repository_name) - roleinfo['signatures'].extend(signable['signatures']) - roleinfo['version'] = targets_metadata['version'] - roleinfo['paths'] = targets_metadata['targets'] - roleinfo['delegations'] = targets_metadata['delegations'] - roleinfo['partial_loaded'] = False - - # Check if the loaded metadata was partially written and update the - # flag in 'roledb.py'. - if _metadata_is_partially_loaded(project_name, signable, - repository_name=repository_name): - roleinfo['partial_loaded'] = True - - roledb.update_roleinfo(project_name, roleinfo, mark_role_as_dirty=False, - repository_name=repository_name) - - for key_metadata in targets_metadata['delegations']['keys'].values(): - key_object, junk = format_metadata_to_key(key_metadata) - keydb.add_key(key_object, repository_name=repository_name) - - for role in targets_metadata['delegations']['roles']: - rolename = role['name'] - roleinfo = {'name': role['name'], 'keyids': role['keyids'], - 'threshold': role['threshold'], - 'signing_keyids': [], 'signatures': [], 'partial_loaded':False, - 'delegations': {'keys':{}, 'roles':[]} - } - roledb.add_role(rolename, roleinfo, repository_name=repository_name) - - # Load the delegated metadata and generate their fileinfo. - targets_objects = {} - loaded_metadata = [project_name] - targets_objects[project_name] = project - metadata_directory = os.path.join(project_directory, metadata_directory) - - if os.path.exists(metadata_directory) and \ - os.path.isdir(metadata_directory): - for metadata_role in os.listdir(metadata_directory): - metadata_path = os.path.join(metadata_directory, metadata_role) - metadata_name = \ - metadata_path[len(metadata_directory):].lstrip(os.path.sep) - - # Strip the extension. The roledb does not include an appended '.json' - # extension for each role. - if metadata_name.endswith(METADATA_EXTENSION): - extension_length = len(METADATA_EXTENSION) - metadata_name = metadata_name[:-extension_length] - - else: - continue - - if metadata_name in loaded_metadata: - continue - - signable = None - signable = sslib_util.load_json_file(metadata_path) - - # Strip the prefix from the local working copy, it will be added again - # when the targets metadata is written to disk. 
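Stripping the prefix back off on load (see _strip_prefix_from_targets_metadata() further below) leans on os.path.relpath(); a two-line illustration with assumed paths:

```python
import os

# The loader undoes the upstream prefix applied at write time this way:
print(os.path.relpath('targets/unclaimed/setup.py', 'targets/unclaimed'))
# -> 'setup.py'
```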
- metadata_object = signable['signed'] - metadata_object = _strip_prefix_from_targets_metadata(metadata_object, - prefix) - - roleinfo = roledb.get_roleinfo(metadata_name, repository_name) - roleinfo['signatures'].extend(signable['signatures']) - roleinfo['version'] = metadata_object['version'] - roleinfo['expires'] = metadata_object['expires'] - roleinfo['paths'] = {} - - for filepath, fileinfo in metadata_object['targets'].items(): - roleinfo['paths'].update({filepath: fileinfo.get('custom', {})}) - roleinfo['delegations'] = metadata_object['delegations'] - roleinfo['partial_loaded'] = False - - # If the metadata was partially loaded, update the roleinfo flag. - if _metadata_is_partially_loaded(metadata_name, signable, - repository_name=repository_name): - roleinfo['partial_loaded'] = True - - - roledb.update_roleinfo(metadata_name, roleinfo, - mark_role_as_dirty=False, repository_name=repository_name) - - # Append to list of elements to avoid reloading repeated metadata. - loaded_metadata.append(metadata_name) - - # Generate the Targets objects of the delegated roles. - new_targets_object = Targets(targets_directory, metadata_name, roleinfo, - repository_name=repository_name) - targets_object = targets_objects[project_name] - - targets_object._delegated_roles[metadata_name] = new_targets_object - - # Add the keys specified in the delegations field of the Targets role. - for key_metadata in metadata_object['delegations']['keys'].values(): - key_object, junk = format_metadata_to_key(key_metadata) - - try: - keydb.add_key(key_object, repository_name=repository_name) - - except exceptions.KeyAlreadyExistsError: - pass - - for role in metadata_object['delegations']['roles']: - rolename = role['name'] - roleinfo = {'name': role['name'], 'keyids': role['keyids'], - 'threshold': role['threshold'], - 'signing_keyids': [], 'signatures': [], - 'partial_loaded': False, - 'delegations': {'keys': {}, - 'roles': []}} - roledb.add_role(rolename, roleinfo, repository_name=repository_name) - - if new_prefix: - project.prefix = new_prefix - - return project - - - - - -def _strip_prefix_from_targets_metadata(targets_metadata, prefix): - """ - Non-public method that removes the prefix from each of the target paths in - 'targets_metadata' so they can be used again in compliance with the local - copies. The prefix is needed in metadata to match the layout of the remote - repository. - """ - - unprefixed_targets_metadata = {} - - for targets in targets_metadata['targets'].keys(): - unprefixed_target = os.path.relpath(targets, prefix) - unprefixed_targets_metadata[unprefixed_target] = \ - targets_metadata['targets'][targets] - targets_metadata['targets'] = unprefixed_targets_metadata - - return targets_metadata - - - - - -if __name__ == '__main__': - # The interactive sessions of the documentation strings can - # be tested by running 'developer_tool.py' as a standalone module: - # $ python3 developer_tool.py - import doctest - doctest.testmod() diff --git a/tuf/download.py b/tuf/download.py deleted file mode 100755 index af12af614b..0000000000 --- a/tuf/download.py +++ /dev/null @@ -1,314 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2012 - 2017, New York University and the TUF contributors -# SPDX-License-Identifier: MIT OR Apache-2.0 - -""" - - download.py - - - February 21, 2012. Based on previous version by Geremy Condra. - - - Konstantin Andrianov - Vladimir Diaz - - - See LICENSE-MIT OR LICENSE for licensing information. - - - Download metadata and target files and check their validity. 
The hash and - length of a downloaded file have to match the hash and length supplied by the - metadata of that file. -""" - -import logging -import timeit -import tempfile -from urllib import parse - -import securesystemslib # pylint: disable=unused-import -from securesystemslib import formats as sslib_formats - -from tuf import exceptions -from tuf import formats -from tuf import settings - -# See 'log.py' to learn how logging is handled in TUF. -logger = logging.getLogger(__name__) - - -def safe_download(url, required_length, fetcher): - """ - - Given the 'url' and 'required_length' of the desired file, open a connection - to 'url', download it, and return the contents of the file. Also ensure - the length of the downloaded file matches 'required_length' exactly. - download.unsafe_download() may be called if an upper download limit is - preferred. - - - url: - A URL string that represents the location of the file. - - required_length: - An integer value representing the length of the file. This is an exact - limit. - - fetcher: - An object implementing FetcherInterface that performs the network IO - operations. - - - A file object is created on disk to store the contents of 'url'. - - - tuf.exceptions.DownloadLengthMismatchError, if there was a - mismatch of observed vs expected lengths while downloading the file. - - securesystemslib.exceptions.FormatError, if any of the arguments are - improperly formatted. - - Any other unforeseen runtime exception. - - - A file object that points to the contents of 'url'. - """ - - # Do all of the arguments have the appropriate format? - # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch. - sslib_formats.URL_SCHEMA.check_match(url) - formats.LENGTH_SCHEMA.check_match(required_length) - - return _download_file(url, required_length, fetcher, STRICT_REQUIRED_LENGTH=True) - - - - - -def unsafe_download(url, required_length, fetcher): - """ - - Given the 'url' and 'required_length' of the desired file, open a connection - to 'url', download it, and return the contents of the file. Also ensure - the length of the downloaded file is up to 'required_length', and no larger. - download.safe_download() may be called if an exact download limit is - preferred. - - - url: - A URL string that represents the location of the file. - - required_length: - An integer value representing the length of the file. This is an upper - limit. - - fetcher: - An object implementing FetcherInterface that performs the network IO - operations. - - - A file object is created on disk to store the contents of 'url'. - - - tuf.exceptions.DownloadLengthMismatchError, if there was a - mismatch of observed vs expected lengths while downloading the file. - - securesystemslib.exceptions.FormatError, if any of the arguments are - improperly formatted. - - Any other unforeseen runtime exception. - - - A file object that points to the contents of 'url'. - """ - - # Do all of the arguments have the appropriate format? - # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
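A small sketch contrasting the two entry points just documented; 'fetcher' is any FetcherInterface implementation, and the timestamp length constant is assumed to be the one defined in tuf.settings:

```python
from tuf import download, settings

def fetch_metadata_file(url, fetcher, trusted_length=None):
    # Exact-length enforcement when a signed length is known (e.g., snapshot);
    # otherwise only an upper bound, as for timestamp metadata.
    if trusted_length is not None:
        return download.safe_download(url, trusted_length, fetcher)
    return download.unsafe_download(
        url, settings.DEFAULT_TIMESTAMP_REQUIRED_LENGTH, fetcher)
```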
- sslib_formats.URL_SCHEMA.check_match(url) - formats.LENGTH_SCHEMA.check_match(required_length) - - return _download_file(url, required_length, fetcher, STRICT_REQUIRED_LENGTH=False) - - - - - -def _download_file(url, required_length, fetcher, STRICT_REQUIRED_LENGTH=True): - """ - - Given the url and length of the desired file, this function opens a - connection to 'url' and downloads the file while ensuring its length - matches 'required_length' if 'STRICT_REQUIRED_LENGTH' is True (if False, - the file's length is not checked, and a slow retrieval exception is raised - if the download rate falls below the acceptable rate). - - - url: - A URL string that represents the location of the file. - - required_length: - An integer value representing the length of the file. - - STRICT_REQUIRED_LENGTH: - A Boolean indicator used to signal whether we should perform strict - checking of required_length. True by default. We explicitly set this to - False when we know that we want to turn this off for downloading the - timestamp metadata, which has no signed required_length. - - - A file object is created on disk to store the contents of 'url'. - - - tuf.exceptions.DownloadLengthMismatchError, if there was a - mismatch of observed vs expected lengths while downloading the file. - - securesystemslib.exceptions.FormatError, if any of the arguments are - improperly formatted. - - Any other unforeseen runtime exception. - - - A file object that points to the contents of 'url'. - """ - # 'url.replace('\\', '/')' is needed for compatibility with Windows-based - # systems, because they might use back-slashes in place of forward-slashes. - # This converts it to the common format. unquote() replaces %xx escapes in a - # url with their single-character equivalent. A back-slash may be encoded as - # %5c in the url, which should also be replaced with a forward slash. - url = parse.unquote(url).replace('\\', '/') - logger.info('Downloading: ' + repr(url)) - - # This is the temporary file that we will return to contain the contents of - # the downloaded file. - temp_file = tempfile.TemporaryFile() - - average_download_speed = 0 - number_of_bytes_received = 0 - - try: - chunks = fetcher.fetch(url, required_length) - start_time = timeit.default_timer() - for chunk in chunks: - - stop_time = timeit.default_timer() - temp_file.write(chunk) - - # Measure the average download speed. - number_of_bytes_received += len(chunk) - seconds_spent_receiving = stop_time - start_time - average_download_speed = number_of_bytes_received / seconds_spent_receiving - - if average_download_speed < settings.MIN_AVERAGE_DOWNLOAD_SPEED: - logger.debug('The average download speed dropped below the minimum' - ' average download speed set in settings. Stopping the download!') - break - - else: - logger.debug('The average download speed has not dipped below the' - ' minimum average download speed set in settings.') - - # Does the total number of downloaded bytes match the required length? - _check_downloaded_length(number_of_bytes_received, required_length, - STRICT_REQUIRED_LENGTH=STRICT_REQUIRED_LENGTH, - average_download_speed=average_download_speed) - - except Exception: - # Close 'temp_file'. Any written data is lost.
- temp_file.close() - logger.debug('Could not download URL: ' + repr(url)) - raise - - else: - return temp_file - - - - -def _check_downloaded_length(total_downloaded, required_length, - STRICT_REQUIRED_LENGTH=True, - average_download_speed=None): - """ - - A helper function which checks whether the total number of downloaded bytes - matches our expectation. - - - total_downloaded: - The total number of bytes supposedly downloaded for the file in question. - - required_length: - The total number of bytes expected of the file as seen from its metadata. - The Timestamp role is always downloaded without a known file length, as - is the Root role when the client cannot download any of the required - top-level roles. In both cases, 'required_length' is actually an upper - limit on the length of the downloaded file. - - STRICT_REQUIRED_LENGTH: - A Boolean indicator used to signal whether we should perform strict - checking of required_length. True by default. We explicitly set this to - False when we know that we want to turn this off for downloading the - timestamp metadata, which has no signed required_length. - - average_download_speed: - The average download speed for the downloaded file. - - - None. - - - tuf.exceptions.DownloadLengthMismatchError, if - STRICT_REQUIRED_LENGTH is True and total_downloaded is not equal to - required_length. - - tuf.exceptions.SlowRetrievalError, if the download was slower - than the acceptable download speed (as set in - tuf.settings). - - - None. - """ - - if total_downloaded == required_length: - logger.info('Downloaded ' + str(total_downloaded) + ' bytes out of the' - ' expected ' + str(required_length) + ' bytes.') - - else: - difference_in_bytes = abs(total_downloaded - required_length) - - # What we downloaded is not equal to the required length, but did we ask - # for strict checking of required length? - if STRICT_REQUIRED_LENGTH: - logger.info('Downloaded ' + str(total_downloaded) + ' bytes, but' - ' expected ' + str(required_length) + ' bytes. There is a difference' - ' of ' + str(difference_in_bytes) + ' bytes.') - - # If the average download speed is below a certain threshold, we flag - # this as a possible slow-retrieval attack. - logger.debug('Average download speed: ' + repr(average_download_speed)) - logger.debug('Minimum average download speed: ' + repr(settings.MIN_AVERAGE_DOWNLOAD_SPEED)) - - if average_download_speed < settings.MIN_AVERAGE_DOWNLOAD_SPEED: - raise exceptions.SlowRetrievalError(average_download_speed) - - else: - logger.debug('Good average download speed: ' + - repr(average_download_speed) + ' bytes per second') - - raise exceptions.DownloadLengthMismatchError(required_length, total_downloaded) - - else: - # We specifically disabled strict checking of required length, but we - # will log a warning anyway. This is useful when we wish to download the - # Timestamp or Root metadata, for which we have no signed metadata; so, - # we must guess a reasonable required_length for it.
- if average_download_speed < settings.MIN_AVERAGE_DOWNLOAD_SPEED: - raise exceptions.SlowRetrievalError(average_download_speed) - - else: - logger.debug('Good average download speed: ' + - repr(average_download_speed) + ' bytes per second') - - logger.info('Downloaded ' + str(total_downloaded) + ' bytes out of an' - ' upper limit of ' + str(required_length) + ' bytes.') diff --git a/tuf/exceptions.py b/tuf/exceptions.py deleted file mode 100755 index 8ebc92c7d1..0000000000 --- a/tuf/exceptions.py +++ /dev/null @@ -1,338 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2017, New York University and the TUF contributors -# SPDX-License-Identifier: MIT OR Apache-2.0 - -""" - - exceptions.py - - - Vladimir Diaz - - - January 10, 2017 - - - See LICENSE-MIT OR LICENSE for licensing information. - - - Define TUF Exceptions. - The names chosen for TUF Exception classes should end in 'Error' except where - there is a good reason not to, and provide that reason in those cases. -""" - -from urllib import parse - -from typing import Any, Dict, Optional - -import logging -logger = logging.getLogger(__name__) - - -class Error(Exception): - """Indicate a generic error.""" - - -class UnsupportedSpecificationError(Error): - """ - Metadata received claims to conform to a version of the specification that is - not supported by this client. - """ - -class FormatError(Error): - """Indicate an error while validating an object's format.""" - - -class InvalidMetadataJSONError(FormatError): - """Indicate that a metadata file is not valid JSON.""" - - def __init__(self, exception: BaseException): - super(InvalidMetadataJSONError, self).__init__() - - # Store the original exception. - self.exception = exception - - def __str__(self) -> str: - return repr(self) - - def __repr__(self) -> str: - # Show the original exception. 
- return self.__class__.__name__ + ' : wraps error: ' + repr(self.exception) - - # # Directly instance-reproducing: - # return self.__class__.__name__ + '(' + repr(self.exception) + ')' - - -class UnsupportedAlgorithmError(Error): - """Indicate an error while trying to identify a user-specified algorithm.""" - -class LengthOrHashMismatchError(Error): - """Indicate an error while checking the length and hash values of an object.""" - -class RepositoryError(Error): - """Indicate an error with a repository's state, such as a missing file.""" - -class BadHashError(RepositoryError): - """Indicate an error while checking the value of a hash object.""" - - def __init__(self, expected_hash: str, observed_hash: str): - super(BadHashError, self).__init__() - - self.expected_hash = expected_hash - self.observed_hash = observed_hash - - def __str__(self) -> str: - return ( - 'Observed hash (' + repr(self.observed_hash) + ') != expected hash (' + - repr(self.expected_hash) + ')') - - def __repr__(self) -> str: - return self.__class__.__name__ + ' : ' + str(self) - - # # Directly instance-reproducing: - # return ( - # self.__class__.__name__ + '(' + repr(self.expected_hash) + ', ' + - # repr(self.observed_hash) + ')') - - -class BadPasswordError(Error): - """Indicate an error after encountering an invalid password.""" - - -class UnknownKeyError(Error): - """Indicate an error while verifying key-like objects (e.g., keyids).""" - - -class BadVersionNumberError(RepositoryError): - """Indicate an error for metadata that contains an invalid version number.""" - - -class MissingLocalRepositoryError(RepositoryError): - """Raised when a local repository could not be found.""" - - -class InsufficientKeysError(Error): - """Indicate that a metadata role lacks a threshold of public or private keys.""" - - -class ForbiddenTargetError(RepositoryError): - """Indicate that a role signed for a target that it was not delegated to.""" - - -class ExpiredMetadataError(RepositoryError): - """Indicate that a TUF metadata file has expired.""" - - -class ReplayedMetadataError(RepositoryError): - """Indicate that some metadata has been replayed to the client.""" - - def __init__(self, metadata_role: str, downloaded_version: int, current_version: int): - super(ReplayedMetadataError, self).__init__() - - self.metadata_role = metadata_role - self.downloaded_version = downloaded_version - self.current_version = current_version - - def __str__(self) -> str: - return ( - 'Downloaded ' + repr(self.metadata_role) + ' is older (' + - repr(self.downloaded_version) + ') than the version currently ' - 'installed (' + repr(self.current_version) + ').') - - def __repr__(self) -> str: - return self.__class__.__name__ + ' : ' + str(self) - - -class CryptoError(Error): - """Indicate any cryptography-related errors.""" - - -class BadSignatureError(CryptoError): - """Indicate that some metadata file has a bad signature.""" - - def __init__(self, metadata_role_name: str): - super(BadSignatureError, self).__init__() - - self.metadata_role_name = metadata_role_name - - def __str__(self) -> str: - return repr(self.metadata_role_name) + ' metadata has a bad signature.'
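- - # For example (illustrative), str(BadSignatureError('root')) evaluates to: - #   "'root' metadata has a bad signature."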
- - def __repr__(self) -> str: - return self.__class__.__name__ + ' : ' + str(self) - - # # Directly instance-reproducing: - # return ( - # self.__class__.__name__ + '(' + repr(self.metadata_role_name) + ')') - - -class UnknownMethodError(CryptoError): - """Indicate that a user-specified cryptographic method is unknown.""" - - -class UnsupportedLibraryError(Error): - """Indicate that a supported library could not be located or imported.""" - - -class DownloadError(Error): - """Indicate an error occurred while attempting to download a file.""" - - -class DownloadLengthMismatchError(DownloadError): - """Indicate that a mismatch of lengths was seen while downloading a file.""" - - def __init__(self, expected_length: int, observed_length: int): - super(DownloadLengthMismatchError, self).__init__() - - self.expected_length = expected_length #bytes - self.observed_length = observed_length #bytes - - def __str__(self) -> str: - return ( - 'Observed length (' + repr(self.observed_length) + - ') < expected length (' + repr(self.expected_length) + ').') - - def __repr__(self) -> str: - return self.__class__.__name__ + ' : ' + str(self) - - # # Directly instance-reproducing: - # return ( - # self.__class__.__name__ + '(' + repr(self.expected_length) + ', ' + - # repr(self.observed_length) + ')') - - - -class SlowRetrievalError(DownloadError): - """Indicate that downloading a file took an unreasonably long time.""" - - def __init__(self, average_download_speed: Optional[int] = None): - super(SlowRetrievalError, self).__init__() - - self.__average_download_speed = average_download_speed #bytes/second - - def __str__(self) -> str: - msg = 'Download was too slow.' - if self.__average_download_speed is not None: - msg = ('Download was too slow. Average speed: ' + - repr(self.__average_download_speed) + ' bytes per second.') - - return msg - - def __repr__(self) -> str: - return self.__class__.__name__ + ' : ' + str(self) - - # # Directly instance-reproducing: - # return ( - # self.__class__.__name__ + '(' + repr(self.__average_download_speed) + ')') - - -class KeyAlreadyExistsError(Error): - """Indicate that a key already exists and cannot be added.""" - - -class RoleAlreadyExistsError(Error): - """Indicate that a role already exists and cannot be added.""" - - -class UnknownRoleError(Error): - """Indicate an error trying to locate or identify a specified TUF role.""" - - -class UnknownTargetError(Error): - """Indicate an error trying to locate or identify a specified target.""" - - -class InvalidNameError(Error): - """Indicate an error while trying to validate any type of named object.""" - - -class UnsignedMetadataError(RepositoryError): - """Indicate a metadata object with an insufficient threshold of signatures.""" - - # signable is not used but kept in method signature for backwards compat - def __init__(self, message: str, signable: Any = None): - super(UnsignedMetadataError, self).__init__() - - self.exception_message = message - self.signable = signable - - def __str__(self) -> str: - return self.exception_message - - def __repr__(self) -> str: - return self.__class__.__name__ + ' : ' + str(self) - - # # Directly instance-reproducing: - # return ( - # self.__class__.__name__ + '(' + repr(self.exception_message) + ', ' + - # repr(self.signable) + ')') - - -class NoWorkingMirrorError(Error): - """ - An updater raises this exception when it could not download a - metadata or target file from any mirror. - A dictionary of Exception instances indexed by every mirror URL will also be - provided.
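- - Example (illustrative, assuming a configured client updater): - - try: - updater.refresh() - except exceptions.NoWorkingMirrorError as e: - for mirror_url, mirror_error in e.mirror_errors.items(): - print(mirror_url, mirror_error)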
- """ - - def __init__(self, mirror_errors: Dict[str, BaseException]): - super(NoWorkingMirrorError, self).__init__() - - # Dictionary of URL strings to Exception instances - self.mirror_errors = mirror_errors - - def __str__(self) -> str: - all_errors = 'No working mirror was found:' - - for mirror_url, mirror_error in self.mirror_errors.items(): - try: - # http://docs.python.org/2/library/urlparse.html#urlparse.urlparse - mirror_url_tokens = parse.urlparse(mirror_url) - - except Exception: - logger.exception('Failed to parse mirror URL: ' + repr(mirror_url)) - mirror_netloc = mirror_url - - else: - mirror_netloc = mirror_url_tokens.netloc - - all_errors += '\n ' + repr(mirror_netloc) + ': ' + repr(mirror_error) - - return all_errors - - def __repr__(self) -> str: - return self.__class__.__name__ + ' : ' + str(self) - - # # Directly instance-reproducing: - # return ( - # self.__class__.__name__ + '(' + repr(self.mirror_errors) + ')') - - - -class NotFoundError(Error): - """If a required configuration or resource is not found.""" - - -class URLMatchesNoPatternError(Error): - """If a URL does not match a user-specified regular expression.""" - -class URLParsingError(Error): - """If we are unable to parse a URL -- for example, if a hostname element - cannot be isoalted.""" - -class InvalidConfigurationError(Error): - """If a configuration object does not match the expected format.""" - -class FetcherHTTPError(Exception): - """ - Returned by FetcherInterface implementations for HTTP errors. - - Args: - message (str): The HTTP error messsage - status_code (int): The HTTP status code - """ - def __init__(self, message: str, status_code: int): - super(FetcherHTTPError, self).__init__(message) - self.status_code = status_code diff --git a/tuf/formats.py b/tuf/formats.py deleted file mode 100755 index ca304ca9e4..0000000000 --- a/tuf/formats.py +++ /dev/null @@ -1,1009 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2012 - 2017, New York University and the TUF contributors -# SPDX-License-Identifier: MIT OR Apache-2.0 - -""" - - formats.py - - - Geremy Condra - Vladimir Diaz - - - Refactored April 30, 2012. -vladimir.v.diaz - - - See LICENSE-MIT OR LICENSE for licensing information. - - - A central location for all format-related checking of TUF objects. - Some crypto-related formats may also be defined in securesystemslib. - Note: 'formats.py' depends heavily on 'schema.py', so the 'schema.py' - module should be read and understood before tackling this module. - - 'formats.py' can be broken down into two sections. (1) Schemas and object - matching. (2) Functions that help produce or verify TUF objects. - - The first section deals with schemas and object matching based on format. - There are two ways of checking the format of objects. The first method - raises a 'securesystemslib.exceptions.FormatError' exception if the match - fails and the other returns a Boolean result. - - tuf.formats..check_match(object) - tuf.formats..matches(object) - - Example: - - rsa_key = {'keytype': 'rsa' - 'keyid': 34892fc465ac76bc3232fab - 'keyval': {'public': 'public_key', - 'private': 'private_key'} - - securesystemslib.formats.RSAKEY_SCHEMA.check_match(rsa_key) - securesystemslib.formats.RSAKEY_SCHEMA.matches(rsa_key) - - In this example, if a dict key or dict value is missing or incorrect, - the match fails. There are numerous variations of object checking - provided by 'formats.py' and 'schema.py'. - - The second section contains miscellaneous functions related to the format of - TUF objects. 
- Example: - - signable_object = make_signable(unsigned_object) -""" - -import binascii -import calendar -import datetime -import time -import copy - -from securesystemslib import exceptions as sslib_exceptions -from securesystemslib import formats as sslib_formats -from securesystemslib import schema as SCHEMA - -import tuf -from tuf import exceptions - -# As per TUF spec 1.0.0 the spec version field must follow the Semantic -# Versioning 2.0.0 (semver) format. The regex pattern is provided by semver. -# https://semver.org/spec/v2.0.0.html#is-there-a-suggested-regular-expression-regex-to-check-a-semver-string -SEMVER_2_0_0_SCHEMA = SCHEMA.RegularExpression( - r'(?P<major>0|[1-9]\d*)\.(?P<minor>0|[1-9]\d*)\.(?P<patch>0|[1-9]\d*)' - r'(?:-(?P<prerelease>(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)' - r'(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?' - r'(?:\+(?P<buildmetadata>[0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?' -) -SPECIFICATION_VERSION_SCHEMA = SCHEMA.OneOf([ - # Temporarily allow "1.0" for backwards-compatibility in tuf-0.12.PATCH. - SCHEMA.String("1.0"), - SEMVER_2_0_0_SCHEMA -]) - -# A datetime in 'YYYY-MM-DDTHH:MM:SSZ' ISO 8601 format. The "Z" zone designator -# for the zero UTC offset is always used (i.e., a numerical offset is not -# supported.) Example: '2015-10-21T13:20:00Z'. Note: This is a simple format -# check, and an ISO8601 string should be fully verified when it is parsed. -ISO8601_DATETIME_SCHEMA = SCHEMA.RegularExpression(r'\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z') - -# An integer representing the numbered version of a metadata file. -# Must be 0, or greater. -METADATAVERSION_SCHEMA = SCHEMA.Integer(lo=0) - -# A relative file path (e.g., 'metadata/root/'). -RELPATH_SCHEMA = SCHEMA.AnyString() -RELPATHS_SCHEMA = SCHEMA.ListOf(RELPATH_SCHEMA) - -VERSIONINFO_SCHEMA = SCHEMA.Object( - object_name = 'VERSIONINFO_SCHEMA', - version = METADATAVERSION_SCHEMA) - -# A string representing a role's name. -ROLENAME_SCHEMA = SCHEMA.AnyString() - -# A role's threshold value (i.e., the minimum number -# of signatures required to sign a metadata file). -# Must be 1 or greater. -THRESHOLD_SCHEMA = SCHEMA.Integer(lo=1) - -# A hexadecimal value in '23432df87ab..' format. -HEX_SCHEMA = SCHEMA.RegularExpression(r'[a-fA-F0-9]+') - -# A path hash prefix is a hexadecimal string. -PATH_HASH_PREFIX_SCHEMA = HEX_SCHEMA - -# A list of path hash prefixes. -PATH_HASH_PREFIXES_SCHEMA = SCHEMA.ListOf(PATH_HASH_PREFIX_SCHEMA) - -# Role object in {'keyids': [keyids..], 'name': 'ABC', 'threshold': 1, -# 'paths':[filepaths..]} format. -# TODO: This is not a role. In further #660-related PRs, fix it, similar to -# the way I did in Uptane's TUF fork. -ROLE_SCHEMA = SCHEMA.Object( - object_name = 'ROLE_SCHEMA', - name = SCHEMA.Optional(ROLENAME_SCHEMA), - keyids = sslib_formats.KEYIDS_SCHEMA, - threshold = THRESHOLD_SCHEMA, - terminating = SCHEMA.Optional(sslib_formats.BOOLEAN_SCHEMA), - paths = SCHEMA.Optional(RELPATHS_SCHEMA), - path_hash_prefixes = SCHEMA.Optional(PATH_HASH_PREFIXES_SCHEMA)) - -# A dict of roles where the dict keys are role names and the dict values hold -# the role data/information.
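- # An illustrative (abbreviated) example of such a dict: - #   {'root': {'keyids': ['1a2b...'], 'threshold': 1}}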
-ROLEDICT_SCHEMA = SCHEMA.DictOf( - key_schema = ROLENAME_SCHEMA, - value_schema = ROLE_SCHEMA) - -# A dictionary of ROLEDICTs, where the dict keys are repository names and the -# dict values hold the role information for each role available on the named -# repository. -ROLEDICTDB_SCHEMA = SCHEMA.DictOf( - key_schema = sslib_formats.NAME_SCHEMA, - value_schema = ROLEDICT_SCHEMA) - -# Command argument list, as used by the CLI tool. -# Example: {'keytype': 'ed25519', 'expires': 365} -COMMAND_SCHEMA = SCHEMA.DictOf( - key_schema = sslib_formats.NAME_SCHEMA, - value_schema = SCHEMA.Any()) - -# A dictionary holding version information. -VERSION_SCHEMA = SCHEMA.Object( - object_name = 'VERSION_SCHEMA', - major = SCHEMA.Integer(lo=0), - minor = SCHEMA.Integer(lo=0), - fix = SCHEMA.Integer(lo=0)) - -# A value that is either True or False, on or off, etc. -BOOLEAN_SCHEMA = SCHEMA.Boolean() - -# A hexadecimal value in '23432df87ab..' format. -HASH_SCHEMA = SCHEMA.RegularExpression(r'[a-fA-F0-9]+') - -# A key identifier (e.g., a hexadecimal value identifying an RSA key). -KEYID_SCHEMA = HASH_SCHEMA - -# A list of KEYID_SCHEMA. -KEYIDS_SCHEMA = SCHEMA.ListOf(KEYID_SCHEMA) - -# The actual values of a key, as opposed to metadata such as a key type and -# key identifier ('rsa', 233df889cb). For RSA keys, the key value is a pair of -# public and private keys in PEM format stored as strings. -KEYVAL_SCHEMA = SCHEMA.Object( - object_name = 'KEYVAL_SCHEMA', - public = SCHEMA.AnyString(), - private = SCHEMA.Optional(SCHEMA.AnyString())) - -# A generic TUF key. All TUF keys should be saved to metadata files in this -# format. -KEY_SCHEMA = SCHEMA.Object( - object_name = 'KEY_SCHEMA', - keytype = SCHEMA.AnyString(), - keyval = KEYVAL_SCHEMA, - expires = SCHEMA.Optional(ISO8601_DATETIME_SCHEMA)) - -# A dict where the dict keys hold a keyid and the dict values a key object. -KEYDICT_SCHEMA = SCHEMA.DictOf( - key_schema = KEYID_SCHEMA, - value_schema = KEY_SCHEMA) - -# The format used by the key database to store keys. The dict keys hold a key -# identifier and the dict values any object. The key database should store -# key objects in the values (e.g., 'RSAKEY_SCHEMA', 'DSAKEY_SCHEMA'). -KEYDB_SCHEMA = SCHEMA.DictOf( - key_schema = KEYID_SCHEMA, - value_schema = SCHEMA.Any()) - -# A schema holding the result of checking the signatures of a particular -# 'SIGNABLE_SCHEMA' role. -# For example, how many of the signatures for the 'Targets' role are -# valid? This SCHEMA holds this information. See 'sig.py' for -# more information. -SIGNATURESTATUS_SCHEMA = SCHEMA.Object( - object_name = 'SIGNATURESTATUS_SCHEMA', - threshold = SCHEMA.Integer(), - good_sigs = KEYIDS_SCHEMA, - bad_sigs = KEYIDS_SCHEMA, - unknown_sigs = KEYIDS_SCHEMA, - untrusted_sigs = KEYIDS_SCHEMA) - -# An integer representing length. Must be 0, or greater. -LENGTH_SCHEMA = SCHEMA.Integer(lo=0) - -# A dict in {'sha256': '23432df87ab..', 'sha512': '34324abc34df..', ...} format. -HASHDICT_SCHEMA = SCHEMA.DictOf( - key_schema = SCHEMA.AnyString(), - value_schema = HASH_SCHEMA) - -# Information about target files, like file length and file hash(es). This -# schema allows the storage of multiple hashes for the same file (e.g., sha256 -# and sha512 may be computed for the same file and stored).
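- # An illustrative example (hash values abbreviated): - #   {'length': 1024, - #    'hashes': {'sha256': '30e64...', 'sha512': '91f2b...'}, - #    'custom': {'type': 'package'}}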
-TARGETS_FILEINFO_SCHEMA = SCHEMA.Object( - object_name = 'TARGETS_FILEINFO_SCHEMA', - length = LENGTH_SCHEMA, - hashes = HASHDICT_SCHEMA, - custom = SCHEMA.Optional(SCHEMA.Object())) - -# Information about snapshot and timestamp files. This schema allows for optional -# length and hashes, but version is mandatory. -METADATA_FILEINFO_SCHEMA = SCHEMA.Object( - object_name = 'METADATA_FILEINFO_SCHEMA', - length = SCHEMA.Optional(LENGTH_SCHEMA), - hashes = SCHEMA.Optional(HASHDICT_SCHEMA), - version = METADATAVERSION_SCHEMA) - -# A dict holding the version or file information for a particular metadata -# role. The dict keys hold the relative file paths, and the dict values the -# corresponding version numbers and/or file information. -FILEINFODICT_SCHEMA = SCHEMA.DictOf( - key_schema = RELPATH_SCHEMA, - value_schema = SCHEMA.OneOf([VERSIONINFO_SCHEMA, - METADATA_FILEINFO_SCHEMA])) - -# A dict holding the information for a particular target / file. The dict keys -# hold the relative file paths, and the dict values the corresponding file -# information. -FILEDICT_SCHEMA = SCHEMA.DictOf( - key_schema = RELPATH_SCHEMA, - value_schema = TARGETS_FILEINFO_SCHEMA) - -# A dict holding the info for a single target. -TARGETINFO_SCHEMA = SCHEMA.Object( - object_name = 'TARGETINFO_SCHEMA', - filepath = RELPATH_SCHEMA, - fileinfo = TARGETS_FILEINFO_SCHEMA) - -# A list of TARGETINFO_SCHEMA. -TARGETINFOS_SCHEMA = SCHEMA.ListOf(TARGETINFO_SCHEMA) - -# A string representing a named object. -NAME_SCHEMA = SCHEMA.AnyString() - -# A dict of repository names to mirrors. -REPO_NAMES_TO_MIRRORS_SCHEMA = SCHEMA.DictOf( - key_schema = NAME_SCHEMA, - value_schema = SCHEMA.ListOf(sslib_formats.URL_SCHEMA)) - -# An object containing the map file's "mapping" attribute. -MAPPING_SCHEMA = SCHEMA.ListOf(SCHEMA.Object( - paths = RELPATHS_SCHEMA, - repositories = SCHEMA.ListOf(NAME_SCHEMA), - terminating = BOOLEAN_SCHEMA, - threshold = THRESHOLD_SCHEMA)) - -# A dict containing the map file (named 'map.json', by default). The format of -# the map file is covered in TAP 4: Multiple repository consensus on entrusted -# targets. -MAPFILE_SCHEMA = SCHEMA.Object( - repositories = REPO_NAMES_TO_MIRRORS_SCHEMA, - mapping = MAPPING_SCHEMA) - -# Like ROLEDICT_SCHEMA, except that ROLE_SCHEMA instances are stored in order. -ROLELIST_SCHEMA = SCHEMA.ListOf(ROLE_SCHEMA) - -# The delegated roles of a Targets role (a parent). -DELEGATIONS_SCHEMA = SCHEMA.Object( - keys = KEYDICT_SCHEMA, - roles = ROLELIST_SCHEMA) - -# The number of hashed bins, or the number of delegated roles. See -# delegate_hashed_bins() in 'repository_tool.py' for an example. Note: -# Tools may require further restrictions on the number of bins, such -# as requiring them to be a power of 2. -NUMBINS_SCHEMA = SCHEMA.Integer(lo=1) - -# The fileinfo format of targets specified in the repository and -# developer tools. The fields match those of TARGETS_FILEINFO_SCHEMA, except -# that all fields are optional.
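- # An illustrative example with only hashes present (value abbreviated): - #   {'hashes': {'sha256': '30e64...'}}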
-CUSTOM_SCHEMA = SCHEMA.DictOf( - key_schema = SCHEMA.AnyString(), - value_schema = SCHEMA.Any() -) -LOOSE_TARGETS_FILEINFO_SCHEMA = SCHEMA.Object( - object_name = "LOOSE_TARGETS_FILEINFO_SCHEMA", - length = SCHEMA.Optional(LENGTH_SCHEMA), - hashes = SCHEMA.Optional(HASHDICT_SCHEMA), - version = SCHEMA.Optional(METADATAVERSION_SCHEMA), - custom = SCHEMA.Optional(SCHEMA.Object()) -) - -PATH_FILEINFO_SCHEMA = SCHEMA.DictOf( - key_schema = RELPATH_SCHEMA, - value_schema = LOOSE_TARGETS_FILEINFO_SCHEMA) - -# TUF roledb -ROLEDB_SCHEMA = SCHEMA.Object( - object_name = 'ROLEDB_SCHEMA', - keyids = SCHEMA.Optional(KEYIDS_SCHEMA), - signing_keyids = SCHEMA.Optional(KEYIDS_SCHEMA), - previous_keyids = SCHEMA.Optional(KEYIDS_SCHEMA), - threshold = SCHEMA.Optional(THRESHOLD_SCHEMA), - previous_threshold = SCHEMA.Optional(THRESHOLD_SCHEMA), - version = SCHEMA.Optional(METADATAVERSION_SCHEMA), - expires = SCHEMA.Optional(ISO8601_DATETIME_SCHEMA), - signatures = SCHEMA.Optional(sslib_formats.SIGNATURES_SCHEMA), - paths = SCHEMA.Optional(SCHEMA.OneOf([RELPATHS_SCHEMA, PATH_FILEINFO_SCHEMA])), - path_hash_prefixes = SCHEMA.Optional(PATH_HASH_PREFIXES_SCHEMA), - delegations = SCHEMA.Optional(DELEGATIONS_SCHEMA), - partial_loaded = SCHEMA.Optional(BOOLEAN_SCHEMA)) - -# A signable object. Holds the signing role and its associated signatures. -SIGNABLE_SCHEMA = SCHEMA.Object( - object_name = 'SIGNABLE_SCHEMA', - signed = SCHEMA.Any(), - signatures = SCHEMA.ListOf(sslib_formats.SIGNATURE_SCHEMA)) - -# Root role: indicates root keys and top-level roles. -ROOT_SCHEMA = SCHEMA.Object( - object_name = 'ROOT_SCHEMA', - _type = SCHEMA.String('root'), - spec_version = SPECIFICATION_VERSION_SCHEMA, - version = METADATAVERSION_SCHEMA, - consistent_snapshot = BOOLEAN_SCHEMA, - expires = ISO8601_DATETIME_SCHEMA, - keys = KEYDICT_SCHEMA, - roles = ROLEDICT_SCHEMA) - -# Targets role: Indicates targets and delegates target paths to other roles. -TARGETS_SCHEMA = SCHEMA.Object( - object_name = 'TARGETS_SCHEMA', - _type = SCHEMA.String('targets'), - spec_version = SPECIFICATION_VERSION_SCHEMA, - version = METADATAVERSION_SCHEMA, - expires = ISO8601_DATETIME_SCHEMA, - targets = FILEDICT_SCHEMA, - delegations = SCHEMA.Optional(DELEGATIONS_SCHEMA)) - -# Snapshot role: indicates the latest versions of all metadata (except -# timestamp). -SNAPSHOT_SCHEMA = SCHEMA.Object( - object_name = 'SNAPSHOT_SCHEMA', - _type = SCHEMA.String('snapshot'), - version = METADATAVERSION_SCHEMA, - expires = sslib_formats.ISO8601_DATETIME_SCHEMA, - spec_version = SPECIFICATION_VERSION_SCHEMA, - meta = FILEINFODICT_SCHEMA) - -# Timestamp role: indicates the latest version of the snapshot file. 
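- # An illustrative 'meta' value for the Timestamp role (hash abbreviated): - #   {'snapshot.json': {'version': 7, - #                      'length': 515, - #                      'hashes': {'sha256': '30e64...'}}}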
-TIMESTAMP_SCHEMA = SCHEMA.Object( - object_name = 'TIMESTAMP_SCHEMA', - _type = SCHEMA.String('timestamp'), - spec_version = SPECIFICATION_VERSION_SCHEMA, - version = METADATAVERSION_SCHEMA, - expires = sslib_formats.ISO8601_DATETIME_SCHEMA, - meta = FILEINFODICT_SCHEMA) - - -# project.cfg file: stores information about the project in a JSON dictionary -PROJECT_CFG_SCHEMA = SCHEMA.Object( - object_name = 'PROJECT_CFG_SCHEMA', - project_name = SCHEMA.AnyString(), - layout_type = SCHEMA.OneOf([SCHEMA.String('repo-like'), SCHEMA.String('flat')]), - targets_location = sslib_formats.PATH_SCHEMA, - metadata_location = sslib_formats.PATH_SCHEMA, - prefix = sslib_formats.PATH_SCHEMA, - public_keys = sslib_formats.KEYDICT_SCHEMA, - threshold = SCHEMA.Integer(lo = 0, hi = 2) - ) - -# A schema containing information a repository mirror may require, -# such as a URL and the path of the directory containing metadata files. -MIRROR_SCHEMA = SCHEMA.Object( - object_name = 'MIRROR_SCHEMA', - url_prefix = sslib_formats.URL_SCHEMA, - metadata_path = SCHEMA.Optional(RELPATH_SCHEMA), - targets_path = SCHEMA.Optional(RELPATH_SCHEMA), - confined_target_dirs = SCHEMA.Optional(RELPATHS_SCHEMA), - custom = SCHEMA.Optional(SCHEMA.Object())) - -# A dictionary of mirrors where the dict keys hold the mirror's name and -# the dict values the mirror's data (i.e., 'MIRROR_SCHEMA'). -# The repository class of 'updater.py' accepts dictionaries -# of this type provided by the TUF client. -MIRRORDICT_SCHEMA = SCHEMA.DictOf( - key_schema = SCHEMA.AnyString(), - value_schema = MIRROR_SCHEMA) - -# A Mirrorlist: indicates all the live mirrors, and what documents they -# serve. -MIRRORLIST_SCHEMA = SCHEMA.Object( - object_name = 'MIRRORLIST_SCHEMA', - _type = SCHEMA.String('mirrors'), - version = METADATAVERSION_SCHEMA, - expires = sslib_formats.ISO8601_DATETIME_SCHEMA, - mirrors = SCHEMA.ListOf(MIRROR_SCHEMA)) - -# Any of the role schemas (e.g., TIMESTAMP_SCHEMA, SNAPSHOT_SCHEMA, etc.) -ANYROLE_SCHEMA = SCHEMA.OneOf([ROOT_SCHEMA, TARGETS_SCHEMA, SNAPSHOT_SCHEMA, - TIMESTAMP_SCHEMA, MIRROR_SCHEMA]) - -# The format of the resulting "scp config dict" after extraction from the -# push configuration file (i.e., push.cfg). In the case of a config file -# utilizing the scp transfer module, it must contain the 'general' and 'scp' -# sections, where 'general' must contain a 'transfer_module' and -# 'metadata_path' entry, and 'scp' the 'host', 'user', 'identity_file', and -# 'remote_directory' entries. -SCPCONFIG_SCHEMA = SCHEMA.Object( - object_name = 'SCPCONFIG_SCHEMA', - general = SCHEMA.Object( - object_name = '[general]', - transfer_module = SCHEMA.String('scp'), - metadata_path = sslib_formats.PATH_SCHEMA, - targets_directory = sslib_formats.PATH_SCHEMA), - scp=SCHEMA.Object( - object_name = '[scp]', - host = sslib_formats.URL_SCHEMA, - user = sslib_formats.NAME_SCHEMA, - identity_file = sslib_formats.PATH_SCHEMA, - remote_directory = sslib_formats.PATH_SCHEMA)) - -# The format of the resulting "receive config dict" after extraction from the -# receive configuration file (i.e., receive.cfg). The receive config file -# must contain a 'general' section, and this section the 'pushroots', -# 'repository_directory', 'metadata_directory', 'targets_directory', and -# 'backup_directory' entries.
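- # An illustrative example (paths hypothetical): - #   {'general': {'pushroots': ['/pushes'], - #                'repository_directory': '/repo', - #                'metadata_directory': '/repo/metadata', - #                'targets_directory': '/repo/targets', - #                'backup_directory': '/repo/backup'}}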
-RECEIVECONFIG_SCHEMA = SCHEMA.Object( - object_name = 'RECEIVECONFIG_SCHEMA', general=SCHEMA.Object( - object_name = '[general]', - pushroots = SCHEMA.ListOf(sslib_formats.PATH_SCHEMA), - repository_directory = sslib_formats.PATH_SCHEMA, - metadata_directory = sslib_formats.PATH_SCHEMA, - targets_directory = sslib_formats.PATH_SCHEMA, - backup_directory = sslib_formats.PATH_SCHEMA)) - - - -def make_signable(role_schema): - """ - - Return the role metadata 'role_schema' in 'SIGNABLE_SCHEMA' format. - 'role_schema' is added to the 'signed' key, and an empty list is - initialized to the 'signatures' key. The caller adds signatures - to this second field. - Note: check_signable_object_format() should be called after - make_signable() and signatures added to ensure the final - signable object has a valid format (i.e., a signable containing - a supported role metadata). - - - role_schema: - A role schema dict (e.g., 'ROOT_SCHEMA', 'SNAPSHOT_SCHEMA'). - - - None. - - - None. - - - A dict in 'SIGNABLE_SCHEMA' format. - """ - - if not isinstance(role_schema, dict) or 'signed' not in role_schema: - return { 'signed' : role_schema, 'signatures' : [] } - - else: - return role_schema - - - - - - -def build_dict_conforming_to_schema(schema, **kwargs): - """ - - Given a schema.Object object (for example, TIMESTAMP_SCHEMA from this - module) and a set of keyword arguments, create a dictionary that conforms - to the given schema, using the keyword arguments to define the elements of - the new dict. - - Checks the result to make sure that it conforms to the given schema, raising - an error if not. - - - schema - A schema.Object, like TIMESTAMP_SCHEMA, TARGETS_FILEINFO_SCHEMA, - securesystemslib.formats.SIGNATURE_SCHEMA, etc. - - **kwargs - A keyword argument for each element of the schema. Optional arguments - may be included or skipped, but all required arguments must be included. - - For example, for TIMESTAMP_SCHEMA, a call might look like: - build_dict_conforming_to_schema( - TIMESTAMP_SCHEMA, - _type='timestamp', - spec_version='1.0.0', - version=1, - expires='2020-01-01T00:00:00Z', - meta={...}) - Some arguments will be filled in if excluded: _type, spec_version. - - - A dictionary conforming to the given schema. Adds certain required fields - if they are missing and can be deduced from the schema. The data returned - is a deep copy. - - - securesystemslib.exceptions.FormatError - if the provided data does not match the schema when assembled. - - - None. In particular, the provided values are not modified, and the - returned dictionary does not include references to them. - - """ - - # Check the schema argument type (must provide check_match and _required). - if not isinstance(schema, SCHEMA.Object): - raise ValueError( - 'The first argument must be a schema.Object instance, but is not. ' - 'Given schema: ' + repr(schema)) - - # Make a copy of the provided fields so that the caller's provided values - # do not change when the returned values are changed. - dictionary = copy.deepcopy(kwargs) - - - # Automatically provide certain schema properties if they are not already - # provided and are required in objects of the given schema class. - # This includes: - # _type: the type string the schema expects - # spec_version: SPECIFICATION_VERSION_SCHEMA - # - # (Please note that _required is slightly misleading, as it includes both - # required and optional elements. It should probably be called _components.)
- # - for key, element_type in schema._required: #pylint: disable=protected-access - - if key in dictionary: - # If the field has been provided, proceed normally. - continue - - elif isinstance(element_type, SCHEMA.Optional): - # If the field has NOT been provided but IS optional, proceed without it. - continue - - else: - # If the field has not been provided and is required, check to see if - # the field is one of the fields we automatically fill. - - # Currently, the list is limited to ['_type', 'spec_version']. - - if key == '_type' and isinstance(element_type, SCHEMA.String): - # A SCHEMA.String stores its expected value in _string, so use that. - dictionary[key] = element_type._string #pylint: disable=protected-access - - elif (key == 'spec_version' and - element_type == SPECIFICATION_VERSION_SCHEMA): - # If not provided, use the specification version in tuf/__init__.py - dictionary[key] = tuf.SPECIFICATION_VERSION - - - # If what we produce does not match the provided schema, raise a FormatError. - schema.check_match(dictionary) - - return dictionary - - - - - -# A dict holding the recognized schemas for the top-level roles. -SCHEMAS_BY_TYPE = { - 'root' : ROOT_SCHEMA, - 'targets' : TARGETS_SCHEMA, - 'snapshot' : SNAPSHOT_SCHEMA, - 'timestamp' : TIMESTAMP_SCHEMA, - 'mirrors' : MIRRORLIST_SCHEMA} - - - - -def expiry_string_to_datetime(expires): - """ - - Convert an expiry string to a datetime object. - - expires: - The expiry date-time string in the ISO8601 format that is defined - in securesystemslib.ISO8601_DATETIME_SCHEMA. E.g., '2038-01-19T03:14:08Z' - - securesystemslib.exceptions.FormatError, if 'expires' cannot be - parsed correctly. - - None. - - A datetime object representing the expiry time. - """ - - # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch. - sslib_formats.ISO8601_DATETIME_SCHEMA.check_match(expires) - - try: - return datetime.datetime.strptime(expires, "%Y-%m-%dT%H:%M:%SZ") - except ValueError as error: - raise sslib_exceptions.FormatError( - 'Failed to parse ' + repr(expires) + ' as an expiry time') from error - - - - -def datetime_to_unix_timestamp(datetime_object): - """ - - Convert 'datetime_object' (in datetime.datetime() format) to a Unix/POSIX - timestamp. For example, Python's time.time() returns a Unix timestamp, and - includes the number of microseconds. 'datetime_object' is converted to UTC. - - >>> datetime_object = datetime.datetime(1985, 10, 26, 1, 22) - >>> timestamp = datetime_to_unix_timestamp(datetime_object) - >>> timestamp - 499137720 - - - datetime_object: - The datetime.datetime() object to convert to a Unix timestamp. - - - securesystemslib.exceptions.FormatError, if 'datetime_object' is not a - datetime.datetime() object. - - - None. - - - A Unix (POSIX) timestamp (e.g., 499137720). - """ - - # Is 'datetime_object' a datetime.datetime() object? - # Raise 'securesystemslib.exceptions.FormatError' if not. - if not isinstance(datetime_object, datetime.datetime): - message = repr(datetime_object) + ' is not a datetime.datetime() object.' - raise sslib_exceptions.FormatError(message) - - unix_timestamp = calendar.timegm(datetime_object.timetuple()) - - return unix_timestamp - - - - - -def unix_timestamp_to_datetime(unix_timestamp): - """ - - Convert 'unix_timestamp' (i.e., POSIX time, in UNIX_TIMESTAMP_SCHEMA format) - to a datetime.datetime() object. 'unix_timestamp' is the number of seconds - since the epoch (January 1, 1970).
- - >>> datetime_object = unix_timestamp_to_datetime(1445455680) - >>> datetime_object - datetime.datetime(2015, 10, 21, 19, 28) - - - unix_timestamp: - An integer representing the time (e.g., 1445455680). Conformant to - 'securesystemslib.formats.UNIX_TIMESTAMP_SCHEMA'. - - - securesystemslib.exceptions.FormatError, if 'unix_timestamp' is improperly - formatted. - - - None. - - - A datetime.datetime() object corresponding to 'unix_timestamp'. - """ - - # Is 'unix_timestamp' properly formatted? - # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch. - sslib_formats.UNIX_TIMESTAMP_SCHEMA.check_match(unix_timestamp) - - # Convert 'unix_timestamp' to a 'time.struct_time', in UTC. The Daylight - # Saving Time (DST) flag is set to zero. datetime.fromtimestamp() is not - # used because it returns a local datetime. - struct_time = time.gmtime(unix_timestamp) - - # Extract the (year, month, day, hour, minutes, seconds) arguments for the - # datetime object to be returned. - datetime_object = datetime.datetime(*struct_time[:6]) - - return datetime_object - - - -def format_base64(data): - """ - - Return the base64 encoding of 'data' with whitespace and '=' signs omitted. - - - data: - Binary or buffer of data to convert. - - - securesystemslib.exceptions.FormatError, if the base64 encoding fails or the - argument is invalid. - - - None. - - - A base64-encoded string. - """ - - try: - return binascii.b2a_base64(data).decode('utf-8').rstrip('=\n ') - - except (TypeError, binascii.Error) as e: - raise sslib_exceptions.FormatError('Invalid base64' - ' encoding: ' + str(e)) - - - - -def parse_base64(base64_string): - """ - - Parse a base64 encoding with whitespace and '=' signs omitted. - - - base64_string: - A string holding a base64 value. - - - securesystemslib.exceptions.FormatError, if 'base64_string' cannot be parsed - due to an invalid base64 encoding. - - - None. - - - A byte string representing the parsed base64 encoding of - 'base64_string'. - """ - - if not isinstance(base64_string, str): - message = 'Invalid argument: '+repr(base64_string) - raise sslib_exceptions.FormatError(message) - - extra = len(base64_string) % 4 - if extra: - padding = '=' * (4 - extra) - base64_string = base64_string + padding - - try: - return binascii.a2b_base64(base64_string.encode('utf-8')) - - except (TypeError, binascii.Error) as e: - raise sslib_exceptions.FormatError('Invalid base64' - ' encoding: ' + str(e)) - - - -def make_targets_fileinfo(length, hashes, custom=None): - """ - - Create a dictionary conformant to 'TARGETS_FILEINFO_SCHEMA'. - This dict describes a target file. - - - length: - An integer representing the size of the file. - - hashes: - A dict of hashes in 'HASHDICT_SCHEMA' format, which has the form: - {'sha256': '123df8a9b12', 'sha512': '324324dfc121', ...} - - custom: - An optional object providing additional information about the file. - - - securesystemslib.exceptions.FormatError, if the 'TARGETS_FILEINFO_SCHEMA' to be - returned does not have the correct format. - - - A dictionary conformant to 'TARGETS_FILEINFO_SCHEMA', representing the file - information of a target file. - """ - - fileinfo = {'length' : length, 'hashes' : hashes} - - if custom is not None: - fileinfo['custom'] = custom - - # Raise 'securesystemslib.exceptions.FormatError' if the check fails. - TARGETS_FILEINFO_SCHEMA.check_match(fileinfo) - - return fileinfo - - - -def make_metadata_fileinfo(version, length=None, hashes=None): - """ - - Create a dictionary conformant to 'METADATA_FILEINFO_SCHEMA'.
- This dict describes one of the metadata files used for timestamp and - snapshot roles. - - - version: - An integer representing the version of the file. - - length: - An optional integer representing the size of the file. - - hashes: - An optional dict of hashes in 'HASHDICT_SCHEMA' format, which has the form: - {'sha256': '123df8a9b12', 'sha512': '324324dfc121', ...} - - - - securesystemslib.exceptions.FormatError, if the 'METADATA_FILEINFO_SCHEMA' to be - returned does not have the correct format. - - - A dictionary conformant to 'METADATA_FILEINFO_SCHEMA', representing the file - information of a metadata file. - """ - - fileinfo = {'version' : version} - - if length: - fileinfo['length'] = length - - if hashes: - fileinfo['hashes'] = hashes - - # Raise 'securesystemslib.exceptions.FormatError' if the check fails. - METADATA_FILEINFO_SCHEMA.check_match(fileinfo) - - return fileinfo - - - -def make_versioninfo(version_number): - """ - - Create a dictionary conformant to 'VERSIONINFO_SCHEMA'. This dict - describes the version of a particular metadata role. - - - version_number: - An integer representing the version of a particular metadata role. - The dictionary returned by this function is expected to be included - in Snapshot metadata. - - - securesystemslib.exceptions.FormatError, if the dict to be returned does not - have the correct format (i.e., VERSIONINFO_SCHEMA). - - - None. - - - A dictionary conformant to 'VERSIONINFO_SCHEMA', containing the version - information of a metadata role. - """ - - versioninfo = {'version': version_number} - - # Raise 'securesystemslib.exceptions.FormatError' if 'versioninfo' is - # improperly formatted. - VERSIONINFO_SCHEMA.check_match(versioninfo) - - return versioninfo - - - - - -def expected_meta_rolename(meta_rolename): - """ - - Ensure 'meta_rolename' is properly formatted and return it normalized to - lower case. - 'Targets' is returned as 'targets'. - 'Targets Role1' is returned as 'targets role1'. - - - meta_rolename: - A string representing the rolename. - E.g., 'root', 'targets'. - - - securesystemslib.exceptions.FormatError, if 'meta_rolename' is improperly - formatted. - - - None. - - - A string (e.g., 'root', 'targets'). - """ - - # Does 'meta_rolename' have the correct type? - # This check ensures 'meta_rolename' conforms to - # 'securesystemslib.formats.NAME_SCHEMA'. - # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch. - sslib_formats.NAME_SCHEMA.check_match(meta_rolename) - - return meta_rolename.lower() - - - -def check_signable_object_format(signable): - """ - - Ensure 'signable' is properly formatted, conformant to - 'SIGNABLE_SCHEMA'. Return the signing role on - success. Note: The 'signed' field of a 'SIGNABLE_SCHEMA' is checked - against securesystemslib.schema.Any(). The 'signed' field, however, should - actually hold one of the supported role schemas (e.g., 'ROOT_SCHEMA', - 'TARGETS_SCHEMA'). The role schemas all differ in their format, so this - function determines exactly which schema is listed in the 'signed' field. - - - signable: - The signable object compared against 'SIGNABLE_SCHEMA'. - - - securesystemslib.exceptions.FormatError, if 'signable' does not have the - correct format. - - tuf.exceptions.UnsignedMetadataError, if 'signable' does not have any - signatures. - - - None. - - - A string representing the signing role (e.g., 'root', 'targets'). - The role string is returned with characters all lower case. - """ - - # Does 'signable' have the correct type?
- # This check ensures 'signable' conforms to - # 'SIGNABLE_SCHEMA'. - SIGNABLE_SCHEMA.check_match(signable) - - try: - role_type = signable['signed']['_type'] - - except (KeyError, TypeError) as error: - raise sslib_exceptions.FormatError('Untyped signable object.') from error - - try: - schema = SCHEMAS_BY_TYPE[role_type] - - except KeyError as error: - raise sslib_exceptions.FormatError('Unrecognized type ' - + repr(role_type)) from error - - if not signable['signatures']: - raise exceptions.UnsignedMetadataError('Signable object of type ' + - repr(role_type) + ' has no signatures', signable) - - # 'securesystemslib.exceptions.FormatError' raised if 'signable' does not - # have a properly formatted role schema. - schema.check_match(signable['signed']) - - return role_type.lower() - - - -if __name__ == '__main__': - # The interactive sessions of the documentation strings can - # be tested by running formats.py as a standalone module. - # python3 -B formats.py - import doctest - doctest.testmod() diff --git a/tuf/keydb.py b/tuf/keydb.py deleted file mode 100755 index e06571b06f..0000000000 --- a/tuf/keydb.py +++ /dev/null @@ -1,440 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2012 - 2017, New York University and the TUF contributors -# SPDX-License-Identifier: MIT OR Apache-2.0 - -""" - - keydb.py - - - Vladimir Diaz - - - March 21, 2012. Based on a previous version of this module by Geremy Condra. - - - See LICENSE-MIT OR LICENSE for licensing information. - - - Represent a collection of keys and their organization. This module ensures - the layout of the collection remains consistent and easily verifiable. - Provided are functions to add and delete keys from the database, retrieve a - single key, and assemble a collection from keys stored in TUF 'Root' Metadata. - The Update Framework process maintains a set of role info for multiple - repositories. - - RSA keys are currently supported and a collection of keys is organized as a - dictionary indexed by key ID. Key IDs are used as identifiers for keys - (e.g., RSA key). They are the hexadecimal representations of the hash of key - objects (specifically, the key object containing only the public key). See - 'rsa_key.py' and the '_get_keyid()' function to learn precisely how keyids - are generated. One may get the keyid of a key object by simply accessing the - dictionary's 'keyid' key (i.e., rsakey['keyid']). -""" - -import logging -import copy - -import securesystemslib # pylint: disable=unused-import -from securesystemslib import exceptions as sslib_exceptions -from securesystemslib import formats as sslib_formats -from securesystemslib import keys as sslib_keys - -from tuf import exceptions -from tuf import formats - -# List of strings representing the key types supported by TUF. -_SUPPORTED_KEY_TYPES = ['rsa', 'ed25519', 'ecdsa', 'ecdsa-sha2-nistp256'] - -# See 'log.py' to learn how logging is handled in TUF. -logger = logging.getLogger(__name__) - -# The key database. -_keydb_dict = {} -_keydb_dict['default'] = {} - - -def create_keydb_from_root_metadata(root_metadata, repository_name='default'): - """ - - Populate the key database with the unique keys found in 'root_metadata'. - The database dictionary will conform to - 'tuf.formats.KEYDB_SCHEMA' and have the form: {keyid: key, - ...}. The 'keyid' conforms to 'securesystemslib.formats.KEYID_SCHEMA' and - 'key' to its respective type. In the case of RSA keys, this object would - match 'RSAKEY_SCHEMA'. - - - root_metadata: - A dictionary conformant to 'tuf.formats.ROOT_SCHEMA'.
The keys found - in the 'keys' field of 'root_metadata' are needed by this function. - - repository_name: - The name of the repository to store the key information. If not supplied, - the key database is populated for the 'default' repository. - - - securesystemslib.exceptions.FormatError, if 'root_metadata' does not have the correct format. - - securesystemslib.exceptions.InvalidNameError, if 'repository_name' does not exist in the key - database. - - - A function to add the key to the database is called. In the case of RSA - keys, this function is add_key(). - - The old keydb key database is replaced. - - - None. - """ - - # Does 'root_metadata' have the correct format? - # This check will ensure 'root_metadata' has the appropriate number of objects - # and object types, and that all dict keys are properly named. - # Raise 'securesystemslib.exceptions.FormatError' if the check fails. - formats.ROOT_SCHEMA.check_match(root_metadata) - - # Does 'repository_name' have the correct format? - sslib_formats.NAME_SCHEMA.check_match(repository_name) - - # Clear the key database for 'repository_name', or create it if non-existent. - if repository_name in _keydb_dict: - _keydb_dict[repository_name].clear() - - else: - create_keydb(repository_name) - - # Iterate the keys found in 'root_metadata' by converting them to - # 'RSAKEY_SCHEMA' if their type is 'rsa', and then adding them to the - # key database using the provided keyid. - for keyid, key_metadata in root_metadata['keys'].items(): - if key_metadata['keytype'] in _SUPPORTED_KEY_TYPES: - # 'key_metadata' is stored in 'KEY_SCHEMA' format. Call - # create_from_metadata_format() to get the key in 'RSAKEY_SCHEMA' format, - # which is the format expected by 'add_key()'. Note: This call to - # format_metadata_to_key() uses the provided keyid as the default keyid. - # All other keyids returned are ignored. - - key_dict, _ = sslib_keys.format_metadata_to_key(key_metadata, - keyid) - - # Make sure to update key_dict['keyid'] to use one of the other valid - # keyids, otherwise add_key() will have no reference to it. - try: - add_key(key_dict, repository_name=repository_name) - - # Although keyid duplicates should *not* occur (unique dict keys), log a - # warning and continue. However, 'key_dict' may have already been - # added to the keydb elsewhere. - except exceptions.KeyAlreadyExistsError as e: # pragma: no cover - logger.warning(e) - continue - - else: - logger.warning('Root Metadata file contains a key with an invalid keytype.') - - - - - -def create_keydb(repository_name): - """ - - Create a key database for a non-default repository named 'repository_name'. - - - repository_name: - The name of the repository. An empty key database is created, and keys - may be added to it via add_key(key_dict, keyid, repository_name). - - - securesystemslib.exceptions.FormatError, if 'repository_name' is improperly formatted. - - securesystemslib.exceptions.InvalidNameError, if 'repository_name' already exists. - - - None. - - - None. - """ - - # Is 'repository_name' properly formatted? Raise 'securesystemslib.exceptions.FormatError' if not. - sslib_formats.NAME_SCHEMA.check_match(repository_name) - - if repository_name in _keydb_dict: - raise sslib_exceptions.InvalidNameError('Repository name already exists:' - ' ' + repr(repository_name)) - - _keydb_dict[repository_name] = {} - - - - - -def remove_keydb(repository_name): - """ - - Remove a key database for a non-default repository named 'repository_name'. - The 'default' repository cannot be removed.
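- - Example (illustrative): - - >>> create_keydb('example_repo') - >>> remove_keydb('example_repo')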
- - - repository_name: - The name of the repository to remove. The 'default' repository should - not be removed, so 'repository_name' cannot be 'default'. - - - securesystemslib.exceptions.FormatError, if 'repository_name' is improperly formatted. - - securesystemslib.exceptions.InvalidNameError, if 'repository_name' is 'default'. - - - None. - - - None. - """ - - # Is 'repository_name' properly formatted? Raise 'securesystemslib.exceptions.FormatError' if not. - sslib_formats.NAME_SCHEMA.check_match(repository_name) - - if repository_name not in _keydb_dict: - logger.warning('Repository name does not exist: ' + repr(repository_name)) - return - - if repository_name == 'default': - raise sslib_exceptions.InvalidNameError('Cannot remove the default repository:' - ' ' + repr(repository_name)) - - del _keydb_dict[repository_name] - - - - -def add_key(key_dict, keyid=None, repository_name='default'): - """ - - Add 'key_dict' to the key database while avoiding duplicates. - If keyid is provided, verify it is the correct keyid for 'key_dict' - and raise an exception if it is not. - - - key_dict: - A dictionary conformant to 'securesystemslib.formats.ANYKEY_SCHEMA'. - It has the form: - - {'keytype': 'rsa', - 'keyid': keyid, - 'keyval': {'public': '-----BEGIN RSA PUBLIC KEY----- ...', - 'private': '-----BEGIN RSA PRIVATE KEY----- ...'}} - - keyid: - An object conformant to 'KEYID_SCHEMA'. It is used as an identifier - for RSA keys. - - repository_name: - The name of the repository to add the key. If not supplied, the key is - added to the 'default' repository. - - - securesystemslib.exceptions.FormatError, if the arguments do not have the correct format. - - securesystemslib.exceptions.Error, if 'keyid' does not match the keyid for 'key_dict'. - - tuf.exceptions.KeyAlreadyExistsError, if 'key_dict' is found in the key database. - - securesystemslib.exceptions.InvalidNameError, if 'repository_name' does not exist in the key - database. - - - The keydb key database is modified. - - - None. - """ - - # Does 'key_dict' have the correct format? - # This check will ensure 'key_dict' has the appropriate number of objects - # and object types, and that all dict keys are properly named. - # Raise 'securesystemslib.exceptions.FormatError' if the check fails. - sslib_formats.ANYKEY_SCHEMA.check_match(key_dict) - - # Does 'repository_name' have the correct format? - sslib_formats.NAME_SCHEMA.check_match(repository_name) - - # Does 'keyid' have the correct format? - if keyid is not None: - # Raise 'securesystemslib.exceptions.FormatError' if the check fails. - sslib_formats.KEYID_SCHEMA.check_match(keyid) - - # Check if the keyid of 'key_dict' matches 'keyid'. - if keyid != key_dict['keyid']: - raise sslib_exceptions.Error('Incorrect keyid. Got ' + key_dict['keyid'] + ' but expected ' + keyid) - - # Ensure 'repository_name' is actually set in the key database. - if repository_name not in _keydb_dict: - raise sslib_exceptions.InvalidNameError('Repository name does not exist:' - ' ' + repr(repository_name)) - - # Check if the keyid belonging to 'key_dict' is not already - # available in the key database before returning. - keyid = key_dict['keyid'] - if keyid in _keydb_dict[repository_name]: - raise exceptions.KeyAlreadyExistsError('Key: ' + keyid) - - _keydb_dict[repository_name][keyid] = copy.deepcopy(key_dict) - - - - - -def get_key(keyid, repository_name='default'): - """ - - Return the key belonging to 'keyid'.
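- - Example (illustrative; assumes an Ed25519 key with this abbreviated keyid - was previously added with add_key): - - >>> key = get_key('1a2b...') - >>> key['keytype'] - 'ed25519'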
- - - keyid: - An object conformant to 'securesystemslib.formats.KEYID_SCHEMA'. It is used as an - identifier for keys. - - repository_name: - The name of the repository from which to get the key. If not supplied, the key is - retrieved from the 'default' repository. - - - securesystemslib.exceptions.FormatError, if the arguments do not have the correct format. - - tuf.exceptions.UnknownKeyError, if 'keyid' is not found in the keydb database. - - securesystemslib.exceptions.InvalidNameError, if 'repository_name' does not exist in the key - database. - - - None. - - - The key matching 'keyid'. In the case of RSA keys, a dictionary conformant - to 'securesystemslib.formats.RSAKEY_SCHEMA' is returned. - """ - - # Does 'keyid' have the correct format? - # This check will ensure 'keyid' has the appropriate number of objects - # and object types, and that all dict keys are properly named. - # Raise 'securesystemslib.exceptions.FormatError' if the match fails. - sslib_formats.KEYID_SCHEMA.check_match(keyid) - - # Does 'repository_name' have the correct format? - sslib_formats.NAME_SCHEMA.check_match(repository_name) - - if repository_name not in _keydb_dict: - raise sslib_exceptions.InvalidNameError('Repository name does not exist:' - ' ' + repr(repository_name)) - - # Return the key belonging to 'keyid', if found in the key database. - try: - return copy.deepcopy(_keydb_dict[repository_name][keyid]) - - except KeyError as error: - raise exceptions.UnknownKeyError('Key: ' + keyid) from error - - - - - -def remove_key(keyid, repository_name='default'): - """ - - Remove the key belonging to 'keyid'. - - - keyid: - An object conformant to 'securesystemslib.formats.KEYID_SCHEMA'. It is used as an - identifier for keys. - - repository_name: - The name of the repository from which to remove the key. If not supplied, the key - is removed from the 'default' repository. - - - securesystemslib.exceptions.FormatError, if the arguments do not have the correct format. - - tuf.exceptions.UnknownKeyError, if 'keyid' is not found in key database. - - securesystemslib.exceptions.InvalidNameError, if 'repository_name' does not exist in the key - database. - - - The key, identified by 'keyid', is deleted from the key database. - - - None. - """ - - # Does 'keyid' have the correct format? - # This check will ensure 'keyid' has the appropriate number of objects - # and object types, and that all dict keys are properly named. - # Raise 'securesystemslib.exceptions.FormatError' if the match fails. - sslib_formats.KEYID_SCHEMA.check_match(keyid) - - # Does 'repository_name' have the correct format? - sslib_formats.NAME_SCHEMA.check_match(repository_name) - - if repository_name not in _keydb_dict: - raise sslib_exceptions.InvalidNameError('Repository name does not exist:' - ' ' + repr(repository_name)) - - # Remove the key belonging to 'keyid' if found in the key database. - if keyid in _keydb_dict[repository_name]: - del _keydb_dict[repository_name][keyid] - - else: - raise exceptions.UnknownKeyError('Key: ' + keyid) - - - - - -def clear_keydb(repository_name='default', clear_all=False): - - """ - - Clear the keydb key database. - - - repository_name: - The name of the repository whose key database should be cleared. If not - supplied, the key database is cleared for the 'default' repository. - - clear_all: - Boolean indicating whether to clear the entire keydb. - - - securesystemslib.exceptions.FormatError, if 'repository_name' is improperly formatted.
- - securesystemslib.exceptions.InvalidNameError, if 'repository_name' does not exist in the key - database. - - - The keydb key database is reset. - - - None. - """ - - # Do the arguments have the correct format? Raise 'securesystemslib.exceptions.FormatError' if - # 'repository_name' is improperly formatted. - sslib_formats.NAME_SCHEMA.check_match(repository_name) - sslib_formats.BOOLEAN_SCHEMA.check_match(clear_all) - - if clear_all: - _keydb_dict.clear() - _keydb_dict['default'] = {} - - if repository_name not in _keydb_dict: - raise sslib_exceptions.InvalidNameError('Repository name does not exist:' - ' ' + repr(repository_name)) - - _keydb_dict[repository_name] = {} diff --git a/tuf/log.py b/tuf/log.py deleted file mode 100755 index f9ae6c7721..0000000000 --- a/tuf/log.py +++ /dev/null @@ -1,448 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2012 - 2017, New York University and the TUF contributors -# SPDX-License-Identifier: MIT OR Apache-2.0 - -""" - - log.py - - - Vladimir Diaz - - - April 4, 2012. Based on a previous version of this module by Geremy Condra. - - - See LICENSE-MIT OR LICENSE for licensing information. - - - A central location for all logging-related configuration. This module should - be imported once by the main program. If other modules wish to incorporate - 'tuf' logging, they should do the following: - - import logging - logger = logging.getLogger('tuf') - - 'logging' refers to the module name. logging.getLogger() is a function of - the module 'logging'. logging.getLogger(name) returns a Logger instance - associated with 'name'. Calling getLogger(name) will always return the same - instance. In this 'log.py' module, we perform the initial setup for the name - 'tuf'. The 'log.py' module should only be imported once by the main program. - When any other module does a logging.getLogger('tuf'), it is referring to the - same 'tuf' instance, and its associated settings, set here in 'log.py'. - See http://docs.python.org/library/logging.html#logger-objects for more - information. - - We use multiple handlers to process log messages in various ways and to - configure each one independently. Instead of using one single manner of - processing log messages, we can use two built-in handlers that have already - been configured for us. For example, the built-in FileHandler will catch - log messages and dump them to a file. If we wanted, we could set this file - handler to only catch CRITICAL (and greater) messages and save them to a - file. Other handlers (e.g., StreamHandler) could handle INFO-level - (and greater) messages. - - Logging Levels: - - --Level-- --Value-- - logging.CRITICAL 50 - logging.ERROR 40 - logging.WARNING 30 - logging.INFO 20 - logging.DEBUG 10 - logging.NOTSET 0 - - The logging module is thread-safe. Logging to a single file from - multiple threads in a single process is also thread-safe. The logging - module is NOT thread-safe when logging to a single file across multiple - processes: - http://docs.python.org/library/logging.html#thread-safety - http://docs.python.org/howto/logging-cookbook.html -""" - -import logging -import time - -from securesystemslib import exceptions as sslib_exceptions -from securesystemslib import formats as sslib_formats - -from tuf import exceptions -from tuf import settings - - -# Setting a handler's log level filters only logging messages of that level -# (and above). 
For example, setting the built-in StreamHandler's log level to -# 'logging.WARNING' will cause the stream handler to only process messages -# of levels: WARNING, ERROR, and CRITICAL. -_DEFAULT_LOG_LEVEL = logging.DEBUG -_DEFAULT_CONSOLE_LOG_LEVEL = logging.INFO -_DEFAULT_FILE_LOG_LEVEL = logging.DEBUG - -# Set the format for logging messages. -# Example format for '_FORMAT_STRING': -# [2013-08-13 15:21:18,068 localtime] [tuf] -# [INFO][_update_metadata:851@updater.py] -_FORMAT_STRING = '[%(asctime)s UTC] [%(name)s] [%(levelname)s] '+\ - '[%(funcName)s:%(lineno)s@%(filename)s]\n%(message)s\n' - -# Ask all Formatter instances to talk GMT. Set the 'converter' attribute of -# 'logging.Formatter' so that all formatters use Greenwich Mean Time. -# http://docs.python.org/library/logging.html#logging.Formatter.formatTime -# The 2nd paragraph in the link above contains the relevant information. -# GMT = UTC (Coordinated Universal Time). TUF metadata stores timestamps in UTC. -# We previously displayed the local time but this lead to confusion when -# visually comparing logger events and metadata information. Unix time stamps -# are fine but they may be less human-readable than UTC. -logging.Formatter.converter = time.gmtime -formatter = logging.Formatter(_FORMAT_STRING) - -# Set the handlers for the logger. The console handler is unset by default. A -# module importing 'log.py' should explicitly set the console handler if -# outputting log messages to the screen is needed. Adding a console handler can -# be done with tuf.log.add_console_handler(). Logging messages to a file is not -# set by default. -console_handler = None -file_handler = None - -# Set the logger and its settings. -# Note: we're configuring the top-level hierarchy for the tuf package, -# therefore we explicitly request the 'tuf' logger, rather than following -# the standard pattern of logging.getLogger(__name__) -logger = logging.getLogger('tuf') -logger.setLevel(_DEFAULT_LOG_LEVEL) -logger.addHandler(logging.NullHandler()) - -# Set the built-in file handler. Messages will be logged to -# 'settings.LOG_FILENAME', and only those messages with a log level of -# '_DEFAULT_LOG_LEVEL'. The log level of messages handled by 'file_handler' -# may be modified with 'set_filehandler_log_level()'. 'settings.LOG_FILENAME' -# will be opened in append mode. -if settings.ENABLE_FILE_LOGGING: - file_handler = logging.FileHandler(settings.LOG_FILENAME) - file_handler.setLevel(_DEFAULT_FILE_LOG_LEVEL) - file_handler.setFormatter(formatter) - logger.addHandler(file_handler) - -else: - pass - -# Silently ignore logger exceptions. -logging.raiseExceptions = False - - - - - -class ConsoleFilter(logging.Filter): - def filter(self, record): - """ - - Use Vinay Sajip's recommendation from Python issue #6435 to modify a - LogRecord object. This is meant to be used with our console handler. - - http://stackoverflow.com/q/6177520 - http://stackoverflow.com/q/5875225 - http://bugs.python.org/issue6435 - http://docs.python.org/howto/logging-cookbook.html#filters-contextual - http://docs.python.org/library/logging.html#logrecord-attributes - - - record: - A logging.LogRecord object. - - - None. - - - Replaces the LogRecord exception text attribute. - - - True. - """ - - # If this LogRecord object has an exception, then we will replace its text. - if record.exc_info: - # We place the record's cached exception text (which usually contains the - # exception traceback) with much simpler exception information. 
This is
-      # most useful for the console handler, which we do not wish to deluge
-      # with too much data.  Assuming that this filter is not applied to the
-      # file logging handler, the user may always consult the file log for the
-      # original exception traceback.  The exc_info is explained here:
-      # http://docs.python.org/library/sys.html#sys.exc_info
-      exc_type, _, _ = record.exc_info
-
-      # Simply set the class name as the exception text.
-      record.exc_text = exc_type.__name__
-
-    # Always return True to signal that any given record must be formatted.
-    return True
-
-
-
-
-
-def set_log_level(log_level: int=_DEFAULT_LOG_LEVEL):
-  """
-  <Purpose>
-    Allow the default log level to be overridden.  If 'log_level' is not
-    provided, the log level defaults to 'logging.DEBUG'.
-
-  <Arguments>
-    log_level:
-      The log level to set for the 'tuf' logger.
-      'log_level' examples: logging.INFO; logging.CRITICAL.
-
-  <Exceptions>
-    None.
-
-  <Side Effects>
-    Overrides the logging level for the 'tuf' logger.
-
-  <Returns>
-    None.
-  """
-
-  # Does 'log_level' have the correct format?
-  # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
-  sslib_formats.LOGLEVEL_SCHEMA.check_match(log_level)
-
-  logger.setLevel(log_level)
-
-
-
-
-
-def set_filehandler_log_level(log_level=_DEFAULT_FILE_LOG_LEVEL):
-  """
-  <Purpose>
-    Allow the default file handler log level to be overridden.  If 'log_level'
-    is not provided, the log level defaults to 'logging.DEBUG'.
-
-  <Arguments>
-    log_level:
-      The log level to set for the 'log.py' file handler.
-      'log_level' examples: logging.INFO; logging.CRITICAL.
-
-  <Exceptions>
-    tuf.exceptions.Error, if the file handler has not been set.
-
-  <Side Effects>
-    Overrides the logging level for the 'log.py' file handler.
-
-  <Returns>
-    None.
-  """
-
-  # Does 'log_level' have the correct format?
-  # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
-  sslib_formats.LOGLEVEL_SCHEMA.check_match(log_level)
-
-  if file_handler:
-    file_handler.setLevel(log_level)
-
-  else:
-    raise exceptions.Error(
-        'File handler has not been set.  Enable file logging'
-        ' before attempting to set its log level')
-
-
-
-
-
-def set_console_log_level(log_level=_DEFAULT_CONSOLE_LOG_LEVEL):
-  """
-  <Purpose>
-    Allow the default log level for console messages to be overridden.  If
-    'log_level' is not provided, the log level defaults to 'logging.INFO'.
-
-  <Arguments>
-    log_level:
-      The log level to set for the console handler.
-      'log_level' examples: logging.INFO; logging.CRITICAL.
-
-  <Exceptions>
-    securesystemslib.exceptions.Error, if the 'log.py' console handler has not
-    been set yet with add_console_handler().
-
-  <Side Effects>
-    Overrides the logging level for the console handler.
-
-  <Returns>
-    None.
-  """
-
-  # Does 'log_level' have the correct format?
-  # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
-  sslib_formats.LOGLEVEL_SCHEMA.check_match(log_level)
-
-  if console_handler is not None:
-    console_handler.setLevel(log_level)
-
-  else:
-    message = 'The console handler has not been set with add_console_handler().'
-    raise sslib_exceptions.Error(message)
-
-
-
-
-
-def add_console_handler(log_level=_DEFAULT_CONSOLE_LOG_LEVEL):
-  """
-  <Purpose>
-    Add a console handler and set its log level to 'log_level'.
-
-  <Arguments>
-    log_level:
-      The log level to set for the console handler.
-      'log_level' examples: logging.INFO; logging.CRITICAL.
-
-  <Exceptions>
-    None.
-
-  <Side Effects>
-    Adds a console handler to the 'log.py' logger and sets its logging level to
-    'log_level'.
-
-  <Returns>
-    None.
-  """
-
-  # Does 'log_level' have the correct format?
-  # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
-  sslib_formats.LOGLEVEL_SCHEMA.check_match(log_level)
-
-  # Assign to the global console_handler object.
- global console_handler - - if not console_handler: - # Set the console handler for the logger. The built-in console handler will - # log messages to 'sys.stderr' and capture 'log_level' messages. - console_handler = logging.StreamHandler() - - # Get our filter for the console handler. - console_filter = ConsoleFilter() - console_format_string = '%(message)s' - console_formatter = logging.Formatter(console_format_string) - - console_handler.setLevel(log_level) - console_handler.setFormatter(console_formatter) - console_handler.addFilter(console_filter) - logger.addHandler(console_handler) - logger.debug('Added a console handler.') - - else: - logger.warning('We already have a console handler.') - - - - - -def remove_console_handler(): - """ - - Remove the console handler from the logger in 'log.py', if previously added. - - - None. - - - None. - - - A handler belonging to the console is removed from the 'log.py' logger - and the console handler is marked as unset. - - - - None. - """ - - # Assign to the global 'console_handler' object. - global console_handler - - if console_handler: - logger.removeHandler(console_handler) - console_handler = None - logger.debug('Removed a console handler.') - - else: - logger.warning('We do not have a console handler.') - - - -def enable_file_logging(log_filename=settings.LOG_FILENAME): - """ - - Log messages to a file (i.e., 'log_filename'). The log level for the file - handler can be set with set_filehandler_log_level(). - - - log_filename: - Logging messages are saved to this file. If not provided, the log - filename specified in tuf.settings.LOG_FILENAME is used. - - - securesystemslib.exceptions.FormatError, if any of the arguments are - not the expected format. - - tuf.exceptions.Error, if the file handler has already been set. - - - The global file handler is set. - - - None. - """ - - # Are the arguments properly formatted? - sslib_formats.PATH_SCHEMA.check_match(log_filename) - - global file_handler - - # Add a file handler to the logger if not already set. - if not file_handler: - file_handler = logging.FileHandler(log_filename) - file_handler.setLevel(_DEFAULT_FILE_LOG_LEVEL) - file_handler.setFormatter(formatter) - logger.addHandler(file_handler) - - else: - raise exceptions.Error( - 'The file handler has already been been set. A new file handler' - ' can be set by first calling disable_file_logging()') - - - -def disable_file_logging(): - """ - - Disable file logging by removing any previously set file handler. - A warning is logged if the file handler cannot be removed. - - The file that was written to will not be deleted. - - - None. - - - None. - - - The global file handler is unset. - - - None. - """ - - # Assign to the global 'file_handler' object. - global file_handler - - if file_handler: - logger.removeHandler(file_handler) - file_handler.close() - file_handler = None - logger.debug('Removed the file handler.') - - else: - logger.warning('A file handler has not been set.') diff --git a/tuf/mirrors.py b/tuf/mirrors.py deleted file mode 100755 index c7662d3eec..0000000000 --- a/tuf/mirrors.py +++ /dev/null @@ -1,122 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2012 - 2017, New York University and the TUF contributors -# SPDX-License-Identifier: MIT OR Apache-2.0 - -""" - - mirrors.py - - - Konstantin Andrianov. - Derived from original mirrors.py written by Geremy Condra. - - - March 12, 2012. - - - See LICENSE-MIT OR LICENSE for licensing information. 
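Taken together, the handler API in tuf/log.py above was typically driven as follows. This is a hedged sketch: the log file name is illustrative, and it assumes file logging was not already enabled via `settings.ENABLE_FILE_LOGGING`.

```python
import logging

import tuf.log

# The console handler defaults to logging.INFO; tighten it afterwards.
tuf.log.add_console_handler()
tuf.log.set_console_log_level(logging.WARNING)

# Route DEBUG-and-above messages to a file of our choosing (raises
# tuf.exceptions.Error if a file handler is already set).
tuf.log.enable_file_logging('my-tuf.log')

logging.getLogger('tuf').warning('Reaches both the console and the file.')
```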
-
-  <Purpose>
-    Extract a list of mirror urls corresponding to the file type and the location
-    of the file with respect to the base url.
-"""
-
-import os
-from urllib import parse
-
-import securesystemslib # pylint: disable=unused-import
-from securesystemslib import exceptions as sslib_exceptions
-from securesystemslib import formats as sslib_formats
-from securesystemslib.util import file_in_confined_directories
-
-from tuf import formats
-
-
-# The type of file to be downloaded from a repository.  The
-# 'get_list_of_mirrors' function supports these file types.
-_SUPPORTED_FILE_TYPES = ['meta', 'target']
-
-
-def get_list_of_mirrors(file_type, file_path, mirrors_dict):
-  """
-  <Purpose>
-    Get a list of mirror urls from a mirrors dictionary, provided the type
-    and the path of the file with respect to the base url.
-
-  <Arguments>
-    file_type:
-      Type of data needed for download; it must correspond to one of the strings
-      in the list ['meta', 'target'].  'meta' is for the metadata file type and
-      'target' is for the target file type.  It should correspond to the
-      NAME_SCHEMA format.
-
-    file_path:
-      A relative path to the file that corresponds to the RELPATH_SCHEMA format.
-      Ex: 'http://url_prefix/targets_path/file_path'
-
-    mirrors_dict:
-      A mirrors_dict object that corresponds to MIRRORDICT_SCHEMA, where
-      keys are strings and values are MIRROR_SCHEMA.  An example format
-      of MIRROR_SCHEMA:
-
-      {'url_prefix': 'http://localhost:8001',
-       'metadata_path': 'metadata/',
-       'targets_path': 'targets/',
-       'confined_target_dirs': ['targets/snapshot1/', ...],
-       'custom': {...}}
-
-      The 'custom' field is optional.
-
-  <Exceptions>
-    securesystemslib.exceptions.Error, on an unsupported 'file_type'.
-
-    securesystemslib.exceptions.FormatError, on a bad argument.
-
-  <Returns>
-    A list of mirror urls corresponding to the file_type and file_path.  If no
-    match is found, an empty list is returned.
-  """
-
-  # Check that all the arguments have the appropriate format.
-  formats.RELPATH_SCHEMA.check_match(file_path)
-  formats.MIRRORDICT_SCHEMA.check_match(mirrors_dict)
-  sslib_formats.NAME_SCHEMA.check_match(file_type)
-
-  # Verify that 'file_type' is supported.
-  if file_type not in _SUPPORTED_FILE_TYPES:
-    raise sslib_exceptions.Error('Invalid file_type argument.'
-        ' Supported file types: ' + repr(_SUPPORTED_FILE_TYPES))
-  path_key = 'metadata_path' if file_type == 'meta' else 'targets_path'
-
-  list_of_mirrors = []
-  for junk, mirror_info in mirrors_dict.items():
-    # Does the mirror serve this file type at all?
-    path = mirror_info.get(path_key)
-    if path is None:
-      continue
-
-    # For targets, ensure directory confinement.
-    if path_key == 'targets_path':
-      full_filepath = os.path.join(path, file_path)
-      confined_target_dirs = mirror_info.get('confined_target_dirs')
-      # confined_target_dirs is optional and can be used to confine the client to
-      # certain paths on a repository mirror when fetching target files.
-      if confined_target_dirs and not file_in_confined_directories(full_filepath,
-          confined_target_dirs):
-        continue
-
-    # parse.quote(string) replaces special characters in string using the %xx
-    # escape.  This is done to avoid parsing issues of the URL on the server
-    # side.  Do *NOT* pass URLs with Unicode characters without first encoding
-    # the URL as UTF-8.  We need a long-term solution with #61.
-    # http://bugs.python.org/issue1712522
-    file_path = parse.quote(file_path)
-    url = os.path.join(mirror_info['url_prefix'], path, file_path)
-
-    # The above os.path.join() result, as well as the input file_path, may be
-    # invalid on Windows (it might contain both separator types); see #1077.
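A hedged sketch of calling `get_list_of_mirrors()` as documented above; the mirror URL and paths are illustrative.

```python
from tuf import mirrors

# One mirror serving both metadata and targets; 'custom' is omitted.
mirrors_dict = {
    'mirror1': {'url_prefix': 'http://localhost:8001',
                'metadata_path': 'metadata/',
                'targets_path': 'targets/',
                'confined_target_dirs': ['']}}

# Metadata lookups use 'metadata_path'; target lookups use 'targets_path'
# and honor 'confined_target_dirs'.
urls = mirrors.get_list_of_mirrors('meta', 'snapshot.json', mirrors_dict)
# urls == ['http://localhost:8001/metadata/snapshot.json']
```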
- # Make sure the URL doesn't contain backward slashes on Windows. - list_of_mirrors.append(url.replace('\\', '/')) - - return list_of_mirrors diff --git a/tuf/repository_lib.py b/tuf/repository_lib.py deleted file mode 100644 index 642447d8b3..0000000000 --- a/tuf/repository_lib.py +++ /dev/null @@ -1,2306 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2014 - 2017, New York University and the TUF contributors -# SPDX-License-Identifier: MIT OR Apache-2.0 - -""" - - repository_lib.py - - - Vladimir Diaz - - - June 1, 2014. - - - See LICENSE-MIT OR LICENSE for licensing information. - - - Provide a library for the repository tool that can create a TUF repository. - The repository tool can be used with the Python interpreter in interactive - mode, or imported directly into a Python module. See 'tuf/README' for the - complete guide to using 'tuf.repository_tool.py'. -""" - -import os -import errno -import time -import logging -import shutil -import json -import tempfile - -import securesystemslib # pylint: disable=unused-import -from securesystemslib import exceptions as sslib_exceptions -from securesystemslib import formats as sslib_formats -from securesystemslib import hash as sslib_hash -from securesystemslib import interface as sslib_interface -from securesystemslib import keys as sslib_keys -from securesystemslib import util as sslib_util -from securesystemslib import storage as sslib_storage - -from tuf import exceptions -from tuf import formats -from tuf import keydb -from tuf import log -from tuf import roledb -from tuf import settings -from tuf import sig - - -# See 'log.py' to learn how logging is handled in TUF. -logger = logging.getLogger(__name__) - -# The extension of TUF metadata. -METADATA_EXTENSION = '.json' - -# The targets and metadata directory names. Metadata files are written -# to the staged metadata directory instead of the "live" one. -METADATA_STAGED_DIRECTORY_NAME = 'metadata.staged' -METADATA_DIRECTORY_NAME = 'metadata' -TARGETS_DIRECTORY_NAME = 'targets' - -# The metadata filenames of the top-level roles. -ROOT_FILENAME = 'root' + METADATA_EXTENSION -TARGETS_FILENAME = 'targets' + METADATA_EXTENSION -SNAPSHOT_FILENAME = 'snapshot' + METADATA_EXTENSION -TIMESTAMP_FILENAME = 'timestamp' + METADATA_EXTENSION - -# Log warning when metadata expires in n days, or less. -# root = 1 month, snapshot = 1 day, targets = 10 days, timestamp = 1 day. -ROOT_EXPIRES_WARN_SECONDS = 2630000 -SNAPSHOT_EXPIRES_WARN_SECONDS = 86400 -TARGETS_EXPIRES_WARN_SECONDS = 864000 -TIMESTAMP_EXPIRES_WARN_SECONDS = 86400 - -# Supported key types. -SUPPORTED_KEY_TYPES = ['rsa', 'ed25519', 'ecdsa', 'ecdsa-sha2-nistp256'] - -# The algorithm used by the repository to generate the path hash prefixes -# of hashed bin delegations. Please see delegate_hashed_bins() -HASH_FUNCTION = settings.DEFAULT_HASH_ALGORITHM - - - - -def _generate_and_write_metadata(rolename, metadata_filename, - targets_directory, metadata_directory, storage_backend, - consistent_snapshot=False, filenames=None, allow_partially_signed=False, - increment_version_number=True, repository_name='default', - use_existing_fileinfo=False, use_timestamp_length=True, - use_timestamp_hashes=True, use_snapshot_length=False, - use_snapshot_hashes=False): - """ - Non-public function that can generate and write the metadata for the - specified 'rolename'. It also increments the version number of 'rolename' if - the 'increment_version_number' argument is True. 
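As a quick sanity check on the expiry-warning constants defined above, a hedged aside (86400 seconds per day):

```python
# Approximate warning windows implied by the *_EXPIRES_WARN_SECONDS values.
SECONDS_PER_DAY = 86400
print(2630000 / SECONDS_PER_DAY)  # root: ~30.4 days, i.e. about one month
print(864000 / SECONDS_PER_DAY)   # targets: 10 days
print(86400 / SECONDS_PER_DAY)    # snapshot and timestamp: 1 day
```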
- """ - - metadata = None - - # Retrieve the roleinfo of 'rolename' to extract the needed metadata - # attributes, such as version number, expiration, etc. - roleinfo = roledb.get_roleinfo(rolename, repository_name) - previous_keyids = roleinfo.get('previous_keyids', []) - previous_threshold = roleinfo.get('previous_threshold', 1) - signing_keyids = sorted(set(roleinfo['signing_keyids'])) - - # Generate the appropriate role metadata for 'rolename'. - if rolename == 'root': - metadata = generate_root_metadata(roleinfo['version'], roleinfo['expires'], - consistent_snapshot, repository_name) - - _log_warning_if_expires_soon(ROOT_FILENAME, roleinfo['expires'], - ROOT_EXPIRES_WARN_SECONDS) - - - - elif rolename == 'snapshot': - metadata = generate_snapshot_metadata(metadata_directory, - roleinfo['version'], roleinfo['expires'], - storage_backend, consistent_snapshot, repository_name, - use_length=use_snapshot_length, use_hashes=use_snapshot_hashes) - - - _log_warning_if_expires_soon(SNAPSHOT_FILENAME, roleinfo['expires'], - SNAPSHOT_EXPIRES_WARN_SECONDS) - - elif rolename == 'timestamp': - # If filenames don't have "snapshot_filename" key, defaults to "snapshot.json" - snapshot_file_path = (filenames and filenames['snapshot']) \ - or SNAPSHOT_FILENAME - - metadata = generate_timestamp_metadata(snapshot_file_path, roleinfo['version'], - roleinfo['expires'], storage_backend, repository_name, - use_length=use_timestamp_length, use_hashes=use_timestamp_hashes) - - _log_warning_if_expires_soon(TIMESTAMP_FILENAME, roleinfo['expires'], - TIMESTAMP_EXPIRES_WARN_SECONDS) - - # All other roles are either the top-level 'targets' role, or - # a delegated role. - else: - # Only print a warning if the top-level 'targets' role expires soon. - if rolename == 'targets': - _log_warning_if_expires_soon(TARGETS_FILENAME, roleinfo['expires'], - TARGETS_EXPIRES_WARN_SECONDS) - - # Don't hash-prefix consistent target files if they are handled out of band - consistent_targets = consistent_snapshot and not use_existing_fileinfo - - metadata = generate_targets_metadata(targets_directory, - roleinfo['paths'], roleinfo['version'], roleinfo['expires'], - roleinfo['delegations'], consistent_targets, use_existing_fileinfo, - storage_backend, repository_name) - - # Update roledb with the latest delegations info collected during - # generate_targets_metadata() - roledb.update_roleinfo(rolename, roleinfo, - repository_name=repository_name) - - - # Before writing 'rolename' to disk, automatically increment its version - # number (if 'increment_version_number' is True) so that the caller does not - # have to manually perform this action. The version number should be - # incremented in both the metadata file and roledb (required so that Snapshot - # references the latest version). - - # Store the 'current_version' in case the version number must be restored - # (e.g., if 'rolename' cannot be written to disk because its metadata is not - # properly signed). - current_version = metadata['version'] - if increment_version_number: - roleinfo = roledb.get_roleinfo(rolename, repository_name) - metadata['version'] = metadata['version'] + 1 - roleinfo['version'] = roleinfo['version'] + 1 - roledb.update_roleinfo(rolename, roleinfo, - repository_name=repository_name) - - else: - logger.debug('Not incrementing ' + repr(rolename) + '\'s version number.') - - if rolename in roledb.TOP_LEVEL_ROLES and not allow_partially_signed: - # Verify that the top-level 'rolename' is fully signed. 
Only a delegated - # role should not be written to disk without full verification of its - # signature(s), since it can only be considered fully signed depending on - # the delegating role. - signable = sign_metadata(metadata, signing_keyids, metadata_filename, - repository_name) - - - def should_write(): - # Root must be signed by its previous keys and threshold. - if rolename == 'root' and len(previous_keyids) > 0: - if not sig.verify(signable, rolename, repository_name, - previous_threshold, previous_keyids): - return False - - else: - logger.debug('Root is signed by a threshold of its previous keyids.') - - # In the normal case, we should write metadata if the threshold is met. - return sig.verify(signable, rolename, repository_name, - roleinfo['threshold'], roleinfo['signing_keyids']) - - - if should_write(): - _remove_invalid_and_duplicate_signatures(signable, repository_name) - - # Root should always be written as if consistent_snapshot is True (i.e., - # write .root.json and root.json to disk). - if rolename == 'root': - consistent_snapshot = True - filename = write_metadata_file(signable, metadata_filename, - metadata['version'], consistent_snapshot, storage_backend) - - # 'signable' contains an invalid threshold of signatures. - else: - # Since new metadata cannot be successfully written, restore the current - # version number. - roleinfo = roledb.get_roleinfo(rolename, repository_name) - roleinfo['version'] = current_version - roledb.update_roleinfo(rolename, roleinfo, - repository_name=repository_name) - - # Note that 'signable' is an argument to tuf.UnsignedMetadataError(). - raise exceptions.UnsignedMetadataError('Not enough' - ' signatures for ' + repr(metadata_filename), signable) - - # 'rolename' is a delegated role or a top-level role that is partially - # signed, and thus its signatures should not be verified. - else: - signable = sign_metadata(metadata, signing_keyids, metadata_filename, - repository_name) - _remove_invalid_and_duplicate_signatures(signable, repository_name) - - # Root should always be written as if consistent_snapshot is True (i.e., - # .root.json and root.json). - if rolename == 'root': - filename = write_metadata_file(signable, metadata_filename, - metadata['version'], consistent_snapshot=True, - storage_backend=storage_backend) - - else: - filename = write_metadata_file(signable, metadata_filename, - metadata['version'], consistent_snapshot, storage_backend) - - return signable, filename - - - - - -def _metadata_is_partially_loaded(rolename, signable, repository_name): - """ - Non-public function that determines whether 'rolename' is loaded with - at least zero good signatures, but an insufficient threshold (which means - 'rolename' was written to disk with repository.write_partial()). A repository - maintainer may write partial metadata without including a valid signature. - However, the final repository.write() must include a threshold number of - signatures. - - If 'rolename' is found to be partially loaded, mark it as partially loaded in - its 'roledb' roleinfo. This function exists to assist in deciding whether - a role's version number should be incremented when write() or write_parital() - is called. Return True if 'rolename' was partially loaded, False otherwise. - """ - - # The signature status lists the number of good signatures, including - # bad, untrusted, unknown, etc. 
- status = sig.get_signature_status(signable, rolename, repository_name) - - if len(status['good_sigs']) < status['threshold'] and \ - len(status['good_sigs']) >= 0: - return True - - else: - return False - - - - - -def _check_role_keys(rolename, repository_name): - """ - Non-public function that verifies the public and signing keys of 'rolename'. - If either contain an invalid threshold of keys, raise an exception. - """ - - # Extract the total number of public and private keys of 'rolename' from its - # roleinfo in 'roledb'. - roleinfo = roledb.get_roleinfo(rolename, repository_name) - total_keyids = len(roleinfo['keyids']) - threshold = roleinfo['threshold'] - total_signatures = len(roleinfo['signatures']) - total_signing_keys = len(roleinfo['signing_keyids']) - - # Raise an exception for an invalid threshold of public keys. - if total_keyids < threshold: - raise exceptions.InsufficientKeysError(repr(rolename) + ' role contains' - ' ' + repr(total_keyids) + ' / ' + repr(threshold) + ' public keys.') - - # Raise an exception for an invalid threshold of signing keys. - if total_signatures == 0 and total_signing_keys < threshold: - raise exceptions.InsufficientKeysError(repr(rolename) + ' role contains' - ' ' + repr(total_signing_keys) + ' / ' + repr(threshold) + ' signing keys.') - - - - - -def _remove_invalid_and_duplicate_signatures(signable, repository_name): - """ - Non-public function that removes invalid or duplicate signatures from - 'signable'. 'signable' may contain signatures (invalid) from previous - versions of the metadata that were loaded with load_repository(). Invalid, - or duplicate signatures, are removed from 'signable'. - """ - - # Store the keyids of valid signatures. 'signature_keyids' is checked for - # duplicates rather than comparing signature objects because PSS may generate - # duplicate valid signatures for the same data, yet contain different - # signatures. - signature_keyids = [] - - for signature in signable['signatures']: - signed = sslib_formats.encode_canonical(signable['signed']).encode('utf-8') - keyid = signature['keyid'] - key = None - - # Remove 'signature' from 'signable' if the listed keyid does not exist - # in 'keydb'. - try: - key = keydb.get_key(keyid, repository_name=repository_name) - - except exceptions.UnknownKeyError: - signable['signatures'].remove(signature) - continue - - # Remove 'signature' from 'signable' if it is an invalid signature. - if not sslib_keys.verify_signature(key, signature, signed): - logger.debug('Removing invalid signature for ' + repr(keyid)) - signable['signatures'].remove(signature) - - # Although valid, it may still need removal if it is a duplicate. Check - # the keyid, rather than the signature, to remove duplicate PSS signatures. - # PSS may generate multiple different signatures for the same keyid. - else: - if keyid in signature_keyids: - signable['signatures'].remove(signature) - - # 'keyid' is valid and not a duplicate, so add it to 'signature_keyids'. - else: - signature_keyids.append(keyid) - - - - - -def _delete_obsolete_metadata(metadata_directory, snapshot_metadata, - consistent_snapshot, repository_name, storage_backend): - """ - Non-public function that deletes metadata files marked as removed by - 'repository_tool.py'. Revoked metadata files are not actually deleted until - this function is called. Obsolete metadata should *not* be retained in - "metadata.staged", otherwise they may be re-loaded by 'load_repository()'. 
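For reference, the signatures handled above are verified over the canonical encoding of the 'signed' object; a minimal, hedged sketch with a toy payload:

```python
from securesystemslib import formats as sslib_formats

# Canonical JSON: sorted keys, no insignificant whitespace.
signed = {'_type': 'targets', 'version': 1}
data = sslib_formats.encode_canonical(signed).encode('utf-8')
# data == b'{"_type":"targets","version":1}'
```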
- - Note: Obsolete metadata may not always be easily detected (by inspecting - top-level metadata during loading) due to partial metadata and top-level - metadata that have not been written yet. - """ - - # Walk the repository's metadata sub-directory, which is where all metadata - # is stored (including delegated roles). The 'django.json' role (e.g., - # delegated by Targets) would be located in the - # '{repository_directory}/metadata/' directory. - metadata_files = sorted(storage_backend.list_folder(metadata_directory)) - for metadata_role in metadata_files: - if metadata_role.endswith('root.json'): - continue - - metadata_path = os.path.join(metadata_directory, metadata_role) - - # Strip the version number if 'consistent_snapshot' is True. Example: - # '10.django.json' --> 'django.json'. Consistent and non-consistent - # metadata might co-exist if write() and - # write(consistent_snapshot=True) are mixed, so ensure only - # '.filename' metadata is stripped. - - # Should we check if 'consistent_snapshot' is True? It might have been - # set previously, but 'consistent_snapshot' can potentially be False - # now. We'll proceed with the understanding that 'metadata_name' can - # have a prepended version number even though the repository is now - # a non-consistent one. - if metadata_role not in snapshot_metadata['meta']: - metadata_role, junk = _strip_version_number(metadata_role, - consistent_snapshot) - - else: - logger.debug(repr(metadata_role) + ' found in the snapshot role.') - - # Strip metadata extension from filename. The role database does not - # include the metadata extension. - if metadata_role.endswith(METADATA_EXTENSION): - metadata_role = metadata_role[:-len(METADATA_EXTENSION)] - else: - logger.debug(repr(metadata_role) + ' does not match' - ' supported extension ' + repr(METADATA_EXTENSION)) - - if metadata_role in roledb.TOP_LEVEL_ROLES: - logger.debug('Not removing top-level metadata ' + repr(metadata_role)) - return - - # Delete the metadata file if it does not exist in 'roledb'. - # 'repository_tool.py' might have removed 'metadata_name,' - # but its metadata file is not actually deleted yet. Do it now. - if not roledb.role_exists(metadata_role, repository_name): - logger.info('Removing outdated metadata: ' + repr(metadata_path)) - storage_backend.remove(metadata_path) - - else: - logger.debug('Not removing metadata: ' + repr(metadata_path)) - - # TODO: Should we delete outdated consistent snapshots, or does it make - # more sense for integrators to remove outdated consistent snapshots? - - - - -def _get_written_metadata(metadata_signable): - """ - Non-public function that returns the actual content of written metadata. - """ - - # Explicitly specify the JSON separators for Python 2 + 3 consistency. - written_metadata_content = json.dumps(metadata_signable, indent=1, - separators=(',', ': '), sort_keys=True).encode('utf-8') - - return written_metadata_content - - - - - -def _strip_version_number(metadata_filename, consistent_snapshot): - """ - Strip from 'metadata_filename' any version number (in the - expected '{dirname}/.rolename.' format) that - it may contain, and return the stripped filename and version number, - as a tuple. 'consistent_snapshot' is a boolean indicating if a version - number is prepended to 'metadata_filename'. - """ - - # Strip the version number if 'consistent_snapshot' is True. 
- # Example: '10.django.json' --> 'django.json' - if consistent_snapshot: - dirname, basename = os.path.split(metadata_filename) - version_number, basename = basename.split('.', 1) - stripped_metadata_filename = os.path.join(dirname, basename) - - if not version_number.isdigit(): - return metadata_filename, '' - - else: - return stripped_metadata_filename, version_number - - else: - return metadata_filename, '' - - - - -def _load_top_level_metadata(repository, top_level_filenames, repository_name): - """ - Load the metadata of the Root, Timestamp, Targets, and Snapshot roles. At a - minimum, the Root role must exist and load successfully. - """ - - root_filename = top_level_filenames[ROOT_FILENAME] - targets_filename = top_level_filenames[TARGETS_FILENAME] - snapshot_filename = top_level_filenames[SNAPSHOT_FILENAME] - timestamp_filename = top_level_filenames[TIMESTAMP_FILENAME] - - root_metadata = None - targets_metadata = None - snapshot_metadata = None - timestamp_metadata = None - - # Load 'root.json'. A Root role file without a version number is always - # written. - try: - # Initialize the key and role metadata of the top-level roles. - signable = sslib_util.load_json_file(root_filename) - try: - formats.check_signable_object_format(signable) - except exceptions.UnsignedMetadataError: - # Downgrade the error to a warning because a use case exists where - # metadata may be generated unsigned on one machine and signed on another. - logger.warning('Unsigned metadata object: ' + repr(signable)) - - root_metadata = signable['signed'] - keydb.create_keydb_from_root_metadata(root_metadata, repository_name) - roledb.create_roledb_from_root_metadata(root_metadata, repository_name) - - # Load Root's roleinfo and update 'roledb'. - roleinfo = roledb.get_roleinfo('root', repository_name) - roleinfo['consistent_snapshot'] = root_metadata['consistent_snapshot'] - roleinfo['signatures'] = [] - for signature in signable['signatures']: - if signature not in roleinfo['signatures']: - roleinfo['signatures'].append(signature) - - else: - logger.debug('Found a Root signature that is already loaded:' - ' ' + repr(signature)) - - # By default, roleinfo['partial_loaded'] of top-level roles should be set - # to False in 'create_roledb_from_root_metadata()'. Update this field, if - # necessary, now that we have its signable object. - if _metadata_is_partially_loaded('root', signable, repository_name): - roleinfo['partial_loaded'] = True - - else: - logger.debug('Root was not partially loaded.') - - _log_warning_if_expires_soon(ROOT_FILENAME, roleinfo['expires'], - ROOT_EXPIRES_WARN_SECONDS) - - roledb.update_roleinfo('root', roleinfo, mark_role_as_dirty=False, - repository_name=repository_name) - - # Ensure the 'consistent_snapshot' field is extracted. - consistent_snapshot = root_metadata['consistent_snapshot'] - - except sslib_exceptions.StorageError as error: - raise exceptions.RepositoryError('Cannot load the required' - ' root file: ' + repr(root_filename)) from error - - # Load 'timestamp.json'. A Timestamp role file without a version number is - # always written. - try: - signable = sslib_util.load_json_file(timestamp_filename) - timestamp_metadata = signable['signed'] - for signature in signable['signatures']: - repository.timestamp.add_signature(signature, mark_role_as_dirty=False) - - # Load Timestamp's roleinfo and update 'roledb'. 
- roleinfo = roledb.get_roleinfo('timestamp', repository_name) - roleinfo['expires'] = timestamp_metadata['expires'] - roleinfo['version'] = timestamp_metadata['version'] - - if _metadata_is_partially_loaded('timestamp', signable, repository_name): - roleinfo['partial_loaded'] = True - - else: - logger.debug('The Timestamp role was not partially loaded.') - - _log_warning_if_expires_soon(TIMESTAMP_FILENAME, roleinfo['expires'], - TIMESTAMP_EXPIRES_WARN_SECONDS) - - roledb.update_roleinfo('timestamp', roleinfo, mark_role_as_dirty=False, - repository_name=repository_name) - - except sslib_exceptions.StorageError as error: - raise exceptions.RepositoryError('Cannot load the Timestamp ' - 'file: ' + repr(timestamp_filename)) from error - - # Load 'snapshot.json'. A consistent snapshot.json must be calculated if - # 'consistent_snapshot' is True. - # The Snapshot and Root roles are both accessed by their hashes. - if consistent_snapshot: - snapshot_version = timestamp_metadata['meta'][SNAPSHOT_FILENAME]['version'] - - dirname, basename = os.path.split(snapshot_filename) - basename = basename.split(METADATA_EXTENSION, 1)[0] - snapshot_filename = os.path.join(dirname, - str(snapshot_version) + '.' + basename + METADATA_EXTENSION) - - try: - signable = sslib_util.load_json_file(snapshot_filename) - try: - formats.check_signable_object_format(signable) - except exceptions.UnsignedMetadataError: - # Downgrade the error to a warning because a use case exists where - # metadata may be generated unsigned on one machine and signed on another. - logger.warning('Unsigned metadata object: ' + repr(signable)) - - snapshot_metadata = signable['signed'] - - for signature in signable['signatures']: - repository.snapshot.add_signature(signature, mark_role_as_dirty=False) - - # Load Snapshot's roleinfo and update 'roledb'. - roleinfo = roledb.get_roleinfo('snapshot', repository_name) - roleinfo['expires'] = snapshot_metadata['expires'] - roleinfo['version'] = snapshot_metadata['version'] - - if _metadata_is_partially_loaded('snapshot', signable, repository_name): - roleinfo['partial_loaded'] = True - - else: - logger.debug('Snapshot was not partially loaded.') - - _log_warning_if_expires_soon(SNAPSHOT_FILENAME, roleinfo['expires'], - SNAPSHOT_EXPIRES_WARN_SECONDS) - - roledb.update_roleinfo('snapshot', roleinfo, mark_role_as_dirty=False, - repository_name=repository_name) - - except sslib_exceptions.StorageError as error: - raise exceptions.RepositoryError('The Snapshot file ' - 'cannot be loaded: '+ repr(snapshot_filename)) from error - - # Load 'targets.json'. A consistent snapshot of the Targets role must be - # calculated if 'consistent_snapshot' is True. - if consistent_snapshot: - targets_version = snapshot_metadata['meta'][TARGETS_FILENAME]['version'] - dirname, basename = os.path.split(targets_filename) - targets_filename = os.path.join(dirname, str(targets_version) + '.' + basename) - - try: - signable = sslib_util.load_json_file(targets_filename) - try: - formats.check_signable_object_format(signable) - except exceptions.UnsignedMetadataError: - # Downgrade the error to a warning because a use case exists where - # metadata may be generated unsigned on one machine and signed on another. 
- logger.warning('Unsigned metadata object: ' + repr(signable)) - - targets_metadata = signable['signed'] - - for signature in signable['signatures']: - repository.targets.add_signature(signature, mark_role_as_dirty=False) - - # Update 'targets.json' in 'roledb' - roleinfo = roledb.get_roleinfo('targets', repository_name) - roleinfo['paths'] = targets_metadata['targets'] - roleinfo['version'] = targets_metadata['version'] - roleinfo['expires'] = targets_metadata['expires'] - roleinfo['delegations'] = targets_metadata['delegations'] - - if _metadata_is_partially_loaded('targets', signable, repository_name): - roleinfo['partial_loaded'] = True - - else: - logger.debug('Targets file was not partially loaded.') - - _log_warning_if_expires_soon(TARGETS_FILENAME, roleinfo['expires'], - TARGETS_EXPIRES_WARN_SECONDS) - - roledb.update_roleinfo('targets', roleinfo, mark_role_as_dirty=False, - repository_name=repository_name) - - # Add the keys specified in the delegations field of the Targets role. - for keyid, key_metadata in targets_metadata['delegations']['keys'].items(): - - # Use the keyid found in the delegation - key_object, _ = sslib_keys.format_metadata_to_key(key_metadata, - keyid) - - # Add 'key_object' to the list of recognized keys. Keys may be shared, - # so do not raise an exception if 'key_object' has already been loaded. - # In contrast to the methods that may add duplicate keys, do not log - # a warning as there may be many such duplicate key warnings. The - # repository maintainer should have also been made aware of the duplicate - # key when it was added. - try: - keydb.add_key(key_object, keyid=None, repository_name=repository_name) - - except exceptions.KeyAlreadyExistsError: - pass - - except sslib_exceptions.StorageError as error: - raise exceptions.RepositoryError('The Targets file ' - 'can not be loaded: ' + repr(targets_filename)) from error - - return repository, consistent_snapshot - - - - -def _log_warning_if_expires_soon(rolename, expires_iso8601_timestamp, - seconds_remaining_to_warn): - """ - Non-public function that logs a warning if 'rolename' expires in - 'seconds_remaining_to_warn' seconds, or less. - """ - - # Metadata stores expiration datetimes in ISO8601 format. Convert to - # unix timestamp, subtract from current time.time() (also in POSIX time) - # and compare against 'seconds_remaining_to_warn'. Log a warning message - # to console if 'rolename' expires soon. - datetime_object = formats.expiry_string_to_datetime( - expires_iso8601_timestamp) - expires_unix_timestamp = \ - formats.datetime_to_unix_timestamp(datetime_object) - seconds_until_expires = expires_unix_timestamp - int(time.time()) - - if seconds_until_expires <= seconds_remaining_to_warn: - if seconds_until_expires <= 0: - logger.warning( - repr(rolename) + ' expired ' + repr(datetime_object.ctime() + ' (UTC).')) - - else: - days_until_expires = seconds_until_expires / 86400 - logger.warning(repr(rolename) + ' expires ' + datetime_object.ctime() + '' - ' (UTC). ' + repr(days_until_expires) + ' day(s) until it expires.') - - else: - pass - - - - - -def import_rsa_privatekey_from_file(filepath, password=None): - """ - - Import the encrypted PEM file in 'filepath', decrypt it, and return the key - object in 'securesystemslib.RSAKEY_SCHEMA' format. - - - filepath: - file, an RSA encrypted PEM file. Unlike the public RSA PEM - key file, 'filepath' does not have an extension. - - password: - The passphrase to decrypt 'filepath'. 
-
-  <Exceptions>
-    securesystemslib.exceptions.FormatError, if the arguments are improperly
-    formatted.
-
-    securesystemslib.exceptions.CryptoError, if 'filepath' is not a valid
-    encrypted key file.
-
-  <Side Effects>
-    The contents of 'filepath' are read, decrypted, and the key stored.
-
-  <Returns>
-    An RSA key object, conformant to 'securesystemslib.RSAKEY_SCHEMA'.
-  """
-
-  # Note: securesystemslib.interface.import_rsa_privatekey_from_file() does not
-  # allow both 'password' and 'prompt' to be True, nor does it automatically
-  # prompt for a password if the key file is encrypted and a password isn't
-  # given.
-  try:
-    private_key = sslib_interface.import_rsa_privatekey_from_file(
-        filepath, password)
-
-  # The user might not have given a password for an encrypted private key.
-  # Prompt for a password for convenience.
-  except sslib_exceptions.CryptoError:
-    if password is None:
-      private_key = sslib_interface.import_rsa_privatekey_from_file(
-          filepath, password, prompt=True)
-
-    else:
-      raise
-
-  return private_key
-
-
-
-
-
-
-
-def import_ed25519_privatekey_from_file(filepath, password=None):
-  """
-  <Purpose>
-    Import the encrypted ed25519 TUF key file in 'filepath', decrypt it, and
-    return the key object in 'securesystemslib.ED25519KEY_SCHEMA' format.
-
-    The TUF private key (which may also contain the public part) is encrypted
-    with AES-256 in CTR mode of operation.  The password is strengthened with
-    PBKDF2-HMAC-SHA256.
-
-  <Arguments>
-    filepath:
-      file, an encrypted ed25519 TUF key file.
-
-    password:
-      The password, or passphrase, to import the private key (i.e., the
-      encrypted key file 'filepath' must be decrypted before the ed25519 key
-      object can be returned).
-
-  <Exceptions>
-    securesystemslib.exceptions.FormatError, if the arguments are improperly
-    formatted or the imported key object contains an invalid key type (i.e.,
-    not 'ed25519').
-
-    securesystemslib.exceptions.CryptoError, if 'filepath' cannot be decrypted.
-
-    securesystemslib.exceptions.UnsupportedLibraryError, if 'filepath' cannot be
-    decrypted due to an invalid configuration setting (i.e., an invalid
-    'tuf.settings' setting).
-
-  <Side Effects>
-    'password' is used to decrypt the 'filepath' key file.
-
-  <Returns>
-    An ed25519 key object of the form: 'securesystemslib.ED25519KEY_SCHEMA'.
-  """
-
-  # Note: securesystemslib.interface.import_ed25519_privatekey_from_file() does
-  # not allow both 'password' and 'prompt' to be True, nor does it
-  # automatically prompt for a password if the key file is encrypted and a
-  # password isn't given.
-  try:
-    private_key = sslib_interface.import_ed25519_privatekey_from_file(
-        filepath, password)
-
-  # The user might not have given a password for an encrypted private key.
-  # Prompt for a password for convenience.
-  except sslib_exceptions.CryptoError:
-    if password is None:
-      private_key = sslib_interface.import_ed25519_privatekey_from_file(
-          filepath, password, prompt=True)
-
-    else:
-      raise
-
-  return private_key
-
-
-
-def get_delegated_roles_metadata_filenames(metadata_directory,
-    consistent_snapshot, storage_backend=None):
-  """
-  Return a dictionary containing all filenames in 'metadata_directory'
-  except the top-level roles.
-  If multiple versions of a file exist because of a consistent snapshot,
-  only the file with the biggest version prefix is included.
- """ - - filenames = {} - metadata_files = sorted(storage_backend.list_folder(metadata_directory), - reverse=True) - - # Iterate over role metadata files, sorted by their version-number prefix, with - # more recent versions first, and only add the most recent version of any - # (non top-level) metadata to the list of returned filenames. Note that there - # should only be one version of each file, if consistent_snapshot is False. - for metadata_role in metadata_files: - metadata_path = os.path.join(metadata_directory, metadata_role) - - # Strip the version number if 'consistent_snapshot' is True, - # or if 'metadata_role' is Root. - # Example: '10.django.json' --> 'django.json' - consistent = \ - metadata_role.endswith('root.json') or consistent_snapshot == True - metadata_name, junk = _strip_version_number(metadata_role, - consistent) - - if metadata_name.endswith(METADATA_EXTENSION): - extension_length = len(METADATA_EXTENSION) - metadata_name = metadata_name[:-extension_length] - - else: - logger.debug('Skipping file with unsupported metadata' - ' extension: ' + repr(metadata_path)) - continue - - # Skip top-level roles, only interested in delegated roles. - if metadata_name in roledb.TOP_LEVEL_ROLES: - continue - - # Prevent reloading duplicate versions if consistent_snapshot is True - if metadata_name not in filenames: - filenames[metadata_name] = metadata_path - - return filenames - - - -def get_top_level_metadata_filenames(metadata_directory): - """ - - Return a dictionary containing the filenames of the top-level roles. - If 'metadata_directory' is set to 'metadata', the dictionary - returned would contain: - - filenames = {'root.json': 'metadata/root.json', - 'targets.json': 'metadata/targets.json', - 'snapshot.json': 'metadata/snapshot.json', - 'timestamp.json': 'metadata/timestamp.json'} - - If 'metadata_directory' is not set by the caller, the current directory is - used. - - - metadata_directory: - The directory containing the metadata files. - - - securesystemslib.exceptions.FormatError, if 'metadata_directory' is - improperly formatted. - - - None. - - - A dictionary containing the expected filenames of the top-level - metadata files, such as 'root.json' and 'snapshot.json'. - """ - - # Does 'metadata_directory' have the correct format? - # Ensure the arguments have the appropriate number of objects and object - # types, and that all dict keys are properly named. - # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch. - sslib_formats.PATH_SCHEMA.check_match(metadata_directory) - - # Store the filepaths of the top-level roles, including the - # 'metadata_directory' for each one. - filenames = {} - - filenames[ROOT_FILENAME] = \ - os.path.join(metadata_directory, ROOT_FILENAME) - - filenames[TARGETS_FILENAME] = \ - os.path.join(metadata_directory, TARGETS_FILENAME) - - filenames[SNAPSHOT_FILENAME] = \ - os.path.join(metadata_directory, SNAPSHOT_FILENAME) - - filenames[TIMESTAMP_FILENAME] = \ - os.path.join(metadata_directory, TIMESTAMP_FILENAME) - - return filenames - - - - - -def get_targets_metadata_fileinfo(filename, storage_backend, custom=None): - """ - - Retrieve the file information of 'filename'. The object returned - conforms to 'tuf.formats.TARGETS_FILEINFO_SCHEMA'. The information - generated for 'filename' is stored in metadata files like 'targets.json'. 
- The fileinfo object returned has the form: - - fileinfo = {'length': 1024, - 'hashes': {'sha256': 1233dfba312, ...}, - 'custom': {...}} - - - filename: - The metadata file whose file information is needed. It must exist. - - custom: - An optional object providing additional information about the file. - - storage_backend: - An object which implements - securesystemslib.storage.StorageBackendInterface. - - - securesystemslib.exceptions.FormatError, if 'filename' is improperly - formatted. - - - The file is opened and information about the file is generated, - such as file size and its hash. - - - A dictionary conformant to 'tuf.formats.TARGETS_FILEINFO_SCHEMA'. This - dictionary contains the length, hashes, and custom data about the - 'filename' metadata file. SHA256 hashes are generated by default. - """ - - # Does 'filename' and 'custom' have the correct format? - # Ensure the arguments have the appropriate number of objects and object - # types, and that all dict keys are properly named. - # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch. - sslib_formats.PATH_SCHEMA.check_match(filename) - if custom is not None: - formats.CUSTOM_SCHEMA.check_match(custom) - - # Note: 'filehashes' is a dictionary of the form - # {'sha256': 1233dfba312, ...}. 'custom' is an optional - # dictionary that a client might define to include additional - # file information, such as the file's author, version/revision - # numbers, etc. - filesize, filehashes = sslib_util.get_file_details(filename, - settings.FILE_HASH_ALGORITHMS, storage_backend) - - return formats.make_targets_fileinfo(filesize, filehashes, custom=custom) - - - - - -def get_metadata_versioninfo(rolename, repository_name): - """ - - Retrieve the version information of 'rolename'. The object returned - conforms to 'tuf.formats.VERSIONINFO_SCHEMA'. The information - generated for 'rolename' is stored in 'snapshot.json'. - The versioninfo object returned has the form: - - versioninfo = {'version': 14} - - - rolename: - The metadata role whose versioninfo is needed. It must exist, otherwise - a 'tuf.exceptions.UnknownRoleError' exception is raised. - - repository_name: - The name of the repository. If not supplied, 'rolename' is added to the - 'default' repository. - - - securesystemslib.exceptions.FormatError, if 'rolename' is improperly - formatted. - - tuf.exceptions.UnknownRoleError, if 'rolename' does not exist. - - - None. - - - A dictionary conformant to 'tuf.formats.VERSIONINFO_SCHEMA'. - This dictionary contains the version number of 'rolename'. - """ - - # Does 'rolename' have the correct format? - # Ensure the arguments have the appropriate number of objects and object - # types, and that all dict keys are properly named. - formats.ROLENAME_SCHEMA.check_match(rolename) - - roleinfo = roledb.get_roleinfo(rolename, repository_name) - versioninfo = {'version': roleinfo['version']} - - return versioninfo - - - - - -def create_bin_name(low, high, prefix_len): - """ - - Create a string name of a delegated hash bin, where name will be a range of - zero-padded (up to prefix_len) strings i.e. for low=00, high=07, - prefix_len=3 the returned name would be '000-007'. 
- - - low: - The low end of the prefix range to be binned - - high: - The high end of the prefix range to be binned - - prefix_len: - The length of the prefix range components - - - A string bin name, with each end of the range zero-padded up to prefix_len - """ - if low == high: - return "{low:0{len}x}".format(low=low, len=prefix_len) - - return "{low:0{len}x}-{high:0{len}x}".format(low=low, high=high, - len=prefix_len) - - - - - -def get_bin_numbers(number_of_bins): - """ - - Given the desired number of bins (number_of_bins) calculate the prefix - length (prefix_length), total number of prefixes (prefix_count) and the - number of prefixes to be stored in each bin (bin_size). - Example: number_of_bins = 32 - prefix_length = 2 - prefix_count = 256 - bin_size = 8 - That is, each of the 32 hashed bins are responsible for 8 hash prefixes, - i.e. 00-07, 08-0f, ..., f8-ff. - - - number_of_bins: - The number of hashed bins in use - - - A tuple of three values: - 1. prefix_length: the length of each prefix - 2. prefix_count: the total number of prefixes in use - 3. bin_size: the number of hash prefixes to be stored in each bin - """ - # Convert 'number_of_bins' to hexadecimal and determine the number of - # hexadecimal digits needed by each hash prefix - prefix_length = len("{:x}".format(number_of_bins - 1)) - # Calculate the total number of hash prefixes (e.g., 000 - FFF total values) - prefix_count = 16 ** prefix_length - # Determine how many prefixes to assign to each bin - bin_size = prefix_count // number_of_bins - - # For simplicity, ensure that 'prefix_count' (16 ^ n) can be evenly - # distributed over 'number_of_bins' (must be 2 ^ n). Each bin will contain - # (prefix_count / number_of_bins) hash prefixes. - if prefix_count % number_of_bins != 0: - # Note: x % y != 0 does not guarantee that y is not a power of 2 for - # arbitrary x and y values. However, due to the relationship between - # number_of_bins and prefix_count, it is true for them. - raise sslib_exceptions.Error('The "number_of_bins" argument' - ' must be a power of 2.') - - return prefix_length, prefix_count, bin_size - - - - - -def find_bin_for_target_hash(target_hash, number_of_bins): - """ - - For a given hashed filename, target_hash, calculate the name of a hashed bin - into which this file would be delegated given number_of_bins bins are in - use. - - - target_hash: - The hash of the target file's path - - number_of_bins: - The number of hashed_bins in use - - - The name of the hashed bin target_hash would be binned into - """ - - prefix_length, _, bin_size = get_bin_numbers(number_of_bins) - - prefix = int(target_hash[:prefix_length], 16) - - low = prefix - (prefix % bin_size) - high = (low + bin_size - 1) - - return create_bin_name(low, high, prefix_length) - - - - - -def get_target_hash(target_filepath): - """ - - Compute the hash of 'target_filepath'. This is useful in conjunction with - the "path_hash_prefixes" attribute in a delegated targets role, which - tells us which paths a role is implicitly responsible for. - - The repository may optionally organize targets into hashed bins to ease - target delegations and role metadata management. The use of consistent - hashing allows for a uniform distribution of targets into bins. - - - target_filepath: - The path to the target file on the repository. This will be relative to - the 'targets' (or equivalent) directory on a given mirror. - - - None. - - - None. - - - The hash of 'target_filepath'. 
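A hedged sketch of the hashed-bin helpers documented above, assuming 32 bins and an illustrative target path:

```python
from tuf import repository_lib

prefix_length, prefix_count, bin_size = repository_lib.get_bin_numbers(32)
# (2, 256, 8): 32 bins, each covering 8 of the 256 two-digit hash prefixes.

digest = repository_lib.get_target_hash('packages/foo-1.0.tgz')
print(repository_lib.find_bin_for_target_hash(digest, 32))
# Prints a bin name such as '30-37', formatted by create_bin_name().
```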
- - """ - formats.RELPATH_SCHEMA.check_match(target_filepath) - - digest_object = sslib_hash.digest(algorithm=HASH_FUNCTION) - digest_object.update(target_filepath.encode('utf-8')) - return digest_object.hexdigest() - - - - -def generate_root_metadata(version, expiration_date, consistent_snapshot, - repository_name='default'): - """ - - Create the root metadata. 'roledb' and 'keydb' - are read and the information returned by these modules is used to generate - the root metadata object. - - - version: - The metadata version number. Clients use the version number to - determine if the downloaded version is newer than the one currently - trusted. - - expiration_date: - The expiration date of the metadata file. Conformant to - 'securesystemslib.formats.ISO8601_DATETIME_SCHEMA'. - - consistent_snapshot: - Boolean. If True, a file digest is expected to be prepended to the - filename of any target file located in the targets directory. Each digest - is stripped from the target filename and listed in the snapshot metadata. - - repository_name: - The name of the repository. If not supplied, 'rolename' is added to the - 'default' repository. - - - securesystemslib.exceptions.FormatError, if the generated root metadata - object could not be generated with the correct format. - - securesystemslib.exceptions.Error, if an error is encountered while - generating the root metadata object (e.g., a required top-level role not - found in 'roledb'.) - - - The contents of 'keydb' and 'roledb' are read. - - - A root metadata object, conformant to 'tuf.formats.ROOT_SCHEMA'. - """ - - # Do the arguments have the correct format? - # Ensure the arguments have the appropriate number of objects and object - # types, and that all dict keys are properly named. Raise - # 'securesystemslib.exceptions.FormatError' if any of the arguments are - # improperly formatted. - formats.METADATAVERSION_SCHEMA.check_match(version) - sslib_formats.ISO8601_DATETIME_SCHEMA.check_match(expiration_date) - sslib_formats.BOOLEAN_SCHEMA.check_match(consistent_snapshot) - sslib_formats.NAME_SCHEMA.check_match(repository_name) - - # The role and key dictionaries to be saved in the root metadata object. - # Conformant to 'ROLEDICT_SCHEMA' and 'KEYDICT_SCHEMA', respectively. - roledict = {} - keydict = {} - keylist = [] - - # Extract the role, threshold, and keyid information of the top-level roles, - # which Root stores in its metadata. The necessary role metadata is generated - # from this information. - for rolename in roledb.TOP_LEVEL_ROLES: - - # If a top-level role is missing from 'roledb', raise an exception. - if not roledb.role_exists(rolename, repository_name): - raise sslib_exceptions.Error(repr(rolename) + ' not in' - ' "roledb".') - - # Collect keys from all roles in a list - keyids = roledb.get_role_keyids(rolename, repository_name) - for keyid in keyids: - key = keydb.get_key(keyid, repository_name=repository_name) - keylist.append(key) - - # Generate the authentication information Root establishes for each - # top-level role. 
- role_threshold = roledb.get_role_threshold(rolename, repository_name) - role_metadata = formats.build_dict_conforming_to_schema( - formats.ROLE_SCHEMA, - keyids=keyids, - threshold=role_threshold) - roledict[rolename] = role_metadata - - # Create the root metadata 'keys' dictionary - _, keydict = keys_to_keydict(keylist) - - # Use generalized build_dict_conforming_to_schema func to produce a dict that - # contains all the appropriate information for this type of metadata, - # checking that the result conforms to the appropriate schema. - # TODO: Later, probably after the rewrite for TUF Issue #660, generalize - # further, upward, by replacing generate_targets_metadata, - # generate_root_metadata, etc. with one function that generates - # metadata, possibly rolling that upwards into the calling function. - # There are very few things that really need to be done differently. - return formats.build_dict_conforming_to_schema( - formats.ROOT_SCHEMA, - version=version, - expires=expiration_date, - keys=keydict, - roles=roledict, - consistent_snapshot=consistent_snapshot) - - - - - -def generate_targets_metadata(targets_directory, target_files, version, - expiration_date, delegations=None, write_consistent_targets=False, - use_existing_fileinfo=False, storage_backend=None, - repository_name='default'): - """ - - Generate the targets metadata object. The targets in 'target_files' must - exist at the same path they should on the repo. 'target_files' is a list - of targets. The 'custom' field of the targets metadata is not currently - supported. - - - targets_directory: - The absolute path to a directory containing the target files and - directories of the repository. - - target_files: - The target files tracked by 'targets.json'. 'target_files' is a - dictionary mapping target paths (relative to the targets directory) to - a dict matching tuf.formats.LOOSE_FILEINFO_SCHEMA. LOOSE_FILEINFO_SCHEMA - can support multiple different value patterns: - 1) an empty dictionary - for when fileinfo should be generated - 2) a dictionary matching tuf.formats.CUSTOM_SCHEMA - for when fileinfo - should be generated, with the supplied custom metadata attached - 3) a dictionary matching tuf.formats.FILEINFO_SCHEMA - for when full - fileinfo is provided in conjunction with use_existing_fileinfo - - version: - The metadata version number. Clients use the version number to - determine if the downloaded version is newer than the one currently - trusted. - - expiration_date: - The expiration date of the metadata file. Conformant to - 'securesystemslib.formats.ISO8601_DATETIME_SCHEMA'. - - delegations: - The delegations made by the targets role to be generated. 'delegations' - must match 'tuf.formats.DELEGATIONS_SCHEMA'. - - write_consistent_targets: - Boolean that indicates whether file digests should be prepended to the - target files. - NOTE: it is an error for write_consistent_targets to be True when - use_existing_fileinfo is also True. We can not create consistent targets - for a target file where the fileinfo isn't generated by tuf. - - use_existing_fileinfo: - Boolean that indicates whether to use the complete fileinfo, including - hashes, as already exists in the roledb (True) or whether to generate - hashes (False). - - storage_backend: - An object which implements - securesystemslib.storage.StorageBackendInterface. - - repository_name: - The name of the repository. If not supplied, 'default' repository - is used. 
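Because LOOSE_FILEINFO_SCHEMA admits three different value shapes, a sketch of all three may help (paths and values are illustrative):

```python
target_files = {
    # 1) Empty dict: length and hashes are generated from the file on disk.
    "foo-1.0.tgz": {},
    # 2) Custom data only: fileinfo is generated, custom metadata attached.
    "bar-2.0.tgz": {"custom": {"release": "stable"}},
    # 3) Full fileinfo: used as-is; requires use_existing_fileinfo=True.
    "baz-3.0.tgz": {"length": 1024,
                    "hashes": {"sha256": "<hex digest>"}},
}
```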
- - - securesystemslib.exceptions.FormatError, if an error occurred trying to - generate the targets metadata object. - - securesystemslib.exceptions.Error, if use_existing_fileinfo is False and - any of the target files cannot be read. - - securesystemslib.exceptions.Error, if use_existing_fileinfo is True and - some of the target files do not have corresponding hashes in the roledb. - - securesystemslib.exceptions.Error, if both of use_existing_fileinfo and - write_consistent_targets are True. - - - If use_existing_fileinfo is False, the target files are read from storage - and file information about them is generated. - If 'write_consistent_targets' is True, each target in 'target_files' will be - copied to a file with a digest prepended to its filename. For example, if - 'some_file.txt' is one of the targets of 'target_files', consistent targets - .some_file.txt, .some_file.txt, etc., are created - and the content of 'some_file.txt' will be copied into them. - - - A targets metadata object, conformant to - 'tuf.formats.TARGETS_SCHEMA'. - """ - - # Do the arguments have the correct format? - # Ensure the arguments have the appropriate number of objects and object - # types, and that all dict keys are properly named. - # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch. - sslib_formats.PATH_SCHEMA.check_match(targets_directory) - formats.PATH_FILEINFO_SCHEMA.check_match(target_files) - formats.METADATAVERSION_SCHEMA.check_match(version) - sslib_formats.ISO8601_DATETIME_SCHEMA.check_match(expiration_date) - sslib_formats.BOOLEAN_SCHEMA.check_match(write_consistent_targets) - sslib_formats.BOOLEAN_SCHEMA.check_match(use_existing_fileinfo) - - if write_consistent_targets and use_existing_fileinfo: - raise sslib_exceptions.Error('Cannot support writing consistent' - ' targets and using existing fileinfo.') - - if delegations is not None: - formats.DELEGATIONS_SCHEMA.check_match(delegations) - # If targets role has delegations, collect the up-to-date 'keyids' and - # 'threshold' for each role. Update the delegations keys dictionary. - delegations_keys = [] - # Update 'keyids' and 'threshold' for each delegated role - for role in delegations['roles']: - role['keyids'] = roledb.get_role_keyids(role['name'], - repository_name) - role['threshold'] = roledb.get_role_threshold(role['name'], - repository_name) - - # Collect all delegations keys for generating the delegations keydict - for keyid in role['keyids']: - key = keydb.get_key(keyid, repository_name=repository_name) - delegations_keys.append(key) - - _, delegations['keys'] = keys_to_keydict(delegations_keys) - - - # Store the file attributes of targets in 'target_files'. 'filedict', - # conformant to 'tuf.formats.FILEDICT_SCHEMA', is added to the - # targets metadata object returned. - filedict = {} - - if use_existing_fileinfo: - # Use the provided fileinfo dicts, conforming to FILEINFO_SCHEMA, rather than - # generating fileinfo - for target, fileinfo in target_files.items(): - - # Ensure all fileinfo entries in target_files have a non-empty hashes dict - if not fileinfo.get('hashes', None): - raise sslib_exceptions.Error('use_existing_fileinfo option' - ' set but no hashes exist in fileinfo for ' + repr(target)) - - # and a non-empty length - if fileinfo.get('length', -1) < 0: - raise sslib_exceptions.Error('use_existing_fileinfo option' - ' set but no length exists in fileinfo for ' + repr(target)) - - filedict[target] = fileinfo - - else: - # Generate the fileinfo dicts by accessing the target files on storage. 
- # Default to accessing files on local storage. - if storage_backend is None: - storage_backend = sslib_storage.FilesystemBackend() - - filedict = _generate_targets_fileinfo(target_files, targets_directory, - write_consistent_targets, storage_backend) - - # Generate the targets metadata object. - # Use generalized build_dict_conforming_to_schema func to produce a dict that - # contains all the appropriate information for targets metadata, - # checking that the result conforms to the appropriate schema. - # TODO: Later, probably after the rewrite for TUF Issue #660, generalize - # further, upward, by replacing generate_targets_metadata, - # generate_root_metadata, etc. with one function that generates - # metadata, possibly rolling that upwards into the calling function. - # There are very few things that really need to be done differently. - if delegations is not None: - return formats.build_dict_conforming_to_schema( - formats.TARGETS_SCHEMA, - version=version, - expires=expiration_date, - targets=filedict, - delegations=delegations) - else: - return formats.build_dict_conforming_to_schema( - formats.TARGETS_SCHEMA, - version=version, - expires=expiration_date, - targets=filedict) - # TODO: As an alternative to the odd if/else above where we decide whether or - # not to include the delegations argument based on whether or not it is - # None, consider instead adding a check in - # build_dict_conforming_to_schema that skips a keyword if that keyword - # is optional in the schema and the value passed in is set to None.... - - - - - -def _generate_targets_fileinfo(target_files, targets_directory, - write_consistent_targets, storage_backend): - """ - Iterate over target_files and: - * ensure they exist in the targets_directory - * generate a fileinfo dict for the target file, including hashes - * copy 'target_path' to 'digest_target' if write_consistent_targets - add all generated fileinfo dicts to a dictionary mapping - targetpath: fileinfo and return the dict. - """ - - filedict = {} - - # Generate the fileinfo of all the target files listed in 'target_files'. - for target, fileinfo in target_files.items(): - - # The root-most folder of the targets directory should not be included in - # target paths listed in targets metadata. - # (e.g., 'targets/more_targets/somefile.txt' -> 'more_targets/somefile.txt') - relative_targetpath = target - - # Note: join() discards 'targets_directory' if 'target' contains a leading - # path separator (i.e., is treated as an absolute path). - target_path = os.path.join(targets_directory, target.lstrip(os.sep)) - - # Add 'custom' if it has been provided. Custom data about the target is - # optional and will only be included in metadata (i.e., a 'custom' field in - # the target's fileinfo dictionary) if specified here. - custom_data = fileinfo.get('custom', None) - - filedict[relative_targetpath] = \ - get_targets_metadata_fileinfo(target_path, storage_backend, custom_data) - - # Copy 'target_path' to 'digest_target' if consistent hashing is enabled. - if write_consistent_targets: - for target_digest in filedict[relative_targetpath]['hashes'].values(): - dirname, basename = os.path.split(target_path) - digest_filename = target_digest + '.' 
+ basename - digest_target = os.path.join(dirname, digest_filename) - shutil.copyfile(target_path, digest_target) - - return filedict - - - -def _get_hashes_and_length_if_needed(use_length, use_hashes, full_file_path, - storage_backend): - """ - Calculate length and hashes only if they are required, - otherwise, for adopters of tuf with lots of delegations, - this will cause unnecessary overhead. - """ - - length = None - hashes = None - if use_length: - length = sslib_util.get_file_length(full_file_path, - storage_backend) - - if use_hashes: - hashes = sslib_util.get_file_hashes(full_file_path, - settings.FILE_HASH_ALGORITHMS, storage_backend) - - return length, hashes - - - -def generate_snapshot_metadata(metadata_directory, version, expiration_date, - storage_backend, consistent_snapshot=False, - repository_name='default', use_length=False, use_hashes=False): - """ - - Create the snapshot metadata. The minimum metadata must exist (i.e., - 'root.json' and 'targets.json'). This function searches - 'metadata_directory' and the resulting snapshot file will list all the - delegated roles found there. - - - metadata_directory: - The directory containing the 'root.json' and 'targets.json' metadata - files. - - version: - The metadata version number. Clients use the version number to - determine if the downloaded version is newer than the one currently - trusted. - - expiration_date: - The expiration date of the metadata file. - Conformant to 'securesystemslib.formats.ISO8601_DATETIME_SCHEMA'. - - storage_backend: - An object which implements - securesystemslib.storage.StorageBackendInterface. - - consistent_snapshot: - Boolean. If True, a file digest is expected to be prepended to the - filename of any target file located in the targets directory. Each digest - is stripped from the target filename and listed in the snapshot metadata. - - repository_name: - The name of the repository. If not supplied, 'rolename' is added to the - 'default' repository. - - use_length: - Whether to include the optional length attribute for targets - metadata files in the snapshot metadata. - Default is False to save bandwidth but without losing security - from rollback attacks. - Read more at section 5.6 from the Mercury paper: - https://www.usenix.org/conference/atc17/technical-sessions/presentation/kuppusamy - - use_hashes: - Whether to include the optional hashes attribute for targets - metadata files in the snapshot metadata. - Default is False to save bandwidth but without losing security - from rollback attacks. - Read more at section 5.6 from the Mercury paper: - https://www.usenix.org/conference/atc17/technical-sessions/presentation/kuppusamy - - - securesystemslib.exceptions.FormatError, if the arguments are improperly - formatted. - - securesystemslib.exceptions.Error, if an error occurred trying to generate - the snapshot metadata object. - - - The 'root.json' and 'targets.json' files are read. - - - The snapshot metadata object, conformant to 'tuf.formats.SNAPSHOT_SCHEMA'. - """ - - # Do the arguments have the correct format? - # This check ensures arguments have the appropriate number of objects and - # object types, and that all dict keys are properly named. - # Raise 'securesystemslib.exceptions.FormatError' if the check fails. 
- sslib_formats.PATH_SCHEMA.check_match(metadata_directory) - formats.METADATAVERSION_SCHEMA.check_match(version) - sslib_formats.ISO8601_DATETIME_SCHEMA.check_match(expiration_date) - sslib_formats.BOOLEAN_SCHEMA.check_match(consistent_snapshot) - sslib_formats.NAME_SCHEMA.check_match(repository_name) - sslib_formats.BOOLEAN_SCHEMA.check_match(use_length) - sslib_formats.BOOLEAN_SCHEMA.check_match(use_hashes) - - # Snapshot's 'fileinfodict' shall contain the version number of Root, - # Targets, and all delegated roles of the repository. - fileinfodict = {} - - length, hashes = _get_hashes_and_length_if_needed(use_length, use_hashes, - os.path.join(metadata_directory, TARGETS_FILENAME), storage_backend) - - targets_role = TARGETS_FILENAME[:-len(METADATA_EXTENSION)] - - targets_file_version = get_metadata_versioninfo(targets_role, - repository_name) - - # Make file info dictionary with make_metadata_fileinfo because - # in the tuf spec length and hashes are optional for all - # METAFILES in snapshot.json including the top-level targets file. - fileinfodict[TARGETS_FILENAME] = formats.make_metadata_fileinfo( - targets_file_version['version'], length, hashes) - - # Search the metadata directory and generate the versioninfo of all the role - # files found there. This information is stored in the 'meta' field of - # 'snapshot.json'. - - metadata_files = sorted(storage_backend.list_folder(metadata_directory), - reverse=True) - for metadata_filename in metadata_files: - # Strip the version number if 'consistent_snapshot' is True. - # Example: '10.django.json' --> 'django.json' - metadata_name, junk = _strip_version_number(metadata_filename, - consistent_snapshot) - - # All delegated roles are added to the snapshot file. - if metadata_filename.endswith(METADATA_EXTENSION): - rolename = metadata_filename[:-len(METADATA_EXTENSION)] - - # Obsolete role files may still be found. Ensure only roles loaded - # in the roledb are included in the Snapshot metadata. Since the - # snapshot and timestamp roles are not listed in snapshot.json, do not - # list these roles found in the metadata directory. - if roledb.role_exists(rolename, repository_name) and \ - rolename not in roledb.TOP_LEVEL_ROLES: - - length, hashes = _get_hashes_and_length_if_needed(use_length, use_hashes, - os.path.join(metadata_directory, metadata_filename), storage_backend) - - file_version = get_metadata_versioninfo(rolename, - repository_name) - - fileinfodict[metadata_name] = formats.make_metadata_fileinfo( - file_version['version'], length, hashes) - - else: - logger.debug('Metadata file has an unsupported file' - ' extension: ' + metadata_filename) - - # Generate the Snapshot metadata object. - # Use generalized build_dict_conforming_to_schema func to produce a dict that - # contains all the appropriate information for snapshot metadata, - # checking that the result conforms to the appropriate schema. - # TODO: Later, probably after the rewrite for TUF Issue #660, generalize - # further, upward, by replacing generate_targets_metadata, - # generate_root_metadata, etc. with one function that generates - # metadata, possibly rolling that upwards into the calling function. - # There are very few things that really need to be done differently. 
-  return formats.build_dict_conforming_to_schema(
-      formats.SNAPSHOT_SCHEMA,
-      version=version,
-      expires=expiration_date,
-      meta=fileinfodict)
-
-
-
-
-
-def generate_timestamp_metadata(snapshot_file_path, version, expiration_date,
-    storage_backend, repository_name, use_length=True, use_hashes=True):
-  """
-  <Purpose>
-    Generate the timestamp metadata object.  The 'snapshot.json' file must
-    exist.
-
-  <Arguments>
-    snapshot_file_path:
-      Path to the required snapshot metadata file.  The timestamp role
-      needs to calculate the file size and hash of this file.
-
-    version:
-      The timestamp's version number.  Clients use the version number to
-      determine if the downloaded version is newer than the one currently
-      trusted.
-
-    expiration_date:
-      The expiration date of the metadata file, conformant to
-      'securesystemslib.formats.ISO8601_DATETIME_SCHEMA'.
-
-    storage_backend:
-      An object which implements
-      securesystemslib.storage.StorageBackendInterface.
-
-    repository_name:
-      The name of the repository whose 'snapshot' version information is
-      retrieved.
-
-    use_length:
-      Whether to include the optional length attribute of the snapshot
-      metadata file in the timestamp metadata.
-      Default is True.
-
-    use_hashes:
-      Whether to include the optional hashes attribute of the snapshot
-      metadata file in the timestamp metadata.
-      Default is True.
-
-  <Exceptions>
-    securesystemslib.exceptions.FormatError, if the generated timestamp
-    metadata object cannot be formatted correctly, or one of the arguments is
-    improperly formatted.
-
-  <Side Effects>
-    None.
-
-  <Returns>
-    A timestamp metadata object, conformant to 'tuf.formats.TIMESTAMP_SCHEMA'.
-  """
-
-  # Do the arguments have the correct format?
-  # This check ensures arguments have the appropriate number of objects and
-  # object types, and that all dict keys are properly named.
-  # Raise 'securesystemslib.exceptions.FormatError' if the check fails.
-  sslib_formats.PATH_SCHEMA.check_match(snapshot_file_path)
-  formats.METADATAVERSION_SCHEMA.check_match(version)
-  sslib_formats.ISO8601_DATETIME_SCHEMA.check_match(expiration_date)
-  sslib_formats.NAME_SCHEMA.check_match(repository_name)
-  sslib_formats.BOOLEAN_SCHEMA.check_match(use_length)
-  sslib_formats.BOOLEAN_SCHEMA.check_match(use_hashes)
-
-  snapshot_fileinfo = {}
-
-  length, hashes = _get_hashes_and_length_if_needed(use_length, use_hashes,
-      snapshot_file_path, storage_backend)
-
-  snapshot_filename = os.path.basename(snapshot_file_path)
-  # Retrieve the versioninfo of the Snapshot metadata file.
-  snapshot_version = get_metadata_versioninfo('snapshot', repository_name)
-  snapshot_fileinfo[snapshot_filename] = \
-      formats.make_metadata_fileinfo(snapshot_version['version'],
-          length, hashes)
-
-  # Generate the timestamp metadata object.
-  # Use the generalized build_dict_conforming_to_schema func to produce a dict
-  # that contains all the appropriate information for timestamp metadata,
-  # checking that the result conforms to the appropriate schema.
-  # TODO: Later, probably after the rewrite for TUF Issue #660, generalize
-  #       further, upward, by replacing generate_targets_metadata,
-  #       generate_root_metadata, etc. with one function that generates
-  #       metadata, possibly rolling that upwards into the calling function.
-  #       There are very few things that really need to be done differently.
- return formats.build_dict_conforming_to_schema( - formats.TIMESTAMP_SCHEMA, - version=version, - expires=expiration_date, - meta=snapshot_fileinfo) - - - - - -def sign_metadata(metadata_object, keyids, filename, repository_name): - """ - - Sign a metadata object. If any of the keyids have already signed the file, - the old signature is replaced. The keys in 'keyids' must already be - loaded in 'keydb'. - - - metadata_object: - The metadata object to sign. For example, 'metadata' might correspond to - 'tuf.formats.ROOT_SCHEMA' or - 'tuf.formats.TARGETS_SCHEMA'. - - keyids: - The keyids list of the signing keys. - - filename: - The intended filename of the signed metadata object. - For example, 'root.json' or 'targets.json'. This function - does NOT save the signed metadata to this filename. - - repository_name: - The name of the repository. If not supplied, 'rolename' is added to the - 'default' repository. - - - securesystemslib.exceptions.FormatError, if a valid 'signable' object could - not be generated or the arguments are improperly formatted. - - securesystemslib.exceptions.Error, if an invalid keytype was found in the - keystore. - - - None. - - - A signable object conformant to 'tuf.formats.SIGNABLE_SCHEMA'. - """ - - # Do the arguments have the correct format? - # This check ensures arguments have the appropriate number of objects and - # object types, and that all dict keys are properly named. - # Raise 'securesystemslib.exceptions.FormatError' if the check fails. - formats.ANYROLE_SCHEMA.check_match(metadata_object) - sslib_formats.KEYIDS_SCHEMA.check_match(keyids) - sslib_formats.PATH_SCHEMA.check_match(filename) - sslib_formats.NAME_SCHEMA.check_match(repository_name) - - # Make sure the metadata is in 'signable' format. That is, - # it contains a 'signatures' field containing the result - # of signing the 'signed' field of 'metadata' with each - # keyid of 'keyids'. - signable = formats.make_signable(metadata_object) - - # Sign the metadata with each keyid in 'keyids'. 'signable' should have - # zero signatures (metadata_object contained none). - for keyid in keyids: - - # Load the signing key. - key = keydb.get_key(keyid, repository_name=repository_name) - # Generate the signature using the appropriate signing method. - if key['keytype'] in SUPPORTED_KEY_TYPES: - if 'private' in key['keyval']: - signed = sslib_formats.encode_canonical(signable['signed']).encode('utf-8') - try: - signature = sslib_keys.create_signature(key, signed) - signable['signatures'].append(signature) - - except Exception: - logger.warning('Unable to create signature for keyid: ' + repr(keyid)) - - else: - logger.debug('Private key unset. Skipping: ' + repr(keyid)) - - else: - raise sslib_exceptions.Error('The keydb contains a key with' - ' an invalid key type.' + repr(key['keytype'])) - - # Raise 'securesystemslib.exceptions.FormatError' if the resulting 'signable' - # is not formatted correctly. - try: - formats.check_signable_object_format(signable) - except exceptions.UnsignedMetadataError: - # Downgrade the error to a warning because a use case exists where - # metadata may be generated unsigned on one machine and signed on another. - logger.warning('Unsigned metadata object: ' + repr(signable)) - - - return signable - - - - - -def write_metadata_file(metadata, filename, version_number, consistent_snapshot, - storage_backend): - """ - - If necessary, write the 'metadata' signable object to 'filename'. 
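The signing flow in sign_metadata() above reduces to canonicalize-then-sign; a minimal standalone sketch using securesystemslib, with a freshly generated key standing in for one loaded from the keydb:

```python
from securesystemslib import formats as sslib_formats
from securesystemslib import keys as sslib_keys

key = sslib_keys.generate_ed25519_key()          # includes the private part
signed_portion = {"_type": "snapshot", "version": 1}

# Canonical JSON encoding, so every signer hashes identical bytes.
data = sslib_formats.encode_canonical(signed_portion).encode('utf-8')
signature = sslib_keys.create_signature(key, data)

signable = {"signed": signed_portion, "signatures": [signature]}
assert sslib_keys.verify_signature(key, signature, data)
```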
- - - metadata: - The object that will be saved to 'filename', conformant to - 'tuf.formats.SIGNABLE_SCHEMA'. - - filename: - The filename of the metadata to be written (e.g., 'root.json'). - - version_number: - The version number of the metadata file to be written. The version - number is needed for consistent snapshots, which prepend the version - number to 'filename'. - - consistent_snapshot: - Boolean that determines whether the metadata file's digest should be - prepended to the filename. - - storage_backend: - An object which implements - securesystemslib.storage.StorageBackendInterface. - - - securesystemslib.exceptions.FormatError, if the arguments are improperly - formatted. - - securesystemslib.exceptions.Error, if the directory of 'filename' does not - exist. - - Any other runtime (e.g., IO) exception. - - - The 'filename' file is created, or overwritten if it exists. - - - The filename of the written file. - """ - - # Do the arguments have the correct format? - # This check ensures arguments have the appropriate number of objects and - # object types, and that all dict keys are properly named. - # Raise 'securesystemslib.exceptions.FormatError' if the check fails. - formats.SIGNABLE_SCHEMA.check_match(metadata) - sslib_formats.PATH_SCHEMA.check_match(filename) - formats.METADATAVERSION_SCHEMA.check_match(version_number) - sslib_formats.BOOLEAN_SCHEMA.check_match(consistent_snapshot) - - if storage_backend is None: - storage_backend = sslib_storage.FilesystemBackend() - - # Generate the actual metadata file content of 'metadata'. Metadata is - # saved as JSON and includes formatting, such as indentation and sorted - # objects. The new digest of 'metadata' is also calculated to help determine - # if re-saving is required. - file_content = _get_written_metadata(metadata) - - # We previously verified whether new metadata needed to be written (i.e., has - # not been previously written or has changed). It is now assumed that the - # caller intends to write changes that have been marked as dirty. - - # The 'metadata' object is written to 'file_object'. To avoid partial - # metadata from being written, 'metadata' is first written to a temporary - # location (i.e., 'file_object') and then moved to 'filename'. - file_object = tempfile.TemporaryFile() - - # Serialize 'metadata' to the file-like object and then write 'file_object' - # to disk. The dictionary keys of 'metadata' are sorted and indentation is - # used. - file_object.write(file_content) - - if consistent_snapshot: - dirname, basename = os.path.split(filename) - basename = basename.split(METADATA_EXTENSION, 1)[0] - version_and_filename = str(version_number) + '.' + basename + METADATA_EXTENSION - written_consistent_filename = os.path.join(dirname, version_and_filename) - - # If we were to point consistent snapshots to 'written_filename', they - # would always point to the current version. Example: 1.root.json and - # 2.root.json -> root.json. If consistent snapshot is True, we should save - # the consistent snapshot and point 'written_filename' to it. 
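The version-prefix derivation used here is small enough to sketch on its own (the helper name is illustrative):

```python
import os

def consistent_filename(filename, version_number, extension='.json'):
    dirname, basename = os.path.split(filename)   # e.g. 'metadata', 'root.json'
    basename = basename.split(extension, 1)[0]    # 'root'
    return os.path.join(dirname,
                        str(version_number) + '.' + basename + extension)

print(consistent_filename('metadata/root.json', 2))   # metadata/2.root.json
```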
- logger.debug('Creating a consistent file for ' + repr(filename)) - logger.debug('Saving ' + repr(written_consistent_filename)) - sslib_util.persist_temp_file(file_object, - written_consistent_filename, should_close=False) - - else: - logger.debug('Not creating a consistent snapshot for ' + repr(filename)) - - logger.debug('Saving ' + repr(filename)) - storage_backend.put(file_object, filename) - - file_object.close() - - return filename - - - - - -def _log_status_of_top_level_roles(targets_directory, metadata_directory, - repository_name, storage_backend): - """ - Non-public function that logs whether any of the top-level roles contain an - invalid number of public and private keys, or an insufficient threshold of - signatures. Considering that the top-level metadata have to be verified in - the expected root -> targets -> snapshot -> timestamp order, this function - logs the error message and returns as soon as a required metadata file is - found to be invalid. It is assumed here that the delegated roles have been - written and verified. Example output: - - 'root' role contains 1 / 1 signatures. - 'targets' role contains 1 / 1 signatures. - 'snapshot' role contains 1 / 1 signatures. - 'timestamp' role contains 1 / 1 signatures. - - Note: Temporary metadata is generated so that file hashes & sizes may be - computed and verified against the attached signatures. 'metadata_directory' - should be a directory in a temporary repository directory. - """ - - # The expected full filenames of the top-level roles needed to write them to - # disk. - filenames = get_top_level_metadata_filenames(metadata_directory) - root_filename = filenames[ROOT_FILENAME] - targets_filename = filenames[TARGETS_FILENAME] - snapshot_filename = filenames[SNAPSHOT_FILENAME] - timestamp_filename = filenames[TIMESTAMP_FILENAME] - - # Verify that the top-level roles contain a valid number of public keys and - # that their corresponding private keys have been loaded. - for rolename in ['root', 'targets', 'snapshot', 'timestamp']: - try: - _check_role_keys(rolename, repository_name) - - except exceptions.InsufficientKeysError as e: - logger.info(str(e)) - - # Do the top-level roles contain a valid threshold of signatures? Top-level - # metadata is verified in Root -> Targets -> Snapshot -> Timestamp order. - # Verify the metadata of the Root role. - dirty_rolenames = roledb.get_dirty_roles(repository_name) - - root_roleinfo = roledb.get_roleinfo('root', repository_name) - root_is_dirty = None - if 'root' in dirty_rolenames: - root_is_dirty = True - - else: - root_is_dirty = False - - try: - signable, root_filename = \ - _generate_and_write_metadata('root', root_filename, targets_directory, - metadata_directory, storage_backend, repository_name=repository_name) - _log_status('root', signable, repository_name) - - # 'tuf.exceptions.UnsignedMetadataError' raised if metadata contains an - # invalid threshold of signatures. log the valid/threshold message, where - # valid < threshold. - except exceptions.UnsignedMetadataError as e: - _log_status('root', e.signable, repository_name) - return - - finally: - roledb.unmark_dirty(['root'], repository_name) - roledb.update_roleinfo('root', root_roleinfo, - mark_role_as_dirty=root_is_dirty, repository_name=repository_name) - - # Verify the metadata of the Targets role. 
- targets_roleinfo = roledb.get_roleinfo('targets', repository_name) - targets_is_dirty = None - if 'targets' in dirty_rolenames: - targets_is_dirty = True - - else: - targets_is_dirty = False - - try: - signable, targets_filename = \ - _generate_and_write_metadata('targets', targets_filename, - targets_directory, metadata_directory, storage_backend, - repository_name=repository_name) - _log_status('targets', signable, repository_name) - - except exceptions.UnsignedMetadataError as e: - _log_status('targets', e.signable, repository_name) - return - - finally: - roledb.unmark_dirty(['targets'], repository_name) - roledb.update_roleinfo('targets', targets_roleinfo, - mark_role_as_dirty=targets_is_dirty, repository_name=repository_name) - - # Verify the metadata of the snapshot role. - snapshot_roleinfo = roledb.get_roleinfo('snapshot', repository_name) - snapshot_is_dirty = None - if 'snapshot' in dirty_rolenames: - snapshot_is_dirty = True - - else: - snapshot_is_dirty = False - - filenames = {'root': root_filename, 'targets': targets_filename} - try: - signable, snapshot_filename = \ - _generate_and_write_metadata('snapshot', snapshot_filename, - targets_directory, metadata_directory, storage_backend, False, - filenames, repository_name=repository_name) - _log_status('snapshot', signable, repository_name) - - except exceptions.UnsignedMetadataError as e: - _log_status('snapshot', e.signable, repository_name) - return - - finally: - roledb.unmark_dirty(['snapshot'], repository_name) - roledb.update_roleinfo('snapshot', snapshot_roleinfo, - mark_role_as_dirty=snapshot_is_dirty, repository_name=repository_name) - - # Verify the metadata of the Timestamp role. - timestamp_roleinfo = roledb.get_roleinfo('timestamp', repository_name) - timestamp_is_dirty = None - if 'timestamp' in dirty_rolenames: - timestamp_is_dirty = True - - else: - timestamp_is_dirty = False - - filenames = {'snapshot': snapshot_filename} - try: - signable, timestamp_filename = \ - _generate_and_write_metadata('timestamp', timestamp_filename, - targets_directory, metadata_directory, storage_backend, - False, filenames, repository_name=repository_name) - _log_status('timestamp', signable, repository_name) - - except exceptions.UnsignedMetadataError as e: - _log_status('timestamp', e.signable, repository_name) - return - - finally: - roledb.unmark_dirty(['timestamp'], repository_name) - roledb.update_roleinfo('timestamp', timestamp_roleinfo, - mark_role_as_dirty=timestamp_is_dirty, repository_name=repository_name) - - - -def _log_status(rolename, signable, repository_name): - """ - Non-public function logs the number of (good/threshold) signatures of - 'rolename'. - """ - - status = sig.get_signature_status(signable, rolename, repository_name) - - logger.info(repr(rolename) + ' role contains ' + \ - repr(len(status['good_sigs'])) + ' / ' + repr(status['threshold']) + \ - ' signatures.') - - - - - -def create_tuf_client_directory(repository_directory, client_directory): - """ - - Create client directory structure as 'tuf.client.updater' expects it. - Metadata files downloaded from a remote TUF repository are saved to - 'client_directory'. - The Root file must initially exist before an update request can be - satisfied. create_tuf_client_directory() ensures the minimum metadata - is copied and that required directories ('previous' and 'current') are - created in 'client_directory'. Software updaters integrating TUF may - use the client directory created as an initial copy of the repository's - metadata. 
- - - repository_directory: - The path of the root repository directory. The 'metadata' and 'targets' - sub-directories should be available in 'repository_directory'. The - metadata files of 'repository_directory' are copied to 'client_directory'. - - client_directory: - The path of the root client directory. The 'current' and 'previous' - sub-directories are created and will store the metadata files copied - from 'repository_directory'. 'client_directory' will store metadata - and target files downloaded from a TUF repository. - - - securesystemslib.exceptions.FormatError, if the arguments are improperly - formatted. - - tuf.exceptions.RepositoryError, if the metadata directory in - 'client_directory' already exists. - - - Copies metadata files and directories from 'repository_directory' to - 'client_directory'. Parent directories are created if they do not exist. - - - None. - """ - - # Do the arguments have the correct format? - # This check ensures arguments have the appropriate number of objects and - # object types, and that all dict keys are properly named. - # Raise 'securesystemslib.exceptions.FormatError' if the check fails. - sslib_formats.PATH_SCHEMA.check_match(repository_directory) - sslib_formats.PATH_SCHEMA.check_match(client_directory) - - # Set the absolute path of the Repository's metadata directory. The metadata - # directory should be the one served by the Live repository. At a minimum, - # the repository's root file must be copied. - repository_directory = os.path.abspath(repository_directory) - metadata_directory = os.path.join(repository_directory, - METADATA_DIRECTORY_NAME) - - # Set the client's metadata directory, which will store the metadata copied - # from the repository directory set above. - client_directory = os.path.abspath(client_directory) - client_metadata_directory = os.path.join(client_directory, - METADATA_DIRECTORY_NAME) - - # If the client's metadata directory does not already exist, create it and - # any of its parent directories, otherwise raise an exception. An exception - # is raised to avoid accidentally overwriting previous metadata. - try: - os.makedirs(client_metadata_directory) - - except OSError as e: - if e.errno == errno.EEXIST: - message = 'Cannot create a fresh client metadata directory: ' +\ - repr(client_metadata_directory) + '. Already exists.' - raise exceptions.RepositoryError(message) - - # Testing of non-errno.EEXIST exceptions have been verified on all - # supported OSs. An unexpected exception (the '/' directory exists, rather - # than disallowed path) is possible on Travis, so the '#pragma: no branch' - # below is included to prevent coverage failure. - else: #pragma: no branch - raise - - # Move all metadata to the client's 'current' and 'previous' directories. - # The root metadata file MUST exist in '{client_metadata_directory}/current'. - # 'tuf.client.updater' expects the 'current' and 'previous' directories to - # exist under 'metadata'. - client_current = os.path.join(client_metadata_directory, 'current') - client_previous = os.path.join(client_metadata_directory, 'previous') - shutil.copytree(metadata_directory, client_current) - shutil.copytree(metadata_directory, client_previous) - - - -def disable_console_log_messages(): - """ - - Disable logger messages printed to the console. For example, repository - maintainers may want to call this function if many roles will be sharing - keys, otherwise detected duplicate keys will continually log a warning - message. - - - None. - - - None. 
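The two copies at the core of create_tuf_client_directory(), and the layout they produce, as a sketch with illustrative paths (shutil.copytree() requires that the destinations not already exist):

```python
# {client_directory}/
#   metadata/
#     current/    <- root.json, targets.json, snapshot.json, timestamp.json
#     previous/   <- identical initial copy
import shutil

shutil.copytree('tufrepo/metadata', 'tufclient/metadata/current')
shutil.copytree('tufrepo/metadata', 'tufclient/metadata/previous')
```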
- - - Removes the 'tuf.log' console handler, added by default when - 'tuf.repository_tool.py' is imported. - - - None. - """ - - log.remove_console_handler() - - - -def keys_to_keydict(keys): - """ - - Iterate over a list of keys and return a list of keyids and a dict mapping - keyid to key metadata - - - keys: - A list of key objects conforming to - securesystemslib.formats.ANYKEYLIST_SCHEMA. - - - keyids: - A list of keyids conforming to securesystemslib.formats.KEYID_SCHEMA - keydict: - A dictionary conforming to securesystemslib.formats.KEYDICT_SCHEMA - """ - keyids = [] - keydict = {} - - for key in keys: - keyid = key['keyid'] - key_metadata_format = sslib_keys.format_keyval_to_metadata( - key['keytype'], key['scheme'], key['keyval']) - - new_keydict = {keyid: key_metadata_format} - keydict.update(new_keydict) - keyids.append(keyid) - return keyids, keydict - - - - -if __name__ == '__main__': - # The interactive sessions of the documentation strings can - # be tested by running repository_lib.py as a standalone module: - # $ python repository_lib.py. - import doctest - doctest.testmod() diff --git a/tuf/repository_tool.py b/tuf/repository_tool.py deleted file mode 100755 index af78b2ba32..0000000000 --- a/tuf/repository_tool.py +++ /dev/null @@ -1,3291 +0,0 @@ - -#!/usr/bin/env python - -# Copyright 2013 - 2017, New York University and the TUF contributors -# SPDX-License-Identifier: MIT OR Apache-2.0 - -""" - - repository_tool.py - - - Vladimir Diaz - - - October 19, 2013 - - - See LICENSE-MIT OR LICENSE for licensing information. - - - Provide a tool that can create a TUF repository. It can be used with the - Python interpreter in interactive mode, or imported directly into a Python - module. See 'tuf/README' for the complete guide to using - 'tuf.repository_tool.py'. 
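An illustrative call of keys_to_keydict(), assuming it is imported from this module; freshly generated keys stand in for real repository keys:

```python
from securesystemslib import keys as sslib_keys
from tuf.repository_lib import keys_to_keydict

key_a = sslib_keys.generate_ed25519_key()
key_b = sslib_keys.generate_ecdsa_key()

keyids, keydict = keys_to_keydict([key_a, key_b])
# keyids  -> [key_a['keyid'], key_b['keyid']]
# keydict -> {keyid: public-key metadata, ...}; the private material is
# omitted because format_keyval_to_metadata() defaults to the public portion.
```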
-""" - -import os -import time -import datetime -import logging -import tempfile -import shutil -import json - -from collections import deque - -from securesystemslib import exceptions as sslib_exceptions -from securesystemslib import formats as sslib_formats -from securesystemslib import util as sslib_util -from securesystemslib import storage as sslib_storage - -from tuf import exceptions -from tuf import formats -from tuf import keydb -from tuf import log -from tuf import repository_lib as repo_lib -from tuf import roledb - - -# Copy API -# pylint: disable=unused-import - -# Copy generic repository API functions to be used via `repository_tool` -from tuf.repository_lib import ( - create_tuf_client_directory, - disable_console_log_messages) - - -# Copy key-related API functions to be used via `repository_tool` -from tuf.repository_lib import ( - import_rsa_privatekey_from_file, - import_ed25519_privatekey_from_file) - -from securesystemslib.interface import ( - generate_and_write_rsa_keypair, - generate_and_write_rsa_keypair_with_prompt, - generate_and_write_unencrypted_rsa_keypair, - generate_and_write_ecdsa_keypair, - generate_and_write_ecdsa_keypair_with_prompt, - generate_and_write_unencrypted_ecdsa_keypair, - generate_and_write_ed25519_keypair, - generate_and_write_ed25519_keypair_with_prompt, - generate_and_write_unencrypted_ed25519_keypair, - import_rsa_publickey_from_file, - import_ecdsa_publickey_from_file, - import_ed25519_publickey_from_file, - import_ecdsa_privatekey_from_file) - -from securesystemslib.keys import ( - format_metadata_to_key, - generate_rsa_key, - generate_ecdsa_key, - generate_ed25519_key, - import_rsakey_from_pem, - import_ecdsakey_from_pem) - - -# See 'log.py' to learn how logging is handled in TUF. -logger = logging.getLogger(__name__) - -# Add a console handler so that users are aware of potentially unintended -# states, such as multiple roles that share keys. -log.add_console_handler() -log.set_console_log_level(logging.INFO) - -# Recommended RSA key sizes: -# https://en.wikipedia.org/wiki/Key_size#Asymmetric_algorithm_key_lengths -# Based on the above, RSA keys of size 3072 are expected to provide security -# through 2031 and beyond. -DEFAULT_RSA_KEY_BITS=3072 - -# The default number of hashed bin delegations -DEFAULT_NUM_BINS=1024 - -# The targets and metadata directory names. Metadata files are written -# to the staged metadata directory instead of the "live" one. -METADATA_STAGED_DIRECTORY_NAME = 'metadata.staged' -METADATA_DIRECTORY_NAME = 'metadata' -TARGETS_DIRECTORY_NAME = 'targets' - -# The extension of TUF metadata. -METADATA_EXTENSION = '.json' - -# Expiration date delta, in seconds, of the top-level roles. A metadata -# expiration date is set by taking the current time and adding the expiration -# seconds listed below. - -# Initial 'root.json' expiration time of 1 year. -ROOT_EXPIRATION = 31556900 - -# Initial 'targets.json' expiration time of 3 months. -TARGETS_EXPIRATION = 7889230 - -# Initial 'snapshot.json' expiration time of 1 week. -SNAPSHOT_EXPIRATION = 604800 - -# Initial 'timestamp.json' expiration time of 1 day. -TIMESTAMP_EXPIRATION = 86400 - - -class Repository(object): - """ - - Represent a TUF repository that contains the metadata of the top-level - roles, including all those delegated from the 'targets.json' role. The - repository object returned provides access to the top-level roles, and any - delegated targets that are added as the repository is modified. 
For - example, a Repository object named 'repository' provides the following - access by default: - - repository.root.version = 2 - repository.timestamp.expiration = datetime.datetime(2015, 8, 8, 12, 0) - repository.snapshot.add_verification_key(...) - repository.targets.delegate('unclaimed', ...) - - Delegating a role from 'targets' updates the attributes of the parent - delegation, which then provides: - - repository.targets('unclaimed').add_verification_key(...) - - - - repository_directory: - The root folder of the repository that contains the metadata and targets - sub-directories. - - metadata_directory: - The metadata sub-directory contains the files of the top-level - roles, including all roles delegated from 'targets.json'. - - targets_directory: - The targets sub-directory contains all the target files that are - downloaded by clients and are referenced in TUF Metadata. The hashes and - file lengths are listed in Metadata files so that they are securely - downloaded. Metadata files are similarly referenced in the top-level - metadata. - - storage_backend: - An object which implements - securesystemslib.storage.StorageBackendInterface. - - repository_name: - The name of the repository. If not supplied, 'rolename' is added to the - 'default' repository. - - use_timestamp_length: - Whether to include the optional length attribute of the snapshot - metadata file in the timestamp metadata. - Default is True. - - use_timestamp_hashes: - Whether to include the optional hashes attribute of the snapshot - metadata file in the timestamp metadata. - Default is True. - - use_snapshot_length: - Whether to include the optional length attribute for targets - metadata files in the snapshot metadata. - Default is False to save bandwidth but without losing security - from rollback attacks. - Read more at section 5.6 from the Mercury paper: - https://www.usenix.org/conference/atc17/technical-sessions/presentation/kuppusamy - - use_snapshot_hashes: - Whether to include the optional hashes attribute for targets - metadata files in the snapshot metadata. - Default is False to save bandwidth but without losing security - from rollback attacks. - Read more at section 5.6 from the Mercury paper: - https://www.usenix.org/conference/atc17/technical-sessions/presentation/kuppusamy - - - securesystemslib.exceptions.FormatError, if the arguments are improperly - formatted. - - - Creates top-level role objects and assigns them as attributes. - - - A Repository object that contains default Metadata objects for the top-level - roles. - """ - - def __init__(self, repository_directory, metadata_directory, - targets_directory, storage_backend, repository_name='default', - use_timestamp_length=True, use_timestamp_hashes=True, - use_snapshot_length=False, use_snapshot_hashes=False): - - # Do the arguments have the correct format? - # Ensure the arguments have the appropriate number of objects and object - # types, and that all dict keys are properly named. Raise - # 'securesystemslib.exceptions.FormatError' if any are improperly formatted. 
- sslib_formats.PATH_SCHEMA.check_match(repository_directory) - sslib_formats.PATH_SCHEMA.check_match(metadata_directory) - sslib_formats.PATH_SCHEMA.check_match(targets_directory) - sslib_formats.NAME_SCHEMA.check_match(repository_name) - sslib_formats.BOOLEAN_SCHEMA.check_match(use_timestamp_length) - sslib_formats.BOOLEAN_SCHEMA.check_match(use_timestamp_hashes) - sslib_formats.BOOLEAN_SCHEMA.check_match(use_snapshot_length) - sslib_formats.BOOLEAN_SCHEMA.check_match(use_snapshot_hashes) - - self._repository_directory = repository_directory - self._metadata_directory = metadata_directory - self._targets_directory = targets_directory - self._repository_name = repository_name - self._storage_backend = storage_backend - self._use_timestamp_length = use_timestamp_length - self._use_timestamp_hashes = use_timestamp_hashes - self._use_snapshot_length = use_snapshot_length - self._use_snapshot_hashes = use_snapshot_hashes - - try: - roledb.create_roledb(repository_name) - keydb.create_keydb(repository_name) - - except sslib_exceptions.InvalidNameError: - logger.debug(repr(repository_name) + ' already exists. Overwriting' - ' its contents.') - - # Set the top-level role objects. - self.root = Root(self._repository_name) - self.snapshot = Snapshot(self._repository_name) - self.timestamp = Timestamp(self._repository_name) - self.targets = Targets(self._targets_directory, 'targets', - repository_name=self._repository_name) - - - - def writeall(self, consistent_snapshot=False, use_existing_fileinfo=False): - """ - - Write all the JSON Metadata objects to their corresponding files for - roles which have changed. - writeall() raises an exception if any of the role metadata to be written - to disk is invalid, such as an insufficient threshold of signatures, - missing private keys, etc. - - - consistent_snapshot: - A boolean indicating whether role metadata files should have their - version numbers as filename prefix when written to disk, i.e - 'VERSION.ROLENAME.json', and target files should be copied to a - filename that has their hex digest as filename prefix, i.e - 'HASH.FILENAME'. Note that: - - root metadata is always written with a version prefix, independently - of 'consistent_snapshot' - - the latest version of each metadata file is always also written - without version prefix - - target files are only copied to a hash-prefixed filename if - 'consistent_snapshot' is True and 'use_existing_fileinfo' is False. - If both are True hash-prefixed target file copies must be created - out-of-band. - - use_existing_fileinfo: - Boolean indicating whether the fileinfo dicts in the roledb should be - written as-is (True) or whether hashes should be generated (False, - requires access to the targets files on-disk). - - - tuf.exceptions.UnsignedMetadataError, if any of the top-level - and delegated roles do not have the minimum threshold of signatures. - - - Creates metadata files in the repository's metadata directory. - - - None. - """ - - # Do the arguments have the correct format? - # Ensure the arguments have the appropriate number of objects and object - # types, and that all dict keys are properly named. Raise - # 'securesystemslib.exceptions.FormatError' if any are improperly - # formatted. - sslib_formats.BOOLEAN_SCHEMA.check_match(consistent_snapshot) - - # At this point, keydb and roledb must be fully populated, - # otherwise writeall() throws a 'tuf.exceptions.UnsignedMetadataError' for - # the top-level roles. exception if any of the top-level roles are missing - # signatures, keys, etc. 
- - # Write the metadata files of all the Targets roles that are dirty (i.e., - # have been modified via roledb.update_roleinfo()). - filenames = {'root': os.path.join(self._metadata_directory, - repo_lib.ROOT_FILENAME), 'targets': os.path.join(self._metadata_directory, - repo_lib.TARGETS_FILENAME), 'snapshot': os.path.join(self._metadata_directory, - repo_lib.SNAPSHOT_FILENAME), 'timestamp': os.path.join(self._metadata_directory, - repo_lib.TIMESTAMP_FILENAME)} - - snapshot_signable = None - dirty_rolenames = roledb.get_dirty_roles(self._repository_name) - - for dirty_rolename in dirty_rolenames: - - # Ignore top-level roles, they will be generated later in this method. - if dirty_rolename in roledb.TOP_LEVEL_ROLES: - continue - - dirty_filename = os.path.join(self._metadata_directory, - dirty_rolename + METADATA_EXTENSION) - repo_lib._generate_and_write_metadata(dirty_rolename, dirty_filename, - self._targets_directory, self._metadata_directory, - self._storage_backend, consistent_snapshot, filenames, - repository_name=self._repository_name, - use_existing_fileinfo=use_existing_fileinfo) - - # Metadata should be written in (delegated targets -> root -> targets -> - # snapshot -> timestamp) order. Begin by generating the 'root.json' - # metadata file. _generate_and_write_metadata() raises a - # 'securesystemslib.exceptions.Error' exception if the metadata cannot be - # written. - root_roleinfo = roledb.get_roleinfo('root', self._repository_name) - old_consistent_snapshot = root_roleinfo['consistent_snapshot'] - if 'root' in dirty_rolenames or consistent_snapshot != old_consistent_snapshot: - repo_lib._generate_and_write_metadata('root', filenames['root'], - self._targets_directory, self._metadata_directory, - self._storage_backend, consistent_snapshot, filenames, - repository_name=self._repository_name) - - # Generate the 'targets.json' metadata file. - if 'targets' in dirty_rolenames: - repo_lib._generate_and_write_metadata('targets', filenames['targets'], - self._targets_directory, self._metadata_directory, - self._storage_backend, consistent_snapshot, - repository_name=self._repository_name, - use_existing_fileinfo=use_existing_fileinfo) - - # Generate the 'snapshot.json' metadata file. - if 'snapshot' in dirty_rolenames: - snapshot_signable, junk = repo_lib._generate_and_write_metadata('snapshot', - filenames['snapshot'], self._targets_directory, - self._metadata_directory, self._storage_backend, - consistent_snapshot, filenames, - repository_name=self._repository_name, - use_snapshot_length=self._use_snapshot_length, - use_snapshot_hashes=self._use_snapshot_hashes) - - # Generate the 'timestamp.json' metadata file. - if 'timestamp' in dirty_rolenames: - repo_lib._generate_and_write_metadata('timestamp', filenames['timestamp'], - self._targets_directory, self._metadata_directory, - self._storage_backend, consistent_snapshot, - filenames, repository_name=self._repository_name, - use_timestamp_length=self._use_timestamp_length, - use_timestamp_hashes=self._use_timestamp_hashes) - - roledb.unmark_dirty(dirty_rolenames, self._repository_name) - - # Delete the metadata of roles no longer in 'roledb'. Obsolete roles - # may have been revoked and should no longer have their metadata files - # available on disk, otherwise loading a repository may unintentionally - # load them. 
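A hedged end-to-end sketch of driving writeall(); key generation and signing-key loading are elided, and without them writeall() raises UnsignedMetadataError (paths are illustrative):

```python
from securesystemslib.storage import FilesystemBackend

repository = Repository('tufrepo', 'tufrepo/metadata.staged',
                        'tufrepo/targets', FilesystemBackend())

# ... add verification keys and load signing keys for every top-level role ...

repository.targets.add_target('foo-1.0.tgz')   # marks 'targets' as dirty
repository.writeall()   # dirty delegated roles first, then root, targets,
                        # snapshot, and timestamp, in that order
```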
- if snapshot_signable is not None: - repo_lib._delete_obsolete_metadata(self._metadata_directory, - snapshot_signable['signed'], consistent_snapshot, self._repository_name, - self._storage_backend) - - - - def write(self, rolename, consistent_snapshot=False, increment_version_number=True, - use_existing_fileinfo=False): - """ - - Write the JSON metadata for 'rolename' to its corresponding file on disk. - Unlike writeall(), write() allows the metadata file to contain an invalid - threshold of signatures. - - - rolename: - The name of the role to be written to disk. - - consistent_snapshot: - A boolean indicating whether the role metadata file should have its - version number as filename prefix when written to disk, i.e - 'VERSION.ROLENAME.json'. Note that: - - root metadata is always written with a version prefix, independently - of 'consistent_snapshot' - - the latest version of the metadata file is always also written - without version prefix - - if the metadata is targets metadata and 'consistent_snapshot' is - True, the corresponding target files are copied to a filename with - their hex digest as filename prefix, i.e 'HASH.FILENAME', unless - 'use_existing_fileinfo' is also True. - If 'consistent_snapshot' and 'use_existing_fileinfo' both are True, - hash-prefixed target file copies must be created out-of-band. - - increment_version_number: - Boolean indicating whether the version number of 'rolename' should be - automatically incremented. - - use_existing_fileinfo: - Boolean indicating whether the fileinfo dicts in the roledb should be - written as-is (True) or whether hashes should be generated (False, - requires access to the targets files on-disk). - - - None. - - - Creates metadata files in the repository's metadata directory. - - - None. - """ - - rolename_filename = os.path.join(self._metadata_directory, - rolename + METADATA_EXTENSION) - - filenames = {'root': os.path.join(self._metadata_directory, repo_lib.ROOT_FILENAME), - 'targets': os.path.join(self._metadata_directory, repo_lib.TARGETS_FILENAME), - 'snapshot': os.path.join(self._metadata_directory, repo_lib.SNAPSHOT_FILENAME), - 'timestamp': os.path.join(self._metadata_directory, repo_lib.TIMESTAMP_FILENAME)} - - repo_lib._generate_and_write_metadata(rolename, rolename_filename, - self._targets_directory, self._metadata_directory, - self._storage_backend, consistent_snapshot, - filenames=filenames, allow_partially_signed=True, - increment_version_number=increment_version_number, - repository_name=self._repository_name, - use_existing_fileinfo=use_existing_fileinfo) - - # Ensure 'rolename' is no longer marked as dirty after the successful write(). - roledb.unmark_dirty([rolename], self._repository_name) - - - - - - def status(self): - """ - - Determine the status of the top-level roles. status() checks if each - role provides sufficient public and private keys, signatures, and that a - valid metadata file is generated if writeall() or write() were to be - called. Metadata files are temporarily written so that file hashes and - lengths may be verified, determine if delegated role trust is fully - obeyed, and target paths valid according to parent roles. status() does - not do a simple check for number of threshold keys and signatures. - - - None. - - - None. - - - Generates and writes temporary metadata files. - - - None. - """ - - temp_repository_directory = None - - # Generate and write temporary metadata so that full verification of - # metadata is possible, such as verifying signatures, digests, and file - # content. 
Ensure temporary files are removed after verification results - # are completed. - try: - temp_repository_directory = tempfile.mkdtemp() - targets_directory = self._targets_directory - metadata_directory = os.path.join(temp_repository_directory, - METADATA_STAGED_DIRECTORY_NAME) - os.mkdir(metadata_directory) - - # Verify the top-level roles and log the results. - repo_lib._log_status_of_top_level_roles(targets_directory, - metadata_directory, self._repository_name, self._storage_backend) - - finally: - shutil.rmtree(temp_repository_directory, ignore_errors=True) - - - - def dirty_roles(self): - """ - - Print/log the roles that have been modified. For example, if some role's - version number is changed (repository.timestamp.version = 2), it is - considered dirty and will be included in the list of dirty roles - printed/logged here. Unlike status(), signatures, public keys, targets, - etc. are not verified. status() should be called instead if the caller - would like to verify if a valid role file is generated if writeall() were - to be called. - - - None. - - - None. - - - None. - - - None. - """ - - logger.info('Dirty roles: ' + str(roledb.get_dirty_roles(self._repository_name))) - - - - def mark_dirty(self, roles): - """ - - Mark the list of 'roles' as dirty. - - - roles: - A list of roles to mark as dirty. on the next write, these roles - will be written to disk. - - - None. - - - None. - - - None. - """ - - roledb.mark_dirty(roles, self._repository_name) - - - - def unmark_dirty(self, roles): - """ - - No longer mark the list of 'roles' as dirty. - - - roles: - A list of roles to mark as dirty. on the next write, these roles - will be written to disk. - - - None. - - - None. - - - None. - """ - - roledb.unmark_dirty(roles, self._repository_name) - - - - @staticmethod - def get_filepaths_in_directory(files_directory, recursive_walk=False, - followlinks=True): - """ - - Walk the given 'files_directory' and build a list of target files found. - - - files_directory: - The path to a directory of target files. - - recursive_walk: - To recursively walk the directory, set recursive_walk=True. - - followlinks: - To follow symbolic links, set followlinks=True. - - - securesystemslib.exceptions.FormatError, if the arguments are improperly - formatted. - - securesystemslib.exceptions.Error, if 'file_directory' is not a valid - directory. - - Python IO exceptions. - - - None. - - - A list of absolute paths to target files in the given 'files_directory'. - """ - - # Do the arguments have the correct format? - # Ensure the arguments have the appropriate number of objects and object - # types, and that all dict keys are properly named. Raise - # 'securesystemslib.exceptions.FormatError' if any are improperly formatted. - sslib_formats.PATH_SCHEMA.check_match(files_directory) - sslib_formats.BOOLEAN_SCHEMA.check_match(recursive_walk) - sslib_formats.BOOLEAN_SCHEMA.check_match(followlinks) - - # Ensure a valid directory is given. - if not os.path.isdir(files_directory): - raise sslib_exceptions.Error(repr(files_directory) + ' is not' - ' a directory.') - - # A list of the target filepaths found in 'files_directory'. 
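The non-recursive mode below relies on mutating the directory list that os.walk() yields; a standalone sketch of that pruning trick:

```python
import os

def filepaths(files_directory, recursive_walk=False):
    found = []
    for dirpath, dirnames, filenames in os.walk(files_directory):
        for filename in filenames:
            found.append(os.path.join(os.path.abspath(dirpath), filename))
        if not recursive_walk:
            del dirnames[:]   # emptying the list stops os.walk from descending
    return found
```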
- targets = [] - - # FIXME: We need a way to tell Python 2, but not Python 3, to return - # filenames in Unicode; see #61 and: - # http://docs.python.org/howto/unicode.html#unicode-filenames - for dirpath, dirnames, filenames in os.walk(files_directory, - followlinks=followlinks): - for filename in filenames: - full_target_path = os.path.join(os.path.abspath(dirpath), filename) - targets.append(full_target_path) - - # Prune the subdirectories to walk right now if we do not wish to - # recursively walk 'files_directory'. - if recursive_walk is False: - del dirnames[:] - - else: - logger.debug('Not pruning subdirectories ' + repr(dirnames)) - - return targets - - - - - -class Metadata(object): - """ - - Provide a base class to represent a TUF Metadata role. There are four - top-level roles: Root, Targets, Snapshot, and Timestamp. The Metadata - class provides methods that are needed by all top-level roles, such as - adding and removing public keys, private keys, and signatures. Metadata - attributes, such as rolename, version, threshold, expiration, and key list - are also provided by the Metadata base class. - - - None. - - - None. - - - None. - - - None. - """ - - def __init__(self): - self._rolename = None - self._repository_name = None - - - def add_verification_key(self, key, expires=None): - """ - - Add 'key' to the role. Adding a key, which should contain only the - public portion, signifies the corresponding private key and signatures - the role is expected to provide. A threshold of signatures is required - for a role to be considered properly signed. If a metadata file contains - an insufficient threshold of signatures, it must not be accepted. - - >>> - >>> - >>> - - - key: - The role key to be added, conformant to - 'securesystemslib.formats.ANYKEY_SCHEMA'. Adding a public key to a role - means that its corresponding private key must generate and add its - signature to the role. A threshold number of signatures is required - for a role to be fully signed. - - expires: - The date in which 'key' expires. 'expires' is a datetime.datetime() - object. - - - securesystemslib.exceptions.FormatError, if any of the arguments are - improperly formatted. - - securesystemslib.exceptions.Error, if the 'expires' datetime has already - expired. - - - The role's entries in 'keydb' and 'roledb' are updated. - - - None. - """ - - # Does 'key' have the correct format? - # Ensure the arguments have the appropriate number of objects and object - # types, and that all dict keys are properly named. Raise - # 'securesystemslib.exceptions.FormatError' if any are improperly formatted. - sslib_formats.ANYKEY_SCHEMA.check_match(key) - - # If 'expires' is unset, choose a default expiration for 'key'. By - # default, Root, Targets, Snapshot, and Timestamp keys are set to expire - # 1 year, 3 months, 1 week, and 1 day from the current time, respectively. 
-    if expires is None:
-      if self.rolename == 'root':
-        expires = \
-          formats.unix_timestamp_to_datetime(int(time.time() + ROOT_EXPIRATION))
-
-      elif self.rolename == 'targets':
-        expires = \
-          formats.unix_timestamp_to_datetime(int(time.time() + TARGETS_EXPIRATION))
-
-      elif self.rolename == 'snapshot':
-        expires = \
-          formats.unix_timestamp_to_datetime(int(time.time() + SNAPSHOT_EXPIRATION))
-
-      elif self.rolename == 'timestamp':
-        expires = \
-          formats.unix_timestamp_to_datetime(int(time.time() + TIMESTAMP_EXPIRATION))
-
-      else:
-        expires = \
-          formats.unix_timestamp_to_datetime(int(time.time() + TIMESTAMP_EXPIRATION))
-
-    # Is 'expires' a datetime.datetime() object?
-    # Raise 'securesystemslib.exceptions.FormatError' if not.
-    if not isinstance(expires, datetime.datetime):
-      raise sslib_exceptions.FormatError(repr(expires) + ' is not a'
-          ' datetime.datetime() object.')
-
-    # Truncate the microseconds value to produce a correct schema string
-    # of the form 'yyyy-mm-ddThh:mm:ssZ'.
-    expires = expires.replace(microsecond = 0)
-
-    # Ensure the expiration has not already passed.
-    current_datetime = \
-      formats.unix_timestamp_to_datetime(int(time.time()))
-
-    if expires < current_datetime:
-      raise sslib_exceptions.Error(repr(key) + ' has already'
-          ' expired.')
-
-    # Update the key's 'expires' entry.
-    expires = expires.isoformat() + 'Z'
-    key['expires'] = expires
-
-    # Ensure 'key', which should contain the public portion, is added to
-    # 'keydb'.  Add 'key' to the list of recognized keys.
-    # Keys may be shared, so do not raise an exception if 'key' has already
-    # been loaded.
-    try:
-      keydb.add_key(key, repository_name=self._repository_name)
-
-    except exceptions.KeyAlreadyExistsError:
-      logger.warning('Adding a verification key that has already been used.')
-
-    keyid = key['keyid']
-    roleinfo = roledb.get_roleinfo(self.rolename, self._repository_name)
-
-    # Save the keyids that are being replaced since certain roles will need to
-    # re-sign metadata with these keys (e.g., root).  Use list() to make a copy
-    # of roleinfo['keyids'] to ensure we're modifying distinct lists.
-    previous_keyids = list(roleinfo['keyids'])
-
-    # Add 'key' to the role's entry in 'roledb', and avoid duplicates.
-    if keyid not in roleinfo['keyids']:
-      roleinfo['keyids'].append(keyid)
-      roleinfo['previous_keyids'] = previous_keyids
-
-      roledb.update_roleinfo(self._rolename, roleinfo,
-          repository_name=self._repository_name)
-
-
-
-  def remove_verification_key(self, key):
-    """
-    <Purpose>
-      Remove 'key' from the role's currently recognized list of role keys.
-      The role expects a threshold number of signatures.
-
-      >>>
-      >>>
-      >>>
-
-    <Arguments>
-      key:
-        The role's key, conformant to 'securesystemslib.formats.ANYKEY_SCHEMA'.
-        'key' should contain only the public portion, as only the public key
-        is needed.  The 'add_verification_key()' method should have previously
-        added 'key'.
-
-    <Exceptions>
-      securesystemslib.exceptions.FormatError, if the 'key' argument is
-      improperly formatted.
-
-      securesystemslib.exceptions.Error, if the 'key' argument has not been
-      previously added.
-
-    <Side Effects>
-      Updates the role's 'roledb' entry.
-
-    <Returns>
-      None.
-    """
-
-    # Does 'key' have the correct format?
-    # Ensure the arguments have the appropriate number of objects and object
-    # types, and that all dict keys are properly named.  Raise
-    # 'securesystemslib.exceptions.FormatError' if any are improperly formatted.
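A brief sketch of the key-management flow around add_verification_key() above and load_signing_key() (documented just below), using helper functions that repository_tool re-exports from securesystemslib; the key path and password are illustrative:

```python
# Sketch: create an Ed25519 keypair on disk, trust its public half for the
# root role, and load the private half for signing (illustrative values).
repo_tool.generate_and_write_ed25519_keypair('/path/to/root_key', password='pw')

public_root_key = repo_tool.import_ed25519_publickey_from_file('/path/to/root_key.pub')
repository.root.add_verification_key(public_root_key)   # default 1-year 'expires' is set on the key

private_root_key = repo_tool.import_ed25519_privatekey_from_file('/path/to/root_key',
    password='pw')
repository.root.load_signing_key(private_root_key)      # signatures can now be generated at write time
```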
- sslib_formats.ANYKEY_SCHEMA.check_match(key) - - keyid = key['keyid'] - roleinfo = roledb.get_roleinfo(self.rolename, self._repository_name) - - if keyid in roleinfo['keyids']: - roleinfo['keyids'].remove(keyid) - - roledb.update_roleinfo(self._rolename, roleinfo, - repository_name=self._repository_name) - - else: - raise sslib_exceptions.Error('Verification key not found.') - - - - def load_signing_key(self, key): - """ - - Load the role key, which must contain the private portion, so that role - signatures may be generated when the role's metadata file is eventually - written to disk. - - >>> - >>> - >>> - - - key: - The role's key, conformant to 'securesystemslib.formats.ANYKEY_SCHEMA'. - It must contain the private key, so that role signatures may be - generated when writeall() or write() is eventually called to generate - valid metadata files. - - - securesystemslib.exceptions.FormatError, if 'key' is improperly formatted. - - securesystemslib.exceptions.Error, if the private key is not found in 'key'. - - - Updates the role's 'keydb' and 'roledb' entries. - - - None. - """ - - # Does 'key' have the correct format? - # Ensure the arguments have the appropriate number of objects and object - # types, and that all dict keys are properly named. Raise - # 'securesystemslib.exceptions.FormatError' if any are improperly formatted. - sslib_formats.ANYKEY_SCHEMA.check_match(key) - - # Ensure the private portion of the key is available, otherwise signatures - # cannot be generated when the metadata file is written to disk. - if 'private' not in key['keyval'] or not len(key['keyval']['private']): - raise sslib_exceptions.Error('This is not a private key.') - - # Has the key, with the private portion included, been added to the keydb? - # The public version of the key may have been previously added. - try: - keydb.add_key(key, repository_name=self._repository_name) - - except exceptions.KeyAlreadyExistsError: - keydb.remove_key(key['keyid'], self._repository_name) - keydb.add_key(key, repository_name=self._repository_name) - - # Update the role's 'signing_keys' field in 'roledb'. - roleinfo = roledb.get_roleinfo(self.rolename, self._repository_name) - if key['keyid'] not in roleinfo['signing_keyids']: - roleinfo['signing_keyids'].append(key['keyid']) - - roledb.update_roleinfo(self.rolename, roleinfo, - repository_name=self._repository_name) - - - - def unload_signing_key(self, key): - """ - - Remove a previously loaded role private key (i.e., load_signing_key()). - The keyid of the 'key' is removed from the list of recognized signing - keys. - - >>> - >>> - >>> - - - key: - The role key to be unloaded, conformant to - 'securesystemslib.formats.ANYKEY_SCHEMA'. - - - securesystemslib.exceptions.FormatError, if the 'key' argument is - improperly formatted. - - securesystemslib.exceptions.Error, if the 'key' argument has not been - previously loaded. - - - Updates the signing keys of the role in 'roledb'. - - - None. - """ - - # Does 'key' have the correct format? - # Ensure the arguments have the appropriate number of objects and object - # types, and that all dict keys are properly named. Raise - # 'securesystemslib.exceptions.FormatError' if any are improperly formatted. - sslib_formats.ANYKEY_SCHEMA.check_match(key) - - # Update the role's 'signing_keys' field in 'roledb'. - roleinfo = roledb.get_roleinfo(self.rolename, self._repository_name) - - # TODO: Should we consider removing keys from keydb that are no longer - # associated with any roles? 
There could be many no-longer-used keys - # stored in the keydb if not. For now, just unload the key. - if key['keyid'] in roleinfo['signing_keyids']: - roleinfo['signing_keyids'].remove(key['keyid']) - - roledb.update_roleinfo(self.rolename, roleinfo, - repository_name=self._repository_name) - - else: - raise sslib_exceptions.Error('Signing key not found.') - - - - def add_signature(self, signature, mark_role_as_dirty=True): - """ - - Add a signature to the role. A role is considered fully signed if it - contains a threshold of signatures. The 'signature' should have been - generated by the private key corresponding to one of the role's expected - keys. - - >>> - >>> - >>> - - - signature: - The signature to be added to the role, conformant to - 'securesystemslib.formats.SIGNATURE_SCHEMA'. - - mark_role_as_dirty: - A boolean indicating whether the updated 'roleinfo' for 'rolename' - should be marked as dirty. The caller might not want to mark - 'rolename' as dirty if it is loading metadata from disk and only wants - to populate roledb.py. Likewise, add_role() would support a similar - boolean to allow the repository tools to successfully load roles via - load_repository() without needing to mark these roles as dirty (default - behavior). - - - securesystemslib.exceptions.FormatError, if the 'signature' argument is - improperly formatted. - - - Adds 'signature', if not already added, to the role's 'signatures' field - in 'roledb'. - - - None. - """ - - # Does 'signature' have the correct format? - # Ensure the arguments have the appropriate number of objects and object - # types, and that all dict keys are properly named. Raise - # 'securesystemslib.exceptions.FormatError' if any are improperly formatted. - sslib_formats.SIGNATURE_SCHEMA.check_match(signature) - sslib_formats.BOOLEAN_SCHEMA.check_match(mark_role_as_dirty) - - roleinfo = roledb.get_roleinfo(self.rolename, self._repository_name) - - # Ensure the roleinfo contains a 'signatures' field. - if 'signatures' not in roleinfo: - roleinfo['signatures'] = [] - - # Update the role's roleinfo by adding 'signature', if it has not been - # added. - if signature not in roleinfo['signatures']: - roleinfo['signatures'].append(signature) - roledb.update_roleinfo(self.rolename, roleinfo, mark_role_as_dirty, - repository_name=self._repository_name) - - else: - logger.debug('Signature already exists for role: ' + repr(self.rolename)) - - - - def remove_signature(self, signature): - """ - - Remove a previously loaded, or added, role 'signature'. A role must - contain a threshold number of signatures to be considered fully signed. - - >>> - >>> - >>> - - - signature: - The role signature to remove, conformant to - 'securesystemslib.formats.SIGNATURE_SCHEMA'. - - - securesystemslib.exceptions.FormatError, if the 'signature' argument is - improperly formatted. - - securesystemslib.exceptions.Error, if 'signature' has not been previously - added to this role. - - - Updates the 'signatures' field of the role in 'roledb'. - - - None. - """ - - # Does 'signature' have the correct format? - # Ensure the arguments have the appropriate number of objects and object - # types, and that all dict keys are properly named. Raise - # 'securesystemslib.exceptions.FormatError' if any are improperly formatted. 
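add_signature() and remove_signature() are the hooks for signatures produced out-of-band (for example, on an air-gapped signing machine). An illustrative sketch, with placeholder keyid and sig values:

```python
# Sketch: attach a signature generated elsewhere; the dict must conform to
# securesystemslib.formats.SIGNATURE_SCHEMA (the values below are placeholders).
signature = {'keyid': '1a2b...', 'sig': 'abcd...'}
repository.root.add_signature(signature)

# Detach it again if it turns out to be wrong:
repository.root.remove_signature(signature)
```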
-    sslib_formats.SIGNATURE_SCHEMA.check_match(signature)
-
-    roleinfo = roledb.get_roleinfo(self.rolename, self._repository_name)
-
-    if signature in roleinfo['signatures']:
-      roleinfo['signatures'].remove(signature)
-
-      roledb.update_roleinfo(self.rolename, roleinfo,
-          repository_name=self._repository_name)
-
-    else:
-      raise sslib_exceptions.Error('Signature not found.')
-
-
-
-  @property
-  def signatures(self):
-    """
-    <Purpose>
-      A getter method that returns the role's signatures.  A role is
-      considered fully signed if it contains a threshold number of
-      signatures, where each signature must be generated by the private key
-      corresponding to one of the role's verification keys.  Keys are added
-      to a role with the add_verification_key() method.
-
-    <Arguments>
-      None.
-
-    <Exceptions>
-      None.
-
-    <Side Effects>
-      None.
-
-    <Returns>
-      A list of signatures, conformant to
-      'securesystemslib.formats.SIGNATURES_SCHEMA'.
-    """
-
-    roleinfo = roledb.get_roleinfo(self.rolename, self._repository_name)
-    signatures = roleinfo['signatures']
-
-    return signatures
-
-
-
-  @property
-  def keys(self):
-    """
-    <Purpose>
-      A getter method that returns the role's keyids of the keys.  The role
-      is expected to eventually contain a threshold of signatures generated
-      by the private keys of each of the role's keys (returned here as a
-      keyid.)
-
-    <Arguments>
-      None.
-
-    <Exceptions>
-      None.
-
-    <Side Effects>
-      None.
-
-    <Returns>
-      A list of the role's keyids (i.e., keyids of the keys).
-    """
-
-    roleinfo = roledb.get_roleinfo(self.rolename, self._repository_name)
-    keyids = roleinfo['keyids']
-
-    return keyids
-
-
-
-  @property
-  def rolename(self):
-    """
-    <Purpose>
-      Return the role's name.
-      Examples: 'root', 'timestamp', 'targets/unclaimed/django'.
-
-    <Arguments>
-      None.
-
-    <Exceptions>
-      None.
-
-    <Side Effects>
-      None.
-
-    <Returns>
-      The role's name, conformant to 'tuf.formats.ROLENAME_SCHEMA'.
-      Examples: 'root', 'timestamp', 'targets/unclaimed/django'.
-    """
-
-    return self._rolename
-
-
-
-  @property
-  def version(self):
-    """
-    <Purpose>
-      A getter method that returns the role's version number, conformant to
-      'tuf.formats.VERSION_SCHEMA'.
-
-    <Arguments>
-      None.
-
-    <Exceptions>
-      None.
-
-    <Side Effects>
-      None.
-
-    <Returns>
-      The role's version number, conformant to 'tuf.formats.VERSION_SCHEMA'.
-    """
-
-    roleinfo = roledb.get_roleinfo(self.rolename, self._repository_name)
-    version = roleinfo['version']
-
-    return version
-
-
-
-  @version.setter
-  def version(self, version):
-    """
-    <Purpose>
-      A setter method that updates the role's version number.  TUF clients
-      download new metadata with a version number greater than the version
-      currently trusted.  New metadata starts at version 1 when either
-      write() or write_partial() is called.  Version numbers are
-      automatically incremented, when the write methods are called, as
-      follows:
-
-      1.  write_partial==True and the metadata is the first to be written.
-
-      2.  write_partial==False (i.e., write()), the metadata was not loaded
-          as partially written, and a write_partial is not needed.
-
-      >>>
-      >>>
-      >>>
-
-    <Arguments>
-      version:
-        The role's version number, conformant to
-        'tuf.formats.VERSION_SCHEMA'.
-
-    <Exceptions>
-      securesystemslib.exceptions.FormatError, if the 'version' argument is
-      improperly formatted.
-
-    <Side Effects>
-      Modifies the 'version' attribute of the Repository object and updates
-      the role's version in 'roledb'.
-
-    <Returns>
-      None.
-    """
-
-    # Does 'version' have the correct format?
-    # Ensure the arguments have the appropriate number of objects and object
-    # types, and that all dict keys are properly named.  Raise
-    # 'securesystemslib.exceptions.FormatError' if any are improperly formatted.
- formats.METADATAVERSION_SCHEMA.check_match(version) - - roleinfo = roledb.get_roleinfo(self.rolename, self._repository_name) - roleinfo['version'] = version - - roledb.update_roleinfo(self._rolename, roleinfo, - repository_name=self._repository_name) - - - - @property - def threshold(self): - """ - - Return the role's threshold value. A role is considered fully signed if - a threshold number of signatures is available. - - - None. - - - None. - - - None. - - - The role's threshold value, conformant to - 'tuf.formats.THRESHOLD_SCHEMA'. - """ - - roleinfo = roledb.get_roleinfo(self._rolename, self._repository_name) - threshold = roleinfo['threshold'] - - return threshold - - - - @threshold.setter - def threshold(self, threshold): - """ - - A setter method that modified the threshold value of the role. Metadata - is considered fully signed if a 'threshold' number of signatures is - available. - - >>> - >>> - >>> - - - threshold: - An integer value that sets the role's threshold value, or the minimum - number of signatures needed for metadata to be considered fully - signed. Conformant to 'tuf.formats.THRESHOLD_SCHEMA'. - - - securesystemslib.exceptions.FormatError, if the 'threshold' argument is - improperly formatted. - - - Modifies the threshold attribute of the Repository object and updates - the roles threshold in 'roledb'. - - - None. - """ - - # Does 'threshold' have the correct format? - # Ensure the arguments have the appropriate number of objects and object - # types, and that all dict keys are properly named. Raise - # 'securesystemslib.exceptions.FormatError' if any are improperly formatted. - formats.THRESHOLD_SCHEMA.check_match(threshold) - - roleinfo = roledb.get_roleinfo(self._rolename, self._repository_name) - roleinfo['previous_threshold'] = roleinfo['threshold'] - roleinfo['threshold'] = threshold - - roledb.update_roleinfo(self._rolename, roleinfo, - repository_name=self._repository_name) - - - @property - def expiration(self): - """ - - A getter method that returns the role's expiration datetime. - - - None. - - - securesystemslib.exceptions.FormatError, if the expiration cannot be - parsed correctly - - - None. - - - The role's expiration datetime, a datetime.datetime() object. - """ - - roleinfo = roledb.get_roleinfo(self.rolename, self._repository_name) - expires = roleinfo['expires'] - - return formats.expiry_string_to_datetime(expires) - - - - @expiration.setter - def expiration(self, datetime_object): - """ - - A setter method for the role's expiration datetime. The top-level - roles have a default expiration (e.g., ROOT_EXPIRATION), but may later - be modified by this setter method. - - >>> - >>> - >>> - - - datetime_object: - The datetime expiration of the role, a datetime.datetime() object. - - - securesystemslib.exceptions.FormatError, if 'datetime_object' is not a - datetime.datetime() object. - - securesystemslib.exceptions.Error, if 'datetime_object' has already - expired. - - - Modifies the expiration attribute of the Repository object. - The datetime given will be truncated to microseconds = 0 - - - None. - """ - - # Is 'datetime_object' a datetime.datetime() object? - # Raise 'securesystemslib.exceptions.FormatError' if not. 
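The property setters above (version, threshold, and the expiration setter whose body follows) are the usual way to tune a role before writing; a short illustrative sketch, continuing with the `repository` handle from earlier:

```python
import datetime

# Sketch: tune role attributes via the property setters.
repository.root.threshold = 2        # require two of root's keys to sign
repository.targets.version = 5       # set the version number explicitly
repository.timestamp.expiration = datetime.datetime(2030, 1, 1)  # must lie in the future
```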
-    if not isinstance(datetime_object, datetime.datetime):
-      raise sslib_exceptions.FormatError(
-          repr(datetime_object) + ' is not a datetime.datetime() object.')
-
-    # Truncate the microseconds value to produce a correct schema string
-    # of the form 'yyyy-mm-ddThh:mm:ssZ'.
-    datetime_object = datetime_object.replace(microsecond = 0)
-
-    # Ensure the expiration has not already passed.
-    current_datetime_object = \
-      formats.unix_timestamp_to_datetime(int(time.time()))
-
-    if datetime_object < current_datetime_object:
-      raise sslib_exceptions.Error(repr(self.rolename) + ' has'
-          ' already expired.')
-
-    # Update the role's 'expires' entry in 'roledb'.
-    roleinfo = roledb.get_roleinfo(self.rolename, self._repository_name)
-    expires = datetime_object.isoformat() + 'Z'
-    roleinfo['expires'] = expires
-
-    roledb.update_roleinfo(self.rolename, roleinfo,
-        repository_name=self._repository_name)
-
-
-
-  @property
-  def signing_keys(self):
-    """
-    <Purpose>
-      A getter method that returns a list of the role's signing keys.
-
-      >>>
-      >>>
-      >>>
-
-    <Arguments>
-      None.
-
-    <Exceptions>
-      None.
-
-    <Side Effects>
-      None.
-
-    <Returns>
-      A list of keyids of the role's signing keys, conformant to
-      'securesystemslib.formats.KEYIDS_SCHEMA'.
-    """
-
-    roleinfo = roledb.get_roleinfo(self.rolename, self._repository_name)
-    signing_keyids = roleinfo['signing_keyids']
-
-    return signing_keyids
-
-
-
-
-
-class Root(Metadata):
-  """
-  <Purpose>
-    Represent a Root role object.  The root role is responsible for listing
-    the public keys and threshold of all the top-level roles, including
-    itself.  Top-level metadata is rejected if it does not comply with what
-    is specified by the Root role.
-
-    This Root object sub-classes Metadata, so the expected Metadata
-    operations, like adding/removing public keys, signatures, private keys,
-    and updating metadata attributes (e.g., version and expiration), are
-    supported.  Since Root is a top-level role and must exist, a default
-    Root object is instantiated when a new Repository object is created.
-
-    >>>
-    >>>
-    >>>
-
-  <Arguments>
-    repository_name:
-      The name of the repository.  If not supplied, 'rolename' is added to
-      the 'default' repository.
-
-  <Exceptions>
-    tuf.exceptions.FormatError, if the argument is improperly formatted.
-
-  <Side Effects>
-    A 'root' role is added to 'roledb'.
-
-  <Returns>
-    None.
-  """
-
-  def __init__(self, repository_name):
-
-    super(Root, self).__init__()
-
-    self._rolename = 'root'
-    self._repository_name = repository_name
-
-    # Is 'repository_name' properly formatted?  Otherwise, raise a
-    # tuf.exceptions.FormatError exception.
-    formats.ROLENAME_SCHEMA.check_match(repository_name)
-
-    # By default, 'root' metadata is set to expire 1 year from the current
-    # time.  The expiration may be modified.
-    expiration = formats.unix_timestamp_to_datetime(
-        int(time.time() + ROOT_EXPIRATION))
-    expiration = expiration.isoformat() + 'Z'
-
-    roleinfo = {'keyids': [], 'signing_keyids': [], 'threshold': 1,
-        'signatures': [], 'version': 0, 'consistent_snapshot': False,
-        'expires': expiration, 'partial_loaded': False}
-    try:
-      roledb.add_role(self._rolename, roleinfo, self._repository_name)
-
-    except exceptions.RoleAlreadyExistsError:
-      pass
-
-
-
-
-
-class Timestamp(Metadata):
-  """
-  <Purpose>
-    Represent a Timestamp role object.  The timestamp role is responsible
-    for referencing the latest version of the Snapshot role.  Under normal
-    conditions, it is the only role to be downloaded from a remote
-    repository without a known file length and hash.  An upper length limit
-    is set, though.  Its signatures are also verified to be valid according
-    to the Root role.
-    If the client can only obtain invalid metadata, Root is the only other
-    role that is downloaded without a known length and hash.  This case may
-    occur if a role's signing keys have been revoked and a newer Root file
-    is needed to list the updated keys.
-
-    This Timestamp object sub-classes Metadata, so the expected Metadata
-    operations, like adding/removing public keys, signatures, private keys,
-    and updating metadata attributes (e.g., version and expiration), are
-    supported.  Since Timestamp is a top-level role and must exist, a
-    default Timestamp object is instantiated when a new Repository object
-    is created.
-
-    >>>
-    >>>
-    >>>
-
-  <Arguments>
-    repository_name:
-      The name of the repository.  If not supplied, 'rolename' is added to
-      the 'default' repository.
-
-  <Exceptions>
-    tuf.exceptions.FormatError, if the argument is improperly formatted.
-
-  <Side Effects>
-    A 'timestamp' role is added to 'roledb'.
-
-  <Returns>
-    None.
-  """
-
-  def __init__(self, repository_name):
-
-    super(Timestamp, self).__init__()
-
-    self._rolename = 'timestamp'
-    self._repository_name = repository_name
-
-    # Is 'repository_name' properly formatted?  Otherwise, raise a
-    # tuf.exceptions.FormatError exception.
-    sslib_formats.NAME_SCHEMA.check_match(repository_name)
-
-    # By default, 'timestamp' metadata is set to expire 1 day from the
-    # current time.  The expiration may be modified.
-    expiration = formats.unix_timestamp_to_datetime(
-        int(time.time() + TIMESTAMP_EXPIRATION))
-    expiration = expiration.isoformat() + 'Z'
-
-    roleinfo = {'keyids': [], 'signing_keyids': [], 'threshold': 1,
-        'signatures': [], 'version': 0, 'expires': expiration,
-        'partial_loaded': False}
-
-    try:
-      roledb.add_role(self.rolename, roleinfo, self._repository_name)
-
-    except exceptions.RoleAlreadyExistsError:
-      pass
-
-
-
-
-
-class Snapshot(Metadata):
-  """
-  <Purpose>
-    Represent a Snapshot role object.  The snapshot role is responsible for
-    referencing the other top-level roles (excluding Timestamp) and all
-    delegated roles.
-
-    This Snapshot object sub-classes Metadata, so the expected Metadata
-    operations, like adding/removing public keys, signatures, private keys,
-    and updating metadata attributes (e.g., version and expiration), are
-    supported.  Since Snapshot is a top-level role and must exist, a default
-    Snapshot object is instantiated when a new Repository object is created.
-
-    >>>
-    >>>
-    >>>
-
-  <Arguments>
-    repository_name:
-      The name of the repository.  If not supplied, 'rolename' is added to
-      the 'default' repository.
-
-  <Exceptions>
-    tuf.exceptions.FormatError, if the argument is improperly formatted.
-
-  <Side Effects>
-    A 'snapshot' role is added to 'roledb'.
-
-  <Returns>
-    None.
-  """
-
-  def __init__(self, repository_name):
-
-    super(Snapshot, self).__init__()
-
-    self._rolename = 'snapshot'
-    self._repository_name = repository_name
-
-    # Is 'repository_name' properly formatted?  Otherwise, raise a
-    # tuf.exceptions.FormatError exception.
-    sslib_formats.NAME_SCHEMA.check_match(repository_name)
-
-    # By default, 'snapshot' metadata is set to expire 1 week from the
-    # current time.  The expiration may be modified.
-    expiration = formats.unix_timestamp_to_datetime(
-        int(time.time() + SNAPSHOT_EXPIRATION))
-    expiration = expiration.isoformat() + 'Z'
-
-    roleinfo = {'keyids': [], 'signing_keyids': [], 'threshold': 1,
-        'signatures': [], 'version': 0, 'expires': expiration,
-        'partial_loaded': False}
-
-    try:
-      roledb.add_role(self._rolename, roleinfo, self._repository_name)
-
-    except exceptions.RoleAlreadyExistsError:
-      pass
-
-
-
-
-
-class Targets(Metadata):
-  """
-  <Purpose>
-    Represent a Targets role object.
Targets roles include the top-level role - 'targets.json' and all delegated roles (e.g., 'targets/unclaimed/django'). - The expected operations of Targets metadata is included, such as adding - and removing repository target files, making and revoking delegations, and - listing the target files provided by it. - - Adding or removing a delegation causes the attributes of the Targets object - to be updated. That is, if the 'django' Targets object is delegated by - 'targets/unclaimed', a new attribute is added so that the following - code statement is supported: - repository.targets('unclaimed')('django').version = 2 - - Likewise, revoking a delegation causes removal of the delegation attribute. - - This Targets object sub-classes Metadata, so the expected Metadata - operations like adding/removing public keys, signatures, private keys, and - updating metadata attributes (e.g., version and expiration) is supported. - Since Targets is a top-level role and must exist, a default Targets object - (for 'targets.json', not delegated roles) is instantiated when a new - Repository object is created. - - >>> - >>> - >>> - - - targets_directory: - The targets directory of the Repository object. - - rolename: - The rolename of this Targets object. - - roleinfo: - An already populated roleinfo object of 'rolename'. Conformant to - 'tuf.formats.ROLEDB_SCHEMA'. - - repository_name: - The name of the repository. If not supplied, 'rolename' is added to the - 'default' repository. - - - securesystemslib.exceptions.FormatError, if the arguments are improperly - formatted. - - - Modifies the roleinfo of the targets role in 'roledb', or creates - a default one named 'targets'. - - - None. - """ - - def __init__(self, targets_directory, rolename='targets', roleinfo=None, - parent_targets_object=None, repository_name='default'): - - # Do the arguments have the correct format? - # Ensure the arguments have the appropriate number of objects and object - # types, and that all dict keys are properly named. Raise - # 'securesystemslib.exceptions.FormatError' if any are improperly formatted. - sslib_formats.PATH_SCHEMA.check_match(targets_directory) - formats.ROLENAME_SCHEMA.check_match(rolename) - sslib_formats.NAME_SCHEMA.check_match(repository_name) - - if roleinfo is not None: - formats.ROLEDB_SCHEMA.check_match(roleinfo) - - super(Targets, self).__init__() - self._targets_directory = targets_directory - self._rolename = rolename - self._target_files = [] - self._delegated_roles = {} - self._parent_targets_object = self - self._repository_name = repository_name - - # Keep a reference to the top-level 'targets' object. Any delegated roles - # that may be created, can be added to and accessed via the top-level - # 'targets' object. - if parent_targets_object is not None: - self._parent_targets_object = parent_targets_object - - # By default, Targets objects are set to expire 3 months from the current - # time. May be later modified. - expiration = formats.unix_timestamp_to_datetime( - int(time.time() + TARGETS_EXPIRATION)) - expiration = expiration.isoformat() + 'Z' - - # If 'roleinfo' is not provided, set an initial default. - if roleinfo is None: - roleinfo = {'keyids': [], 'signing_keyids': [], 'threshold': 1, - 'version': 0, 'expires': expiration, - 'signatures': [], 'paths': {}, 'path_hash_prefixes': [], - 'partial_loaded': False, 'delegations': {'keys': {}, - 'roles': []}} - - # Add the new role to the 'roledb'. 
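The nested attribute access described in the class docstring above looks like this in practice; role names are illustrative, and both delegations must already exist:

```python
# Sketch: 'unclaimed' is delegated by 'targets', and 'django' by 'unclaimed'.
unclaimed = repository.targets('unclaimed')
django = unclaimed('django')
django.version = 2    # same as: repository.targets('unclaimed')('django').version = 2
```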
- try: - roledb.add_role(self.rolename, roleinfo, self._repository_name) - - except exceptions.RoleAlreadyExistsError: - pass - - - - def __call__(self, rolename): - """ - - Allow callable Targets object so that delegated roles may be referenced - by their string rolenames. Rolenames may include characters like '-' and - are not restricted to Python identifiers. - - - rolename: - The rolename of the delegated role. 'rolename' must be a role - previously delegated by this Targets role. - - - securesystemslib.exceptions.FormatError, if the arguments are improperly - formatted. - - tuf.exceptions.UnknownRoleError, if 'rolename' has not been - delegated by this Targets object. - - - Modifies the roleinfo of the targets role in 'roledb'. - - - The Targets object of 'rolename'. - """ - - # Do the arguments have the correct format? - # Ensure the arguments have the appropriate number of objects and object - # types, and that all dict keys are properly named. Raise - # 'securesystemslib.exceptions.FormatError' if any are improperly formatted. - formats.ROLENAME_SCHEMA.check_match(rolename) - - if rolename in self._delegated_roles: - return self._delegated_roles[rolename] - - else: - raise exceptions.UnknownRoleError(repr(rolename) + ' has' - ' not been delegated by ' + repr(self.rolename)) - - - - def add_delegated_role(self, rolename, targets_object): - """ - - Add 'targets_object' to this Targets object's list of known delegated - roles. Specifically, delegated Targets roles should call 'super(Targets, - self).add_delegated_role(...)' so that the top-level 'targets' role - contains a dictionary of all the available roles on the repository. - - - rolename: - The rolename of the delegated role. 'rolename' must be a role - previously delegated by this Targets role. - - targets_object: - A Targets() object. - - - securesystemslib.exceptions.FormatError, if the arguments are improperly - formatted. - - - Updates the Target object's dictionary of delegated targets. - - - The Targets object of 'rolename'. - """ - - # Do the arguments have the correct format? - # Ensure the arguments have the appropriate number of objects and object - # types, and that all dict keys are properly named. Raise - # 'securesystemslib.exceptions.FormatError' if any are improperly formatted. - formats.ROLENAME_SCHEMA.check_match(rolename) - - if not isinstance(targets_object, Targets): - raise sslib_exceptions.FormatError(repr(targets_object) + ' is' - ' not a Targets object.') - - - if rolename in self._delegated_roles: - logger.debug(repr(rolename) + ' already exists.') - - else: - self._delegated_roles[rolename] = targets_object - - - - def remove_delegated_role(self, rolename): - """ - Remove 'rolename' from this Targets object's list of delegated roles. - This method does not update roledb and others. - - - rolename: - The rolename of the delegated role to remove. 'rolename' should be a - role previously delegated by this Targets role. - - - securesystemslib.exceptions.FormatError, if the argument is improperly - formatted. - - - Updates the Target object's dictionary of delegated targets. - - - None. - """ - - # Do the arguments have the correct format? - # Ensure the arguments have the appropriate number of objects and object - # types, and that all dict keys are properly named. Raise - # 'securesystemslib.exceptions.FormatError' if any are improperly formatted. 
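Calling a Targets object with a rolename it has not delegated raises tuf.exceptions.UnknownRoleError, which callers may want to handle; a small sketch with an illustrative rolename:

```python
from tuf import exceptions

# Sketch: __call__ raises UnknownRoleError for rolenames that were never
# delegated by this Targets object.
try:
    repository.targets('no-such-role')
except exceptions.UnknownRoleError as e:
    print('not delegated:', e)
```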
- formats.ROLENAME_SCHEMA.check_match(rolename) - - if rolename not in self._delegated_roles: - logger.debug(repr(rolename) + ' has not been delegated.') - return - - else: - del self._delegated_roles[rolename] - - - - @property - def target_files(self): - """ - - A getter method that returns the target files added thus far to this - Targets object. - - >>> - >>> - >>> - - - None. - - - None. - - - None. - - - None. - """ - - target_files = roledb.get_roleinfo(self._rolename, - self._repository_name)['paths'] - return target_files - - - - def add_paths(self, paths, child_rolename): - """ - - Add 'paths' to the delegated paths of 'child_rolename'. 'paths' can be a - list of either file paths or glob patterns. The updater client verifies - the target paths specified by child roles, and searches for targets by - visiting these delegated paths. A child role may only provide targets - specifically listed in the delegations field of the delegating role, or a - target that matches a delegated path. - - >>> - >>> - >>> - - - paths: - A list of glob patterns, or file paths, that 'child_rolename' is - trusted to provide. - - child_rolename: - The child delegation that requires an update to its delegated or - trusted paths, as listed in the parent role's delegations (e.g., - 'Django' in 'unclaimed'). - - - securesystemslib.exceptions.FormatError, if a path or glob pattern in - 'paths' is not a string, or if 'child_rolename' is not a formatted - rolename. - - securesystemslib.exceptions.Error, if 'child_rolename' has not been - delegated yet. - - tuf.exceptions.InvalidNameError, if any path in 'paths' does not match - pattern. - - - Modifies this Targets' delegations field. - - - None. - """ - - # Do the argument have the correct format? - # Ensure the arguments have the appropriate number of objects and object - # types, and that all dict keys are properly named. - # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch. - sslib_formats.PATHS_SCHEMA.check_match(paths) - formats.ROLENAME_SCHEMA.check_match(child_rolename) - - # Ensure that 'child_rolename' exists, otherwise it will not have an entry - # in the parent role's delegations field. - if not roledb.role_exists(child_rolename, self._repository_name): - raise sslib_exceptions.Error(repr(child_rolename) + ' does' - ' not exist.') - - for path in paths: - # Check if the delegated paths or glob patterns are relative and use - # forward slash as a separator or raise an exception. Paths' existence - # on the file system is not verified. If the path is incorrect, - # the targetfile won't be matched successfully during a client update. - self._check_path(path) - - # Get the current role's roleinfo, so that its delegations field can be - # updated. - roleinfo = roledb.get_roleinfo(self._rolename, self._repository_name) - - # Update the delegated paths of 'child_rolename' to add relative paths. - for role in roleinfo['delegations']['roles']: - if role['name'] == child_rolename: - for relative_path in paths: - if relative_path not in role['paths']: - role['paths'].append(relative_path) - - else: - logger.debug(repr(relative_path) + ' is already a delegated path.') - else: - logger.debug(repr(role['name']) + ' does not match child rolename.') - - roledb.update_roleinfo(self._rolename, roleinfo, - repository_name=self._repository_name) - - - - def add_target(self, filepath, custom=None, fileinfo=None): - """ - - Add a filepath (must be relative to the repository's targets directory) - to the Targets object. 
- - If 'filepath' has already been added, it will be replaced with any new - file or 'custom' information. - - >>> - >>> - >>> - - - filepath: - The path of the target file. It must be relative to the repository's - targets directory. - - custom: - An optional dictionary providing additional information about the file. - NOTE: if a custom value is passed, the fileinfo parameter must be None. - This parameter will be deprecated in a future release of tuf, use of - the fileinfo parameter is preferred. - - fileinfo: - An optional fileinfo dictionary, conforming to - tuf.formats.TARGETS_FILEINFO_SCHEMA, providing full information about the - file, i.e: - { 'length': 101, - 'hashes': { 'sha256': '123EDF...' }, - 'custom': { 'permissions': '600'} # optional - } - NOTE: if a custom value is passed, the fileinfo parameter must be None. - - - securesystemslib.exceptions.FormatError, if 'filepath' is improperly - formatted. - - tuf.exceptions.InvalidNameError, if 'filepath' does not match pattern. - - - Adds 'filepath' to this role's list of targets. This role's - 'roledb' entry is also updated. - - - None. - """ - - # Does 'filepath' have the correct format? - # Ensure the arguments have the appropriate number of objects and object - # types, and that all dict keys are properly named. Raise - # 'securesystemslib.exceptions.FormatError' if there is a mismatch. - formats.RELPATH_SCHEMA.check_match(filepath) - - if fileinfo and custom: - raise sslib_exceptions.Error("Can only take one of" - " custom or fileinfo, not both.") - - if fileinfo: - formats.TARGETS_FILEINFO_SCHEMA.check_match(fileinfo) - - if custom is None: - custom = {} - else: - formats.CUSTOM_SCHEMA.check_match(custom) - - # Add 'filepath' (i.e., relative to the targets directory) to the role's - # list of targets. 'filepath' will not be verified as an allowed path - # according to some delegating role. Not verifying 'filepath' here allows - # freedom to add targets and parent restrictions in any order, minimize - # the number of times these checks are performed, and allow any role to - # delegate trust of packages to this Targets role. - - # Check if the target is relative and uses forward slash as a separator - # or raise an exception. File's existence on the file system is not - # verified. If the file does not exist relative to the targets directory, - # later calls to write() will fail. - self._check_path(filepath) - - # Update the role's 'roledb' entry and avoid duplicates. - roleinfo = roledb.get_roleinfo(self._rolename, self._repository_name) - - if filepath not in roleinfo['paths']: - logger.debug('Adding new target: ' + repr(filepath)) - - else: - logger.debug('Replacing target: ' + repr(filepath)) - - if fileinfo: - roleinfo['paths'].update({filepath: fileinfo}) - else: - roleinfo['paths'].update({filepath: {'custom': custom}}) - - roledb.update_roleinfo(self._rolename, roleinfo, - repository_name=self._repository_name) - - - - def add_targets(self, list_of_targets): - """ - - Add a list of target filepaths (all relative to 'self.targets_directory'). - This method does not actually create files on the file system. The - list of targets must already exist on disk. - - >>> - >>> - >>> - - - list_of_targets: - A list of target filepaths that are added to the paths of this Targets - object. - - - securesystemslib.exceptions.FormatError, if the arguments are improperly - formatted. - - tuf.exceptions.InvalidNameError, if any target in 'list_of_targets' - does not match pattern. 
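A brief usage sketch for add_target() above and add_targets() (whose docstring continues below); the filenames are illustrative, and the files must already exist under the repository's targets directory before write() or writeall() computes their hashes:

```python
# Sketch: register target files (paths are relative to the targets directory).
repository.targets.add_target('foo1.tgz', custom={'type': 'package'})
repository.targets.add_targets(['foo2.tgz', 'foo3.tgz'])
repository.targets.remove_target('foo3.tgz')
```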
- - - This Targets' roleinfo is updated with the paths in 'list_of_targets'. - - - None. - """ - - # Does 'list_of_targets' have the correct format? - # Ensure the arguments have the appropriate number of objects and object - # types, and that all dict keys are properly named. - # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch. - formats.RELPATHS_SCHEMA.check_match(list_of_targets) - - # Ensure the paths in 'list_of_targets' are relative and use forward slash - # as a separator or raise an exception. The paths of 'list_of_targets' - # will be verified as existing and allowed paths according to this Targets - # parent role when write() or writeall() is called. Not verifying - # filepaths here allows the freedom to add targets and parent restrictions - # in any order and minimize the number of times these checks are performed. - for target in list_of_targets: - self._check_path(target) - - # Update this Targets 'roledb' entry. - roleinfo = roledb.get_roleinfo(self._rolename, self._repository_name) - for relative_target in list_of_targets: - if relative_target not in roleinfo['paths']: - logger.debug('Adding new target: ' + repr(relative_target)) - else: - logger.debug('Replacing target: ' + repr(relative_target)) - roleinfo['paths'].update({relative_target: {}}) - - roledb.update_roleinfo(self.rolename, roleinfo, - repository_name=self._repository_name) - - - - def remove_target(self, filepath): - """ - - Remove the target 'filepath' from this Targets' 'paths' field. 'filepath' - is relative to the targets directory. - - >>> - >>> - >>> - - - filepath: - The target to remove from this Targets object, relative to the - repository's targets directory. - - - securesystemslib.exceptions.FormatError, if 'filepath' is improperly - formatted. - - securesystemslib.exceptions.Error, if 'filepath' is not located in the - repository's targets directory, or not found. - - - Modifies this Targets 'roledb' entry. - - - None. - """ - - # Does 'filepath' have the correct format? - # Ensure the arguments have the appropriate number of objects and object - # types, and that all dict keys are properly named. Raise - # 'securesystemslib.exceptions.FormatError' if there is a mismatch. - formats.RELPATH_SCHEMA.check_match(filepath) - - # Remove 'relative_filepath', if found, and update this Targets roleinfo. - fileinfo = roledb.get_roleinfo(self.rolename, self._repository_name) - if filepath in fileinfo['paths']: - del fileinfo['paths'][filepath] - roledb.update_roleinfo(self.rolename, fileinfo, - repository_name=self._repository_name) - - else: - raise sslib_exceptions.Error('Target file path not found.') - - - - def clear_targets(self): - """ - - Remove all the target filepaths in the "paths" field of this Targets. - - >>> - >>> - >>> - - - None - - - None. - - - Modifies this Targets' 'roledb' entry. - - - None. - """ - - roleinfo = roledb.get_roleinfo(self.rolename, self._repository_name) - roleinfo['paths'] = {} - - roledb.update_roleinfo(self.rolename, roleinfo, - repository_name=self._repository_name) - - - - - - def get_delegated_rolenames(self): - """ - - Return all delegations of a role. If ['a/b/', 'a/b/c/', 'a/b/c/d'] have - been delegated by the delegated role 'django', - repository.targets('django').get_delegated_rolenames() returns: ['a/b', - 'a/b/c', 'a/b/c/d']. - - - None. - - - None. - - - None. - - - A list of rolenames. 
- """ - - return roledb.get_delegated_rolenames(self.rolename, self._repository_name) - - - - - - def _create_delegated_target(self, rolename, keyids, threshold, paths): - """ - Create a new Targets object for the 'rolename' delegation. An initial - expiration is set (3 months from the current time). - """ - - expiration = formats.unix_timestamp_to_datetime( - int(time.time() + TARGETS_EXPIRATION)) - expiration = expiration.isoformat() + 'Z' - - roleinfo = {'name': rolename, 'keyids': keyids, 'signing_keyids': [], - 'threshold': threshold, 'version': 0, - 'expires': expiration, 'signatures': [], 'partial_loaded': False, - 'paths': paths, 'delegations': {'keys': {}, 'roles': []}} - - # The new targets object is added as an attribute to this Targets object. - new_targets_object = Targets(self._targets_directory, rolename, roleinfo, - parent_targets_object=self._parent_targets_object, - repository_name=self._repository_name) - - return new_targets_object - - - - - - def _update_roledb_delegations(self, keydict, delegations_roleinfo): - """ - Update the roledb to include delegations of the keys in keydict and the - roles in delegations_roleinfo - """ - - current_roleinfo = roledb.get_roleinfo(self.rolename, self._repository_name) - current_roleinfo['delegations']['keys'].update(keydict) - - for roleinfo in delegations_roleinfo: - current_roleinfo['delegations']['roles'].append(roleinfo) - - roledb.update_roleinfo(self.rolename, current_roleinfo, - repository_name=self._repository_name) - - - - - - def delegate(self, rolename, public_keys, paths, threshold=1, - terminating=False, list_of_targets=None, path_hash_prefixes=None): - """ - - Create a new delegation, where 'rolename' is a child delegation of this - Targets object. The keys and roles database is updated, including the - delegations field of this Targets. The delegation of 'rolename' is added - and accessible (i.e., repository.targets(rolename)). - - Actual metadata files are not created, only when repository.writeall() or - repository.write() is called. - - >>> - >>> - >>> - - - rolename: - The name of the delegated role, as in 'django' or 'unclaimed'. - - public_keys: - A list of TUF key objects in 'ANYKEYLIST_SCHEMA' format. The list - may contain any of the supported key types: RSAKEY_SCHEMA, - ED25519KEY_SCHEMA, etc. - - paths: - The paths, or glob patterns, delegated to 'rolename'. Any targets - added to 'rolename', via add_targets() or 'list_of_targets', must - match one of the paths or glob patterns in 'paths'. Apart from the - public keys of 'rolename', the delegated 'paths' is often known and - specified when a delegation is first performed. If the delegator - is unsure of which 'paths' to delegate, 'paths' can be set to ['']. - - threshold: - The threshold number of keys of 'rolename'. - - terminating: - Boolean that indicates whether this role allows the updater client to - continue searching for targets (target files it is trusted to list but - has not yet specified) in other delegations. If 'terminating' is True - and 'updater.target()' does not find 'example_target.tar.gz' in this - role, a 'tuf.exceptions.UnknownTargetError' exception should be raised. - If 'terminating' is False (default), and 'target/other_role' is also - trusted with 'example_target.tar.gz' and has listed it, - updater.target() should backtrack and return the target file specified - by 'target/other_role'. - - list_of_targets: - A list of target filepaths that are added to 'rolename'. 
- 'list_of_targets' is a list of target filepaths, can be empty, and each - filepath must be located in the repository's targets directory. The - list of targets should also exist at the specified paths, otherwise - non-existent target paths might not be added when the targets file is - written to disk with writeall() or write(). - - path_hash_prefixes: - A list of hash prefixes in - 'tuf.formats.PATH_HASH_PREFIXES_SCHEMA' format, used in - hashed bin delegations. Targets may be located and stored in hashed - bins by calculating the target path's hash prefix. - - - securesystemslib.exceptions.FormatError, if any of the arguments are - improperly formatted. - - securesystemslib.exceptions.Error, if the delegated role already exists. - - tuf.exceptions.InvalidNameError, if any path in 'paths' or target in - 'list_of_targets' does not match pattern. - - - A new Target object is created for 'rolename' that is accessible to the - caller (i.e., targets.). The 'keydb' and - 'roledb' stores are updated with 'public_keys'. - - - None. - """ - - # Do the arguments have the correct format? - # Ensure the arguments have the appropriate number of objects and object - # types, and that all dict keys are properly named. - # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch. - formats.ROLENAME_SCHEMA.check_match(rolename) - sslib_formats.ANYKEYLIST_SCHEMA.check_match(public_keys) - formats.RELPATHS_SCHEMA.check_match(paths) - formats.THRESHOLD_SCHEMA.check_match(threshold) - sslib_formats.BOOLEAN_SCHEMA.check_match(terminating) - - if list_of_targets is not None: - formats.RELPATHS_SCHEMA.check_match(list_of_targets) - - if path_hash_prefixes is not None: - formats.PATH_HASH_PREFIXES_SCHEMA.check_match(path_hash_prefixes) - - # Keep track of the valid keyids (added to the new Targets object) and - # their keydicts (added to this Targets delegations). - keyids, keydict = repo_lib.keys_to_keydict(public_keys) - - # Ensure the paths of 'list_of_targets' are located in the repository's - # targets directory. - relative_targetpaths = {} - - if list_of_targets: - for target in list_of_targets: - # Check if the target path is relative or raise an exception. File's - # existence on the file system is not verified. If the file does not - # exist relative to the targets directory, later calls to write() - # will fail. - self._check_path(target) - relative_targetpaths.update({target: {}}) - - for path in paths: - # Check if the delegated paths or glob patterns are relative or - # raise an exception. Paths' existence on the file system is not - # verified. If the path is incorrect, the targetfile won't be matched - # successfully during a client update. - self._check_path(path) - - # The new targets object is added as an attribute to this Targets object. - new_targets_object = self._create_delegated_target(rolename, keyids, - threshold, relative_targetpaths) - - # Update the roleinfo of this role. A ROLE_SCHEMA object requires only - # 'keyids', 'threshold', and 'paths'. - roleinfo = {'name': rolename, - 'keyids': keyids, - 'threshold': threshold, - 'terminating': terminating, - 'paths': list(relative_targetpaths.keys())} - - if paths: - roleinfo['paths'] = paths - - if path_hash_prefixes: - roleinfo['path_hash_prefixes'] = path_hash_prefixes - # A role in a delegations must list either 'path_hash_prefixes' - # or 'paths'. - del roleinfo['paths'] - - # Update the public keys of 'new_targets_object'. 
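The delegation flow, end to end, as an illustrative sketch (the key file and role names are hypothetical; the body of delegate() continues below):

```python
# Sketch: delegate all 'django*.tgz' targets to a new 'django' role.
public_django_key = repo_tool.import_ed25519_publickey_from_file(
    '/path/to/django_key.pub')   # hypothetical key file

repository.targets.delegate('django', [public_django_key], ['django*.tgz'],
    threshold=1, terminating=False)

# The new role is now reachable and can sign for matching targets.
repository.targets('django').add_target('django-1.0.tgz')
```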
-    for key in public_keys:
-      new_targets_object.add_verification_key(key)
-
-    # Add the new delegation to the top-level 'targets' role object (i.e.,
-    # 'repository.targets()').  For example, 'django', which was delegated
-    # by repository.targets('claimed'), is added to
-    # 'repository.targets('django')'.
-    if self.rolename != 'targets':
-      self._parent_targets_object.add_delegated_role(rolename,
-          new_targets_object)
-
-    # Add 'new_targets_object' to the delegating role object (this object).
-    self.add_delegated_role(rolename, new_targets_object)
-
-    # Update the 'delegations' field of the current role.
-    self._update_roledb_delegations(keydict, [roleinfo])
-
-
-
-
-
-  def revoke(self, rolename):
-    """
-    <Purpose>
-      Revoke this Targets' 'rolename' delegation.  Its 'rolename' attribute
-      is deleted, including the entries in its 'delegations' field and in
-      'roledb'.
-
-      Actual metadata files are not updated until repository.writeall() or
-      repository.write() is called.
-
-      >>>
-      >>>
-      >>>
-
-    <Arguments>
-      rolename:
-        The rolename (e.g., 'Django' in 'django') of the child delegation
-        that the parent role (this role) wants to revoke.
-
-    <Exceptions>
-      securesystemslib.exceptions.FormatError, if 'rolename' is improperly
-      formatted.
-
-    <Side Effects>
-      The delegations dictionary of 'rolename' is modified, and its 'roledb'
-      entry is updated.  This Targets' 'rolename' delegation attribute is
-      also deleted.
-
-    <Returns>
-      None.
-    """
-
-    # Does 'rolename' have the correct format?
-    # Ensure the arguments have the appropriate number of objects and object
-    # types, and that all dict keys are properly named.
-    # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
-    formats.ROLENAME_SCHEMA.check_match(rolename)
-
-    # Remove 'rolename' from this Targets' delegations dict.
-    roleinfo = roledb.get_roleinfo(self.rolename, self._repository_name)
-
-    for role in roleinfo['delegations']['roles']:
-      if role['name'] == rolename:
-        roleinfo['delegations']['roles'].remove(role)
-
-    roledb.update_roleinfo(self.rolename, roleinfo,
-        repository_name=self._repository_name)
-
-    # Remove 'rolename' from 'roledb'.
-    try:
-      roledb.remove_role(rolename, self._repository_name)
-      # Remove the rolename delegation from the current role.  For example,
-      # the 'django' role is removed from repository.targets('django').
-      del self._delegated_roles[rolename]
-      self._parent_targets_object.remove_delegated_role(rolename)
-
-    except (exceptions.UnknownRoleError, KeyError):
-      pass
-
-
-
-  def delegate_hashed_bins(self, list_of_targets, keys_of_hashed_bins,
-      number_of_bins=DEFAULT_NUM_BINS):
-    """
-    <Purpose>
-      Distribute a large number of target files over multiple delegated
-      roles (hashed bins).  The metadata files of the delegated roles will
-      be nearly equal in size (i.e., 'list_of_targets' is uniformly
-      distributed by calculating the target filepath's hash and determining
-      which bin it should reside in).  The updater client will use "lazy bin
-      walk" to find a target file's hashed bin destination.  The parent role
-      lists the range of path hash prefixes that each hashed bin contains.
-      This method is intended for repositories with a large number of target
-      files, as a way of easily distributing and managing the metadata that
-      lists the targets, and of minimizing the number of metadata files (and
-      their size) downloaded by the client.
See tuf-spec.txt and the following link for more - information: - http://www.python.org/dev/peps/pep-0458/#metadata-scalability - - >>> - >>> - >>> - - - list_of_targets: - The target filepaths of the targets that should be stored in hashed - bins created (i.e., delegated roles). A repository object's - get_filepaths_in_directory() can generate a list of valid target - paths. - - keys_of_hashed_bins: - The initial public keys of the delegated roles. Public keys may be - later added or removed by calling the usual methods of the delegated - Targets object. For example: - repository.targets('000-003').add_verification_key() - - number_of_bins: - The number of delegated roles, or hashed bins, that should be generated - and contain the target file attributes listed in 'list_of_targets'. - 'number_of_bins' must be a power of 2. Each bin may contain a - range of path hash prefixes (e.g., target filepath digests that range - from [000]... - [003]..., where the series of digits in brackets is - considered the hash prefix). - - - securesystemslib.exceptions.FormatError, if the arguments are improperly - formatted. - - securesystemslib.exceptions.Error, if 'number_of_bins' is not a power of - 2, or one of the targets in 'list_of_targets' is not relative to the - repository's targets directory. - - tuf.exceptions.InvalidNameError, if any target in 'list_of_targets' - does not match pattern. - - - Delegates multiple target roles from the current parent role. - - - None. - """ - - # Do the arguments have the correct format? - # Ensure the arguments have the appropriate number of objects and object - # types, and that all dict keys are properly named. - # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch. - sslib_formats.PATHS_SCHEMA.check_match(list_of_targets) - sslib_formats.ANYKEYLIST_SCHEMA.check_match(keys_of_hashed_bins) - formats.NUMBINS_SCHEMA.check_match(number_of_bins) - - prefix_length, prefix_count, bin_size = repo_lib.get_bin_numbers(number_of_bins) - - logger.info('Creating hashed bin delegations.\n' + - repr(len(list_of_targets)) + ' total targets.\n' + - repr(number_of_bins) + ' hashed bins.\n' + - repr(prefix_count) + ' total hash prefixes.\n' + - 'Each bin ranges over ' + repr(bin_size) + ' hash prefixes.') - - # Generate a list of bin names, the range of prefixes to be delegated to - # that bin, along with the corresponding full list of target prefixes - # to be delegated to that bin - ordered_roles = [] - for idx in range(0, prefix_count, bin_size): - high = idx + bin_size - 1 - name = repo_lib.create_bin_name(idx, high, prefix_length) - if bin_size == 1: - target_hash_prefixes = [name] - else: - target_hash_prefixes = [] - for idy in range(idx, idx+bin_size): - target_hash_prefixes.append("{prefix:0{len}x}".format(prefix=idy, - len=prefix_length)) - - role = {"name": name, - "target_paths": [], - "target_hash_prefixes": target_hash_prefixes} - ordered_roles.append(role) - - for target_path in list_of_targets: - # Check if the target path is relative or raise an exception. File's - # existence on the file system is not verified. If the file does not - # exist relative to the targets directory, later calls to write() and - # writeall() will fail. - self._check_path(target_path) - - # Determine the hash prefix of 'target_path' by computing the digest of - # its path relative to the targets directory. 
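A usage sketch for delegate_hashed_bins(); the key file and target list are illustrative, and the loop that routes each target into its bin continues below:

```python
# Sketch: spread targets over 16 hashed bins (number_of_bins must be a
# power of 2); all bins share one illustrative key.
public_bins_key = repo_tool.import_ed25519_publickey_from_file(
    '/path/to/bins_key.pub')     # hypothetical key file
list_of_targets = ['foo1.tgz', 'foo2.tgz']   # relative to the targets directory

repository.targets.delegate_hashed_bins(list_of_targets, [public_bins_key],
    number_of_bins=16)
```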
- # We must hash a target path as it appears in the metadata - hash_prefix = repo_lib.get_target_hash(target_path)[:prefix_length] - ordered_roles[int(hash_prefix, 16) // bin_size]["target_paths"].append(target_path) - - keyids, keydict = repo_lib.keys_to_keydict(keys_of_hashed_bins) - - # A queue of roleinfo's that need to be updated in the roledb - delegated_roleinfos = [] - - for bin_role in ordered_roles: - # TODO: originally we just called self.delegate() for each item in this - # iteration. However, this is *extremely* slow when creating a large - # number of hashed bins, i.e. 16k as is recommended for PyPI usage in - # PEP 458: https://www.python.org/dev/peps/pep-0458/ - # The source of the slowness is the interactions with the roledb, which - # causes several deep copies of roleinfo dictionaries: - # https://github.com/theupdateframework/python-tuf/issues/1005 - # Once the underlying issues in #1005 are resolved, i.e. some combination - # of the intermediate and long-term fixes, we may simplify here by - # switching back to just calling self.delegate(), but until that time we - # queue roledb interactions and perform all updates to the roledb in one - # operation at the end of the iteration. - - relative_paths = {} - for path in bin_role['target_paths']: - relative_paths.update({path: {}}) - - # Delegate from the "unclaimed" targets role to each 'bin_role' - target = self._create_delegated_target(bin_role['name'], keyids, 1, - relative_paths) - - roleinfo = {'name': bin_role['name'], - 'keyids': keyids, - 'threshold': 1, - 'terminating': False, - 'path_hash_prefixes': bin_role['target_hash_prefixes']} - delegated_roleinfos.append(roleinfo) - - for key in keys_of_hashed_bins: - target.add_verification_key(key) - - # Add the new delegation to the top-level 'targets' role object (i.e., - # 'repository.targets()'). - if self.rolename != 'targets': - self._parent_targets_object.add_delegated_role(bin_role['name'], - target) - - # Add 'new_targets_object' to the 'targets' role object (this object). - self.add_delegated_role(bin_role['name'], target) - logger.debug('Delegated from ' + repr(self.rolename) + ' to ' + repr(bin_role)) - - - self._update_roledb_delegations(keydict, delegated_roleinfos) - - - - - def add_target_to_bin(self, target_filepath, number_of_bins=DEFAULT_NUM_BINS, - fileinfo=None): - """ - - Add the fileinfo of 'target_filepath' to the expected hashed bin, if the - bin is available. The hashed bin should have been created by - {targets_role}.delegate_hashed_bins(). Assuming the target filepath is - located in the repository's targets directory, determine the filepath's - hash prefix, locate the expected bin (if any), and then add the fileinfo - to the expected bin. Example: 'targets/foo.tar.gz' may be added to the - 'targets/unclaimed/58-5f.json' role's list of targets by calling this - method. - - - target_filepath: - The filepath of the target to be added to a hashed bin. The filepath - must be located in the repository's targets directory. - - number_of_bins: - The number of delegated roles, or hashed bins, in use by the repository. - Note: 'number_of_bins' must be a power of 2. - - fileinfo: - An optional fileinfo object, conforming to tuf.formats.TARGETS_FILEINFO_SCHEMA, - providing full information about the file. - - - securesystemslib.exceptions.FormatError, if 'target_filepath' is - improperly formatted. 
-
- securesystemslib.exceptions.Error, if 'target_filepath' cannot be added to
- a hashed bin (e.g., an invalid target filepath, or the expected hashed
- bin does not exist).
-
-
- The fileinfo of 'target_filepath' is added to a hashed bin of this Targets
- object.
-
-
- The name of the hashed bin that the target was added to.
- """
-
- # Do the arguments have the correct format?
- # Ensure the arguments have the appropriate number of objects and object
- # types, and that all dict keys are properly named.
- # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
- sslib_formats.PATH_SCHEMA.check_match(target_filepath)
- formats.NUMBINS_SCHEMA.check_match(number_of_bins)
-
- # TODO: check target_filepath is sane
-
- path_hash = repo_lib.get_target_hash(target_filepath)
- bin_name = repo_lib.find_bin_for_target_hash(path_hash, number_of_bins)
-
- # Ensure the Targets object has delegated to hashed bins.
- if not self._delegated_roles.get(bin_name, None):
- raise sslib_exceptions.Error(self.rolename + ' does not have'
- ' a delegated role ' + bin_name)
-
- self._delegated_roles[bin_name].add_target(target_filepath,
- fileinfo=fileinfo)
-
- return bin_name
-
-
-
- def remove_target_from_bin(self, target_filepath,
- number_of_bins=DEFAULT_NUM_BINS):
- """
-
- Remove the fileinfo of 'target_filepath' from the expected hashed bin, if
- the bin is available. The hashed bin should have been created by
- {targets_role}.delegate_hashed_bins(). Assuming the target filepath is
- located in the repository's targets directory, determine the filepath's
- hash prefix, locate the expected bin (if any), and then remove the
- fileinfo from the expected bin. Example: 'targets/foo.tar.gz' may be
- removed from the '58-5f.json' role's list of targets by calling this
- method.
-
-
- target_filepath:
- The filepath of the target to be removed from a hashed bin. The
- filepath must be located in the repository's targets directory.
-
- number_of_bins:
- The number of delegated roles, or hashed bins, in use by the repository.
- Note: 'number_of_bins' must be a power of 2.
-
-
- securesystemslib.exceptions.FormatError, if 'target_filepath' is
- improperly formatted.
-
- securesystemslib.exceptions.Error, if 'target_filepath' cannot be removed
- from a hashed bin (e.g., an invalid target filepath, or the expected
- hashed bin does not exist).
-
-
- The fileinfo of 'target_filepath' is removed from a hashed bin of this
- Targets object.
-
-
- The name of the hashed bin that the target was removed from.
- """
-
- # Do the arguments have the correct format?
- # Ensure the arguments have the appropriate number of objects and object
- # types, and that all dict keys are properly named.
- # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
- sslib_formats.PATH_SCHEMA.check_match(target_filepath)
- formats.NUMBINS_SCHEMA.check_match(number_of_bins)
-
- # TODO: check target_filepath is sane?
-
- path_hash = repo_lib.get_target_hash(target_filepath)
- bin_name = repo_lib.find_bin_for_target_hash(path_hash, number_of_bins)
-
- # Ensure the Targets object has delegated to hashed bins.
- if not self._delegated_roles.get(bin_name, None):
- raise sslib_exceptions.Error(self.rolename + ' does not have'
- ' a delegated role ' + bin_name)
-
- self._delegated_roles[bin_name].remove_target(target_filepath)
-
- return bin_name
-
-
- @property
- def delegations(self):
- """
-
- A getter method that returns the delegations made by this Targets role.
-
- >>>
- >>>
- >>>
-
-
- None.
-
-
- tuf.exceptions.UnknownRoleError, if this Targets' rolename
- does not exist in 'roledb'.
-
-
- None.
-
-
- A list containing the Targets objects of this Targets' delegations.
- """
-
- return list(self._delegated_roles.values())
-
-
-
-
-
- def _check_path(self, pathname):
- """
-
- Check if a path matches the definition of a PATHPATTERN or a
- TARGETPATH (uses the forward slash (/) as directory separator and
- does not start with a directory separator). Checks are performed only
- on the path string, without accessing the file system.
-
-
- pathname:
- A file path or a glob pattern.
-
-
- securesystemslib.exceptions.FormatError, if 'pathname' is improperly
- formatted.
-
- tuf.exceptions.InvalidNameError, if 'pathname' does not match pattern.
-
-
- None.
- """
-
- formats.RELPATH_SCHEMA.check_match(pathname)
-
- if '\\' in pathname:
- raise exceptions.InvalidNameError('Path ' + repr(pathname)
- + ' does not use the forward slash (/) as directory separator.')
-
- if pathname.startswith('/'):
- raise exceptions.InvalidNameError('Path ' + repr(pathname)
- + ' starts with a directory separator. All paths should be relative'
- ' to targets directory.')
-
-
-
-
-def create_new_repository(repository_directory, repository_name='default',
- storage_backend=None, use_timestamp_length=True, use_timestamp_hashes=True,
- use_snapshot_length=False, use_snapshot_hashes=False):
- """
-
- Create a new repository, instantiate barebones metadata for the top-level
- roles, and return a Repository object. On disk, create_new_repository()
- only creates the directories needed to hold the metadata and targets files.
- The repository object returned may be modified to update the newly created
- repository. The methods of the returned object may be called to create
- actual repository files (e.g., repository.write()).
-
-
- repository_directory:
- The directory that will eventually hold the metadata and target files of
- the TUF repository.
-
- repository_name:
- The name of the repository. If not supplied, 'default' is used as the
- repository name.
-
- storage_backend:
- An object which implements
- securesystemslib.storage.StorageBackendInterface. When no object is
- passed, a FilesystemBackend will be instantiated and used.
-
- use_timestamp_length:
- Whether to include the optional length attribute of the snapshot
- metadata file in the timestamp metadata.
- Default is True.
-
- use_timestamp_hashes:
- Whether to include the optional hashes attribute of the snapshot
- metadata file in the timestamp metadata.
- Default is True.
-
- use_snapshot_length:
- Whether to include the optional length attribute for targets
- metadata files in the snapshot metadata.
- Default is False to save bandwidth but without losing security
- from rollback attacks.
- Read more at section 5.6 from the Mercury paper:
- https://www.usenix.org/conference/atc17/technical-sessions/presentation/kuppusamy
-
- use_snapshot_hashes:
- Whether to include the optional hashes attribute for targets
- metadata files in the snapshot metadata.
- Default is False to save bandwidth but without losing security
- from rollback attacks.
- Read more at section 5.6 from the Mercury paper:
- https://www.usenix.org/conference/atc17/technical-sessions/presentation/kuppusamy
-
-
- securesystemslib.exceptions.FormatError, if the arguments are improperly
- formatted.
-
-
- The 'repository_directory' is created if it does not exist, including its
- metadata and targets sub-directories.
-
-
- A 'tuf.repository_tool.Repository' object.
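-
- A usage sketch (the repository path is hypothetical; role keys and
- targets must still be added before the metadata can be written):
-
- repository = create_new_repository('path/to/repository')
- # ... load or generate keys, set them for the top-level roles, then:
- repository.writeall()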
- """ - - # Does 'repository_directory' have the correct format? - # Ensure the arguments have the appropriate number of objects and object - # types, and that all dict keys are properly named. - # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch. - sslib_formats.PATH_SCHEMA.check_match(repository_directory) - sslib_formats.NAME_SCHEMA.check_match(repository_name) - - if storage_backend is None: - storage_backend = sslib_storage.FilesystemBackend() - - # Set the repository, metadata, and targets directories. These directories - # are created if they do not exist. - repository_directory = os.path.abspath(repository_directory) - metadata_directory = None - targets_directory = None - - # Ensure the 'repository_directory' exists - logger.info('Creating ' + repr(repository_directory)) - storage_backend.create_folder(repository_directory) - - # Set the metadata and targets directories. The metadata directory is a - # staged one so that the "live" repository is not affected. The - # staged metadata changes may be moved over to "live" after all updated - # have been completed. - metadata_directory = os.path.join(repository_directory, - METADATA_STAGED_DIRECTORY_NAME) - targets_directory = os.path.join(repository_directory, TARGETS_DIRECTORY_NAME) - - # Ensure the metadata directory exists - logger.info('Creating ' + repr(metadata_directory)) - storage_backend.create_folder(metadata_directory) - - # Ensure the targets directory exists - logger.info('Creating ' + repr(targets_directory)) - storage_backend.create_folder(targets_directory) - - # Create the bare bones repository object, where only the top-level roles - # have been set and contain default values (e.g., Root roles has a threshold - # of 1, expires 1 year into the future, etc.) - repository = Repository(repository_directory, metadata_directory, - targets_directory, storage_backend, repository_name, use_timestamp_length, - use_timestamp_hashes, use_snapshot_length, use_snapshot_hashes) - - return repository - - - - - -def load_repository(repository_directory, repository_name='default', - storage_backend=None, use_timestamp_length=True, use_timestamp_hashes=True, - use_snapshot_length=False, use_snapshot_hashes=False): - """ - - Return a repository object containing the contents of metadata files loaded - from the repository. - - - repository_directory: - The root folder of the repository that contains the metadata and targets - sub-directories. - - repository_name: - The name of the repository. If not supplied, 'default' is used as the - repository name. - - storage_backend: - An object which implements - securesystemslib.storage.StorageBackendInterface. When no object is - passed a FilesystemBackend will be instantiated and used. - - use_timestamp_length: - Whether to include the optional length attribute of the snapshot - metadata file in the timestamp metadata. - Default is True. - - use_timestamp_hashes: - Whether to include the optional hashes attribute of the snapshot - metadata file in the timestamp metadata. - Default is True. - - use_snapshot_length: - Whether to include the optional length attribute for targets - metadata files in the snapshot metadata. - Default is False to save bandwidth but without losing security - from rollback attacks. - Read more at section 5.6 from the Mercury paper: - https://www.usenix.org/conference/atc17/technical-sessions/presentation/kuppusamy - - use_snapshot_hashes: - Whether to include the optional hashes attribute for targets - metadata files in the snapshot metadata. 
- Default is False to save bandwidth but without losing security - from rollback attacks. - Read more at section 5.6 from the Mercury paper: - https://www.usenix.org/conference/atc17/technical-sessions/presentation/kuppusamy - - - securesystemslib.exceptions.FormatError, if 'repository_directory' or any of - the metadata files are improperly formatted. - - tuf.exceptions.RepositoryError, if the Root role cannot be - found. At a minimum, a repository must contain 'root.json' - - - All the metadata files found in the repository are loaded and their contents - stored in a repository_tool.Repository object. - - - repository_tool.Repository object. - """ - - # Does 'repository_directory' have the correct format? - # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch. - sslib_formats.PATH_SCHEMA.check_match(repository_directory) - sslib_formats.NAME_SCHEMA.check_match(repository_name) - - if storage_backend is None: - storage_backend = sslib_storage.FilesystemBackend() - - repository_directory = os.path.abspath(repository_directory) - metadata_directory = os.path.join(repository_directory, - METADATA_STAGED_DIRECTORY_NAME) - targets_directory = os.path.join(repository_directory, TARGETS_DIRECTORY_NAME) - - # The Repository() object loaded (i.e., containing all the metadata roles - # found) and returned. - repository = Repository(repository_directory, metadata_directory, - targets_directory, storage_backend, repository_name, use_timestamp_length, - use_timestamp_hashes, use_snapshot_length, use_snapshot_hashes) - - filenames = repo_lib.get_top_level_metadata_filenames(metadata_directory) - - # The Root file is always available without a version number (a consistent - # snapshot) attached to the filename. Store the 'consistent_snapshot' value - # and read the loaded Root file so that other metadata files may be located. - consistent_snapshot = False - - # Load the metadata of the top-level roles (i.e., Root, Timestamp, Targets, - # and Snapshot). - repository, consistent_snapshot = repo_lib._load_top_level_metadata(repository, - filenames, repository_name) - - delegated_roles_filenames = repo_lib.get_delegated_roles_metadata_filenames( - metadata_directory, consistent_snapshot, storage_backend) - - # Load the delegated targets metadata and their fileinfo. - # The delegated targets roles form a tree/graph which is traversed in a - # breadth-first-search manner starting from 'targets' in order to correctly - # load the delegations hierarchy. - parent_targets_object = repository.targets - - # Keep the next delegations to be loaded in a deque structure which - # has the properties of a list but is designed to have fast appends - # and pops from both ends - delegations = deque() - # A set used to keep the already loaded delegations and avoid an infinite - # loop in case of cycles in the delegations graph - loaded_delegations = set() - - # Top-level roles are already loaded, fetch targets and get its delegations. - # Store the delegations in the form of delegated-delegating role tuples, - # starting from the top-level targets: - # [('role1', 'targets'), ('role2', 'targets'), ... ] - roleinfo = roledb.get_roleinfo('targets', repository_name) - for role in roleinfo['delegations']['roles']: - delegations.append((role, 'targets')) - - # Traverse the graph by appending the next delegation to the deque and - # 'pop'-ing and loading the left-most element. 
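- # (Illustration with hypothetical roles: if 'targets' delegates to 'a' and
- # 'b', and 'a' delegates to 'c', the deque starts as
- # [(<a's delegation info>, 'targets'), (<b's delegation info>, 'targets')];
- # 'a' is popped and loaded first, (<c's delegation info>, 'a') is appended,
- # and so on until the deque is empty.)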
- while delegations:
- delegation_info, delegating_role = delegations.popleft()
-
- rolename = delegation_info['name']
- if (rolename, delegating_role) in loaded_delegations:
- logger.warning('Detected cycle in the delegation graph: ' +
- repr(delegating_role) + ' -> ' +
- repr(rolename) +
- ' is reached more than once.')
- continue
-
- # Instead of adding only rolename to the set, store the already loaded
- # delegated-delegating role tuples. This way, a delegated role is added
- # to each of its delegating roles, but an infinite loop is avoided when
- # the role is reached twice from the same delegating role.
- loaded_delegations.add((rolename, delegating_role))
-
- metadata_path = delegated_roles_filenames[rolename]
- signable = None
-
- try:
- signable = sslib_util.load_json_file(metadata_path)
-
- except (sslib_exceptions.Error, ValueError, IOError):
- logger.debug('Tried to load metadata with invalid JSON'
- ' content: ' + repr(metadata_path))
- continue
-
- metadata_object = signable['signed']
-
- # Extract the metadata attributes of 'metadata_object' and update its
- # corresponding roleinfo.
- roleinfo = {'name': rolename,
- 'signing_keyids': [],
- 'signatures': [],
- 'partial_loaded': False
- }
-
- roleinfo['signatures'].extend(signable['signatures'])
- roleinfo['version'] = metadata_object['version']
- roleinfo['expires'] = metadata_object['expires']
- roleinfo['paths'] = metadata_object['targets']
- roleinfo['delegations'] = metadata_object['delegations']
- roleinfo['threshold'] = delegation_info['threshold']
- roleinfo['keyids'] = delegation_info['keyids']
-
- # Generate the Targets object of the delegated role,
- # add it to the top-level 'targets' object and to its
- # direct delegating role object.
- new_targets_object = Targets(targets_directory, rolename,
- roleinfo, parent_targets_object=parent_targets_object,
- repository_name=repository_name)
-
- parent_targets_object.add_delegated_role(rolename,
- new_targets_object)
- if delegating_role != 'targets':
- parent_targets_object(delegating_role).add_delegated_role(rolename,
- new_targets_object)
-
- # Append the next level of delegations to the deque:
- # the 'delegated' role becomes the 'delegating' one.
- for delegation in metadata_object['delegations']['roles']:
- delegations.append((delegation, rolename))
-
- # Extract the keys specified in the delegations field of the Targets
- # role. Add 'key_object' to the list of recognized keys. Keys may be
- # shared, so do not raise an exception if 'key_object' has already been
- # added. In contrast to the methods that may add duplicate keys, do not
- # log a warning here as there may be many such duplicate key warnings.
- # The repository maintainer should have also been made aware of the
- # duplicate key when it was added.
- for key_metadata in metadata_object['delegations']['keys'].values():
-
- # The repo may have used hashing algorithms for the generated keyids
- # that don't match the client's set of hash algorithms. Make sure to
- # use only the repo's selected hashing algorithms.
- key_object, keyids = format_metadata_to_key(key_metadata,
- keyid_hash_algorithms=key_metadata['keyid_hash_algorithms'])
- try:
- for keyid in keyids: # pragma: no branch
- key_object['keyid'] = keyid
- keydb.add_key(key_object, keyid=None,
- repository_name=repository_name)
-
- except exceptions.KeyAlreadyExistsError:
- pass
-
- return repository
-
-
-
-
-
-def dump_signable_metadata(metadata_filepath):
- """
-
- Dump the "signed" portion of metadata.
It is the portion that is normally
- signed by the repository tool and is returned in canonical JSON form.
- This function is intended for external tools that wish to independently
- sign metadata.
-
- The normal workflow for this use case is to:
- (1) call dump_signable_metadata(metadata_filepath)
- (2) sign the output with an external tool
- (3) call append_signature(signature, metadata_filepath)
-
-
- metadata_filepath:
- The path to the metadata file. For example,
- repository/metadata/root.json.
-
-
- securesystemslib.exceptions.FormatError, if the arguments are improperly
- formatted.
-
- IOError, if 'metadata_filepath' cannot be opened.
-
-
- None.
-
-
- Metadata content that is normally signed by the repository tool (i.e., the
- "signed" portion of a metadata file).
- """
-
- # Is the argument properly formatted?
- sslib_formats.PATH_SCHEMA.check_match(metadata_filepath)
-
- signable = sslib_util.load_json_file(metadata_filepath)
-
- # Is 'signable' a valid metadata file?
- formats.SIGNABLE_SCHEMA.check_match(signable)
-
- return sslib_formats.encode_canonical(signable['signed'])
-
-
-
-
-
-def append_signature(signature, metadata_filepath):
- """
-
- Append 'signature' to the metadata at 'metadata_filepath'. The signature
- is assumed to be valid, and externally generated by signing the output of
- dump_signable_metadata(metadata_filepath). This function is intended for
- external tools that wish to independently sign metadata.
-
- The normal workflow for this use case is to:
- (1) call dump_signable_metadata(metadata_filepath)
- (2) sign the output with an external tool
- (3) call append_signature(signature, metadata_filepath)
-
-
- signature:
- A TUF signature structure that contains the KEYID, signing method, and
- the signature. It conforms to securesystemslib.formats.SIGNATURE_SCHEMA.
-
- For example:
-
- {
- "keyid": "a0a0f0cf08...",
- "method": "ed25519",
- "sig": "14f6e6566ec13..."
- }
-
- metadata_filepath:
- The path to the metadata file. For example,
- repository/metadata/root.json.
-
-
- securesystemslib.exceptions.FormatError, if the arguments are improperly
- formatted.
-
-
- 'metadata_filepath' is overwritten.
-
-
- None.
- """
-
- # Are the arguments properly formatted?
- sslib_formats.SIGNATURE_SCHEMA.check_match(signature)
- sslib_formats.PATH_SCHEMA.check_match(metadata_filepath)
-
- signable = sslib_util.load_json_file(metadata_filepath)
-
- # Is 'signable' a valid metadata file?
- formats.SIGNABLE_SCHEMA.check_match(signable)
-
- signable['signatures'].append(signature)
-
- file_object = tempfile.TemporaryFile()
-
- written_metadata_content = json.dumps(signable, indent=1,
- separators=(',', ': '), sort_keys=True).encode('utf-8')
-
- file_object.write(written_metadata_content)
- sslib_util.persist_temp_file(file_object, metadata_filepath)
-
-
-
-
-
-if __name__ == '__main__':
- # The interactive sessions of the documentation strings can
- # be tested by running repository_tool.py as a standalone module:
- # $ python3 repository_tool.py.
- import doctest
- doctest.testmod()
diff --git a/tuf/requests_fetcher.py b/tuf/requests_fetcher.py
deleted file mode 100644
index 1692ebee7c..0000000000
--- a/tuf/requests_fetcher.py
+++ /dev/null
@@ -1,173 +0,0 @@
-# Copyright 2021, New York University and the TUF contributors
-# SPDX-License-Identifier: MIT OR Apache-2.0
-
-"""Provides an implementation of FetcherInterface using the Requests HTTP
- library.
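-
- A usage sketch (the URL and length are hypothetical):
-
- fetcher = RequestsFetcher()
- data = b''.join(fetcher.fetch('http://localhost:8001/root.json', 4096))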
-
-"""
-
-# Imports
-import requests
-import logging
-import time
-from urllib import parse
-from urllib3.exceptions import ReadTimeoutError
-
-import tuf
-from tuf import exceptions
-from tuf import settings
-
-from tuf.client.fetcher import FetcherInterface
-
-# Globals
-logger = logging.getLogger(__name__)
-
-# Classes
-class RequestsFetcher(FetcherInterface):
- """A concrete implementation of FetcherInterface based on the Requests
- library.
-
- Attributes:
- _sessions: A dictionary of requests.Session objects storing a separate
- session per scheme+hostname combination.
- """
-
- def __init__(self):
- # From http://docs.python-requests.org/en/master/user/advanced/#session-objects:
- #
- # "The Session object allows you to persist certain parameters across
- # requests. It also persists cookies across all requests made from the
- # Session instance, and will use urllib3's connection pooling. So if you're
- # making several requests to the same host, the underlying TCP connection
- # will be reused, which can result in a significant performance increase
- # (see HTTP persistent connection)."
- #
- # NOTE: We use a separate requests.Session per scheme+hostname combination,
- # in order to reuse connections to the same hostname and improve efficiency,
- # while avoiding the sharing of state between different scheme+hostname
- # combinations, to minimize subtle security issues. Some cookies may not be
- # HTTP-safe.
- self._sessions = {}
-
-
- def fetch(self, url, required_length):
- """Fetches the contents of an HTTP/HTTPS url from a remote server.
-
- Ensures the length of the downloaded data is at most 'required_length'.
-
- Arguments:
- url: A URL string that represents a file location.
- required_length: An integer value representing the file length in bytes.
-
- Raises:
- tuf.exceptions.SlowRetrievalError: A timeout occurs while receiving data.
- tuf.exceptions.FetcherHTTPError: An HTTP error code is received.
-
- Returns:
- A bytes iterator.
- """
- # Get a customized session for each new scheme+hostname combination.
- session = self._get_session(url)
-
- # Get the requests.Response object for this URL.
- #
- # Defer downloading the response body with stream=True.
- # Always set the timeout. This timeout value is interpreted by requests as:
- # - connect timeout (max delay before first byte is received)
- # - read (gap) timeout (max delay between bytes received)
- response = session.get(url, stream=True,
- timeout=settings.SOCKET_TIMEOUT)
- # Check response status.
- try:
- response.raise_for_status()
- except requests.HTTPError as e:
- response.close()
- status = e.response.status_code
- raise exceptions.FetcherHTTPError(str(e), status)
-
-
- # Define a generator function to be returned by fetch. This way the caller
- # of fetch can differentiate between connection and actual data download
- # and measure download times accordingly.
- def chunks():
- try:
- bytes_received = 0
- while True:
- # We download a fixed chunk of data in every round. This is so that we
- # can defend against slow retrieval attacks. Furthermore, we do not
- # wish to download an extremely large file in one shot.
- # Before beginning the round, sleep (if set) for a short amount of
- # time so that the CPU is not hogged in the while loop.
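- # (Illustration with hypothetical settings: if CHUNK_SIZE is 8192 and
- # required_length is 20000, the rounds read 8192, 8192, and 3616 bytes
- # before the length check below ends the loop.)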
- if settings.SLEEP_BEFORE_ROUND: - time.sleep(settings.SLEEP_BEFORE_ROUND) - - read_amount = min( - settings.CHUNK_SIZE, required_length - bytes_received) - - # NOTE: This may not handle some servers adding a Content-Encoding - # header, which may cause urllib3 to misbehave: - # https://github.com/pypa/pip/blob/404838abcca467648180b358598c597b74d568c9/src/pip/_internal/download.py#L547-L582 - data = response.raw.read(read_amount) - bytes_received += len(data) - - # We might have no more data to read. Check number of bytes downloaded. - if not data: - logger.debug('Downloaded ' + repr(bytes_received) + '/' + - repr(required_length) + ' bytes.') - - # Finally, we signal that the download is complete. - break - - yield data - - if bytes_received >= required_length: - break - - except ReadTimeoutError as e: - raise exceptions.SlowRetrievalError(str(e)) - - finally: - response.close() - - return chunks() - - - - def _get_session(self, url): - """Returns a different customized requests.Session per schema+hostname - combination. - """ - # Use a different requests.Session per schema+hostname combination, to - # reuse connections while minimizing subtle security issues. - parsed_url = parse.urlparse(url) - - if not parsed_url.scheme or not parsed_url.hostname: - raise exceptions.URLParsingError( - 'Could not get scheme and hostname from URL: ' + url) - - session_index = parsed_url.scheme + '+' + parsed_url.hostname - - logger.debug('url: ' + url) - logger.debug('session index: ' + session_index) - - session = self._sessions.get(session_index) - - if not session: - session = requests.Session() - self._sessions[session_index] = session - - # Attach some default headers to every Session. - requests_user_agent = session.headers['User-Agent'] - # Follows the RFC: https://tools.ietf.org/html/rfc7231#section-5.5.3 - tuf_user_agent = 'tuf/' + tuf.__version__ + ' ' + requests_user_agent - session.headers.update({ - # Tell the server not to compress or modify anything. - # https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Accept-Encoding#Directives - 'Accept-Encoding': 'identity', - # The TUF user agent. - 'User-Agent': tuf_user_agent}) - - logger.debug('Made new session for ' + session_index) - - else: - logger.debug('Reusing session for ' + session_index) - - return session diff --git a/tuf/roledb.py b/tuf/roledb.py deleted file mode 100755 index 02c7b801eb..0000000000 --- a/tuf/roledb.py +++ /dev/null @@ -1,1013 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2012 - 2017, New York University and the TUF contributors -# SPDX-License-Identifier: MIT OR Apache-2.0 - -""" - - roledb.py - - - Vladimir Diaz - - - March 21, 2012. Based on a previous version of this module by Geremy Condra. - - - See LICENSE-MIT OR LICENSE for licensing information. - - - Represent a collection of roles and their organization. The caller may - create a collection of roles from those found in the 'root.json' metadata - file by calling 'create_roledb_from_root_metadata()', or individually by - adding roles with 'add_role()'. There are many supplemental functions - included here that yield useful information about the roles contained in the - database, such as extracting all the parent rolenames for a specified - rolename, deleting all the delegated roles, retrieving role paths, etc. The - Update Framework process maintains a role database for each repository. 
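-
- A usage sketch (the rolename and keyid are hypothetical, and the roleinfo
- is abbreviated):
-
- from tuf import roledb
- roledb.add_role('django', {'keyids': ['34345df32093bd12...'], 'threshold': 1})
- roledb.get_role_threshold('django') # returns 1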
-
- The role database is a dictionary conformant to
- 'tuf.formats.ROLEDICT_SCHEMA' and has the form:
-
- {'repository_name': {
- 'rolename': {'keyids': ['34345df32093bd12...'],
- 'threshold': 1,
- 'signatures': ['abcd3452...'],
- 'paths': ['role.json'],
- 'path_hash_prefixes': ['ab34df13'],
- 'delegations': {'keys': {}, 'roles': []}}}}
-
- The 'name', 'paths', 'path_hash_prefixes', and 'delegations' dict keys are
- optional.
-"""
-
-import logging
-import copy
-
-import securesystemslib # pylint: disable=unused-import
-from securesystemslib import exceptions as sslib_exceptions
-from securesystemslib import formats as sslib_formats
-
-from tuf import exceptions
-from tuf import formats
-
-# See 'tuf.log' to learn how logging is handled in TUF.
-logger = logging.getLogger(__name__)
-
-# The role database.
-_roledb_dict = {}
-_roledb_dict['default'] = {}
-
-# A dictionary (where the keys are repository names) containing a set of roles
-# that have been modified (e.g., via update_roleinfo()) and should be written
-# to disk.
-_dirty_roles = {}
-_dirty_roles['default'] = set()
-
-
-TOP_LEVEL_ROLES = ['root', 'targets', 'snapshot', 'timestamp']
-
-
-def create_roledb_from_root_metadata(root_metadata, repository_name='default'):
- """
-
- Create a role database containing all of the unique roles found in
- 'root_metadata'.
-
-
- root_metadata:
- A dictionary conformant to 'tuf.formats.ROOT_SCHEMA'. The
- roles found in the 'roles' field of 'root_metadata' are needed by this
- function.
-
- repository_name:
- The name of the repository to store 'root_metadata'. If not supplied,
- 'root_metadata' is stored in the 'default' repository.
-
-
- securesystemslib.exceptions.FormatError, if 'root_metadata' does not have
- the correct object format.
-
- securesystemslib.exceptions.Error, if one of the roles found in
- 'root_metadata' contains an invalid delegation (i.e., a nonexistent parent
- role).
-
-
- Calls add_role(). The old role database for 'repository_name' is replaced.
-
-
- None.
- """
-
- # Does 'root_metadata' have the correct object format?
- # This check will ensure 'root_metadata' has the appropriate number of objects
- # and object types, and that all dict keys are properly named.
- # Raises securesystemslib.exceptions.FormatError.
- formats.ROOT_SCHEMA.check_match(root_metadata)
-
- # Is 'repository_name' formatted correctly?
- sslib_formats.NAME_SCHEMA.check_match(repository_name)
-
- # Clear the role database.
- if repository_name in _roledb_dict:
- _roledb_dict[repository_name].clear()
-
- # Ensure _roledb_dict and _dirty_roles contain an entry for
- # 'repository_name' so that adding the newly created roleinfo succeeds.
- _roledb_dict[repository_name] = {}
- _dirty_roles[repository_name] = set()
-
- # Do not modify the contents of the 'root_metadata' argument.
- root_metadata = copy.deepcopy(root_metadata)
-
- # Iterate over the roles found in 'root_metadata' and add them to
- # '_roledb_dict'. Duplicates are avoided.
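- # (For example, for a hypothetical 'root_metadata' whose 'roles' field
- # lists 'root', 'targets', 'snapshot', and 'timestamp', four roleinfo
- # entries are added below; 'root' additionally inherits the metadata's
- # 'version' and 'expires' values.)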
- for rolename, roleinfo in root_metadata['roles'].items():
- if rolename == 'root':
- roleinfo['version'] = root_metadata['version']
- roleinfo['expires'] = root_metadata['expires']
- roleinfo['previous_keyids'] = roleinfo['keyids']
- roleinfo['previous_threshold'] = roleinfo['threshold']
-
- roleinfo['signatures'] = []
- roleinfo['signing_keyids'] = []
- roleinfo['partial_loaded'] = False
-
- if rolename.startswith('targets'):
- roleinfo['paths'] = {}
- roleinfo['delegations'] = {'keys': {}, 'roles': []}
-
- add_role(rolename, roleinfo, repository_name)
-
-
-
-
-
-def create_roledb(repository_name):
- """
-
- Create a roledb for the repository named 'repository_name'. This function
- is intended for creation of a non-default roledb.
-
-
- repository_name:
- The name of the repository to create. An empty roledb is created, and
- roles may be added via add_role(rolename, roleinfo, repository_name) or
- create_roledb_from_root_metadata(root_metadata, repository_name).
-
-
- securesystemslib.exceptions.FormatError, if 'repository_name' is improperly
- formatted.
-
- securesystemslib.exceptions.InvalidNameError, if 'repository_name' already
- exists in the roledb.
-
-
- None.
-
-
- None.
- """
-
- # Is 'repository_name' properly formatted? If not, raise
- # 'securesystemslib.exceptions.FormatError'.
- sslib_formats.NAME_SCHEMA.check_match(repository_name)
-
- if repository_name in _roledb_dict or repository_name in _dirty_roles:
- raise sslib_exceptions.InvalidNameError('Repository name'
- ' already exists: ' + repr(repository_name))
-
- _roledb_dict[repository_name] = {}
- _dirty_roles[repository_name] = set()
-
-
-
-
-
-def remove_roledb(repository_name):
- """
-
- Remove the roledb belonging to 'repository_name'.
-
-
- repository_name:
- The name of the repository to remove. 'repository_name' cannot be
- 'default' because the default repository is expected to always exist.
-
-
- securesystemslib.exceptions.FormatError, if 'repository_name' is improperly
- formatted.
-
- securesystemslib.exceptions.InvalidNameError, if 'repository_name' is the
- 'default' repository name. The 'default' repository name should always
- exist.
-
-
- None.
-
-
- None.
- """
-
- # Is 'repository_name' properly formatted? If not, raise
- # 'securesystemslib.exceptions.FormatError'.
- sslib_formats.NAME_SCHEMA.check_match(repository_name)
-
- if repository_name not in _roledb_dict or repository_name not in _dirty_roles:
- logger.warning('Repository name does not exist:'
- ' ' + repr(repository_name))
- return
-
- if repository_name == 'default':
- raise sslib_exceptions.InvalidNameError('Cannot remove the'
- ' default repository: ' + repr(repository_name))
-
- del _roledb_dict[repository_name]
- del _dirty_roles[repository_name]
-
-
-
-def add_role(rolename, roleinfo, repository_name='default'):
- """
-
- Add to the role database the 'roleinfo' associated with 'rolename'.
-
-
- rolename:
- An object representing the role's name, conformant to 'ROLENAME_SCHEMA'
- (e.g., 'root', 'snapshot', 'timestamp').
-
- roleinfo:
- An object representing the role associated with 'rolename', conformant to
- ROLEDB_SCHEMA. 'roleinfo' has the form:
- {'keyids': ['34345df32093bd12...'],
- 'threshold': 1,
- 'signatures': ['ab23dfc32'],
- 'paths': ['path/to/target1', 'path/to/target2', ...],
- 'path_hash_prefixes': ['a324fcd...', ...],
- 'delegations': {'keys': {}, 'roles': []}}
-
- The 'paths', 'path_hash_prefixes', and 'delegations' dict keys are
- optional.
-
- The 'targets' role has an additional 'paths' key.
Its value is a list of - strings representing the path of the target file(s). - - repository_name: - The name of the repository to store 'rolename'. If not supplied, - 'rolename' is added to the 'default' repository. - - - securesystemslib.exceptions.FormatError, if 'rolename' or 'roleinfo' does - not have the correct object format. - - securesystemslib.exceptions.RoleAlreadyExistsError, if 'rolename' has - already been added. - - securesystemslib.exceptions.InvalidNameError, if 'rolename' is improperly - formatted, or 'repository_name' does not exist. - - - The role database is modified. - - - None. - """ - - # Does 'rolename' have the correct object format? - # This check will ensure 'rolename' has the appropriate number of objects - # and object types, and that all dict keys are properly named. - formats.ROLENAME_SCHEMA.check_match(rolename) - - # Does 'roleinfo' have the correct object format? - formats.ROLEDB_SCHEMA.check_match(roleinfo) - - # Is 'repository_name' correctly formatted? - sslib_formats.NAME_SCHEMA.check_match(repository_name) - - # Raises securesystemslib.exceptions.InvalidNameError. - _validate_rolename(rolename) - - if repository_name not in _roledb_dict: - raise sslib_exceptions.InvalidNameError('Repository name does not exist: ' + repository_name) - - if rolename in _roledb_dict[repository_name]: - raise exceptions.RoleAlreadyExistsError('Role already exists: ' + rolename) - - _roledb_dict[repository_name][rolename] = copy.deepcopy(roleinfo) - - - - - -def update_roleinfo(rolename, roleinfo, mark_role_as_dirty=True, repository_name='default'): - """ - - Modify 'rolename's _roledb_dict entry to include the new 'roleinfo'. - 'rolename' is also added to the _dirty_roles set. Roles added to - '_dirty_roles' are marked as modified and can be used by the repository - tools to determine which roles need to be written to disk. - - - rolename: - An object representing the role's name, conformant to 'ROLENAME_SCHEMA' - (e.g., 'root', 'snapshot', 'timestamp'). - - roleinfo: - An object representing the role associated with 'rolename', conformant to - ROLEDB_SCHEMA. 'roleinfo' has the form: - {'name': 'role_name', - 'keyids': ['34345df32093bd12...'], - 'threshold': 1, - 'paths': ['path/to/target1', 'path/to/target2', ...], - 'path_hash_prefixes': ['a324fcd...', ...]} - - The 'name', 'paths', and 'path_hash_prefixes' dict keys are optional. - - The 'target' role has an additional 'paths' key. Its value is a list of - strings representing the path of the target file(s). - - mark_role_as_dirty: - A boolean indicating whether the updated 'roleinfo' for 'rolename' should - be marked as dirty. The caller might not want to mark 'rolename' as - dirty if it is loading metadata from disk and only wants to populate - roledb.py. Likewise, add_role() would support a similar boolean to allow - the repository tools to successfully load roles via load_repository() - without needing to mark these roles as dirty (default behavior). - - repository_name: - The name of the repository to update the roleinfo of 'rolename'. If not - supplied, the 'default' repository is searched. - - - securesystemslib.exceptions.FormatError, if 'rolename' or 'roleinfo' does - not have the correct object format. - - tuf.exceptions.UnknownRoleError, if 'rolename' cannot be found - in the role database. - - securesystemslib.exceptions.InvalidNameError, if 'rolename' is improperly - formatted, or 'repository_name' does not exist in the role database. - - - The role database is modified. - - - None. 
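-
- A usage sketch (the rolename and new threshold are hypothetical):
-
- roleinfo = get_roleinfo('snapshot')
- roleinfo['threshold'] = 2
- update_roleinfo('snapshot', roleinfo)
- get_dirty_roles() # returns ['snapshot']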
- """ - - # Does the arguments have the correct object format? - # This check will ensure arguments have the appropriate number of objects - # and object types, and that all dict keys are properly named. - formats.ROLENAME_SCHEMA.check_match(rolename) - sslib_formats.BOOLEAN_SCHEMA.check_match(mark_role_as_dirty) - sslib_formats.NAME_SCHEMA.check_match(repository_name) - - # Does 'roleinfo' have the correct object format? - formats.ROLEDB_SCHEMA.check_match(roleinfo) - - # Raises securesystemslib.exceptions.InvalidNameError. - _validate_rolename(rolename) - - if repository_name not in _roledb_dict or repository_name not in _dirty_roles: - raise sslib_exceptions.InvalidNameError('Repository name does not' ' exist: ' + - repository_name) - - if rolename not in _roledb_dict[repository_name]: - raise exceptions.UnknownRoleError('Role does not exist: ' + rolename) - - # Update the global _roledb_dict and _dirty_roles structures so that - # the latest 'roleinfo' is available to other modules, and the repository - # tools know which roles should be saved to disk. - _roledb_dict[repository_name][rolename] = copy.deepcopy(roleinfo) - - if mark_role_as_dirty: - _dirty_roles[repository_name].add(rolename) - - - - - -def get_dirty_roles(repository_name='default'): - """ - - A function that returns a list of the roles that have been modified. Tools - that write metadata to disk can use the list returned to determine which - roles should be written. - - - repository_name: - The name of the repository to get the dirty roles. If not supplied, the - 'default' repository is searched. - - - securesystemslib.exceptions.FormatError, if 'repository_name' is improperly - formatted. - - securesystemslib.exceptions.InvalidNameError, if 'repository_name' does not - exist in the role database. - - - None. - - - A sorted list of the roles that have been modified. - """ - - # Does 'repository_name' have the correct format? Raise - # 'securesystemslib.exceptions.FormatError' if not. - sslib_formats.NAME_SCHEMA.check_match(repository_name) - - if repository_name not in _roledb_dict or repository_name not in _dirty_roles: - raise sslib_exceptions.InvalidNameError('Repository name does' - ' not' ' exist: ' + repository_name) - - return sorted(list(_dirty_roles[repository_name])) - - - -def mark_dirty(roles, repository_name='default'): - """ - - Mark the list of 'roles' as dirty. - - - repository_name: - The name of the repository to get the dirty roles. If not supplied, the - 'default' repository is searched. - - roles: - A list of roles that should be marked as dirty. - - - securesystemslib.exceptions.FormatError, if the arguments are improperly - formatted. - - securesystemslib.exceptions.InvalidNameError, if 'repository_name' does not - exist in the role database. - - - None. - - - None. - """ - - # Are the arguments properly formatted? If not, raise - # securesystemslib.exceptions.FormatError. - sslib_formats.NAMES_SCHEMA.check_match(roles) - sslib_formats.NAME_SCHEMA.check_match(repository_name) - - if repository_name not in _roledb_dict or repository_name not in _dirty_roles: - raise sslib_exceptions.InvalidNameError('Repository name does' - ' not' ' exist: ' + repository_name) - - _dirty_roles[repository_name].update(roles) - - - -def unmark_dirty(roles, repository_name='default'): - """ - - No longer mark the roles in 'roles' as dirty. - - - repository_name: - The name of the repository to get the dirty roles. If not supplied, the - 'default' repository is searched. 
-
- roles:
- A list of roles that should no longer be marked as dirty.
-
-
- securesystemslib.exceptions.FormatError, if the arguments are improperly
- formatted.
-
- securesystemslib.exceptions.InvalidNameError, if 'repository_name' does not
- exist in the role database.
-
-
- None.
-
-
- None.
- """
-
- # Are the arguments properly formatted? If not, raise
- # securesystemslib.exceptions.FormatError.
- sslib_formats.NAMES_SCHEMA.check_match(roles)
- sslib_formats.NAME_SCHEMA.check_match(repository_name)
-
- if repository_name not in _roledb_dict or repository_name not in _dirty_roles:
- raise sslib_exceptions.InvalidNameError('Repository name does'
- ' not exist: ' + repository_name)
-
- for role in roles:
- try:
- _dirty_roles[repository_name].remove(role)
-
- except (KeyError, ValueError):
- logger.debug(repr(role) + ' is not dirty.')
-
-
-
-def role_exists(rolename, repository_name='default'):
- """
-
- Verify whether 'rolename' is stored in the role database.
-
-
- rolename:
- An object representing the role's name, conformant to 'ROLENAME_SCHEMA'
- (e.g., 'root', 'snapshot', 'timestamp').
-
- repository_name:
- The name of the repository to check whether 'rolename' exists. If not
- supplied, the 'default' repository is searched.
-
-
- securesystemslib.exceptions.FormatError, if 'rolename' does not have the
- correct object format.
-
- securesystemslib.exceptions.InvalidNameError, if 'rolename' is incorrectly
- formatted, or 'repository_name' does not exist in the role database.
-
-
- None.
-
-
- Boolean. True if 'rolename' is found in the role database, False otherwise.
- """
-
- # Raise securesystemslib.exceptions.FormatError, or
- # securesystemslib.exceptions.InvalidNameError if the arguments are invalid.
- # We do not intercept securesystemslib.exceptions.FormatError
- # or securesystemslib.exceptions.InvalidNameError exceptions.
- try:
- _check_rolename(rolename, repository_name)
-
- except exceptions.UnknownRoleError:
- return False
-
- return True
-
-
-
-
-
-def remove_role(rolename, repository_name='default'):
- """
-
- Remove 'rolename'. Delegated roles were previously removed as well, but
- this step is no longer supported since the repository can resemble a
- graph of delegations. That is, we shouldn't delete 'rolename's
- delegations because another role may have a valid delegation to it,
- whereas previously the only valid delegation to it had to be from
- 'rolename' (the repository resembled a tree of delegations).
-
-
- rolename:
- An object representing the role's name, conformant to 'ROLENAME_SCHEMA'
- (e.g., 'root', 'snapshot', 'timestamp').
-
- repository_name:
- The name of the repository from which to remove the role. If not
- supplied, the 'default' repository is searched.
-
-
- securesystemslib.exceptions.FormatError, if 'rolename' does not have the
- correct object format.
-
- tuf.exceptions.UnknownRoleError, if 'rolename' cannot be found
- in the role database.
-
- securesystemslib.exceptions.InvalidNameError, if 'rolename' is incorrectly
- formatted, or 'repository_name' does not exist in the role database.
-
-
- A role may be removed from the role database.
-
-
- None.
- """
-
- # Does 'repository_name' have the correct format? Raise
- # 'securesystemslib.exceptions.FormatError' if it is improperly formatted.
- sslib_formats.NAME_SCHEMA.check_match(repository_name)
-
- # Raises securesystemslib.exceptions.FormatError,
- # tuf.exceptions.UnknownRoleError, or
- # securesystemslib.exceptions.InvalidNameError.
- _check_rolename(rolename, repository_name) - - # 'rolename' was verified to exist in _check_rolename(). - # Remove 'rolename' now. - del _roledb_dict[repository_name][rolename] - - - - - -def get_rolenames(repository_name='default'): - """ - - Return a list of the rolenames found in the role database. - - - repository_name: - The name of the repository to get the rolenames. If not supplied, the - 'default' repository is searched. - - - securesystemslib.exceptions.FormatError, if 'repository_name' is improperly - formatted. - - securesystemslib.exceptions.InvalidNameError, if 'repository_name' does not - exist in the role database. - - - None. - - - A list of rolenames. - """ - - # Does 'repository_name' have the correct format? Raise - # 'securesystemslib.exceptions.FormatError' if it is improperly formatted. - sslib_formats.NAME_SCHEMA.check_match(repository_name) - - if repository_name not in _roledb_dict or repository_name not in _dirty_roles: - raise sslib_exceptions.InvalidNameError('Repository name does' - ' not' ' exist: ' + repository_name) - - return list(_roledb_dict[repository_name].keys()) - - - - - -def get_roleinfo(rolename, repository_name='default'): - """ - - Return the roleinfo of 'rolename'. - - {'keyids': ['34345df32093bd12...'], - 'threshold': 1, - 'signatures': ['ab453bdf...', ...], - 'paths': ['path/to/target1', 'path/to/target2', ...], - 'path_hash_prefixes': ['a324fcd...', ...], - 'delegations': {'keys': {}, 'roles': []}} - - The 'signatures', 'paths', 'path_hash_prefixes', and 'delegations' dict keys - are optional. - - - rolename: - An object representing the role's name, conformant to 'ROLENAME_SCHEMA' - (e.g., 'root', 'snapshot', 'timestamp'). - - repository_name: - The name of the repository to get the role info. If not supplied, the - 'default' repository is searched. - - - securesystemslib.exceptions.FormatError, if the arguments are improperly - formatted. - - tuf.exceptions.UnknownRoleError, if 'rolename' does not exist. - - securesystemslib.exceptions.InvalidNameError, if 'rolename' is incorrectly - formatted, or 'repository_name' does not exist in the role database. - - - - None. - - - The roleinfo of 'rolename'. - """ - - # Is 'repository_name' properly formatted? If not, raise - # 'securesystemslib.exceptions.FormatError'. - sslib_formats.NAME_SCHEMA.check_match(repository_name) - - # Raises securesystemslib.exceptions.FormatError, - # tuf.exceptions.UnknownRoleError, or - # securesystemslib.exceptions.InvalidNameError. - _check_rolename(rolename, repository_name) - - return copy.deepcopy(_roledb_dict[repository_name][rolename]) - - - - - -def get_role_keyids(rolename, repository_name='default'): - """ - - Return a list of the keyids associated with 'rolename'. Keyids are used as - identifiers for keys (e.g., rsa key). A list of keyids are associated with - each rolename. Signing a metadata file, such as 'root.json' (Root role), - involves signing or verifying the file with a list of keys identified by - keyid. - - - rolename: - An object representing the role's name, conformant to 'ROLENAME_SCHEMA' - (e.g., 'root', 'snapshot', 'timestamp'). - - repository_name: - The name of the repository to get the role keyids. If not supplied, the - 'default' repository is searched. - - - securesystemslib.exceptions.FormatError, if the arguments do not have the - correct object format. - - tuf.exceptions.UnknownRoleError, if 'rolename' cannot be found - in the role database. 
- - securesystemslib.exceptions.InvalidNameError, if 'rolename' is incorrectly - formatted, or 'repository_name' does not exist in the role database. - - - None. - - - A list of keyids. - """ - - # Raise 'securesystemslib.exceptions.FormatError' if 'repository_name' is - # improperly formatted. - sslib_formats.NAME_SCHEMA.check_match(repository_name) - - # Raises securesystemslib.exceptions.FormatError, - # tuf.exceptions.UnknownRoleError, or - # securesystemslib.exceptions.InvalidNameError. - _check_rolename(rolename, repository_name) - - roleinfo = _roledb_dict[repository_name][rolename] - - return roleinfo['keyids'] - - - - - -def get_role_threshold(rolename, repository_name='default'): - """ - - Return the threshold value of the role associated with 'rolename'. - - - rolename: - An object representing the role's name, conformant to 'ROLENAME_SCHEMA' - (e.g., 'root', 'snapshot', 'timestamp'). - - repository_name: - The name of the repository to get the role threshold. If not supplied, - the 'default' repository is searched. - - - - securesystemslib.exceptions.FormatError, if the arguments do not have the - correct object format. - - tuf.exceptions.UnknownRoleError, if 'rolename' cannot be found - in the role database. - - securesystemslib.exceptions.InvalidNameError, if 'rolename' is incorrectly - formatted, or 'repository_name' does not exist in the role database. - - - None. - - - A threshold integer value. - """ - - # Raise 'securesystemslib.exceptions.FormatError' if 'repository_name' is - # improperly formatted. - sslib_formats.NAME_SCHEMA.check_match(repository_name) - - # Raises securesystemslib.exceptions.FormatError, - # tuf.exceptions.UnknownRoleError, or - # securesystemslib.exceptions.InvalidNameError. - _check_rolename(rolename, repository_name) - - roleinfo = _roledb_dict[repository_name][rolename] - - return roleinfo['threshold'] - - - - - -def get_role_paths(rolename, repository_name='default'): - """ - - Return the paths of the role associated with 'rolename'. - - - rolename: - An object representing the role's name, conformant to 'ROLENAME_SCHEMA' - (e.g., 'root', 'snapshot', 'timestamp'). - - repository_name: - The name of the repository to get the role paths. If not supplied, the - 'default' repository is searched. - - - securesystemslib.exceptions.FormatError, if the arguments do not have the - correct object format. - - tuf.exceptions.UnknownRoleError, if 'rolename' cannot be found - in the role database. - - securesystemslib.exceptions.InvalidNameError, if 'rolename' is incorrectly - formatted, or 'repository_name' does not exist in the role database. - - - None. - - - A list of paths. - """ - - # Raise 'securesystemslib.exceptions.FormatError' if 'repository_name' is - # improperly formatted. - sslib_formats.NAME_SCHEMA.check_match(repository_name) - - # Raises securesystemslib.exceptions.FormatError, - # tuf.exceptions.UnknownRoleError, or - # securesystemslib.exceptions.InvalidNameError. - _check_rolename(rolename, repository_name) - - roleinfo = _roledb_dict[repository_name][rolename] - - # Paths won't exist for non-target roles. - try: - return roleinfo['paths'] - - except KeyError: - return dict() - - - - - -def get_delegated_rolenames(rolename, repository_name='default'): - """ - - Return the delegations of a role. If 'rolename' is 'tuf' and the role - database contains ['django', 'requests', 'cryptography'], in 'tuf's - delegations field, return ['django', 'requests', 'cryptography']. 
- - - rolename: - An object representing the role's name, conformant to 'ROLENAME_SCHEMA' - (e.g., 'root', 'snapshot', 'timestamp'). - - repository_name: - The name of the repository to get the delegated rolenames. If not - supplied, the 'default' repository is searched. - - - securesystemslib.exceptions.FormatError, if the arguments do not have the - correct object format. - - tuf.exceptions.UnknownRoleError, if 'rolename' cannot be found - in the role database. - - securesystemslib.exceptions.InvalidNameError, if 'rolename' is incorrectly - formatted, or 'repository_name' does not exist in the role database. - - - None. - - - A list of rolenames. Note that the rolenames are *NOT* sorted by order of - delegation. - """ - - - # Does 'repository_name' have the correct format? Raise - # 'securesystemslib.exceptions.FormatError' if it does not. - sslib_formats.NAME_SCHEMA.check_match(repository_name) - - # Raises securesystemslib.exceptions.FormatError, - # tuf.exceptions.UnknownRoleError, or - # securesystemslib.exceptions.InvalidNameError. - _check_rolename(rolename, repository_name) - - # get_roleinfo() raises a 'securesystemslib.exceptions.InvalidNameError' if - # 'repository_name' does not exist in the role database. - roleinfo = get_roleinfo(rolename, repository_name) - delegated_roles = [] - - for delegated_role in roleinfo['delegations']['roles']: - delegated_roles.append(delegated_role['name']) - - return delegated_roles - - - - - -def clear_roledb(repository_name='default', clear_all=False): - """ - - Reset the roledb database. - - - repository_name: - The name of the repository to clear. If not supplied, the 'default' - repository is cleared. - - clear_all: - Boolean indicating whether to clear the entire roledb. - - - securesystemslib.exceptions.FormatError, if 'repository_name' does not have - the correct format. - - securesystemslib.exceptions.InvalidNameError, if 'repository_name' does not - exist in the role database. - - - None. - - - None. - """ - - # Do the arguments have the correct format? If not, raise - # 'securesystemslib.exceptions.FormatError' - sslib_formats.NAME_SCHEMA.check_match(repository_name) - sslib_formats.BOOLEAN_SCHEMA.check_match(clear_all) - - if repository_name not in _roledb_dict or repository_name not in _dirty_roles: - raise sslib_exceptions.InvalidNameError('Repository name does not' - ' exist: ' + repository_name) - - if clear_all: - _roledb_dict.clear() - _roledb_dict['default'] = {} - _dirty_roles.clear() - _dirty_roles['default'] = set() - return - - _roledb_dict[repository_name] = {} - _dirty_roles[repository_name] = set() - - - - - -def _check_rolename(rolename, repository_name='default'): - """ Raise securesystemslib.exceptions.FormatError if 'rolename' does not match - 'tuf.formats.ROLENAME_SCHEMA', - tuf.exceptions.UnknownRoleError if 'rolename' is not found in the - role database, or securesystemslib.exceptions.InvalidNameError if - 'repository_name' does not exist in the role database. - """ - - # Does 'rolename' have the correct object format? - # This check will ensure 'rolename' has the appropriate number of objects - # and object types, and that all dict keys are properly named. - formats.ROLENAME_SCHEMA.check_match(rolename) - - # Does 'repository_name' have the correct format? - sslib_formats.NAME_SCHEMA.check_match(repository_name) - - # Raises securesystemslib.exceptions.InvalidNameError. 
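- # (For example, hypothetical rolenames such as '', ' role', or 'role/'
- # fail this validation; see _validate_rolename() below.)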
-  _validate_rolename(rolename)
-
-  if repository_name not in _roledb_dict or repository_name not in _dirty_roles:
-    raise sslib_exceptions.InvalidNameError('Repository name does not'
-      ' exist: ' + repository_name)
-
-  if rolename not in _roledb_dict[repository_name]:
-    raise exceptions.UnknownRoleError('Role name does not exist: ' + rolename)
-
-
-
-
-
-def _validate_rolename(rolename):
-  """
-  Raise securesystemslib.exceptions.InvalidNameError if 'rolename' is not
-  formatted correctly.  It is assumed 'rolename' has been checked against
-  'ROLENAME_SCHEMA' prior to calling this function. """
-
-  if rolename == '':
-    raise sslib_exceptions.InvalidNameError('Rolename must *not* be'
-      ' an empty string.')
-
-  if rolename != rolename.strip():
-    raise sslib_exceptions.InvalidNameError('Invalid rolename.'
-      ' Cannot start or end with whitespace: ' + rolename)
-
-  if rolename.startswith('/') or rolename.endswith('/'):
-    raise sslib_exceptions.InvalidNameError('Invalid rolename.'
-      ' Cannot start or end with a "/": ' + rolename)
diff --git a/tuf/scripts/__init__.py b/tuf/scripts/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/tuf/scripts/client.py b/tuf/scripts/client.py
deleted file mode 100755
index 8f30c53648..0000000000
--- a/tuf/scripts/client.py
+++ /dev/null
@@ -1,236 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2012 - 2018, New York University and the TUF contributors
-# SPDX-License-Identifier: MIT OR Apache-2.0
-
-"""
-
-  client.py
-
-
-  Vladimir Diaz
-
-
-  September 2012.
-
-
-  See LICENSE-MIT OR LICENSE for licensing information.
-
-
-  Provide a basic TUF client that can update all of the metadata and target
-  files provided by the user-specified repository mirror.  Updated files are
-  saved to the 'targets' directory in the current working directory.  The
-  repository mirror is specified by the user through the '--repo' command-
-  line option.
-
-  Normally, a software updater integrating TUF will develop their own custom
-  client module by importing 'tuf.client.updater', instantiating the required
-  object, and calling the desired methods to perform an update.  This basic
-  client is provided to users who wish to give TUF a quick test run without
-  the hassle of writing client code.  This module can also be used by
-  updaters that do not need the customization and only require their clients
-  to perform an update of all the files provided by their repository
-  mirror(s).
-
-  For software updaters that DO require customization, see the
-  'example_client.py' script.  The 'example_client.py' script provides an
-  outline of the client code that software updaters may develop and then
-  tailor to their specific software updater or package manager.
-
-  Additional tools for clients running legacy applications will also be made
-  available.  These tools will allow secure software updates using The Update
-  Framework without the need to modify the original application.
-
-
-  $ client.py --repo http://localhost:8001
-  $ client.py --repo http://localhost:8001 --verbose 3
-
-
-  --verbose:
-    Set the verbosity level of logging messages.  Accepts values 0-5.
-
-    Example:
-      $ client.py --repo http://localhost:8001 --verbose 3 README.txt
-
-  --repo:
-    Set the repository mirror that will be responding to client requests.
-    E.g., 'http://localhost:8001'.
-
-    Example:
-      $ client.py --repo http://localhost:8001 README.txt
-"""
-
-import sys
-import argparse
-import logging
-
-from tuf import exceptions
-from tuf import log
-from tuf import settings
-from tuf.client.updater import Updater
-
-# See 'log.py' to learn how logging is handled in TUF.
-logger = logging.getLogger(__name__)
-
-
-def update_client(parsed_arguments):
-  """
-
-    Perform an update of the metadata and target files located at
-    'repository_mirror'.  Target files are saved to the 'targets' directory
-    in the current working directory.  The current directory must already
-    include a 'metadata' directory, which in turn must contain the 'current'
-    and 'previous' directories.  At a minimum, these two directories require
-    the 'root.json' metadata file.
-
-
-    parsed_arguments:
-      An argparse Namespace object, containing the parsed arguments.
-
-
-    tuf.exceptions.Error, if 'parsed_arguments' is not a Namespace object.
-
-
-    Connects to a repository mirror and updates the local metadata files and
-    any target files.  Obsolete, local targets are also removed.
-
-
-    None.
-  """
-
-  if not isinstance(parsed_arguments, argparse.Namespace):
-    raise exceptions.Error('Invalid namespace object.')
-
-  else:
-    logger.debug('We have a valid argparse Namespace object.')
-
-  # Set the local repositories directory containing all of the metadata files.
-  settings.repositories_directory = '.'
-
-  # Set the repository mirrors.  This dictionary is needed by the Updater
-  # class of updater.py.
-  repository_mirrors = {'mirror': {'url_prefix': parsed_arguments.repo,
-      'metadata_path': 'metadata', 'targets_path': 'targets'}}
-
-  # Create the repository object using the repository name 'repository'
-  # and the repository mirrors defined above.
-  updater = Updater('tufrepo', repository_mirrors)
-
-  # The local destination directory to save the target files.
-  destination_directory = './tuftargets'
-
-  # Refresh the repository's top-level roles...
-  updater.refresh(unsafely_update_root_if_necessary=False)
-
-  # ... and store the target information for the target file specified on the
-  # command line, and determine which of these targets have been updated.
-  target_fileinfo = []
-  for target in parsed_arguments.targets:
-    target_fileinfo.append(updater.get_one_valid_targetinfo(target))
-
-  updated_targets = updater.updated_targets(target_fileinfo, destination_directory)
-
-  # Retrieve each of these updated targets and save them to the destination
-  # directory.
-  for target in updated_targets:
-    try:
-      updater.download_target(target, destination_directory)
-
-    except exceptions.DownloadError:
-      pass
-
-  # Remove any files from the destination directory that are no longer being
-  # tracked.
-  updater.remove_obsolete_targets(destination_directory)
-
-
-
-
-
-def parse_arguments():
-  """
-
-    Parse the command-line options and set the logging level
-    as specified by the user through the --verbose option.
-    'client' expects the '--repo' to be set by the user.
-
-    Example:
-      $ client.py --repo http://localhost:8001 LICENSE
-
-    If the required option is unset, a parser error is printed
-    and the script exits.
-
-
-    None.
-
-
-    None.
-
-
-    Sets the logging level for TUF logging.
-
-
-    The parsed_arguments (i.e., an argparse Namespace object).
-  """
-
-  parser = argparse.ArgumentParser(
-      description='Retrieve file from TUF repository.')
-
-  # Add the options supported by 'basic_client' to the option parser.
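-  # For example (hypothetical invocation), parsing
-  #   $ client.py --repo http://localhost:8001 README.txt
-  # yields a Namespace with repo='http://localhost:8001',
-  # targets=['README.txt'], and verbose=2, given the defaults declared below.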
- parser.add_argument('-v', '--verbose', type=int, default=2, - choices=range(0, 6), help='Set the verbosity level of logging messages.' - ' The lower the setting, the greater the verbosity. Supported logging' - ' levels: 0=UNSET, 1=DEBUG, 2=INFO, 3=WARNING, 4=ERROR,' - ' 5=CRITICAL') - - parser.add_argument('-r', '--repo', type=str, required=True, metavar='', - help='Specify the remote repository\'s URI' - ' (e.g., http://www.example.com:8001/tuf/). The client retrieves' - ' updates from the remote repository.') - - parser.add_argument('targets', nargs='+', metavar='', help='Specify' - ' the target files to retrieve from the specified TUF repository.') - - parsed_arguments = parser.parse_args() - - - # Set the logging level. - if parsed_arguments.verbose == 5: - log.set_log_level(logging.CRITICAL) - - elif parsed_arguments.verbose == 4: - log.set_log_level(logging.ERROR) - - elif parsed_arguments.verbose == 3: - log.set_log_level(logging.WARNING) - - elif parsed_arguments.verbose == 2: - log.set_log_level(logging.INFO) - - elif parsed_arguments.verbose == 1: - log.set_log_level(logging.DEBUG) - - else: - log.set_log_level(logging.NOTSET) - - # Return the repository mirror containing the metadata and target files. - return parsed_arguments - - - -if __name__ == '__main__': - - # Parse the command-line arguments and set the logging level. - arguments = parse_arguments() - - # Perform an update of all the files in the 'targets' directory located in - # the current directory. - try: - update_client(arguments) - - except (exceptions.NoWorkingMirrorError, exceptions.RepositoryError, - exceptions.FormatError, exceptions.Error) as e: - sys.stderr.write('Error: ' + str(e) + '\n') - sys.exit(1) - - # Successfully updated the client's target files. - sys.exit(0) diff --git a/tuf/scripts/repo.py b/tuf/scripts/repo.py deleted file mode 100755 index 0b61b2bc59..0000000000 --- a/tuf/scripts/repo.py +++ /dev/null @@ -1,1149 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2018, New York University and the TUF contributors -# SPDX-License-Identifier: MIT OR Apache-2.0 - -""" - - repo.py - - - Vladimir Diaz - - - January 2018. - - - See LICENSE-MIT OR LICENSE for licensing information. - - - Provide a command-line interface to create and modify TUF repositories. The - CLI removes the need to write Python code when creating or modifying - repositories, which is the case with repository_tool.py and - developer_tool.py. - - Note: - 'python3 -m pip install securesystemslib[crypto,pynacl]' is required by the CLI, - which installs the 3rd-party dependencies: cryptography and pynacl. - - - Note: arguments within brackets are optional. - - $ repo.py --init - [--consistent, --bare, --path, --root_pw, --targets_pw, - --snapshot_pw, --timestamp_pw] - $ repo.py --add ... [--path, --recursive] - $ repo.py --remove - $ repo.py --distrust --pubkeys [--role] - $ repo.py --trust --pubkeys [--role] - $ repo.py --sign [--role ] - $ repo.py --key - [--filename - --path , --pw [my_password]] - $ repo.py --delegate --delegatee - --pubkeys - [--role --terminating --threshold - --sign ] - $ repo.py --revoke --delegatee - [--role --sign ] - $ repo.py --verbose <0-5> - $ repo.py --clean [--path] - - - --init: - Create new TUF repository in current working or specified directory. - - --consistent: - Enable consistent snapshots for newly created TUF repository. - - --bare: - Specify creation of bare TUF repository with no key created or set. - - --path: - Choose specified path location of a TUF repository or key(s). 
- - --role: - Specify top-level role(s) affected by the main command-line option. - - --pubkeys: - Indicate location of key(s) affected by the main command-line option. - - --root_pw: - Set password for encrypting top-level key file of root role. - - --targets_pw: - Set password for encrypting top-level key file of targets role. - - --snapshot_pw: - Set password for encrypting top-level key file of snapshot role. - - --timestamp_pw: - Set password for encrypting top-level key file of timestamp role. - - --add: - Add file specified by to the Targets metadata. - - --recursive: - Include files in subdirectories of specified directory . - - --remove: - Remove target files from Targets metadata matching . - - --distrust: - Discontinue trust of keys located in directory of a role. - - --trust: - Indicate trusted keys located in directory of a role. - - --sign: - Sign metadata of target role(s) with keys in specified directory. - - --key: - Generate cryptographic key of specified type (default: Ed25519). - - --filename: - Specify filename associated with generated top-level key. - - --pw: - Set password for the generated key of specified type . - - --delegate: - Delegate trust of target files from Targets role (or specified - in --role) to --delegatee role with specified . - - --delegatee: - Specify role that is targeted by delegator in --role to sign for - target files matching delegated or in revocation of trust. - - --terminating: - Mark delegation to --delegatee role from delegator as a terminating one. - - --threshold: - Specify signature threshold of --delegatee role as the value . - - --revoke: - Revoke trust of target files from delegated role (--delegatee) - - --verbose: - Set the verbosity level of logging messages. Accepts values 1-5. - - --clean: - Delete repo in current working or specified directory. -""" - -import os -import sys -import logging -import argparse -import shutil -import time -import fnmatch - -import securesystemslib # pylint: disable=unused-import -from securesystemslib import exceptions as sslib_exceptions -from securesystemslib import formats as sslib_formats -from securesystemslib import interface as sslib_interface -from securesystemslib import keys as sslib_keys -from securesystemslib import settings as sslib_settings -from securesystemslib import util as sslib_util - -from tuf import exceptions -from tuf import formats -from tuf import keydb -from tuf import log -from tuf import repository_tool as repo_tool -from tuf import roledb - - -# See 'log.py' to learn how logging is handled in TUF. -logger = logging.getLogger(__name__) - -repo_tool.disable_console_log_messages() - -PROG_NAME = 'repo.py' - -REPO_DIR = 'tufrepo' -CLIENT_DIR = 'tufclient' -KEYSTORE_DIR = 'tufkeystore' - -ROOT_KEY_NAME = 'root_key' -TARGETS_KEY_NAME = 'targets_key' -SNAPSHOT_KEY_NAME = 'snapshot_key' -TIMESTAMP_KEY_NAME = 'timestamp_key' - -STAGED_METADATA_DIR = 'metadata.staged' -METADATA_DIR = 'metadata' - -# The keytype strings, as expected on the command line. -ED25519_KEYTYPE = 'ed25519' -ECDSA_KEYTYPE = 'ecdsa' -RSA_KEYTYPE = 'rsa' -SUPPORTED_CLI_KEYTYPES = (ECDSA_KEYTYPE, ED25519_KEYTYPE, RSA_KEYTYPE) - -# The supported keytype strings (as they appear in metadata) are listed here -# because they won't necessarily match the key types supported by -# securesystemslib. -SUPPORTED_KEY_TYPES = ('rsa', 'ed25519', 'ecdsa', 'ecdsa-sha2-nistp256') - -# pylint: disable=protected-access -# ... 
to allow use of sslib _generate_and_write_*_keypair convenience methods - -def process_command_line_arguments(parsed_arguments): - """ - - Perform the relevant operations on the repo according to the chosen - command-line options. Which functions are executed depends on - 'parsed_arguments'. For instance, the --init and --clean options will - cause the init_repo() and clean_repo() functions to be called. - Multiple operations can be executed in one invocation of the CLI. - - - parsed_arguments: - The parsed arguments returned by argparse.parse_args(). - - - securesystemslib.exceptions.Error, if any of the arguments are - improperly formatted or if any of the argument could not be processed. - - - None. - - - None. - """ - - # Do we have a valid argparse Namespace? - if not isinstance(parsed_arguments, argparse.Namespace): - raise exceptions.Error('Invalid namespace: ' + repr(parsed_arguments)) - - else: - logger.debug('We have a valid argparse Namespace.') - - # TODO: Make sure the order that the arguments are processed allows for the - # most convenient use of multiple options in one invocation of the CLI. For - # instance, it might be best for --clean to be processed first before --init - # so that a user can do the following: repo.py --clean --init (that is, first - # clear the repo in the current working directory, and then initialize a new - # one. - if parsed_arguments.clean: - clean_repo(parsed_arguments) - - if parsed_arguments.init: - init_repo(parsed_arguments) - - if parsed_arguments.remove: - remove_targets(parsed_arguments) - - if parsed_arguments.add: - add_targets(parsed_arguments) - - if parsed_arguments.distrust: - remove_verification_key(parsed_arguments) - - if parsed_arguments.trust: - add_verification_key(parsed_arguments) - - if parsed_arguments.key: - gen_key(parsed_arguments) - - if parsed_arguments.revoke: - revoke(parsed_arguments) - - if parsed_arguments.delegate: - delegate(parsed_arguments) - - # --sign should be processed last, after the other options, so that metadata - # is signed last after potentially being modified by the other options. - if parsed_arguments.sign: - sign_role(parsed_arguments) - - - -def delegate(parsed_arguments): - - if not parsed_arguments.delegatee: - raise exceptions.Error( - '--delegatee must be set to perform the delegation.') - - if parsed_arguments.delegatee in ('root', 'snapshot', 'timestamp', 'targets'): - raise exceptions.Error( - 'Cannot delegate to the top-level role: ' + repr(parsed_arguments.delegatee)) - - if not parsed_arguments.pubkeys: - raise exceptions.Error( - '--pubkeys must be set to perform the delegation.') - - public_keys = [] - for public_key in parsed_arguments.pubkeys: - imported_pubkey = import_publickey_from_file(public_key) - public_keys.append(imported_pubkey) - - repository = repo_tool.load_repository( - os.path.join(parsed_arguments.path, REPO_DIR)) - - if parsed_arguments.role == 'targets': - repository.targets.delegate(parsed_arguments.delegatee, public_keys, - parsed_arguments.delegate, parsed_arguments.threshold, - parsed_arguments.terminating, list_of_targets=None, - path_hash_prefixes=None) - - targets_private = import_privatekey_from_file( - os.path.join(parsed_arguments.path, KEYSTORE_DIR, TARGETS_KEY_NAME), - parsed_arguments.targets_pw) - - repository.targets.load_signing_key(targets_private) - - # A delegated (non-top-level-Targets) role. 
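-  # For example (hypothetical invocation), "repo.py --delegate 'foo*.tgz'
-  # --delegatee foo --role unclaimed" takes this branch and updates the
-  # 'unclaimed' delegator's metadata rather than the top-level Targets role.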
-  else:
-    repository.targets(parsed_arguments.role).delegate(
-        parsed_arguments.delegatee, public_keys,
-        parsed_arguments.delegate, parsed_arguments.threshold,
-        parsed_arguments.terminating, list_of_targets=None,
-        path_hash_prefixes=None)
-
-  # Update the required top-level roles, Snapshot and Timestamp, to make a new
-  # release.  Automatically making a new release can be disabled via
-  # --no_release.
-  if not parsed_arguments.no_release:
-    snapshot_private = import_privatekey_from_file(
-        os.path.join(parsed_arguments.path, KEYSTORE_DIR, SNAPSHOT_KEY_NAME),
-        parsed_arguments.snapshot_pw)
-    timestamp_private = import_privatekey_from_file(
-        os.path.join(parsed_arguments.path, KEYSTORE_DIR,
-        TIMESTAMP_KEY_NAME), parsed_arguments.timestamp_pw)
-
-    repository.snapshot.load_signing_key(snapshot_private)
-    repository.timestamp.load_signing_key(timestamp_private)
-
-  consistent_snapshot = roledb.get_roleinfo('root',
-      repository._repository_name)['consistent_snapshot']
-  repository.writeall(consistent_snapshot=consistent_snapshot)
-
-  # Move staged metadata directory to "live" metadata directory.
-  write_to_live_repo(parsed_arguments)
-
-
-
-def revoke(parsed_arguments):
-
-  repository = repo_tool.load_repository(
-      os.path.join(parsed_arguments.path, REPO_DIR))
-
-  if parsed_arguments.role == 'targets':
-    repository.targets.revoke(parsed_arguments.delegatee)
-
-    targets_private = import_privatekey_from_file(
-        os.path.join(parsed_arguments.path, KEYSTORE_DIR, TARGETS_KEY_NAME),
-        parsed_arguments.targets_pw)
-
-    repository.targets.load_signing_key(targets_private)
-
-  # A non-top-level role.
-  else:
-    repository.targets(parsed_arguments.role).revoke(parsed_arguments.delegatee)
-
-    role_privatekey = import_privatekey_from_file(parsed_arguments.sign)
-
-    repository.targets(parsed_arguments.role).load_signing_key(role_privatekey)
-
-  # Update the required top-level roles, Snapshot and Timestamp, to make a new
-  # release.  Automatically making a new release can be disabled via
-  # --no_release.
-  if not parsed_arguments.no_release:
-    snapshot_private = import_privatekey_from_file(
-        os.path.join(parsed_arguments.path, KEYSTORE_DIR, SNAPSHOT_KEY_NAME),
-        parsed_arguments.snapshot_pw)
-    timestamp_private = import_privatekey_from_file(
-        os.path.join(parsed_arguments.path, KEYSTORE_DIR,
-        TIMESTAMP_KEY_NAME), parsed_arguments.timestamp_pw)
-
-    repository.snapshot.load_signing_key(snapshot_private)
-    repository.timestamp.load_signing_key(timestamp_private)
-
-  consistent_snapshot = roledb.get_roleinfo('root',
-      repository._repository_name)['consistent_snapshot']
-  repository.writeall(consistent_snapshot=consistent_snapshot)
-
-  # Move staged metadata directory to "live" metadata directory.
-  write_to_live_repo(parsed_arguments)
-
-
-
-def gen_key(parsed_arguments):
-
-  if parsed_arguments.filename:
-    parsed_arguments.filename = os.path.join(parsed_arguments.path,
-        KEYSTORE_DIR, parsed_arguments.filename)
-
-  keypath = None
-
-  keygen_kwargs = {
-    "password": parsed_arguments.pw,
-    "filepath": parsed_arguments.filename,
-    "prompt": (not parsed_arguments.pw)  # prompt if no default or passed pw
-  }
-
-  if parsed_arguments.key not in SUPPORTED_CLI_KEYTYPES:
-    raise exceptions.Error(
-        'Invalid key type: ' + repr(parsed_arguments.key) + '.  Supported'
-        ' key types: ' + repr(SUPPORTED_CLI_KEYTYPES))
-
-  elif parsed_arguments.key == ECDSA_KEYTYPE:
-    keypath = sslib_interface._generate_and_write_ecdsa_keypair(
-        **keygen_kwargs)
-
-  elif parsed_arguments.key == ED25519_KEYTYPE:
-    keypath = sslib_interface._generate_and_write_ed25519_keypair(
-        **keygen_kwargs)
-
-  # RSA key..
-  else:
-    keypath = sslib_interface._generate_and_write_rsa_keypair(
-        **keygen_kwargs)
-
-
-  # If a filename is not given, the generated keypair is saved to the current
-  # working directory.  By default, the keypair is written to <KEYID>.pub
-  # and <KEYID> (private key).
-  if not parsed_arguments.filename:
-    privkey_repo_path = os.path.join(parsed_arguments.path,
-        KEYSTORE_DIR, os.path.basename(keypath))
-    pubkey_repo_path = os.path.join(parsed_arguments.path,
-        KEYSTORE_DIR, os.path.basename(keypath + '.pub'))
-
-    sslib_util.ensure_parent_dir(privkey_repo_path)
-    sslib_util.ensure_parent_dir(pubkey_repo_path)
-
-    # Move them from the CWD to the repo's keystore.
-    shutil.move(keypath, privkey_repo_path)
-    shutil.move(keypath + '.pub', pubkey_repo_path)
-
-
-
-def import_privatekey_from_file(keypath, password=None):
-  # Note: should securesystemslib support this functionality (import any
-  # privatekey type)?
-  # If the caller does not provide a password argument, prompt for one.
-  # Password confirmation is disabled here, which should ideally happen only
-  # when creating encrypted key files.
-  if password is None:  # pragma: no cover
-
-    # It is safe to specify the full path of 'filepath' in the prompt and not
-    # worry about leaking sensitive information about the key's location.
-    # However, care should be taken when including the full path in exceptions
-    # and log files.
-    password = sslib_interface.get_password('Enter a password for'
-        ' the encrypted key (' + sslib_interface.TERM_RED + repr(keypath) +
-        sslib_interface.TERM_RED + '): ', confirm=False)
-
-  # Does 'password' have the correct format?
-  sslib_formats.PASSWORD_SCHEMA.check_match(password)
-
-  # Store the encrypted contents of 'filepath' prior to calling the decryption
-  # routine.
-  encrypted_key = None
-
-  with open(keypath, 'rb') as file_object:
-    encrypted_key = file_object.read().decode('utf-8')
-
-  # Decrypt the loaded key file, calling the 'cryptography' library to generate
-  # the derived encryption key from 'password'.  Raise
-  # 'securesystemslib.exceptions.CryptoError' if the decryption fails.
-  try:
-    key_object = sslib_keys.decrypt_key(encrypted_key, password)
-
-  except sslib_exceptions.CryptoError:
-    try:
-      logger.debug(
-          'Decryption failed.  Attempting to import a private PEM instead.')
-      key_object = sslib_keys.import_rsakey_from_private_pem(
-          encrypted_key, 'rsassa-pss-sha256', password)
-
-    except sslib_exceptions.CryptoError as error:
-      raise exceptions.Error(repr(keypath) + ' cannot be '
-          ' imported, possibly because an invalid key file is given or '
-          ' the decryption password is incorrect.') from error
-
-  if key_object['keytype'] not in SUPPORTED_KEY_TYPES:
-    raise exceptions.Error('Trying to import an unsupported key'
-        ' type: ' + repr(key_object['keytype']) + '.'
-        ' Supported key types: ' + repr(SUPPORTED_KEY_TYPES))
-
-  else:
-    # Add "keyid_hash_algorithms" so that equal keys with different keyids can
-    # be associated using supported keyid_hash_algorithms.
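-    # (keyid_hash_algorithms is typically ['sha256', 'sha512'] in
-    # securesystemslib's settings, so the same key can be matched under
-    # either keyid form.)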
-    key_object['keyid_hash_algorithms'] = sslib_settings.HASH_ALGORITHMS
-
-    return key_object
-
-
-
-def import_publickey_from_file(keypath):
-
-  try:
-    key_metadata = sslib_util.load_json_file(keypath)
-
-  # An RSA public key is saved to disk in PEM format (not JSON), so the
-  # load_json_file() call above can fail for this reason.  Try to potentially
-  # load the PEM string in keypath if an exception is raised.
-  except sslib_exceptions.Error:
-    key_metadata = sslib_interface.import_rsa_publickey_from_file(
-        keypath)
-
-  key_object, junk = sslib_keys.format_metadata_to_key(key_metadata)
-
-  if key_object['keytype'] not in SUPPORTED_KEY_TYPES:
-    raise exceptions.Error('Trying to import an unsupported key'
-        ' type: ' + repr(key_object['keytype']) + '.'
-        ' Supported key types: ' + repr(SUPPORTED_KEY_TYPES))
-
-  else:
-    return key_object
-
-
-
-def add_verification_key(parsed_arguments):
-  if not parsed_arguments.pubkeys:
-    raise exceptions.Error('--pubkeys must be given with --trust.')
-
-  repository = repo_tool.load_repository(
-      os.path.join(parsed_arguments.path, REPO_DIR))
-
-  for keypath in parsed_arguments.pubkeys:
-    imported_pubkey = import_publickey_from_file(keypath)
-
-    if parsed_arguments.role not in ('root', 'targets', 'snapshot', 'timestamp'):
-      raise exceptions.Error('The given --role is not a top-level role.')
-
-    elif parsed_arguments.role == 'root':
-      repository.root.add_verification_key(imported_pubkey)
-
-    elif parsed_arguments.role == 'targets':
-      repository.targets.add_verification_key(imported_pubkey)
-
-    elif parsed_arguments.role == 'snapshot':
-      repository.snapshot.add_verification_key(imported_pubkey)
-
-    # The timestamp role..
-    else:
-      repository.timestamp.add_verification_key(imported_pubkey)
-
-  consistent_snapshot = roledb.get_roleinfo('root',
-      repository._repository_name)['consistent_snapshot']
-  repository.write('root', consistent_snapshot=consistent_snapshot,
-      increment_version_number=False)
-
-  # Move staged metadata directory to "live" metadata directory.
-  write_to_live_repo(parsed_arguments)
-
-
-
-def remove_verification_key(parsed_arguments):
-  if not parsed_arguments.pubkeys:
-    raise exceptions.Error('--pubkeys must be given with --distrust.')
-
-  repository = repo_tool.load_repository(
-      os.path.join(parsed_arguments.path, REPO_DIR))
-
-  for keypath in parsed_arguments.pubkeys:
-    imported_pubkey = import_publickey_from_file(keypath)
-
-    try:
-      if parsed_arguments.role not in ('root', 'targets', 'snapshot', 'timestamp'):
-        raise exceptions.Error('The given --role is not a top-level role.')
-
-      elif parsed_arguments.role == 'root':
-        repository.root.remove_verification_key(imported_pubkey)
-
-      elif parsed_arguments.role == 'targets':
-        repository.targets.remove_verification_key(imported_pubkey)
-
-      elif parsed_arguments.role == 'snapshot':
-        repository.snapshot.remove_verification_key(imported_pubkey)
-
-      # The Timestamp key..
-      else:
-        repository.timestamp.remove_verification_key(imported_pubkey)
-
-    # It is assumed remove_verification_key() only raises
-    # securesystemslib.exceptions.Error and
-    # securesystemslib.exceptions.FormatError, and the latter is not raised
-    # because a valid key should have been returned by
-    # import_publickey_from_file().
-    except sslib_exceptions.Error:
-      print(repr(keypath) + ' is not a trusted key. 
Skipping.') - - consistent_snapshot = roledb.get_roleinfo('root', - repository._repository_name)['consistent_snapshot'] - repository.write('root', consistent_snapshot=consistent_snapshot, - increment_version_number=False) - - # Move staged metadata directory to "live" metadata directory. - write_to_live_repo(parsed_arguments) - - - -def sign_role(parsed_arguments): - - repository = repo_tool.load_repository( - os.path.join(parsed_arguments.path, REPO_DIR)) - consistent_snapshot = roledb.get_roleinfo('root', - repository._repository_name)['consistent_snapshot'] - - for keypath in parsed_arguments.sign: - - role_privatekey = import_privatekey_from_file(keypath) - - if parsed_arguments.role == 'targets': - repository.targets.load_signing_key(role_privatekey) - - elif parsed_arguments.role == 'root': - repository.root.load_signing_key(role_privatekey) - - elif parsed_arguments.role == 'snapshot': - repository.snapshot.load_signing_key(role_privatekey) - - elif parsed_arguments.role == 'timestamp': - repository.timestamp.load_signing_key(role_privatekey) - - else: - # TODO: repository_tool.py will be refactored to clean up the following - # code, which adds and signs for a non-existent role. - if not roledb.role_exists(parsed_arguments.role): - - # Load the private key keydb and set the roleinfo in roledb so that - # metadata can be written with repository.write(). - keydb.remove_key(role_privatekey['keyid'], - repository_name = repository._repository_name) - keydb.add_key( - role_privatekey, repository_name = repository._repository_name) - - # Set the delegated metadata file to expire in 3 months. - expiration = formats.unix_timestamp_to_datetime( - int(time.time() + 7889230)) - expiration = expiration.isoformat() + 'Z' - - roleinfo = {'name': parsed_arguments.role, - 'keyids': [role_privatekey['keyid']], - 'signing_keyids': [role_privatekey['keyid']], - 'partial_loaded': False, 'paths': {}, - 'signatures': [], 'version': 1, 'expires': expiration, - 'delegations': {'keys': {}, 'roles': []}} - - roledb.add_role(parsed_arguments.role, roleinfo, - repository_name=repository._repository_name) - - # Generate the Targets object of --role, and add it to the top-level - # 'targets' object. - new_targets_object = repo_tool.Targets(repository._targets_directory, - parsed_arguments.role, roleinfo, - repository_name=repository._repository_name) - repository.targets._delegated_roles[parsed_arguments.role] = new_targets_object - - else: - repository.targets(parsed_arguments.role).load_signing_key(role_privatekey) - - # Write the Targets metadata now that it's been modified. Once write() is - # called on a role, it is no longer considered "dirty" and the role will not - # be written again if another write() or writeall() were subsequently made. - repository.write(parsed_arguments.role, - consistent_snapshot=consistent_snapshot, increment_version_number=False) - - # Write the updated top-level roles, if any. Also write Snapshot and - # Timestamp to make a new release. Automatically making a new release can be - # disabled via --no_release. 
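-  # For example (hypothetical paths), "repo.py --sign tufkeystore/foo_key
-  # --role foo --no_release" writes and signs foo.json without re-signing
-  # Snapshot and Timestamp below.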
- if not parsed_arguments.no_release: - snapshot_private = import_privatekey_from_file( - os.path.join(parsed_arguments.path, KEYSTORE_DIR, SNAPSHOT_KEY_NAME), - parsed_arguments.snapshot_pw) - timestamp_private = import_privatekey_from_file( - os.path.join(parsed_arguments.path, KEYSTORE_DIR, - TIMESTAMP_KEY_NAME), parsed_arguments.timestamp_pw) - - repository.snapshot.load_signing_key(snapshot_private) - repository.timestamp.load_signing_key(timestamp_private) - - repository.writeall(consistent_snapshot=consistent_snapshot) - - # Move staged metadata directory to "live" metadata directory. - write_to_live_repo(parsed_arguments) - - - -def clean_repo(parsed_arguments): - repo_dir = os.path.join(parsed_arguments.path, REPO_DIR) - client_dir = os.path.join(parsed_arguments.path, CLIENT_DIR) - keystore_dir = os.path.join(parsed_arguments.path, KEYSTORE_DIR) - - shutil.rmtree(repo_dir, ignore_errors=True) - shutil.rmtree(client_dir, ignore_errors=True) - shutil.rmtree(keystore_dir, ignore_errors=True) - - - -def write_to_live_repo(parsed_arguments): - staged_meta_directory = os.path.join( - parsed_arguments.path, REPO_DIR, STAGED_METADATA_DIR) - live_meta_directory = os.path.join( - parsed_arguments.path, REPO_DIR, METADATA_DIR) - - shutil.rmtree(live_meta_directory, ignore_errors=True) - shutil.copytree(staged_meta_directory, live_meta_directory) - - - -def add_target_to_repo(parsed_arguments, target_path, repo_targets_path, - repository, custom=None): - """ - (1) Copy 'target_path' to 'repo_targets_path'. - (2) Add 'target_path' to Targets metadata of 'repository'. - """ - - if custom is None: - custom = {} - - if not os.path.exists(target_path): - logger.debug(repr(target_path) + ' does not exist. Skipping.') - - else: - sslib_util.ensure_parent_dir(os.path.join(repo_targets_path, target_path)) - shutil.copy(target_path, os.path.join(repo_targets_path, target_path)) - - - roleinfo = roledb.get_roleinfo( - parsed_arguments.role, repository_name=repository._repository_name) - - # It is assumed we have a delegated role, and that the caller has made - # sure to reject top-level roles specified with --role. - if target_path not in roleinfo['paths']: - logger.debug('Adding new target: ' + repr(target_path)) - roleinfo['paths'].update({target_path: custom}) - - else: - logger.debug('Replacing target: ' + repr(target_path)) - roleinfo['paths'].update({target_path: custom}) - - roledb.update_roleinfo(parsed_arguments.role, roleinfo, - mark_role_as_dirty=True, repository_name=repository._repository_name) - - - -def remove_target_files_from_metadata(parsed_arguments, repository): - - if parsed_arguments.role in ('root', 'snapshot', 'timestamp'): - raise exceptions.Error( - 'Invalid rolename specified: ' + repr(parsed_arguments.role) + '.' - ' It must be "targets" or a delegated rolename.') - - else: - # NOTE: The following approach of using roledb to update the target - # files will be modified in the future when the repository tool's API is - # refactored. 
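-    # For example (illustrative roleinfo), a 'paths' dict of
-    # {'foo-1.0.tgz': {}, 'bar.txt': {}} with --remove "foo*.tgz" is reduced
-    # to {'bar.txt': {}} by the fnmatch filtering below.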
- roleinfo = roledb.get_roleinfo( - parsed_arguments.role, repository._repository_name) - - for glob_pattern in parsed_arguments.remove: - for path in list(roleinfo['paths'].keys()): - if fnmatch.fnmatch(path, glob_pattern): - del roleinfo['paths'][path] - - else: - logger.debug('Delegated path ' + repr(path) + ' does not match' - ' given path/glob pattern ' + repr(glob_pattern)) - continue - - roledb.update_roleinfo( - parsed_arguments.role, roleinfo, mark_role_as_dirty=True, - repository_name=repository._repository_name) - - - -def add_targets(parsed_arguments): - repo_targets_path = os.path.join(parsed_arguments.path, REPO_DIR, 'targets') - repository = repo_tool.load_repository( - os.path.join(parsed_arguments.path, REPO_DIR)) - - # Copy the target files in --path to the repo directory, and - # add them to Targets metadata. Make sure to also copy & add files - # in directories (and subdirectories, if --recursive is True). - for target_path in parsed_arguments.add: - if os.path.isdir(target_path): - for sub_target_path in repository.get_filepaths_in_directory( - target_path, parsed_arguments.recursive): - add_target_to_repo(parsed_arguments, sub_target_path, - repo_targets_path, repository) - - else: - add_target_to_repo(parsed_arguments, target_path, - repo_targets_path, repository) - - consistent_snapshot = roledb.get_roleinfo('root', - repository._repository_name)['consistent_snapshot'] - - if parsed_arguments.role == 'targets': - # Load the top-level, non-root, keys to make a new release. - targets_private = import_privatekey_from_file( - os.path.join(parsed_arguments.path, KEYSTORE_DIR, TARGETS_KEY_NAME), - parsed_arguments.targets_pw) - repository.targets.load_signing_key(targets_private) - - elif parsed_arguments.role not in ('root', 'snapshot', 'timestamp'): - repository.write(parsed_arguments.role, - consistent_snapshot=consistent_snapshot, increment_version_number=True) - return - - # Update the required top-level roles, Snapshot and Timestamp, to make a new - # release. Automatically making a new release can be disabled via - # --no_release. - if not parsed_arguments.no_release: - snapshot_private = import_privatekey_from_file( - os.path.join(parsed_arguments.path, KEYSTORE_DIR, SNAPSHOT_KEY_NAME), - parsed_arguments.snapshot_pw) - timestamp_private = import_privatekey_from_file( - os.path.join(parsed_arguments.path, KEYSTORE_DIR, - TIMESTAMP_KEY_NAME), parsed_arguments.timestamp_pw) - - repository.snapshot.load_signing_key(snapshot_private) - repository.timestamp.load_signing_key(timestamp_private) - - repository.writeall(consistent_snapshot=consistent_snapshot) - - # Move staged metadata directory to "live" metadata directory. - write_to_live_repo(parsed_arguments) - - - -def remove_targets(parsed_arguments): - repository = repo_tool.load_repository( - os.path.join(parsed_arguments.path, REPO_DIR)) - - # Remove target files from the Targets metadata (or the role specified in - # --role) that match the glob patterns specified in --remove. 
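-  # For example (hypothetical invocation), "repo.py --remove 'docs/*.txt'"
-  # drops every matching target path from the top-level Targets metadata.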
- remove_target_files_from_metadata(parsed_arguments, repository) - - # Examples of how the --pw command-line option is interpreted: - # repo.py --init': parsed_arguments.pw = 'pw' - # repo.py --init --pw my_password: parsed_arguments.pw = 'my_password' - # repo.py --init --pw: The user is prompted for a password, as follows: - if not parsed_arguments.pw: - parsed_arguments.pw = sslib_interface.get_password( - prompt='Enter a password for the top-level role keys: ', confirm=True) - - targets_private = import_privatekey_from_file( - os.path.join(parsed_arguments.path, KEYSTORE_DIR, TARGETS_KEY_NAME), - parsed_arguments.targets_pw) - repository.targets.load_signing_key(targets_private) - - # Load the top-level keys for Snapshot and Timestamp to make a new release. - # Automatically making a new release can be disabled via --no_release. - if not parsed_arguments.no_release: - snapshot_private = import_privatekey_from_file( - os.path.join(parsed_arguments.path, KEYSTORE_DIR, SNAPSHOT_KEY_NAME), - parsed_arguments.snapshot_pw) - timestamp_private = import_privatekey_from_file( - os.path.join(parsed_arguments.path, KEYSTORE_DIR, - TIMESTAMP_KEY_NAME), parsed_arguments.timestamp_pw) - - repository.snapshot.load_signing_key(snapshot_private) - repository.timestamp.load_signing_key(timestamp_private) - - consistent_snapshot = roledb.get_roleinfo('root', - repository._repository_name)['consistent_snapshot'] - repository.writeall(consistent_snapshot=consistent_snapshot) - - # Move staged metadata directory to "live" metadata directory. - write_to_live_repo(parsed_arguments) - - - -def init_repo(parsed_arguments): - """ - Create a repo at the specified location in --path (the current working - directory, by default). Each top-level role has one key, if --bare' is False - (default). - """ - - repo_path = os.path.join(parsed_arguments.path, REPO_DIR) - repository = repo_tool.create_new_repository(repo_path) - - if not parsed_arguments.bare: - set_top_level_keys(repository, parsed_arguments) - repository.writeall(consistent_snapshot=parsed_arguments.consistent) - - else: - repository.write( - 'root', consistent_snapshot=parsed_arguments.consistent) - repository.write('targets', consistent_snapshot=parsed_arguments.consistent) - repository.write('snapshot', consistent_snapshot=parsed_arguments.consistent) - repository.write('timestamp', consistent_snapshot=parsed_arguments.consistent) - - write_to_live_repo(parsed_arguments) - - # Create the client files. The client directory contains the required - # directory structure and metadata files for clients to successfully perform - # an update. - repo_tool.create_tuf_client_directory( - os.path.join(parsed_arguments.path, REPO_DIR), - os.path.join(parsed_arguments.path, CLIENT_DIR, REPO_DIR)) - - - -def set_top_level_keys(repository, parsed_arguments): - """ - Generate, write, and set the top-level keys. 'repository' is modified. - """ - - # Examples of how the --*_pw command-line options are interpreted: - # repo.py --init': parsed_arguments.*_pw = 'pw' - # repo.py --init --*_pw my_pw: parsed_arguments.*_pw = 'my_pw' - # repo.py --init --*_pw: The user is prompted for a password. 
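-  # For example (hypothetical password), "repo.py --init --root_pw my_pw"
-  # encrypts only the root key file with 'my_pw'; the remaining key files
-  # keep the default password 'pw'.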
- - sslib_interface._generate_and_write_ed25519_keypair( - password=parsed_arguments.root_pw, - filepath=os.path.join(parsed_arguments.path, KEYSTORE_DIR, ROOT_KEY_NAME), - prompt=(not parsed_arguments.root_pw)) - sslib_interface._generate_and_write_ed25519_keypair( - password=parsed_arguments.targets_pw, - filepath=os.path.join(parsed_arguments.path, KEYSTORE_DIR, TARGETS_KEY_NAME), - prompt=(not parsed_arguments.targets_pw)) - sslib_interface._generate_and_write_ed25519_keypair( - password=parsed_arguments.snapshot_pw, - filepath=os.path.join(parsed_arguments.path, KEYSTORE_DIR, SNAPSHOT_KEY_NAME), - prompt=(not parsed_arguments.snapshot_pw)) - sslib_interface._generate_and_write_ed25519_keypair( - password=parsed_arguments.timestamp_pw, - filepath=os.path.join(parsed_arguments.path, KEYSTORE_DIR, TIMESTAMP_KEY_NAME), - prompt=(not parsed_arguments.timestamp_pw)) - - # Import the private keys. They are needed to generate the signatures - # included in metadata. - root_private = import_privatekey_from_file( - os.path.join(parsed_arguments.path, KEYSTORE_DIR, - ROOT_KEY_NAME), parsed_arguments.root_pw) - targets_private = import_privatekey_from_file( - os.path.join(parsed_arguments.path, KEYSTORE_DIR, - TARGETS_KEY_NAME), parsed_arguments.targets_pw) - snapshot_private = import_privatekey_from_file( - os.path.join(parsed_arguments.path, KEYSTORE_DIR, - SNAPSHOT_KEY_NAME), parsed_arguments.snapshot_pw) - timestamp_private = import_privatekey_from_file( - os.path.join(parsed_arguments.path, KEYSTORE_DIR, - TIMESTAMP_KEY_NAME), parsed_arguments.timestamp_pw) - - # Import the public keys. They are needed so that metadata roles are - # assigned verification keys, which clients need in order to verify the - # signatures created by the corresponding private keys. - root_public = import_publickey_from_file( - os.path.join(parsed_arguments.path, KEYSTORE_DIR, - ROOT_KEY_NAME) + '.pub') - targets_public = import_publickey_from_file( - os.path.join(parsed_arguments.path, KEYSTORE_DIR, - TARGETS_KEY_NAME) + '.pub') - snapshot_public = import_publickey_from_file( - os.path.join(parsed_arguments.path, KEYSTORE_DIR, - SNAPSHOT_KEY_NAME) + '.pub') - timestamp_public = import_publickey_from_file( - os.path.join(parsed_arguments.path, KEYSTORE_DIR, - TIMESTAMP_KEY_NAME) + '.pub') - - # Add the verification keys to the top-level roles. - repository.root.add_verification_key(root_public) - repository.targets.add_verification_key(targets_public) - repository.snapshot.add_verification_key(snapshot_public) - repository.timestamp.add_verification_key(timestamp_public) - - # Load the previously imported signing keys for the top-level roles so that - # valid metadata can be written. - repository.root.load_signing_key(root_private) - repository.targets.load_signing_key(targets_private) - repository.snapshot.load_signing_key(snapshot_private) - repository.timestamp.load_signing_key(timestamp_private) - - - -def parse_arguments(): - """ - - Parse the command-line arguments. Also set the logging level, as specified - via the --verbose argument (2, by default). - - Example: - # Create a TUF repository in the current working directory. The - # top-level roles are created, each containing one key. - $ repo.py --init - - $ repo.py --init --bare --consistent --verbose 3 - - If a required argument is unset, a parser error is printed and the script - exits. - - - None. - - - None. - - - Sets the logging level for TUF logging. - - - A tuple ('options.REPOSITORY_PATH', command, command_arguments). 
'command' and
-    'command_arguments' correspond to a repository tool function.
-  """
-
-  parser = argparse.ArgumentParser(
-      description='Create or modify a TUF repository.')
-
-  parser.add_argument('-i', '--init', action='store_true',
-      help='Create a repository.  The "tufrepo", "tufkeystore", and'
-      ' "tufclient" directories are created in the current working'
-      ' directory, unless --path is specified.')
-
-  parser.add_argument('-p', '--path', nargs='?', default='.',
-      metavar='', help='Specify a repository path.  If used'
-      ' with --init, the initialized repository is saved to the given'
-      ' path.')
-
-  parser.add_argument('-b', '--bare', action='store_true',
-      help='If initializing a repository, neither create nor set keys'
-      ' for any of the top-level roles.  False, by default.')
-
-  parser.add_argument('--no_release', action='store_true',
-      help='Do not automatically sign Snapshot and Timestamp metadata.'
-      ' False, by default.')
-
-  parser.add_argument('--consistent', action='store_true',
-      help='Set consistent snapshots for an initialized repository.'
-      ' Consistent snapshot is False by default.')
-
-  parser.add_argument('-c', '--clean', type=str, nargs='?', const='.',
-      metavar='', help='Delete the repo files from the'
-      ' specified directory.  If a directory is not specified, the current'
-      ' working directory is cleaned.')
-
-  parser.add_argument('-a', '--add', type=str, nargs='+',
-      metavar='', help='Add one or more target files to the'
-      ' "targets" role (or the role specified in --role).  If a directory'
-      ' is given, all files in the directory are added.')
-
-  parser.add_argument('--remove', type=str, nargs='+',
-      metavar='', help='Remove one or more target files from the'
-      ' "targets" role (or the role specified in --role).')
-
-  parser.add_argument('--role', nargs='?', type=str, const='targets',
-      default='targets', metavar='', help='Specify a rolename.'
-      ' The rolename "targets" is used by default.')
-
-  parser.add_argument('-r', '--recursive', action='store_true',
-      help='By setting -r, any directory specified with --add is processed'
-      ' recursively.  If unset, the default behavior is to not add target'
-      ' files in subdirectories.')
-
-  parser.add_argument('-k', '--key', type=str, nargs='?', const=ED25519_KEYTYPE,
-      default=None, choices=[ECDSA_KEYTYPE, ED25519_KEYTYPE, RSA_KEYTYPE],
-      help='Generate an ECDSA, Ed25519, or RSA key.  An Ed25519 key is'
-      ' created if the key type is unspecified.')
-
-  parser.add_argument('--filename', nargs='?', default=None, const=None,
-      metavar='', help='Specify a filename.  This option can'
-      ' be used to name a generated key file.  The top-level keys should'
-      ' be named "root_key", "targets_key", "snapshot_key", "timestamp_key."')
-
-  parser.add_argument('--trust', action='store_true',
-      help='Indicate the trusted key(s) (via --pubkeys) for the role in --role.'
-      ' This action modifies Root metadata with the trusted key(s).')
-
-  parser.add_argument('--distrust', action='store_true',
-      help='Discontinue trust of key(s) (via --pubkeys) for the role in --role.'
-      ' This action modifies Root metadata by removing trusted key(s).')
-
-  parser.add_argument('--sign', nargs='+', type=str,
-      metavar='', help='Sign the "targets"'
-      ' metadata (or the one for --role) with the specified key(s).')
-
-  parser.add_argument('--pw', nargs='?', default='pw', metavar='',
-      help='Specify a password.  "pw" is used if --pw is unset, or a'
-      ' password can be entered via a prompt by specifying --pw by itself.'
-      ' This option can be used with --sign and --key.')
-
-  parser.add_argument('--root_pw', nargs='?', default='pw', metavar='',
-      help='Specify a Root password.  "pw" is used if --root_pw is unset, or a'
-      ' password can be entered via a prompt by specifying --root_pw by'
-      ' itself.')
-
-  parser.add_argument('--targets_pw', nargs='?', default='pw', metavar='',
-      help='Specify a Targets password.  "pw" is used if --targets_pw is'
-      ' unset, or a password can be entered via a prompt by specifying'
-      ' --targets_pw by itself.')
-
-  parser.add_argument('--snapshot_pw', nargs='?', default='pw', metavar='',
-      help='Specify a Snapshot password.  "pw" is used if --snapshot_pw is'
-      ' unset, or a password can be entered via a prompt by specifying'
-      ' --snapshot_pw by itself.')
-
-  parser.add_argument('--timestamp_pw', nargs='?', default='pw', metavar='',
-      help='Specify a Timestamp password.  "pw" is used if --timestamp_pw is'
-      ' unset, or a password can be entered via a prompt by specifying'
-      ' --timestamp_pw by itself.')
-
-  parser.add_argument('-d', '--delegate', type=str, nargs='+',
-      metavar='', help='Delegate trust of target files'
-      ' from the "targets" role (or --role) to some other role (--delegatee).'
-      ' The named delegatee is trusted to sign for the target files that'
-      ' match the glob pattern(s).')
-
-  parser.add_argument('--delegatee', nargs='?', type=str, const=None,
-      default=None, metavar='', help='Specify the rolename'
-      ' of the delegated role.  Can be used with --delegate.')
-
-  parser.add_argument('-t', '--terminating', action='store_true',
-      help='Set the terminating flag to True.  Can be used with --delegate.')
-
-  parser.add_argument('--threshold', type=int, default=1, metavar='',
-      help='Set the threshold number of signatures'
-      ' needed to validate a metadata file.  Can be used with --delegate.')
-
-  parser.add_argument('--pubkeys', type=str, nargs='+',
-      metavar='', help='Specify one or more public keys'
-      ' for the delegated role.  Can be used with --delegate.')
-
-  parser.add_argument('--revoke', action='store_true',
-      help='Revoke trust of target files from a delegated role.')
-
-  # Add the parser arguments supported by PROG_NAME.
-  parser.add_argument('-v', '--verbose', type=int, default=2,
-      choices=range(0, 6), help='Set the verbosity level of logging messages.'
-      ' The lower the setting, the greater the verbosity.  Supported logging'
-      ' levels: 0=UNSET, 1=DEBUG, 2=INFO, 3=WARNING, 4=ERROR,'
-      ' 5=CRITICAL')
-
-  # Should we include usage examples in the help output?
-
-  parsed_args = parser.parse_args()
-
-  # Set the logging level.
-  logging_levels = [logging.NOTSET, logging.DEBUG,
-      logging.INFO, logging.WARNING, logging.ERROR, logging.CRITICAL]
-
-  log.set_log_level(logging_levels[parsed_args.verbose])
-
-  return parsed_args
-
-
-
-if __name__ == '__main__':
-
-  # Parse the arguments and set the logging level.
-  arguments = parse_arguments()
-
-  # Create or modify the repository depending on the option specified on the
-  # command line.  For example, the following adds 'foo.bar.gz' to the
-  # default repository and updates the relevant metadata (i.e., Targets,
-  # Snapshot, and Timestamp metadata are updated):
-  # $ repo.py --add foo.bar.gz
-
-  try:
-    process_command_line_arguments(arguments)
-
-  except (exceptions.Error) as e:
-    sys.stderr.write('Error: ' + str(e) + '\n')
-    sys.exit(1)
-
-  # Successfully created or updated the TUF repository.
- sys.exit(0) diff --git a/tuf/settings.py b/tuf/settings.py deleted file mode 100755 index f07c4d961a..0000000000 --- a/tuf/settings.py +++ /dev/null @@ -1,100 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2017, New York University and the TUF contributors -# SPDX-License-Identifier: MIT OR Apache-2.0 - -""" - - settings.py - - - Vladimir Diaz - - - January 11, 2017 - - - See LICENSE-MIT OR LICENSE for licensing information. - - - A central location for TUF configuration settings. Example options include - setting the destination of temporary files and downloaded content, the maximum - length of downloaded metadata (unknown file attributes), and download - behavior. -""" - - -# Set a directory that should be used for all temporary files. If this -# is None, then the system default will be used. The system default -# will also be used if a directory path set here is invalid or -# unusable. -temporary_directory = None - -# Set a local directory to store metadata that is requested from mirrors. This -# directory contains subdirectories for different repositories, where each -# subdirectory contains a different set of metadata. For example: -# tuf.settings.repositories_directory = /tmp/repositories. The root file for a -# repository named 'django_repo' can be found at: -# /tmp/repositories/django_repo/metadata/current/root.METADATA_EXTENSION -repositories_directory = None - -# The 'log.py' module manages TUF's logging system. Users have the option to -# enable/disable logging to a file via 'ENABLE_FILE_LOGGING', or -# tuf.log.enable_file_logging() and tuf.log.disable_file_logging(). -ENABLE_FILE_LOGGING = False - -# If file logging is enabled via 'ENABLE_FILE_LOGGING', TUF log messages will -# be saved to 'LOG_FILENAME' -LOG_FILENAME = 'tuf.log' - -# Since the timestamp role does not have signed metadata about itself, we set a -# default but sane upper bound for the number of bytes required to download it. -DEFAULT_TIMESTAMP_REQUIRED_LENGTH = 16384 #bytes - -# The Root role may be updated without knowing its version if top-level -# metadata cannot be safely downloaded (e.g., keys may have been revoked, thus -# requiring a new Root file that includes the updated keys). Set a default -# upper bound for the maximum total bytes that may be downloaded for Root -# metadata. -DEFAULT_ROOT_REQUIRED_LENGTH = 512000 #bytes - -# Set a default, but sane, upper bound for the number of bytes required to -# download Snapshot metadata. -DEFAULT_SNAPSHOT_REQUIRED_LENGTH = 2000000 #bytes - -# Set a default, but sane, upper bound for the number of bytes required to -# download Targets metadata. -DEFAULT_TARGETS_REQUIRED_LENGTH = 5000000 #bytes - -# Set a timeout value in seconds (float) for non-blocking socket operations. -SOCKET_TIMEOUT = 4 #seconds - -# The maximum chunk of data, in bytes, we would download in every round. -CHUNK_SIZE = 400000 #bytes - -# The minimum average download speed (bytes/second) that must be met to -# avoid being considered as a slow retrieval attack. -MIN_AVERAGE_DOWNLOAD_SPEED = 50 #bytes/second - -# By default, limit number of delegatees we visit for any target. -MAX_NUMBER_OF_DELEGATIONS = 2**5 - -# A setting for the instances where a default hashing algorithm is needed. -# This setting is currently used to calculate the path hash prefixes of hashed -# bin delegations, and digests of targets filepaths. The other instances -# (e.g., digest of files) that require a hashing algorithm rely on settings in -# the securesystemslib external library. 
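-# (For example, a hashed-bin delegation hashes each target filepath with this
-# algorithm and assigns the target to a bin by a prefix of the hex digest.)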
-DEFAULT_HASH_ALGORITHM = 'sha256' - -# The hashing algorithms used to compute file hashes -FILE_HASH_ALGORITHMS = ['sha256', 'sha512'] - -# The client's update procedure (contained within a while-loop) can potentially -# hog the CPU. The following setting can be used to force the update sequence -# to suspend execution for a specified amount of time. See -# theupdateframework/tuf/issue#338. -SLEEP_BEFORE_ROUND = None - -# Maximum number of root metadata file rotations we should perform in order to -# prevent a denial-of-service (DoS) attack. -MAX_NUMBER_ROOT_ROTATIONS = 2**5 diff --git a/tuf/sig.py b/tuf/sig.py deleted file mode 100755 index 4e1f05fc2a..0000000000 --- a/tuf/sig.py +++ /dev/null @@ -1,395 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2012 - 2017, New York University and the TUF contributors -# SPDX-License-Identifier: MIT OR Apache-2.0 - -""" - - sig.py - - - Vladimir Diaz - - - February 28, 2012. Based on a previous version by Geremy Condra. - - - See LICENSE-MIT OR LICENSE for licensing information. - - - Survivable key compromise is one feature of a secure update system - incorporated into TUF's design. Responsibility separation through - the use of multiple roles, multi-signature trust, and explicit and - implicit key revocation are some of the mechanisms employed towards - this goal of survivability. These mechanisms can all be seen in - play by the functions available in this module. - - The signed metadata files utilized by TUF to download target files - securely are used and represented here as the 'signable' object. - More precisely, the signature structures contained within these metadata - files are packaged into 'signable' dictionaries. This module makes it - possible to capture the states of these signatures by organizing the - keys into different categories. As keys are added and removed, the - system must securely and efficiently verify the status of these signatures. - For instance, a bunch of keys have recently expired. How many valid keys - are now available to the Snapshot role? This question can be answered by - get_signature_status(), which will return a full 'status report' of these - 'signable' dicts. This module also provides a convenient verify() function - that will determine if a role still has a sufficient number of valid keys. - If a caller needs to update the signatures of a 'signable' object, there - is also a function for that. -""" - -import logging - -import securesystemslib # pylint: disable=unused-import -from securesystemslib import exceptions as sslib_exceptions -from securesystemslib import formats as sslib_formats -from securesystemslib import keys as sslib_keys - -from tuf import exceptions -from tuf import formats -from tuf import keydb -from tuf import roledb - -# See 'log.py' to learn how logging is handled in TUF. -logger = logging.getLogger(__name__) - -def get_signature_status(signable, role=None, repository_name='default', - threshold=None, keyids=None): - """ - - Return a dictionary representing the status of the signatures listed in - 'signable'. Signatures in the returned dictionary are identified by the - signature keyid and can have a status of either: - - * bad -- Invalid signature - * good -- Valid signature from key that is available in 'tuf.keydb', and is - authorized for the passed role as per 'roledb' (authorization may be - overwritten by passed 'keyids'). - * unknown -- Signature from key that is not available in 'tuf.keydb', or if - 'role' is None. 
- * unknown signing schemes -- Signature from key with unknown signing - scheme. - * untrusted -- Valid signature from key that is available in 'tuf.keydb', - but is not trusted for the passed role as per 'roledb' or the passed - 'keyids'. - - NOTE: The result may contain duplicate keyids or keyids that reference the - same key, if 'signable' lists multiple signatures from the same key. - - - signable: - A dictionary containing a list of signatures and a 'signed' identifier. - signable = {'signed': 'signer', - 'signatures': [{'keyid': keyid, - 'sig': sig}]} - - Conformant to tuf.formats.SIGNABLE_SCHEMA. - - role: - TUF role string (e.g. 'root', 'targets', 'snapshot' or timestamp). - - threshold: - Rather than reference the role's threshold as set in roledb, use - the given 'threshold' to calculate the signature status of 'signable'. - 'threshold' is an integer value that sets the role's threshold value, or - the minimum number of signatures needed for metadata to be considered - fully signed. - - keyids: - Similar to the 'threshold' argument, use the supplied list of 'keyids' - to calculate the signature status, instead of referencing the keyids - in roledb for 'role'. - - - securesystemslib.exceptions.FormatError, if 'signable' does not have the - correct format. - - tuf.exceptions.UnknownRoleError, if 'role' is not recognized. - - - None. - - - A dictionary representing the status of the signatures in 'signable'. - Conformant to tuf.formats.SIGNATURESTATUS_SCHEMA. - """ - - # Do the arguments have the correct format? This check will ensure that - # arguments have the appropriate number of objects and object types, and that - # all dict keys are properly named. Raise - # 'securesystemslib.exceptions.FormatError' if the check fails. - formats.SIGNABLE_SCHEMA.check_match(signable) - sslib_formats.NAME_SCHEMA.check_match(repository_name) - - if role is not None: - formats.ROLENAME_SCHEMA.check_match(role) - - if threshold is not None: - formats.THRESHOLD_SCHEMA.check_match(threshold) - - if keyids is not None: - sslib_formats.KEYIDS_SCHEMA.check_match(keyids) - - # The signature status dictionary returned. - signature_status = {} - good_sigs = [] - bad_sigs = [] - unknown_sigs = [] - untrusted_sigs = [] - unknown_signing_schemes = [] - - # Extract the relevant fields from 'signable' that will allow us to identify - # the different classes of keys (i.e., good_sigs, bad_sigs, etc.). - signed = sslib_formats.encode_canonical(signable['signed']).encode('utf-8') - signatures = signable['signatures'] - - # Iterate the signatures and enumerate the signature_status fields. - # (i.e., good_sigs, bad_sigs, etc.). - for signature in signatures: - keyid = signature['keyid'] - - # Does the signature use an unrecognized key? - try: - key = keydb.get_key(keyid, repository_name) - - except exceptions.UnknownKeyError: - unknown_sigs.append(keyid) - continue - - # Does the signature use an unknown/unsupported signing scheme? - try: - valid_sig = sslib_keys.verify_signature(key, signature, signed) - - except sslib_exceptions.UnsupportedAlgorithmError: - unknown_signing_schemes.append(keyid) - continue - - # We are now dealing with either a trusted or untrusted key... - if valid_sig: - if role is not None: - - # Is this an unauthorized key? (a keyid associated with 'role') - # Note that if the role is not known, tuf.exceptions.UnknownRoleError - # is raised here. 
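-        # (For example, a valid signature whose keyid is absent from the
-        # role's keyids is appended to 'untrusted_sigs' below, not
-        # 'good_sigs'.)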
-def verify(signable, role, repository_name='default', threshold=None,
-    keyids=None):
-  """
-  <Purpose>
-    Verify that 'signable' has a valid threshold of authorized signatures
-    identified by unique keyids.  The threshold and whether a keyid is
-    authorized is determined by querying the 'threshold' and 'keyids' info for
-    the passed 'role' in 'roledb'.  Both values can be overwritten by
-    passing the 'threshold' or 'keyids' arguments.
-
-    NOTE:
-    - Signatures with identical authorized keyids only count towards the
-      threshold once.
-    - Signatures with the same key only count toward the threshold once.
-
-  <Arguments>
-    signable:
-      A dictionary containing a list of signatures and a 'signed' identifier
-      that conforms to SIGNABLE_SCHEMA, e.g.:
-      signable = {'signed':, 'signatures': [{'keyid':, 'method':, 'sig':}]}
-
-    role:
-      TUF role string (e.g. 'root', 'targets', 'snapshot' or 'timestamp').
-
-    threshold:
-      Rather than reference the role's threshold as set in roledb, use
-      the given 'threshold' to calculate the signature status of 'signable'.
-      'threshold' is an integer value that sets the role's threshold value, or
-      the minimum number of signatures needed for metadata to be considered
-      fully signed.
-
-    keyids:
-      Similar to the 'threshold' argument, use the supplied list of 'keyids'
-      to calculate the signature status, instead of referencing the keyids
-      in roledb for 'role'.
-
-  <Exceptions>
-    tuf.exceptions.UnknownRoleError, if 'role' is not recognized.
-
-    securesystemslib.exceptions.FormatError, if 'signable' is not formatted
-    correctly.
-
-    securesystemslib.exceptions.Error, if an invalid threshold is encountered.
-
-  <Side Effects>
-    tuf.sig.get_signature_status() called.  Any exceptions thrown by
-    get_signature_status() will be caught here and re-raised.
-
-  <Returns>
-    Boolean.  True if the number of good unique (by keyid) signatures >= the
-    role's threshold, False otherwise.
-  """
-
-  formats.SIGNABLE_SCHEMA.check_match(signable)
-  formats.ROLENAME_SCHEMA.check_match(role)
-  sslib_formats.NAME_SCHEMA.check_match(repository_name)
-
-  # Retrieve the signature status.  tuf.sig.get_signature_status() raises:
-  # tuf.exceptions.UnknownRoleError
-  # securesystemslib.exceptions.FormatError.  'threshold' and 'keyids' are
-  # also validated.
-  status = get_signature_status(signable, role, repository_name, threshold,
-      keyids)
-
-  # Retrieve the role's threshold and the authorized keys of 'status'.
-  threshold = status['threshold']
-  good_sigs = status['good_sigs']
-
-  # Does 'status' have the required threshold of signatures?
-  # First check for invalid threshold values before returning result.
-  # Note: get_signature_status() is expected to verify that 'threshold' is
-  # not None or <= 0.
-  if threshold is None or threshold <= 0: #pragma: no cover
-    raise sslib_exceptions.Error("Invalid threshold: " + repr(threshold))
-
-  unique_keys = set()
-  for keyid in good_sigs:
-    key = keydb.get_key(keyid, repository_name)
-    unique_keys.add(key['keyval']['public'])
-
-  return len(unique_keys) >= threshold
-
-
-
-
-
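verify() reduces that status report to a single boolean. A hedged usage sketch, under the same assumptions as above (a `signable` dict, keydb/roledb already populated):

```python
# Sketch: threshold verification of metadata with the legacy API.
from tuf import sig

if sig.verify(signable, 'targets'):
    print('targets metadata is fully signed')
else:
    print('more valid signatures are needed')

# The threshold and authorized keyids from roledb can be overridden
# on a per-call basis:
sig.verify(signable, 'targets', threshold=2)
```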
-def may_need_new_keys(signature_status):
-  """
-  <Purpose>
-    Return True iff downloading a new set of keys might tip this
-    signature status over to valid.  This is determined by checking
-    if either the number of unknown or untrusted keys is > 0.
-
-  <Arguments>
-    signature_status:
-      The dictionary returned by tuf.sig.get_signature_status().
-
-  <Exceptions>
-    securesystemslib.exceptions.FormatError, if 'signature_status' does not
-    have the correct format.
-
-  <Side Effects>
-    None.
-
-  <Returns>
-    Boolean.
-  """
-
-  # Does 'signature_status' have the correct format?
-  # This check will ensure 'signature_status' has the appropriate number
-  # of objects and object types, and that all dict keys are properly named.
-  # Raise 'securesystemslib.exceptions.FormatError' if the check fails.
-  formats.SIGNATURESTATUS_SCHEMA.check_match(signature_status)
-
-  unknown = signature_status['unknown_sigs']
-  untrusted = signature_status['untrusted_sigs']
-
-  return len(unknown) or len(untrusted)
-
-
-
-
-
-def generate_rsa_signature(signed, rsakey_dict):
-  """
-  <Purpose>
-    Generate a new signature dict presumably to be added to the 'signatures'
-    field of 'signable'.  The 'signable' dict is of the form:
-
-    {'signed': 'signer',
-     'signatures': [{'keyid': keyid,
-                     'method': 'evp',
-                     'sig': sig}]}
-
-    The 'signed' argument is needed here for the signing process.
-    The 'rsakey_dict' argument is used to generate 'keyid', 'method', and
-    'sig'.
-
-    The caller should ensure the returned signature is not already in
-    'signable'.
-
-  <Arguments>
-    signed:
-      The data used by 'securesystemslib.keys.create_signature()' to generate
-      signatures.  It is stored in the 'signed' field of 'signable'.
-
-    rsakey_dict:
-      The RSA key, a 'securesystemslib.formats.RSAKEY_SCHEMA' dictionary.
-      Used here to produce 'keyid', 'method', and 'sig'.
-
-  <Exceptions>
-    securesystemslib.exceptions.FormatError, if 'rsakey_dict' does not have
-    the correct format.
-
-    TypeError, if a private key is not defined for 'rsakey_dict'.
-
-  <Side Effects>
-    None.
-
-  <Returns>
-    Signature dictionary conformant to
-    securesystemslib.formats.SIGNATURE_SCHEMA.  Has the form:
-    {'keyid': keyid, 'method': 'evp', 'sig': sig}
-  """
-
-  # We need 'signed' in canonical JSON format to generate
-  # the 'method' and 'sig' fields of the signature.
-  signed = sslib_formats.encode_canonical(signed).encode('utf-8')
-
-  # Generate the RSA signature.
-  # Raises securesystemslib.exceptions.FormatError and TypeError.
-  signature = sslib_keys.create_signature(rsakey_dict, signed)
-
-  return signature
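Since sig.py is removed wholesale, a short reference sketch of the signing flow it supported may help readers of this diff. The `signable` dict is again assumed to exist; the key is produced by securesystemslib's generate_rsa_key(), which returns a key dict that includes the private key:

```python
# Sketch: signing the 'signed' portion of a metadata dict and appending
# the resulting signature, using the legacy module deleted above.
from securesystemslib import keys as sslib_keys
from tuf import sig

rsa_key = sslib_keys.generate_rsa_key()  # RSAKEY_SCHEMA dict, private key set

signature = sig.generate_rsa_signature(signable['signed'], rsa_key)
signable['signatures'].append(signature)
```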
diff --git a/tuf/unittest_toolbox.py b/tuf/unittest_toolbox.py
deleted file mode 100755
index ac1305918b..0000000000
--- a/tuf/unittest_toolbox.py
+++ /dev/null
@@ -1,151 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2012 - 2017, New York University and the TUF contributors
-# SPDX-License-Identifier: MIT OR Apache-2.0
-
-"""
-<Program Name>
-  unittest_toolbox.py
-
-<Author>
-  Konstantin Andrianov.
-
-<Started>
-  March 26, 2012.
-
-<Copyright>
-  See LICENSE-MIT OR LICENSE for licensing information.
-
-<Purpose>
-  Provides various helper methods for unit testing.  Use it in place of the
-  standard unittest module.  This module builds on unittest; specifically,
-  Modified_TestCase is a class derived from unittest.TestCase.
-"""
-
-import os
-import shutil
-import unittest
-import tempfile
-import random
-import string
-
-from typing import Optional
-
-
-class Modified_TestCase(unittest.TestCase):
-  """
-  <Purpose>
-    Provide additional test-setup methods to make testing
-    of a module's methods-under-test as independent as possible.
-
-    If you want to modify setUp()/tearDown() do:
-    class Your_Test_Class(Modified_TestCase):
-      def setUp(self):
-        your setup modification
-        your setup modification
-        ...
-        Modified_TestCase.setUp(self)
-
-  <Methods>
-    make_temp_directory(self, directory=None):
-      Creates and returns an absolute path of a temporary directory.
-
-    make_temp_file(self, suffix='.txt', directory=None):
-      Creates and returns an absolute path of an empty temp file.
-
-    make_temp_data_file(self, suffix='', directory=None, data='junk data'):
-      Returns an absolute path of a temp file containing some data.
-
-    random_path(self, length=7):
-      Generate a 'random' path consisting of n-length strings of random chars.
-
-    Static Methods:
-    --------------
-    The following methods are static because they do not operate on any
-    instance of the class, so they can be called without instantiating it.
-
-    random_string(length=7):
-      Generate a 'length' long string of random characters.
-  """
-
-
-  def setUp(self) -> None:
-    self._cleanup = []
-
-
-
-  def tearDown(self) -> None:
-    for cleanup_function in self._cleanup:
-      # Perform clean up by executing clean-up functions.
-      try:
-        # OSError will occur if the directory was already removed.
-        cleanup_function()
-
-      except OSError:
-        pass
-
-
-
-  def make_temp_directory(self, directory: Optional[str]=None) -> str:
-    """Creates and returns an absolute path of a directory."""
-
-    prefix = self.__class__.__name__ + '_'
-    temp_directory = tempfile.mkdtemp(prefix=prefix, dir=directory)
-
-    def _destroy_temp_directory():
-      shutil.rmtree(temp_directory)
-
-    self._cleanup.append(_destroy_temp_directory)
-
-    return temp_directory
-
-
-
-  def make_temp_file(
-      self, suffix: str='.txt', directory: Optional[str]=None
-  ) -> str:
-    """Creates and returns an absolute path of an empty file."""
-    prefix = 'tmp_file_' + self.__class__.__name__ + '_'
-    temp_file = tempfile.mkstemp(suffix=suffix, prefix=prefix, dir=directory)
-    def _destroy_temp_file():
-      os.unlink(temp_file[1])
-    self._cleanup.append(_destroy_temp_file)
-    return temp_file[1]
-
-
-
-  def make_temp_data_file(
-      self, suffix: str='', directory: Optional[str]=None,
-      data: str = 'junk data'
-  ) -> str:
-    """Returns an absolute path of a temp file containing data."""
-    temp_file_path = self.make_temp_file(suffix=suffix, directory=directory)
-    temp_file = open(temp_file_path, 'wt', encoding='utf8')
-    temp_file.write(data)
-    temp_file.close()
-    return temp_file_path
-
-
-
-  def random_path(self, length: int = 7) -> str:
-    """Generate a 'random' path consisting of random n-length strings."""
-
-    rand_path = '/' + self.random_string(length)
-
-    for junk in range(2):
-      rand_path = os.path.join(rand_path, self.random_string(length))
-
-    return rand_path
-
-
-
-  @staticmethod
-  def random_string(length: int=15) -> str:
-    """Generate a random string of specified length."""
-
-    rand_str = ''
-    for junk in range(length):
-      rand_str += random.SystemRandom().choice('abcdefABCDEF' + string.digits)
-
-    return rand_str
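For context, a minimal sketch of the subclassing pattern the class docstring describes. The test class and its contents are hypothetical; tearDown() is inherited from Modified_TestCase, so temporary files and directories are removed automatically:

```python
# Sketch: a test case built on the deleted toolbox, following the
# setUp() chaining pattern from the class docstring.
import unittest

from tuf import unittest_toolbox


class TestExample(unittest_toolbox.Modified_TestCase):
  def setUp(self):
    # Chain up first so self._cleanup is initialized.
    unittest_toolbox.Modified_TestCase.setUp(self)
    self.repo_dir = self.make_temp_directory()

  def test_data_file(self):
    data_path = self.make_temp_data_file(directory=self.repo_dir)
    with open(data_path, encoding='utf8') as data_file:
      self.assertEqual(data_file.read(), 'junk data')


if __name__ == '__main__':
  unittest.main()
```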