Commit

Merge pull request #4 from dr8co/dev
Dev
dr8co authored Nov 18, 2023
2 parents 3d98526 + ea7d1db commit 45ea078
Showing 3 changed files with 136 additions and 68 deletions.
74 changes: 55 additions & 19 deletions buildscript.sh
@@ -1,30 +1,66 @@
#!/bin/env bash

set -e

# This script is used to build the project on the Ubuntu Jammy (22.04) distribution.
# It is not intended to be used on other distributions, and must be run from the project root.

# Root access is required to install the dependencies.
if [ "$EUID" -ne 0 ]; then
REQUIRED_PACKAGES=(cmake ninja-build gcc-13 g++-13 clang-17 lldb-17 lld-17 libc++-17-dev libc++abi-17-dev libomp-17-dev libgcrypt20 openssl libreadline8 libsodium23 libsodium-dev)
PARALLELISM_LEVEL=4

function check_root() {
# Root access is required to install the dependencies.
if [ "$EUID" -ne 0 ]; then
echo "Please run as root."
exit
fi
abort
fi
}

function check_dependencies() {
    for cmd in wget add-apt-repository cmake; do
        if ! command -v $cmd &>/dev/null; then
            echo "$cmd could not be found"
            exit
        fi
    done
}

function install_dependencies() {
    wget -qO- https://apt.llvm.org/llvm-snapshot.gpg.key | tee /etc/apt/trusted.gpg.d/apt.llvm.org.asc
    add-apt-repository -y "deb http://apt.llvm.org/jammy/ llvm-toolchain-jammy-17 main"
    add-apt-repository -y ppa:ubuntu-toolchain-r/ppa
    apt update

    for package in "${REQUIRED_PACKAGES[@]}"; do
        if ! dpkg -s "$package" >/dev/null 2>&1; then
            apt install -y "$package"
        else
            echo "$package is already installed"
        fi
    done
}

# Run from this directory
cd "${0%/*}" || exit 1
function build_blake3() {
    ./install_blake3.sh clang-17
}

# Install the dependencies.
wget -qO- https://apt.llvm.org/llvm-snapshot.gpg.key | tee /etc/apt/trusted.gpg.d/apt.llvm.org.asc
add-apt-repository -y "deb http://apt.llvm.org/jammy/ llvm-toolchain-jammy-17 main"
add-apt-repository -y ppa:ubuntu-toolchain-r/ppa
apt update
apt install -y cmake ninja-build gcc-13 g++-13 clang-17 lldb-17 lld-17 libc++-17-dev \
libc++abi-17-dev libomp-17-dev libgcrypt20 openssl libreadline8 libsodium23 libsodium-dev
function configure_cmake() {
    cmake -B build -DCMAKE_C_COMPILER=clang-17 -DCMAKE_CXX_COMPILER=clang++-17 -DCMAKE_BUILD_TYPE=Debug -G Ninja
}

# Build BLAKE3
./install_blake3.sh clang-17
function build_project() {
    cmake --build build --config Debug -j "$PARALLELISM_LEVEL"
}

# Configure CMake
cmake -B build -DCMAKE_C_COMPILER=clang-17 -DCMAKE_CXX_COMPILER=clang++-17 -DCMAKE_BUILD_TYPE=Debug -G Ninja
main() {
    trap "echo 'An unexpected error occurred. Program aborted.'" ERR
    check_root
    check_dependencies
    cd "${0%/*}" || abort
    install_dependencies
    build_blake3
    configure_cmake
    build_project
}

# Build the project
cmake --build build --config Debug -j 4
main
128 changes: 80 additions & 48 deletions src/fileShredder/shredFiles.cpp
@@ -16,6 +16,7 @@

#include "../utils/utils.hpp"
#include "shredFiles.hpp"
#include <cstring>
#include <iostream>
#include <fstream>
#include <random>
@@ -26,6 +27,7 @@
#include <format>

namespace fs = std::filesystem;
constexpr std::streamoff BUFFER_SIZE = 4096;

/// \brief overwrites a file with random bytes.
/// \param file output file stream object.
@@ -38,19 +40,32 @@ void overwriteRandom(std::ofstream &file, const std::size_t fileSize, int nPasses
std::uniform_int_distribution<unsigned char> dist(0, 255);

for (int i = 0; i < nPasses; ++i) {
// seek to the beginning of the file
file.seekp(0, std::ios::beg);

// (Re)seed the Mersenne Twister engine in every pass
std::mt19937_64 gen(rd());

// seek to the beginning of the file
file.seekp(0, std::ios::beg);
std::vector<unsigned char> buffer(BUFFER_SIZE);

// Overwrite the file with random data
for (std::size_t pos = 0; pos < fileSize; ++pos) {
unsigned char randomByte = dist(gen);
file.write(reinterpret_cast<char *>(&randomByte), sizeof(decltype(randomByte)));
for (std::size_t pos = 0; pos < fileSize; pos += BUFFER_SIZE) {
// Generate a buffer filled with random data
for (auto &byte: buffer) {
byte = dist(gen);
}
// Adjust the buffer size for the last chunk of data, which may be smaller than the buffer size
if (pos + BUFFER_SIZE > fileSize) {
buffer.resize(fileSize - pos);
}

file.write(reinterpret_cast<char *>(buffer.data()), static_cast<std::streamsize>(buffer.size()));

if (!file) {
throw std::runtime_error("file write error");
}
}
}

}
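
A hypothetical call site for the buffered overwrite above (a sketch only, not part of this commit; the open mode, example function name, and pass count are illustrative assumptions):

    // Sketch only: assumes overwriteRandom() above is visible and the target file exists.
    #include <filesystem>
    #include <fstream>
    #include <system_error>

    void shredOnceExample(const std::filesystem::path &target) {
        std::error_code ec;
        const auto size = std::filesystem::file_size(target, ec);
        if (ec) return; // target missing or unreadable

        // std::ios::in keeps the existing contents from being truncated on open.
        std::ofstream file(target, std::ios::binary | std::ios::in | std::ios::out);
        if (file)
            overwriteRandom(file, static_cast<std::size_t>(size), 3); // three passes, illustrative
    }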

/// \brief overwrites a file with a constant byte.
@@ -59,12 +74,17 @@ void overwriteRandom(std::ofstream &file, const std::size_t fileSize, int nPasses
/// \param byte the byte to overwrite the file with.
/// \param fileSize the size of the file in bytes.
template<typename T>
void overwriteConstantByte(std::ofstream &file, T byte, const auto &fileSize) {
void overwriteConstantByte(std::ofstream &file, T &byte, const auto &fileSize) {
// seek to the beginning of the file
file.seekp(0, std::ios::beg);

for (std::streamoff pos = 0; pos < fileSize; ++pos) {
file.write(reinterpret_cast<char *>(&byte), sizeof(T));
std::vector<T> buffer(BUFFER_SIZE, byte);

for (std::streamoff pos = 0; pos < fileSize; pos += BUFFER_SIZE) {
if (pos + BUFFER_SIZE > fileSize) {
buffer.resize(fileSize - pos);
}
file.write(reinterpret_cast<char *>(buffer.data()), static_cast<std::streamsize>(buffer.size()));
}
}
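
As a rough illustration of how the two helpers might be combined into a DoD 5220.22-M style pass sequence (a sketch only, not the shredFile() implementation from this commit; the byte values and pass count follow the common 0x00 / 0xFF / random convention):

    // Sketch only: assumes overwriteRandom() and overwriteConstantByte() above are visible.
    void dodStyleWipeExample(std::ofstream &file, const std::size_t fileSize) {
        unsigned char zeros = 0x00;
        unsigned char ones = 0xFF;
        overwriteConstantByte(file, zeros, fileSize); // pass 1: all zeros
        overwriteConstantByte(file, ones, fileSize);  // pass 2: all ones
        overwriteRandom(file, fileSize, 1);           // pass 3: random bytes
        file.flush();
    }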

@@ -130,53 +150,65 @@ inline void renameAndRemove(const std::string &filename, int numTimes = 1) {
if (ec) std::cerr << "Failed to delete " << filename << ": " << ec.message() << '\n';
}

/// \brief wipes the cluster tips of a file.
/// \param fileName the path to the file to be wiped.
inline void wipeClusterTips(const std::string &fileName) {
int fileDescriptor = open(fileName.c_str(), O_RDWR);
if (fileDescriptor == -1) {
perror("Failed to open file to wipe cluster tips:");
return;
/// \struct FileDescriptor
/// \brief Represents a file descriptor.
///
/// The FileDescriptor class provides a convenient way to manage a file descriptor. It automatically opens the file
/// with the specified filename upon initialization, and closes the file when the object is destroyed. If the file
/// open operation fails, a runtime_error exception is thrown.
struct FileDescriptor {
int fd{-1};

explicit FileDescriptor(const std::string &filename) : fd(open(filename.c_str(), O_RDWR)) {
if (fd == -1)
throw std::runtime_error("Failed to open file: " + filename + " (" + std::strerror(errno) + ")");
}
// Get the file stats
struct stat fileStat{};
if (fstat(fileDescriptor, &fileStat) == -1) {
perror("Failed to get file size:");
close(fileDescriptor);
return;

~FileDescriptor() {
if (fd != -1) close(fd);
}
// Get the block size of the filesystem
const auto blockSize = fileStat.st_blksize;
if (blockSize == 0) {
std::cerr << "Invalid block size for the filesystem." << std::endl;
close(fileDescriptor);
return;
};

/// \struct FileStatInfo
/// \brief Provides information about a file based on its file descriptor.
///
/// The FileStatInfo struct encapsulates the information obtained from the stat function
/// for a given file descriptor. It provides a simple way to access file attributes such as
/// file size, permissions, and timestamps.
struct FileStatInfo {
struct stat fileStat{};

explicit FileStatInfo(int &fileDescriptor) {
if (fstat(fileDescriptor, &fileStat) == -1)
throw std::runtime_error(std::format("Failed to get file size: ({})", std::strerror(errno)));
}
};
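
One consequence of the RAII design above, shown as a sketch (not from this commit; the path is hypothetical): the descriptor is closed by the destructor even when a later step throws, so no explicit close() calls are needed.

    // Sketch only: assumes FileDescriptor and FileStatInfo above are visible.
    try {
        FileDescriptor fd("/tmp/example.txt"); // hypothetical path
        FileStatInfo info(fd.fd);              // may throw; fd is still closed on unwind
        // ... use info.fileStat ...
    } catch (const std::runtime_error &e) {
        std::cerr << e.what() << '\n';
    }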

/// \brief wipes the cluster tips of a file.
/// \param fileName the path to the file to be wiped.
inline void wipeClusterTips(const std::string &fileName) {
FileDescriptor fileDescriptor(fileName);
FileStatInfo fileInformation(fileDescriptor.fd);

// Calculate the size of the cluster tip
auto clusterTipSize = blockSize - (fileStat.st_size % blockSize);
auto clusterTipSize = fileInformation.fileStat.st_blksize -
(fileInformation.fileStat.st_size % fileInformation.fileStat.st_blksize);

// If the cluster tip size is larger than the file size, set it to 0
if (clusterTipSize >= fileStat.st_size)
if (clusterTipSize >= fileInformation.fileStat.st_size) {
clusterTipSize = 0;
}

// Write zeros to the cluster tip
if (clusterTipSize > 0) {
off_t offset = lseek(fileDescriptor, 0, SEEK_END);
if (offset == -1) {
perror("Failed to seek to end of file:");
close(fileDescriptor);
return;
}
std::vector<char> zeroBuffer(clusterTipSize, 0);
std::size_t bytesWritten = write(fileDescriptor, zeroBuffer.data(), zeroBuffer.size());
if (bytesWritten == static_cast<std::size_t>(-1)) {
perror("Failed to write zeros:");
close(fileDescriptor);
return;
}
// Seek to the end of the file
if (lseek(fileDescriptor.fd, 0, SEEK_END) == -1) {
throw std::runtime_error(std::format("Failed to seek to end of file: ({})", std::strerror(errno)));
}

close(fileDescriptor);
// Write zeros to the cluster tip
std::vector<char> zeroBuffer(clusterTipSize, 0);
auto bytesWritten = write(fileDescriptor.fd, zeroBuffer.data(), zeroBuffer.size());
if (bytesWritten == static_cast<ssize_t>(-1)) {
throw std::runtime_error(std::format("Failed to write zeros: ({})", std::strerror(errno)));
}
}
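
For a concrete, purely illustrative example of the cluster-tip arithmetic above: with a filesystem block size of 4096 bytes and a 10,000-byte file, the last block holds 10000 % 4096 = 1808 bytes of data, so the cluster tip is 4096 - 1808 = 2288 bytes, and those 2288 slack bytes at the end of the file are what gets overwritten with zeros.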

/// \brief shreds a file by overwriting it with random bytes.
@@ -268,7 +300,7 @@ unsigned int {
Simple = 1 << 0, // Simple overwrite with random bytes
Dod5220 = 1 << 1, // DoD 5220.22-M Standard algorithm
Dod5220_7 = 1 << 2, // DoD 5220.22-M Standard algorithm with 7 passes
WipeClusterTips = 1 << 3 // Wiping of the cluster tips
WipeClusterTips = 1 << 3 // Wiping of the cluster tips
};

/// \brief Adds read and write permissions to a file, if the user has authority.
2 changes: 1 addition & 1 deletion src/utils/utils.hpp
@@ -11,7 +11,7 @@

namespace fs = std::filesystem;

const std::unordered_map<char, const char *const> COLOR = {
static const std::unordered_map<char, const char *const> COLOR = {
{'r', "\033[1;31m"}, // Red
{'g', "\033[1;32m"}, // Green
{'y', "\033[1;33m"}, // Yellow
