Merge branch 'main' into 'linkspec_refactor_v3'

# Conflicts:
#   crates/tor-netdir/semver.md
Nick Mathewson 2022-08-10 14:48:57 +00:00
commit 36a28a8587
37 changed files with 532 additions and 136 deletions

Cargo.lock (generated)

@ -617,6 +617,7 @@ checksum = "d1873270f8f7942c191139cb8a40fd228da6c3fd2fc376d7e92d47aa14aeb59e"
dependencies = [
"crypto-common",
"inout",
"zeroize",
]
[[package]]
@ -976,16 +977,6 @@ dependencies = [
"dirs-sys",
]
[[package]]
name = "dirs-next"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b98cf8ebf19c3d1b223e151f99a4f9f0690dca41414773390fc824184ac833e1"
dependencies = [
"cfg-if 1.0.0",
"dirs-sys-next",
]
[[package]]
name = "dirs-sys"
version = "0.3.7"
@ -997,17 +988,6 @@ dependencies = [
"winapi 0.3.9",
]
[[package]]
name = "dirs-sys-next"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4ebda144c4fe02d1f7ea1a7d9641b6fc6b580adcfa024ae48797ecdeb6825b4d"
dependencies = [
"libc",
"redox_users",
"winapi 0.3.9",
]
[[package]]
name = "displaydoc"
version = "0.2.3"
@ -2977,12 +2957,12 @@ dependencies = [
]
[[package]]
name = "shellexpand-fork"
version = "2.1.1"
name = "shellexpand"
version = "2.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b9b96d72c00ea377d8dfe6177badd0a6f42b4cb3122479ffe44fee2532d85ee"
checksum = "7ccc8076840c4da029af4f87e4e8daeb0fca6b87bbb02e10cb60b791450e11e4"
dependencies = [
"dirs-next",
"dirs",
]
[[package]]
@ -3424,6 +3404,7 @@ dependencies = [
"thiserror",
"tor-error",
"tor-llcrypto",
"zeroize",
]
[[package]]
@ -3559,7 +3540,7 @@ dependencies = [
"serde",
"serde_ignored",
"serde_json",
"shellexpand-fork",
"shellexpand",
"tempfile",
"thiserror",
"toml",


@ -25,7 +25,7 @@ walkdir = { version = "2", optional = true }
libc = "0.2"
once_cell = "1"
[target.'cfg(all(unix, not(target_os="ios")))'.dependencies]
[target.'cfg(all(unix, not(target_os="ios"), not(target_os="android")))'.dependencies]
users = "0.11"
[dev-dependencies]


@ -181,7 +181,7 @@ impl<'a> super::Verifier<'a> {
// about a directory, the owner can change the permissions and owner
// of anything in the directory.)
#[cfg(not(target_os = "ios"))]
#[cfg(all(not(target_os = "ios"), not(target_os = "android")))]
{
let uid = meta.uid();
if uid != 0 && Some(uid) != self.mistrust.trust_user {
@ -225,14 +225,23 @@ impl<'a> super::Verifier<'a> {
}
};
// If we trust the GID, then we allow even more bits to be set.
#[cfg(not(target_os = "ios"))]
#[cfg(all(not(target_os = "ios"), not(target_os = "android")))]
if self.mistrust.trust_group == Some(meta.gid()) {
forbidden_bits &= !0o070;
}
// rational: on iOS the platform already protect user data, and not setting this poses
// issue with the default application data folder.
#[cfg(target_os = "ios")]
// Both iOS and Android have a group-writable directory on the path to the
// application data directory. However, both systems already offer some
// guarantees that application data is kept away from other apps.
//
// iOS: https://developer.apple.com/library/archive/documentation/FileManagement/Conceptual/FileSystemProgrammingGuide/FileSystemOverview/FileSystemOverview.html
// > For security purposes, an iOS app's interactions with the file system are limited
// to the directories inside the app's sandbox directory
//
// Android: https://developer.android.com/training/data-storage
// > App-specific storage: [...] Use the directories within internal storage to save
// sensitive information that other apps shouldn't access.
#[cfg(any(target_os = "ios", target_os = "android"))]
{
forbidden_bits &= !0o070;
}
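To make the bit arithmetic above concrete, here is a small illustrative sketch (not part of the commit); the starting mask is hypothetical, since the real value depends on the object being checked:

```rust
// Hypothetical starting mask: forbid all "group" and "other" permission bits.
let mut forbidden_bits: u32 = 0o077;

// When the group is trusted (or on iOS/Android, where the platform already
// isolates application data), the group bits stop being forbidden.
forbidden_bits &= !0o070;

assert_eq!(forbidden_bits, 0o007); // only the "other" bits remain forbidden
```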


@ -283,7 +283,11 @@ mod dir;
mod disable;
mod err;
mod imp;
#[cfg(all(target_family = "unix", not(target_os = "ios")))]
#[cfg(all(
target_family = "unix",
not(target_os = "ios"),
not(target_os = "android")
))]
mod user;
#[cfg(test)]
@ -305,7 +309,11 @@ pub use err::Error;
/// A result type as returned by this crate
pub type Result<T> = std::result::Result<T, Error>;
#[cfg(all(target_family = "unix", not(target_os = "ios")))]
#[cfg(all(
target_family = "unix",
not(target_os = "ios"),
not(target_os = "android")
))]
pub use user::{TrustedGroup, TrustedUser};
/// Configuration for verifying that a file or directory is really "private".
@ -361,7 +369,11 @@ pub struct Mistrust {
status: disable::Status,
/// What user ID do we trust by default (if any?)
#[cfg(all(target_family = "unix", not(target_os = "ios")))]
#[cfg(all(
target_family = "unix",
not(target_os = "ios"),
not(target_os = "android")
))]
#[builder(
setter(into),
field(type = "TrustedUser", build = "self.trust_user.get_uid()?")
@ -369,7 +381,11 @@ pub struct Mistrust {
trust_user: Option<u32>,
/// What group ID do we trust by default (if any?)
#[cfg(all(target_family = "unix", not(target_os = "ios")))]
#[cfg(all(
target_family = "unix",
not(target_os = "ios"),
not(target_os = "android")
))]
#[builder(
setter(into),
field(type = "TrustedGroup", build = "self.trust_group.get_gid()?")
@ -400,7 +416,11 @@ impl MistrustBuilder {
/// trusted.
///
/// This option disables the default group-trust behavior as well.
#[cfg(all(target_family = "unix", not(target_os = "ios")))]
#[cfg(all(
target_family = "unix",
not(target_os = "ios"),
not(target_os = "android")
))]
pub fn trust_admin_only(&mut self) -> &mut Self {
self.trust_user = TrustedUser::None;
self.trust_group = TrustedGroup::None;
@ -415,7 +435,11 @@ impl MistrustBuilder {
/// With this option set, no group is trusted, and any group-readable or
/// group-writable objects are treated the same as world-readable and
/// world-writable objects respectively.
#[cfg(all(target_family = "unix", not(target_os = "ios")))]
#[cfg(all(
target_family = "unix",
not(target_os = "ios"),
not(target_os = "android")
))]
pub fn trust_no_group_id(&mut self) -> &mut Self {
self.trust_group = TrustedGroup::None;
self
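As an illustrative sketch of how these builder methods might be used on Unix (assuming `MistrustBuilder` follows the usual `derive_builder` conventions and is exported from the crate root):

```rust
use fs_mistrust::MistrustBuilder;

// Build a Mistrust that trusts no group at all: group-readable and
// group-writable objects are treated like world-accessible ones.
let mistrust = MistrustBuilder::default()
    .trust_no_group_id()
    .build()
    .expect("default mistrust settings should be valid");
```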


@ -21,6 +21,7 @@ signature = "1"
thiserror = "1"
tor-error = { path = "../tor-error", version = "0.3.2" }
tor-llcrypto = { path = "../tor-llcrypto", version = "0.3.3" }
zeroize = "1"
[dev-dependencies]
hex-literal = "0.3"


@ -0,0 +1,2 @@
MODIFIED: New SecretBuf API.


@ -86,10 +86,12 @@
mod err;
mod impls;
mod reader;
mod secretbuf;
mod writer;
pub use err::{EncodeError, Error};
pub use reader::Reader;
pub use secretbuf::SecretBuf;
pub use writer::Writer;
use arrayref::array_ref;


@ -0,0 +1,123 @@
//! Define a wrapper for Vec<u8> that will act as Writer, but zeroize its
//! contents on drop or reallocation.
use crate::Writer;
use zeroize::{Zeroize, ZeroizeOnDrop};
/// A [`Writer`] used for accumulating secret data, which gets cleared on drop.
///
/// Unlike `Zeroizing<Vec<u8>>`, this type makes sure that we always zeroize the
/// contents of the buffer, even if the buffer has to be reallocated in order to
/// grow.
///
/// We use this for cases when we're building the input to a key derivation
/// function (KDF), and want to ensure that we don't expose the values we feed
/// to it.
///
/// This struct is expected to have additional overhead beyond `Vec<u8>` only
/// when it has to grow its capacity.
#[derive(Zeroize, ZeroizeOnDrop, Debug, Clone, Eq, PartialEq)]
pub struct SecretBuf(Vec<u8>);
/// The default size of our buffer.
///
/// This is based on the size of a typical secret input in `tor-proto`.
const DEFAULT_CAPACITY: usize = 384;
impl SecretBuf {
/// Construct a new empty [`SecretBuf`]
pub fn new() -> Self {
Self::with_capacity(DEFAULT_CAPACITY)
}
/// Construct a new empty [`SecretBuf`] with a specified capacity.
///
/// This buffer will not have to be reallocated until it uses `capacity`
/// bytes.
pub fn with_capacity(capacity: usize) -> Self {
Self(Vec::with_capacity(capacity))
}
/// Truncate this buffer to a given length.
pub fn truncate(&mut self, new_len: usize) {
self.0.truncate(new_len);
}
/// Add all the bytes from `slice` to the end of this vector.
pub fn extend_from_slice(&mut self, slice: &[u8]) {
let new_len = self.0.len() + slice.len();
if new_len >= self.0.capacity() {
// We will need to grow the buffer. But if we let `Vec` reallocate on
// its own, it won't zero the memory it leaves behind. So instead,
// explicitly make a new vector and zeroize the old one.
// Make sure we always at least double our capacity.
let new_capacity = std::cmp::max(self.0.capacity() * 2, new_len);
let mut new_vec = Vec::with_capacity(new_capacity);
new_vec.extend_from_slice(&self.0[..]);
let mut old_vec = std::mem::replace(&mut self.0, new_vec);
old_vec.zeroize();
}
self.0.extend_from_slice(slice);
debug_assert_eq!(self.0.len(), new_len);
}
}
impl From<Vec<u8>> for SecretBuf {
fn from(v: Vec<u8>) -> Self {
Self(v)
}
}
impl Default for SecretBuf {
fn default() -> Self {
Self::new()
}
}
impl AsMut<[u8]> for SecretBuf {
fn as_mut(&mut self) -> &mut [u8] {
&mut self.0[..]
}
}
// It's okay to implement `Deref` (but not `DerefMut`): operations through an
// _immutable_ reference cannot reallocate the buffer, so they can't bypass the
// zeroize-on-reallocation behavior above.
impl std::ops::Deref for SecretBuf {
type Target = Vec<u8>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl Writer for SecretBuf {
fn write_all(&mut self, b: &[u8]) {
self.extend_from_slice(b);
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn simple_case() -> crate::EncodeResult<()> {
// Sadly, there is no way in safe rust to test that the zeroization
// actually happened. All we can test is that the data is correct.
let mut buf1 = SecretBuf::default();
let mut buf2 = Vec::new();
let xyz = b"Nine hundred pounds of sifted flax";
// This is enough to be sure that we'll reallocate.
for _ in 0..200 {
buf1.write(xyz)?;
buf2.write(xyz)?;
}
assert_eq!(&buf1[..], &buf2[..]);
Ok(())
}
}
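A brief usage sketch (not part of this commit) of how the new type might be used when assembling secret input for a KDF:

```rust
use tor_bytes::SecretBuf;

// Accumulate secret KDF input. The buffer zeroizes its old allocation
// whenever it has to grow, and zeroizes itself on drop.
let mut input = SecretBuf::with_capacity(64);
input.extend_from_slice(b"first secret component");
input.extend_from_slice(b"second secret component");

// `SecretBuf` derefs to `Vec<u8>`, so the accumulated bytes can be handed
// to any KDF that takes a byte slice.
assert!(!input.is_empty());
```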


@ -66,9 +66,12 @@ pub enum RelayMsg {
#[cfg(feature = "experimental-udp")]
Datagram(udp::Datagram),
// No hs for now.
#[cfg(feature = "onion-service")]
/// Establish Introduction
#[cfg(feature = "onion-service")]
EstablishIntro(onion_service::EstablishIntro),
/// Establish Rendezvous
#[cfg(feature = "onion-service")]
EstablishRendezvous(onion_service::EstablishRendezvous),
/// An unrecognized command.
Unrecognized(Unrecognized),
@ -118,6 +121,8 @@ impl RelayMsg {
Datagram(_) => RelayCmd::DATAGRAM,
#[cfg(feature = "onion-service")]
EstablishIntro(_) => RelayCmd::ESTABLISH_INTRO,
#[cfg(feature = "onion-service")]
EstablishRendezvous(_) => RelayCmd::ESTABLISH_RENDEZVOUS,
Unrecognized(u) => u.cmd(),
}
}
@ -151,6 +156,10 @@ impl RelayMsg {
RelayCmd::ESTABLISH_INTRO => {
RelayMsg::EstablishIntro(onion_service::EstablishIntro::decode_from_reader(r)?)
}
#[cfg(feature = "onion-service")]
RelayCmd::ESTABLISH_RENDEZVOUS => RelayMsg::EstablishRendezvous(
onion_service::EstablishRendezvous::decode_from_reader(r)?,
),
_ => RelayMsg::Unrecognized(Unrecognized::decode_with_cmd(c, r)?),
})
}
@ -181,6 +190,8 @@ impl RelayMsg {
Datagram(b) => b.encode_onto(w),
#[cfg(feature = "onion-service")]
EstablishIntro(b) => b.encode_onto(w),
#[cfg(feature = "onion-service")]
EstablishRendezvous(b) => b.encode_onto(w),
Unrecognized(b) => b.encode_onto(w),
}
}


@ -71,3 +71,33 @@ impl msg::Body for EstablishIntro {
Ok(())
}
}
/// A message sent from client to rendezvous point.
#[derive(Debug, Clone)]
pub struct EstablishRendezvous {
/// A rendezvous cookie is an arbitrary 20-byte value,
/// chosen randomly by the client.
cookie: [u8; EstablishRendezvous::COOKIE_LEN],
}
impl EstablishRendezvous {
/// The only acceptable length of a rendezvous cookie.
pub const COOKIE_LEN: usize = 20;
/// Construct a new establish rendezvous cell.
pub fn new(cookie: [u8; Self::COOKIE_LEN]) -> Self {
Self { cookie }
}
}
impl msg::Body for EstablishRendezvous {
fn into_message(self) -> msg::RelayMsg {
msg::RelayMsg::EstablishRendezvous(self)
}
fn decode_from_reader(r: &mut Reader<'_>) -> Result<Self> {
let cookie = r.extract()?;
r.take_rest();
Ok(Self { cookie })
}
fn encode_onto(self, w: &mut Vec<u8>) -> EncodeResult<()> {
w.write(&self.cookie)
}
}
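For illustration only (assuming the `onion-service` feature is enabled), a client might construct this message roughly as follows:

```rust
use rand::RngCore;
use tor_cell::relaycell::msg::RelayMsg;
use tor_cell::relaycell::onion_service::EstablishRendezvous;

// Pick an arbitrary 20-byte rendezvous cookie at random.
let mut cookie = [0_u8; EstablishRendezvous::COOKIE_LEN];
rand::thread_rng().fill_bytes(&mut cookie);

// Convert it into a relay message, ready to be encoded onto a cell body.
let msg: RelayMsg = EstablishRendezvous::new(cookie).into();
```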


@ -12,6 +12,8 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use hex_literal::hex;
#[cfg(feature = "onion-service")]
use tor_cell::relaycell::onion_service;
#[cfg(feature = "experimental-udp")]
use tor_cell::relaycell::udp;
@ -615,6 +617,42 @@ fn test_connected_udp() {
);
}
#[cfg(feature = "onion-service")]
#[test]
fn test_establish_rendezvous() {
let cmd = RelayCmd::ESTABLISH_RENDEZVOUS;
assert_eq!(Into::<u8>::into(cmd), 33_u8);
// Valid cookie length
let cookie = [1; 20];
msg(
cmd,
// 20 ones
"0101010101010101010101010101010101010101",
&onion_service::EstablishRendezvous::new(cookie).into(),
);
// Extra bytes are ignored
// 21 ones
let body = "010101010101010101010101010101010101010101";
let actual_msg = decode(cmd, &unhex(body)[..]).unwrap();
let mut actual_bytes = vec![];
actual_msg
.encode_onto(&mut actual_bytes)
.expect("Encode msg onto byte vector");
let expected_bytes = vec![1; 20];
assert_eq!(actual_bytes, expected_bytes);
// Invalid cookie length
// 19 ones
let body = "01010101010101010101010101010101010101";
assert_eq!(
decode(cmd, &unhex(body)[..]).unwrap_err(),
BytesError::Truncated,
);
}
// TODO: need to add tests for:
// - unrecognized
// - data


@ -27,7 +27,7 @@ paste = "1"
regex = { version = "1", default-features = false, features = ["std"] }
serde = { version = "1.0.103", features = ["derive"] }
serde_ignored = "0.1.3"
shellexpand = { version = "2.1", package = "shellexpand-fork", optional = true }
shellexpand = { version = "2.1.2", optional = true }
thiserror = "1"
toml = "0.5"
tor-basic-utils = { path = "../tor-basic-utils", version = "0.3.3" }


@ -140,7 +140,7 @@ where
let d1 = hex::decode(elts[1])?;
let d2 = hex::decode(elts[2])?;
match (d1.try_into(), d2.try_into()) {
(Ok(a), Ok(b)) => (Ok((a, b))),
(Ok(a), Ok(b)) => Ok((a, b)),
_ => Err(Error::BadDiff("wrong digest lengths on 'hash' line")),
}
}


@ -0,0 +1 @@
BREAKING: Use new version of NetDirProvider.


@ -90,6 +90,7 @@ use scopeguard::ScopeGuard;
use tor_circmgr::CircMgr;
use tor_dirclient::SourceInfo;
use tor_error::into_internal;
use tor_netdir::params::NetParameters;
use tor_netdir::{DirEvent, MdReceiver, NetDir, NetDirProvider};
use async_trait::async_trait;
@ -177,6 +178,30 @@ impl<R: Runtime> NetDirProvider for DirMgr<R> {
fn events(&self) -> BoxStream<'static, DirEvent> {
Box::pin(self.events.subscribe())
}
fn params(&self) -> Arc<dyn AsRef<tor_netdir::params::NetParameters>> {
if let Some(netdir) = self.netdir.get() {
// We have a directory, so we'd like to give it out for its
// parameters.
//
// We do this even if the directory is expired, since parameters
// don't really expire on any plausible timescale.
netdir
} else {
// We have no directory, so we'll give out the default parameters as
// modified by the provided override_net_params configuration.
//
self.default_parameters
.lock()
.expect("Poisoned lock")
.clone()
}
// TODO(nickm): If we felt extremely clever, we could add a third case
// where, if we have a pending directory with a validated consensus, we
// give out that consensus's network parameters even if we _don't_ yet
// have a full directory. That's significant refactoring, though, for
// an unclear amount of benefit.
}
}
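A minimal sketch (the helper name is hypothetical) of how a caller might consume the new method through the `NetDirProvider` trait:

```rust
use std::sync::Arc;
use tor_netdir::{params::NetParameters, NetDirProvider};

/// Hypothetical helper: snapshot the provider's current network parameters,
/// falling back to defaults when no directory is available yet.
fn current_params(provider: &dyn NetDirProvider) -> Arc<dyn AsRef<NetParameters>> {
    provider.params()
}
```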
#[async_trait]
@ -232,6 +257,9 @@ pub struct DirMgr<R: Runtime> {
// gets wrapped in an Arc)
netdir: Arc<SharedMutArc<NetDir>>,
/// A set of network parameters to hand out when we have no directory.
default_parameters: Mutex<Arc<NetParameters>>,
/// A publisher handle that we notify whenever the consensus changes.
events: event::FlagPublisher<DirEvent>,
@ -721,6 +749,11 @@ impl<R: Runtime> DirMgr<R> {
netdir.replace_overridden_parameters(&new_config.override_net_params);
Ok(())
});
{
let mut params = self.default_parameters.lock().expect("lock failed");
*params = Arc::new(NetParameters::from_map(&new_config.override_net_params));
}
// (It's okay to ignore the error, since it just means that there
// was no current netdir.)
self.events.publish(DirEvent::NewConsensus);
@ -810,6 +843,8 @@ impl<R: Runtime> DirMgr<R> {
let store = Mutex::new(config.open_store(offline)?);
let netdir = Arc::new(SharedMutArc::new());
let events = event::FlagPublisher::new();
let default_parameters = NetParameters::from_map(&config.override_net_params);
let default_parameters = Mutex::new(Arc::new(default_parameters));
let (send_status, receive_status) = postage::watch::channel();
let send_status = Mutex::new(send_status);
@ -827,6 +862,7 @@ impl<R: Runtime> DirMgr<R> {
config: config.into(),
store,
netdir,
default_parameters,
events,
send_status,
receive_status,


@ -183,9 +183,21 @@ impl From<Vec<u8>> for InputString {
/// Configuration of expiration of each element of a [`Store`].
pub(crate) struct ExpirationConfig {
/// How long to keep expired router descriptors.
/// How long to keep router descriptors.
///
/// This timeout is measured since the publication date of the router
/// descriptor.
///
/// TODO(nickm): We may want a better approach in the future; see notes in
/// `EXPIRATION_DEFAULTS`.
pub(super) router_descs: Duration,
/// How long to keep expired microdescriptors descriptors.
/// How long to keep unlisted microdescriptors.
///
/// This timeout counts the amount of time since a microdescriptor is no
/// longer listed in a live consensus. Shorter values save storage at the
/// expense of extra bandwidth spent re-downloading microdescriptors; higher
/// values save bandwidth at the expense of storage used to store old
/// microdescriptors that might become listed again.
pub(super) microdescs: Duration,
/// How long to keep expired authority certificates.
pub(super) authcerts: Duration,
@ -196,10 +208,21 @@ pub(crate) struct ExpirationConfig {
/// Configuration of expiration shared between [`Store`] implementations.
pub(crate) const EXPIRATION_DEFAULTS: ExpirationConfig = {
ExpirationConfig {
// TODO: Choose a more realistic time.
router_descs: Duration::days(3 * 30),
// TODO: Choose a more realistic time.
microdescs: Duration::days(3 * 30),
// TODO: This is the value that C Tor uses here, but it may be desirable
// to adjust it depending on what we find in practice. For relays,
// instead of looking at publication date, we might want to use an
// approach more similar to the "last-listed" approach taken by
// microdescriptors. For bridges, we can keep descriptors for a longer
// time. In either case, we may be able to discard all but the most
// recent descriptor from each identity.
router_descs: Duration::days(5),
// This value is a compromise between saving bandwidth (by not having to
// re-download microdescs) and saving space (by not having to store too
// many microdescs). It's the same one that C tor uses; experiments on
// 2022 data suggest that it winds up using only 1% more microdesc dl
// bandwidth than strictly necessary, at the cost of storing 40% more
// microdescriptors than will be immediately useful at any given time.
microdescs: Duration::days(7),
authcerts: Duration::ZERO,
consensuses: Duration::days(2),
}


@ -111,7 +111,7 @@ pub(crate) fn default_fallbacks() -> Vec<FallbackDirBuilder> {
bld
}
include!("fallback_dirs.inc")
include!("../data/fallback_dirs.rs")
}
impl tor_linkspec::HasAddrs for FallbackDir {


@ -23,11 +23,11 @@ relay = []
hsv3-client = []
[dependencies]
aes = { version = "0.8" }
aes = { version = "0.8", features = ["zeroize"] }
arrayref = "0.3"
base64 = "0.13.0"
cipher = { version = "0.4.3", optional = true }
ctr = "0.9"
cipher = { version = "0.4.3", optional = true, features = ["zeroize"] }
ctr = { version = "0.9", features = ["zeroize"] }
curve25519-dalek = "3.2"
digest = "0.10.0"
ed25519-dalek = { version = "1", features = ["batch"] }


@ -28,11 +28,13 @@ pub mod aes {
use cipher::{InnerIvInit, IvSizeUser, StreamCipher, StreamCipherError};
use digest::crypto_common::{InnerUser, KeyInit, KeySizeUser};
use openssl::symm::{Cipher, Crypter, Mode};
use zeroize::{Zeroize, ZeroizeOnDrop};
/// AES 128 in counter mode as used by Tor.
pub struct Aes128Ctr(Crypter);
/// AES 128 key
#[derive(Zeroize, ZeroizeOnDrop)]
pub struct Aes128Key([u8; 16]);
impl KeySizeUser for Aes128Key {
@ -58,6 +60,7 @@ pub mod aes {
&mut self,
mut buf: InOutBuf<'_, '_, u8>,
) -> Result<(), StreamCipherError> {
// TODO(nickm): It would be lovely if we could get rid of this copy somehow.
let in_buf = buf.get_in().to_vec();
self.0
.update(&in_buf, buf.get_out())
@ -78,6 +81,7 @@ pub mod aes {
pub struct Aes256Ctr(Crypter);
/// AES 256 key
#[derive(Zeroize, ZeroizeOnDrop)]
pub struct Aes256Key([u8; 32]);
impl KeySizeUser for Aes256Key {
@ -103,6 +107,7 @@ pub mod aes {
&mut self,
mut buf: InOutBuf<'_, '_, u8>,
) -> Result<(), StreamCipherError> {
// TODO(nickm): It would be lovely if we could get rid of this copy.
let in_buf = buf.get_in().to_vec();
self.0
.update(&in_buf, buf.get_out())


@ -19,7 +19,6 @@ use arrayref::array_ref;
use rsa::pkcs1::{DecodeRsaPrivateKey, DecodeRsaPublicKey};
use std::fmt;
use subtle::{Choice, ConstantTimeEq};
use zeroize::Zeroize;
/// How many bytes are in an "RSA ID"? (This is a legacy tor
/// concept, and refers to identifying a relay by a SHA1 digest
@ -32,7 +31,7 @@ pub const RSA_ID_LEN: usize = 20;
/// Note that for modern purposes, you should almost always identify a
/// relay by its [`Ed25519Identity`](crate::pk::ed25519::Ed25519Identity)
/// instead of by this kind of identity key.
#[derive(Clone, Copy, Hash, Zeroize, Ord, PartialOrd)]
#[derive(Clone, Copy, Hash, Ord, PartialOrd)]
#[allow(clippy::derive_hash_xor_eq)]
pub struct RsaIdentity {
/// SHA1 digest of a DER encoded public key.


@ -1,3 +1,4 @@
BREAKING: new params() function required in NetDirProvider trait.
BREAKING: Relay implements the new versions of the tor-linkspec traits.
BREAKING: Accessors now expect RelayIdRef and related types.
BREAKING: Remove various unused accessors.


@ -339,6 +339,11 @@ pub trait NetDirProvider: UpcastArcNetDirProvider + Send + Sync {
/// this stream yields an event, all you can assume is that the event has
/// occurred at least once.
fn events(&self) -> BoxStream<'static, DirEvent>;
/// Return the latest network parameters.
///
/// If we have no directory, return a reasonable set of defaults.
fn params(&self) -> Arc<dyn AsRef<NetParameters>>;
}
impl<T> NetDirProvider for Arc<T>
@ -356,6 +361,10 @@ where
fn events(&self) -> BoxStream<'static, DirEvent> {
self.deref().events()
}
fn params(&self) -> Arc<dyn AsRef<NetParameters>> {
self.deref().params()
}
}
/// Helper trait: allows any `Arc<X>` to be upcast to a `Arc<dyn
@ -384,6 +393,12 @@ where
}
}
impl AsRef<NetParameters> for NetDir {
fn as_ref(&self) -> &NetParameters {
self.params()
}
}
/// A partially built NetDir -- it can't be unwrapped until it has
/// enough information to build safe paths.
#[derive(Debug, Clone)]


@ -314,6 +314,14 @@ impl Default for NetParameters {
}
}
// This impl is a bit silly, but it makes the `params` method on NetDirProvider
// work out.
impl AsRef<NetParameters> for NetParameters {
fn as_ref(&self) -> &NetParameters {
self
}
}
impl NetParameters {
/// Construct a new NetParameters from a given list of key=value parameters.
///


@ -23,7 +23,7 @@ tokio = ["tokio-crate", "tokio-util"]
arrayref = "0.3"
asynchronous-codec = "0.6.0"
bytes = "1"
cipher = "0.4.1"
cipher = { version = "0.4.1", features = ["zeroize"] }
coarsetime = "0.1.20"
derive_builder = { version = "0.11.2", package = "derive_builder_fork_arti" }
digest = "0.10.0"


@ -48,7 +48,7 @@ pub(crate) trait CryptInit: Sized {
/// Initialize this object from a key generator.
fn construct<K: super::handshake::KeyGenerator>(keygen: K) -> Result<Self> {
let seed = keygen.expand(Self::seed_len())?;
Self::initialize(&seed)
Self::initialize(&seed[..])
}
}
@ -393,9 +393,9 @@ pub(crate) mod tor1 {
mod test {
#![allow(clippy::unwrap_used)]
use super::*;
use crate::SecretBytes;
use rand::RngCore;
use tor_basic_utils::test_rng::testing_rng;
use tor_bytes::SecretBuf;
fn add_layers(
cc_out: &mut OutboundClientCrypt,
@ -411,10 +411,8 @@ mod test {
fn roundtrip() {
// Take canned keys and make sure we can do crypto correctly.
use crate::crypto::handshake::ShakeKeyGenerator as KGen;
fn s(seed: &[u8]) -> SecretBytes {
let mut s: SecretBytes = SecretBytes::new(Vec::new());
s.extend(seed);
s
fn s(seed: &[u8]) -> SecretBuf {
seed.to_vec().into()
}
let seed1 = s(b"hidden we are free");
@ -503,7 +501,7 @@ mod test {
const SEED: &[u8;108] = b"'You mean to tell me that there's a version of Sha-3 with no limit on the output length?', said Tom shakily.";
// These test vectors were generated from Tor.
let data: &[(usize, &str)] = &include!("../../testdata/cell_crypt.data");
let data: &[(usize, &str)] = &include!("../../testdata/cell_crypt.rs");
let mut cc_out = OutboundClientCrypt::new();
let mut cc_in = InboundClientCrypt::new();


@ -17,9 +17,10 @@ pub(crate) mod ntor;
#[cfg(feature = "ntor_v3")]
pub(crate) mod ntor_v3;
use crate::{Result, SecretBytes};
use crate::Result;
//use zeroize::Zeroizing;
use rand_core::{CryptoRng, RngCore};
use tor_bytes::SecretBuf;
/// A ClientHandshake is used to generate a client onionskin and
/// handle a relay onionskin.
@ -73,7 +74,7 @@ pub(crate) trait ServerHandshake {
/// It can only be used once.
pub(crate) trait KeyGenerator {
/// Consume the key
fn expand(self, keylen: usize) -> Result<SecretBytes>;
fn expand(self, keylen: usize) -> Result<SecretBuf>;
}
/// Generates keys based on the KDF-TOR function.
@ -81,18 +82,18 @@ pub(crate) trait KeyGenerator {
/// This is deprecated and shouldn't be used for new keys.
pub(crate) struct TapKeyGenerator {
/// Seed for the TAP KDF.
seed: SecretBytes,
seed: SecretBuf,
}
impl TapKeyGenerator {
/// Create a key generator based on a provided seed
pub(crate) fn new(seed: SecretBytes) -> Self {
pub(crate) fn new(seed: SecretBuf) -> Self {
TapKeyGenerator { seed }
}
}
impl KeyGenerator for TapKeyGenerator {
fn expand(self, keylen: usize) -> Result<SecretBytes> {
fn expand(self, keylen: usize) -> Result<SecretBuf> {
use crate::crypto::ll::kdf::{Kdf, LegacyKdf};
LegacyKdf::new(1).derive(&self.seed[..], keylen)
}
@ -101,19 +102,19 @@ impl KeyGenerator for TapKeyGenerator {
/// Generates keys based on SHAKE-256.
pub(crate) struct ShakeKeyGenerator {
/// Seed for the key generator
seed: SecretBytes,
seed: SecretBuf,
}
impl ShakeKeyGenerator {
/// Create a key generator based on a provided seed
#[allow(dead_code)] // We'll construct these for v3 onion services
pub(crate) fn new(seed: SecretBytes) -> Self {
pub(crate) fn new(seed: SecretBuf) -> Self {
ShakeKeyGenerator { seed }
}
}
impl KeyGenerator for ShakeKeyGenerator {
fn expand(self, keylen: usize) -> Result<SecretBytes> {
fn expand(self, keylen: usize) -> Result<SecretBuf> {
use crate::crypto::ll::kdf::{Kdf, ShakeKdf};
ShakeKdf::new().derive(&self.seed[..], keylen)
}


@ -7,6 +7,7 @@ use crate::util::ct::bytes_eq;
use crate::{Error, Result};
use rand::{CryptoRng, RngCore};
use tor_bytes::SecretBuf;
use tor_error::into_internal;
/// Number of bytes used for a "CREATE_FAST" handshake by the initiator.
@ -22,6 +23,9 @@ pub(crate) struct CreateFastClientState([u8; FAST_C_HANDSHAKE_LEN]);
/// See module documentation; you probably don't want to use this.
pub(crate) struct CreateFastClient;
/// How many bytes does this handshake use for its input seed?
const SECRET_INPUT_LEN: usize = 40;
impl super::ClientHandshake for CreateFastClient {
type KeyType = ();
type StateType = CreateFastClientState;
@ -41,9 +45,13 @@ impl super::ClientHandshake for CreateFastClient {
if msg.len() != FAST_S_HANDSHAKE_LEN {
return Err(Error::BadCircHandshakeAuth);
}
let mut inp = Vec::new();
inp.extend(&state.0[..]);
inp.extend(&msg[0..20]);
// There is not necessarily much point here (and below) in using a
// SecretBuf, since the data at issue are already in a cell that
// _wasn't_ marked with Zeroize. Still, for consistency, we use it
// here.
let mut inp = SecretBuf::with_capacity(SECRET_INPUT_LEN);
inp.extend_from_slice(&state.0[..]);
inp.extend_from_slice(&msg[0..20]);
let kh_expect = LegacyKdf::new(0).derive(&inp[..], 20)?;
@ -51,7 +59,7 @@ impl super::ClientHandshake for CreateFastClient {
return Err(Error::BadCircHandshakeAuth);
}
Ok(super::TapKeyGenerator::new(inp.into()))
Ok(super::TapKeyGenerator::new(inp))
}
}
@ -76,15 +84,15 @@ impl super::ServerHandshake for CreateFastServer {
let mut reply = vec![0_u8; FAST_S_HANDSHAKE_LEN];
rng.fill_bytes(&mut reply[0..20]);
let mut inp = Vec::new();
inp.extend(msg);
inp.extend(&reply[0..20]);
let mut inp = SecretBuf::with_capacity(SECRET_INPUT_LEN);
inp.extend_from_slice(msg);
inp.extend_from_slice(&reply[0..20]);
let kh = LegacyKdf::new(0)
.derive(&inp[..], 20)
.map_err(into_internal!("Can't expand key"))?;
reply[20..].copy_from_slice(&kh);
Ok((super::TapKeyGenerator::new(inp.into()), reply))
Ok((super::TapKeyGenerator::new(inp), reply))
}
}


@ -26,8 +26,8 @@
use crate::crypto::handshake::KeyGenerator;
use crate::crypto::ll::kdf::{Kdf, ShakeKdf};
use crate::{Error, Result, SecretBytes};
use tor_bytes::{Reader, Writer};
use crate::{Error, Result};
use tor_bytes::{Reader, SecretBuf, Writer};
use tor_llcrypto::d::Sha3_256;
use tor_llcrypto::pk::{curve25519, ed25519};
use tor_llcrypto::util::rand_compat::RngCompatExt;
@ -42,7 +42,9 @@ use tor_llcrypto::cipher::aes::Aes256Ctr;
use zeroize::Zeroizing;
/// The ENC_KEY from the HS Ntor protocol
type EncKey = [u8; 32];
//
// TODO (nickm): Any move operations applied to this key could subvert the zeroizing.
type EncKey = Zeroizing<[u8; 32]>;
/// The MAC_KEY from the HS Ntor protocol
type MacKey = [u8; 32];
/// A generic 256-bit MAC tag
@ -56,19 +58,19 @@ pub type Subcredential = [u8; 32];
/// expansion protocol specified in section "Key expansion" of rend-spec-v3.txt .
pub struct HsNtorHkdfKeyGenerator {
/// Secret data derived from the handshake, used as input to HKDF
seed: SecretBytes,
seed: SecretBuf,
}
impl HsNtorHkdfKeyGenerator {
/// Create a new key generator to expand a given seed
pub fn new(seed: SecretBytes) -> Self {
pub fn new(seed: SecretBuf) -> Self {
HsNtorHkdfKeyGenerator { seed }
}
}
impl KeyGenerator for HsNtorHkdfKeyGenerator {
/// Expand the seed into a keystream of 'keylen' size
fn expand(self, keylen: usize) -> Result<SecretBytes> {
fn expand(self, keylen: usize) -> Result<SecretBuf> {
ShakeKdf::new().derive(&self.seed[..], keylen)
}
}
@ -136,12 +138,12 @@ pub struct HsNtorClientState {
fn encrypt_and_mac(
mut plaintext: Vec<u8>,
other_data: &[u8],
enc_key: EncKey,
enc_key: &EncKey,
mac_key: MacKey,
) -> Result<(Vec<u8>, MacTag)> {
// Encrypt the introduction data using 'enc_key'
let zero_iv = GenericArray::default();
let mut cipher = Aes256Ctr::new(&enc_key.into(), &zero_iv);
let mut cipher = Aes256Ctr::new(enc_key.as_ref().into(), &zero_iv);
cipher.apply_keystream(&mut plaintext);
let ciphertext = plaintext; // it's now encrypted
@ -198,7 +200,7 @@ where
let (ciphertext, mac_tag) = encrypt_and_mac(
proto_input.plaintext.clone(),
&proto_input.intro_cell_data,
enc_key,
&enc_key,
mac_key,
)?;
@ -346,7 +348,7 @@ where
// Decrypt the ENCRYPTED_DATA from the intro cell
let zero_iv = GenericArray::default();
let mut cipher = Aes256Ctr::new(&enc_key.into(), &zero_iv);
let mut cipher = Aes256Ctr::new(enc_key.as_ref().into(), &zero_iv);
cipher.apply_keystream(ciphertext);
let plaintext = ciphertext; // it's now decrypted
@ -416,7 +418,7 @@ fn get_introduce1_key_material(
// Construct hs_keys = KDF(intro_secret_hs_input | t_hsenc | info, S_KEY_LEN+MAC_LEN)
// Start by getting 'intro_secret_hs_input'
let mut secret_input = Zeroizing::new(Vec::new());
let mut secret_input = SecretBuf::new();
secret_input
.write(bx) // EXP(B,x)
.and_then(|_| secret_input.write(auth_key)) // AUTH_KEY
@ -432,10 +434,12 @@ fn get_introduce1_key_material(
let hs_keys = ShakeKdf::new().derive(&secret_input[..], 32 + 32)?;
// Extract the keys into arrays
let enc_key = hs_keys[0..32]
.try_into()
.map_err(into_internal!("converting enc_key"))
.map_err(Error::from)?;
let enc_key = Zeroizing::new(
hs_keys[0..32]
.try_into()
.map_err(into_internal!("converting enc_key"))
.map_err(Error::from)?,
);
let mac_key = hs_keys[32..64]
.try_into()
.map_err(into_internal!("converting mac_key"))
@ -473,7 +477,7 @@ fn get_rendezvous1_key_material(
let hs_ntor_key_constant = &b"tor-hs-ntor-curve25519-sha3-256-1:hs_key_extract"[..];
// Start with rend_secret_hs_input
let mut secret_input = Zeroizing::new(Vec::new());
let mut secret_input = SecretBuf::new();
secret_input
.write(xy) // EXP(X,y)
.and_then(|_| secret_input.write(xb)) // EXP(X,b)
@ -491,7 +495,7 @@ fn get_rendezvous1_key_material(
let verify = hs_ntor_mac(&secret_input, hs_ntor_verify_constant)?;
// Start building 'auth_input'
let mut auth_input = Zeroizing::new(Vec::new());
let mut auth_input = Vec::new();
auth_input
.write(&verify)
.and_then(|_| auth_input.write(auth_key)) // AUTH_KEY
@ -506,12 +510,12 @@ fn get_rendezvous1_key_material(
let auth_input_mac = hs_ntor_mac(&auth_input, hs_ntor_mac_constant)?;
// Now finish up with the KDF construction
let mut kdf_seed = Zeroizing::new(Vec::new());
let mut kdf_seed = SecretBuf::new();
kdf_seed
.write(&ntor_key_seed)
.and_then(|_| kdf_seed.write(hs_ntor_expand_constant))
.map_err(into_internal!("Can't encode kdf-input for hs-ntor."))?;
let keygen = HsNtorHkdfKeyGenerator::new(Zeroizing::new(kdf_seed.to_vec()));
let keygen = HsNtorHkdfKeyGenerator::new(kdf_seed);
Ok((keygen, auth_input_mac))
}


@ -2,8 +2,8 @@
use super::{KeyGenerator, RelayHandshakeError, RelayHandshakeResult};
use crate::util::ct;
use crate::{Error, Result, SecretBytes};
use tor_bytes::{EncodeResult, Reader, Writer};
use crate::{Error, Result};
use tor_bytes::{EncodeResult, Reader, SecretBuf, Writer};
use tor_error::into_internal;
use tor_llcrypto::d;
use tor_llcrypto::pk::curve25519::*;
@ -11,7 +11,6 @@ use tor_llcrypto::pk::rsa::RsaIdentity;
use digest::Mac;
use rand_core::{CryptoRng, RngCore};
use zeroize::Zeroizing;
/// Client side of the Ntor handshake.
pub(crate) struct NtorClient;
@ -104,18 +103,18 @@ pub(crate) struct NtorHandshakeState {
pub(crate) struct NtorHkdfKeyGenerator {
/// Secret key information derived from the handshake, used as input
/// to HKDF
seed: SecretBytes,
seed: SecretBuf,
}
impl NtorHkdfKeyGenerator {
/// Create a new key generator to expand a given seed
pub(crate) fn new(seed: SecretBytes) -> Self {
pub(crate) fn new(seed: SecretBuf) -> Self {
NtorHkdfKeyGenerator { seed }
}
}
impl KeyGenerator for NtorHkdfKeyGenerator {
fn expand(self, keylen: usize) -> Result<SecretBytes> {
fn expand(self, keylen: usize) -> Result<SecretBuf> {
let ntor1_key = &b"ntor-curve25519-sha256-1:key_extract"[..];
let ntor1_expand = &b"ntor-curve25519-sha256-1:key_expand"[..];
use crate::crypto::ll::kdf::{Kdf, Ntor1Kdf};
@ -211,7 +210,7 @@ fn ntor_derive(
let ntor1_verify = &b"ntor-curve25519-sha256-1:verify"[..];
let server_string = &b"Server"[..];
let mut secret_input = Zeroizing::new(Vec::new());
let mut secret_input = SecretBuf::new();
secret_input.write(xy)?; // EXP(X,y)
secret_input.write(xb)?; // EXP(X,b)
secret_input.write(&server_pk.id)?; // ID
@ -228,7 +227,7 @@ fn ntor_derive(
m.update(&secret_input[..]);
m.finalize()
};
let mut auth_input: SecretBytes = Zeroizing::new(Vec::new());
let mut auth_input = Vec::new();
auth_input.write_and_consume(verify)?; // verify
auth_input.write(&server_pk.id)?; // ID
auth_input.write(&server_pk.pk)?; // B


@ -14,8 +14,8 @@
use super::{RelayHandshakeError, RelayHandshakeResult};
use crate::util::ct;
use crate::{Error, Result, SecretBytes};
use tor_bytes::{EncodeResult, Reader, Writeable, Writer};
use crate::{Error, Result};
use tor_bytes::{EncodeResult, Reader, SecretBuf, Writeable, Writer};
use tor_error::into_internal;
use tor_llcrypto::d::{Sha3_256, Shake256};
use tor_llcrypto::pk::{curve25519, ed25519::Ed25519Identity};
@ -52,7 +52,9 @@ type DigestVal = [u8; DIGEST_LEN];
/// The output of the MAC.
type MacVal = [u8; MAC_LEN];
/// A key for symmetric encryption or decryption.
type EncKey = [u8; ENC_KEY_LEN];
//
// TODO (nickm): Any move operations applied to this key could subvert the zeroizing.
type EncKey = Zeroizing<[u8; ENC_KEY_LEN]>;
/// A key for message authentication codes.
type MacKey = [u8; MAC_KEY_LEN];
@ -137,7 +139,7 @@ fn hash(t: &Encap<'_>, data: &[u8]) -> DigestVal {
fn encrypt(key: &EncKey, m: &[u8]) -> Vec<u8> {
let mut d = m.to_vec();
let zero_iv = GenericArray::default();
let mut cipher = Aes256Ctr::new(key.into(), &zero_iv);
let mut cipher = Aes256Ctr::new(key.as_ref().into(), &zero_iv);
cipher.apply_keystream(&mut d);
d
}
@ -181,7 +183,7 @@ fn kdf_msgkdf(
relay_public: &NtorV3PublicKey,
client_public: &curve25519::PublicKey,
verification: &[u8],
) -> EncodeResult<(Zeroizing<EncKey>, DigestWriter<Sha3_256>)> {
) -> EncodeResult<(EncKey, DigestWriter<Sha3_256>)> {
// secret_input_phase1 = Bx | ID | X | B | PROTOID | ENCAP(VER)
// phase1_keys = KDF_msgkdf(secret_input_phase1)
// (ENC_K1, MAC_K1) = PARTITION(phase1_keys, ENC_KEY_LEN, MAC_KEY_LEN
@ -316,10 +318,10 @@ pub(crate) struct NtorV3KeyGenerator<R> {
}
impl<R: digest::XofReader> KeyGenerator for NtorV3KeyGenerator<R> {
fn expand(mut self, keylen: usize) -> Result<SecretBytes> {
let mut ret = vec![0; keylen];
self.reader.read(&mut ret);
Ok(Zeroizing::new(ret))
fn expand(mut self, keylen: usize) -> Result<SecretBuf> {
let mut ret: SecretBuf = vec![0; keylen].into();
self.reader.read(ret.as_mut());
Ok(ret)
}
}
@ -480,7 +482,7 @@ fn server_handshake_ntor_v3_no_keygen<REPLY: MsgReply>(
// that we're going to reply.
let secret_input = {
let mut si = Zeroizing::new(Vec::new());
let mut si = SecretBuf::new();
si.write(&xy)
.and_then(|_| si.write(&xb))
.and_then(|_| si.write(&keypair.pk.id))
@ -566,7 +568,7 @@ fn client_handshake_ntor_v3_part2(
// would be better to factor it out.
let yx = state.my_sk.diffie_hellman(&y_pk);
let secret_input = {
let mut si = Zeroizing::new(Vec::new());
let mut si = SecretBuf::new();
si.write(&yx)
.and_then(|_| si.write(&state.shared_secret))
.and_then(|_| si.write(&state.relay_public.id))


@ -14,16 +14,16 @@
//! services, and is likely to be used by other places in the future.
//! It is based on SHAKE-256.
use crate::{Error, Result, SecretBytes};
use crate::{Error, Result};
use digest::{ExtendableOutput, Update, XofReader};
use tor_bytes::SecretBuf;
use tor_llcrypto::d::{Sha1, Sha256, Shake256};
use zeroize::Zeroizing;
use zeroize::Zeroize;
/// A trait for a key derivation function.
pub(crate) trait Kdf {
/// Derive `n_bytes` of key data from some secret `seed`.
fn derive(&self, seed: &[u8], n_bytes: usize) -> Result<SecretBytes>;
fn derive(&self, seed: &[u8], n_bytes: usize) -> Result<SecretBuf>;
}
/// A legacy KDF, for use with TAP.
@ -58,22 +58,25 @@ impl LegacyKdf {
}
}
impl Kdf for LegacyKdf {
fn derive(&self, seed: &[u8], n_bytes: usize) -> Result<SecretBytes> {
fn derive(&self, seed: &[u8], n_bytes: usize) -> Result<SecretBuf> {
use digest::Digest;
let mut result = Zeroizing::new(Vec::with_capacity(n_bytes + Sha1::output_size()));
let mut result = SecretBuf::with_capacity(n_bytes + Sha1::output_size());
let mut k = self.idx;
if n_bytes > Sha1::output_size() * (256 - (k as usize)) {
return Err(Error::InvalidKDFOutputLength);
}
let mut digest_output = Default::default();
while result.len() < n_bytes {
let mut d = Sha1::new();
Digest::update(&mut d, seed);
Digest::update(&mut d, &[k]);
result.extend(d.finalize());
d.finalize_into(&mut digest_output);
result.extend_from_slice(&digest_output);
k += 1;
}
digest_output.zeroize();
result.truncate(n_bytes);
Ok(result)
@ -88,11 +91,11 @@ impl<'a, 'b> Ntor1Kdf<'a, 'b> {
}
impl Kdf for Ntor1Kdf<'_, '_> {
fn derive(&self, seed: &[u8], n_bytes: usize) -> Result<SecretBytes> {
fn derive(&self, seed: &[u8], n_bytes: usize) -> Result<SecretBuf> {
let hkdf = hkdf::Hkdf::<Sha256>::new(Some(self.t_key), seed);
let mut result = Zeroizing::new(vec![0; n_bytes]);
hkdf.expand(self.m_expand, &mut result[..])
let mut result: SecretBuf = vec![0; n_bytes].into();
hkdf.expand(self.m_expand, result.as_mut())
.map_err(|_| Error::InvalidKDFOutputLength)?;
Ok(result)
}
@ -105,11 +108,11 @@ impl ShakeKdf {
}
}
impl Kdf for ShakeKdf {
fn derive(&self, seed: &[u8], n_bytes: usize) -> Result<SecretBytes> {
fn derive(&self, seed: &[u8], n_bytes: usize) -> Result<SecretBuf> {
let mut xof = Shake256::default();
xof.update(seed);
let mut result = Zeroizing::new(vec![0; n_bytes]);
xof.finalize_xof().read(&mut result);
let mut result: SecretBuf = vec![0; n_bytes].into();
xof.finalize_xof().read(result.as_mut());
Ok(result)
}
}
@ -144,7 +147,7 @@ mod test {
#[test]
fn testvec_tap_kdf() {
// Taken from test_crypto.c in Tor, generated by a python script.
fn expand(b: &[u8]) -> SecretBytes {
fn expand(b: &[u8]) -> SecretBuf {
LegacyKdf::new(0).derive(b, 100).unwrap()
}
@ -199,7 +202,7 @@ mod test {
#[test]
fn testvec_ntor1_kdf() {
// From Tor's test_crypto.c; generated with ntor_ref.py
fn expand(b: &[u8]) -> SecretBytes {
fn expand(b: &[u8]) -> SecretBuf {
let t_key = b"ntor-curve25519-sha256-1:key_extract";
let m_expand = b"ntor-curve25519-sha256-1:key_expand";
Ntor1Kdf::new(&t_key[..], &m_expand[..])


@ -129,9 +129,6 @@ pub use util::skew::ClockSkew;
pub use channel::params::ChannelsParams;
/// A vector of bytes that gets cleared when it's dropped.
type SecretBytes = zeroize::Zeroizing<Vec<u8>>;
/// A Result type for this crate.
pub type Result<T> = std::result::Result<T, Error>;


@ -378,7 +378,7 @@ mod test {
fn suspend_and_resume_with_sleep() {
test_with_all_runtimes!(|rt| async move {
let (mut sch, hdl) = TaskSchedule::new(rt.clone());
sch.fire_in(Duration::from_micros(100));
sch.fire_in(Duration::from_millis(100));
hdl.suspend();
assert!(sch.next().now_or_never().is_none());

doc/ZeroizeStrategy.md

@ -0,0 +1,76 @@
# Using Zeroize in Arti.
The [`zeroize`] crate provides a best-effort mechanism to ensure that memory is
reset to zero before it is dropped. Here we describe what we use it for, and
why.
This document will not explain the limitations of the `zeroize` crate: for those,
see the crate's documentation.
## What can Zeroize defend us against?
There are several ways that memory can get revealed to an attacker:
1. A programming bug might reveal uninitialized or freed memory from the heap or
stack. This is less of a concern in safe Rust, but we still do link against
some code written in unsafe languages.
2. A programming bug might reveal in-use memory from a different allocation on
the heap or stack. This is also less of a concern in safe Rust. (Zeroize
cannot defend against this, since it only clears objects when they become
unused.)
3. The memory might be written to a swap file. (Zeroize cannot defend against
this, but it can limit the window of time during which the secrets are in RAM
to be swapped out.)
4. The memory might be revealed via a local attack by an attacker with physical
or administrative access. (Zeroize can't prevent this either, but it can
limit the window of time during which the secrets are in RAM.)
So we see that zeroizing memory is not a categorical way to prevent
memory-exposure attacks. Instead, it is a defense-in-depth mechanism to limit
the impacts of these attacks if and when they occur.
There are several possible impacts of a memory exposure. The most important
ones, in decreasing order of severity, seem to be:
1. The attacker might learn a private key, and thereby be able to impersonate a
relay or onion service.
2. The attacker might learn an ephemeral secret key, and thereby be able to
decrypt traffic that had been sent over the network. This would threaten
forward-secrecy.
3. The attacker might learn information that would help them perform traffic
analysis, like which guards a user was configured to use at a given time, or
information about a path through the network, or an IP address that the user
had been trying to connect to.
## Analysis and policies
During an exposure of type 2, 3, or 4 above, `zeroize` will never render the
attack completely harmless: it will only limit its impact. Therefore, it
makes sense to use it as part of a defense-in-depth strategy to try to lower
the impact of these attacks if they occur.
Information of types 1 and 2 is fairly well contained in individual places in
the code. Information of type 3, however, is spread all over the place: it's
hard to categorically reason that any particular piece of data _wouldn't_ help
the attacker do traffic analysis.
Therefore, we are going to try to use `zeroize` to protect secret encryption
keys and private keys only.
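For example, a secret-key wrapper (a hypothetical sketch, not a type from the codebase, using zeroize's derive support) might opt in like this:

```rust
use zeroize::{Zeroize, ZeroizeOnDrop};

/// A secret encryption key whose bytes are overwritten with zeros
/// when the value is dropped.
#[derive(Zeroize, ZeroizeOnDrop)]
struct SecretKey([u8; 32]);
```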
Furthermore, though we will consider failure to use `zeroize` in these cases as
a bug, we will not treat it as a critical security bug: instead, we'll treat it
only as a missing defense-in-depth to be addressed under our usual update
schedule.
## Other defenses
There are orthogonal defenses against these attacks that we don't consider here.
They include:
1. Encrypted swap
2. Reducing the amount of unsafe code, and sandboxing that code in a separate process.
3. Keeping secrets in memory pages marked as unswappable.
4. Disabling swap entirely.
5. OS-level mechanisms to make it harder for other processes to read the given
process's memory.
6. Replacement allocator implementations that just zeroize everything.


@ -12,7 +12,6 @@ Example:
"""
import re, subprocess
from enum import Enum
LINK_PATTERN = re.compile(r'''
\[([^\]]*)\] # Match [, then some non-] characters, then ]...
@ -125,7 +124,7 @@ def process(s):
return "".join(lnk.text() for lnk in items)
if __name__ == '__main__':
import fileinput, sys
import sys
if len(sys.argv) == 1 or sys.argv[1] == "-":
in_file = sys.stdin
else: