Require (and write) docs in tor-dirclient.

This commit is contained in:
Nick Mathewson 2020-11-20 19:11:28 -05:00
parent 3d4f5f261e
commit 5d32d69011
3 changed files with 97 additions and 7 deletions

View File

@@ -1,27 +1,53 @@
//! Decompression support for Tor directory connections.
//!
//! There are different compression algorithms that can be used on the
//! Tor network; right now only zlib and identity decompression are
//! supported here.
//!
//! This provides a single streaming API for decompression; we may
//! want others in the future.
use anyhow::Result;
/// Possible return conditions from a decompression operation.
#[derive(Debug, Clone)]
pub(crate) enum StatusKind {
/// Some data was written.
Written,
/// We're out of space in the output buffer.
OutOfSpace,
/// We finished writing.
Done,
}
/// Return value from [`Decompressor::process`]. It describes how much data
/// was transferred, and what the caller needs to do next.
#[derive(Debug, Clone)]
pub(crate) struct Status {
/// The (successful) result of the decompression
pub status: StatusKind,
/// How many bytes were consumed from `inp`.
pub consumed: usize,
/// How many bytes were written into `out`.
pub written: usize,
}
/// An implementation of a compression algorithm, including its state.
pub(crate) trait Decompressor {
/// Decompress data from `inp` into `out`. If `finished` is true, no
/// more data will be provided after the current contents of `inp`.
fn process(&mut self, inp: &[u8], out: &mut [u8], finished: bool) -> Result<Status>;
}
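To illustrate how a caller is expected to drive this trait, here is a minimal sketch (not part of the commit) written as if it lived inside this module, where `Result` and the types above are already in scope. The helper name `decompress_all` and the buffer-growth strategy are assumptions for illustration only.

```rust
/// Sketch only: run any `Decompressor` until it reports `Done`, growing the
/// output buffer whenever it runs out of space.
fn decompress_all(dec: &mut dyn Decompressor, input: &[u8]) -> Result<Vec<u8>> {
    let mut out = vec![0_u8; 4096];
    let mut consumed = 0;
    let mut written = 0;
    loop {
        // Pass `finished = true`, since `input` already holds all the data.
        let st = dec.process(&input[consumed..], &mut out[written..], true)?;
        consumed += st.consumed;
        written += st.written;
        match st.status {
            StatusKind::Done => break,
            StatusKind::OutOfSpace => out.resize(out.len() * 2, 0),
            StatusKind::Written => (),
        }
    }
    out.truncate(written);
    Ok(out)
}
```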
/// Implementation for the identity decompressor.
///
/// This does more copying than Rust best practices would prefer, but
/// we should never actually use it in practice.
pub(crate) mod identity {
use super::{Decompressor, Status, StatusKind};
use anyhow::Result;
/// An identity decompressor
pub struct Identity;
impl Decompressor for Identity {
@@ -50,6 +76,9 @@ pub(crate) mod identity {
}
}
/// Implementation for the [`Decompressor`] trait on [`miniz_oxide::InflateState`].
///
/// This implements zlib compression as used in Tor.
mod miniz_oxide {
use super::{Decompressor, Status, StatusKind};

View File

@@ -1,5 +1,19 @@
//! Implements a directory client for Tor.
//!
//! Tor makes directory requests as HTTP/1.0 requests tunneled over Tor circuits.
//! For most objects, Tor uses a one-hop tunnel.
//!
//! # Limitations
//!
//! Multi-hop tunnels are not supported.
//!
//! Only zlib compression is supported.
// XXXX THIS CODE IS HORRIBLE AND NEEDS REFACTORING.
#![deny(missing_docs)]
#![deny(clippy::missing_docs_in_private_items)]
mod decompress;
pub mod request;
mod util;
@@ -11,12 +25,13 @@ use tor_circmgr::{CircMgr, DirInfo};
use anyhow::{anyhow, Result};
use std::sync::Arc;
// XXXX Remove this.
#[allow(unused_assignments)]
/// Fetch the resource described by `req` over the Tor network.
///
/// Circuits are built or found using `circ_mgr`, using paths
/// constructed using `dirinfo`.
pub async fn get_resource<CR, TR>(
req: CR,
netdir: DirInfo<'_>,
dirinfo: DirInfo<'_>,
circ_mgr: Arc<CircMgr<TR>>,
) -> Result<String>
where
@@ -26,7 +41,7 @@ where
let req = req.into_request()?;
let encoded = util::encode_request(req);
let circuit = circ_mgr.get_or_launch_dir(netdir).await?;
let circuit = circ_mgr.get_or_launch_dir(dirinfo).await?;
let mut stream = circuit.begin_dir_stream().await?;
stream.write_bytes(encoded.as_bytes()).await?;
@@ -36,8 +51,6 @@ where
let mut buf = vec![0; 1024];
let mut n_in_buf = 0;
let mut encoding: Option<String> = None;
#[allow(unused_variables)]
let mut length: Option<usize> = None;
loop {
let n = stream.read_bytes(&mut buf[n_in_buf..]).await?;
@@ -72,10 +85,12 @@ where
{
encoding = Some(String::from_utf8(enc.value.to_vec())?);
}
/*
if let Some(clen) = response.headers.iter().find(|h| h.name == "Content-Length") {
let clen = std::str::from_utf8(clen.value)?;
length = Some(clen.parse()?);
}
*/
let n_parsed = res.unwrap();
n_in_buf -= n_parsed;
buf.copy_within(n_parsed.., 0);
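For context, a hedged sketch of how an application might call the newly documented `get_resource` is shown below. This is not code from the commit: the helper name `fetch_consensus_text` is hypothetical, and the trait bounds that `get_resource` requires on `TR` are elided rather than reproduced, since the `where` clause is not fully visible in this diff.

```rust
use std::sync::Arc;
use anyhow::Result;
use tor_circmgr::{CircMgr, DirInfo};
use tor_dirclient::request::ConsensusRequest;

// Sketch only: the same `where` bounds that `get_resource` places on `TR`
// would be needed here for this to compile.
async fn fetch_consensus_text<TR>(
    dirinfo: DirInfo<'_>,
    circ_mgr: Arc<CircMgr<TR>>,
) -> Result<String> {
    // Ask for the default (microdesc-flavored) consensus.
    let req = ConsensusRequest::new();
    // The returned String is the decompressed document body.
    tor_dirclient::get_resource(req, dirinfo, circ_mgr).await
}
```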

View File

@@ -1,3 +1,6 @@
//! Description objects for different kinds of directory requests
//! that we can make.
use tor_llcrypto::pk::rsa::RSAIdentity;
use tor_netdoc::doc::authcert::AuthCertKeyIds;
use tor_netdoc::doc::microdesc::MDDigest;
@@ -5,19 +8,39 @@ use tor_netdoc::doc::microdesc::MDDigest;
use anyhow::Result;
use std::time::SystemTime;
/// A request for an object that can be served over the Tor directory system.
pub trait ClientRequest {
/// Consume this ClientRequest and return an [`http::Request`] if
/// it is well-formed.
fn into_request(self) -> Result<http::Request<()>>;
// TODO: add a flag to say whether partial documents would be useful.
}
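As an illustration of what implementing this trait involves, here is a minimal hypothetical request type (not part of the commit); the struct name and the `/tor/example` path are made up for the example, and the module's existing imports are assumed to be in scope.

```rust
/// Hypothetical request type for illustration only.
struct ExampleRequest;

impl ClientRequest for ExampleRequest {
    fn into_request(self) -> Result<http::Request<()>> {
        // Build a plain GET request for a fixed (made-up) directory path.
        let req = http::Request::builder()
            .method("GET")
            .uri("/tor/example")
            .body(())?;
        Ok(req)
    }
}
```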
/// A Request for a consensus directory.
#[derive(Debug, Clone)]
pub struct ConsensusRequest {
/// What flavor of consensus are we asking for? Right now, only "microdesc"
/// is supported.
flavor: String,
/// A list of the authority identities that we believe in. We tell the
/// directory cache only to give us a consensus if it is signed by enough
/// of these authorities.
authority_ids: Vec<RSAIdentity>,
/// The publication time of the most recent consensus we have. Used to
/// generate an If-Modified-Since header so that we don't get a document
/// we already have.
// TODO Actually use this!
last_consensus_published: Option<SystemTime>,
/// A set of SHA3-256 digests of the _signed portion_ of consensuses we have.
/// Used to declare what diffs we would accept.
///
/// (Currently we don't send this, since we can't handle diffs.)
last_consensus_sha3_256: Vec<[u8; 32]>,
}
impl ConsensusRequest {
/// Create a new request for a consensus directory document.
pub fn new() -> Self {
ConsensusRequest {
flavor: "microdesc".to_string(),
@@ -27,14 +50,20 @@ impl ConsensusRequest {
}
}
/// Add `id` to the list of authorities that this request should
/// say we believe in.
pub fn push_authority_id(&mut self, id: RSAIdentity) {
self.authority_ids.push(id);
}
/// Add `d` to the list of consensus digests this request should
/// say we already have.
pub fn push_old_consensus_digest(&mut self, d: [u8; 32]) {
self.last_consensus_sha3_256.push(d);
}
/// Set the publication time we should say we have for our last
/// consensus to `when`.
pub fn set_last_consensus_date(&mut self, when: SystemTime) {
self.last_consensus_published = Some(when);
}
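A short usage sketch (not from the commit) of how these setters combine with the `into_request` implementation that follows; it is written as if inside this module, so `ConsensusRequest`, `ClientRequest`, `Result`, and `SystemTime` are assumed to be in scope, and the function name `example` is arbitrary.

```rust
fn example() -> Result<()> {
    let mut req = ConsensusRequest::new();
    // Pretend our newest consensus was published an hour ago, so the request
    // will carry an If-Modified-Since header.
    req.set_last_consensus_date(SystemTime::now() - std::time::Duration::from_secs(3600));
    let http_req = req.into_request()?;
    assert_eq!(http_req.method().as_str(), "GET");
    // The default flavor is "microdesc", so the URI starts with this prefix.
    assert!(http_req
        .uri()
        .path()
        .starts_with("/tor/status-vote/current/consensus-microdesc"));
    Ok(())
}
```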
@@ -48,6 +77,7 @@ impl Default for ConsensusRequest {
impl ClientRequest for ConsensusRequest {
fn into_request(mut self) -> Result<http::Request<()>> {
// Build the URL.
let mut uri = "/tor/status-vote/current/consensus".to_string();
if self.flavor != "ns" {
uri.push('-');
@@ -67,12 +97,16 @@ impl ClientRequest for ConsensusRequest {
let mut req = http::Request::builder().method("GET").uri(uri);
req = add_common_headers(req);
// Possibly, add an if-modified-since header.
if let Some(when) = self.last_consensus_published {
req = req.header(
http::header::IF_MODIFIED_SINCE,
httpdate::fmt_http_date(when),
);
}
// Possibly, add an X-Or-Diff-From-Consensus header.
if !self.last_consensus_sha3_256.is_empty() {
self.last_consensus_sha3_256.sort_unstable();
let digests: Vec<String> = self
@@ -87,15 +121,20 @@ impl ClientRequest for ConsensusRequest {
}
}
/// A request for one or more authority certificates.
#[derive(Debug, Clone)]
pub struct AuthCertRequest {
/// The identity/signing keys of the certificates we want.
ids: Vec<AuthCertKeyIds>,
}
impl AuthCertRequest {
/// Create a new request, asking for no authority certificates.
pub fn new() -> Self {
AuthCertRequest { ids: Vec::new() }
}
/// Add `ids` to the list of certificates we're asking for.
pub fn push(&mut self, ids: AuthCertKeyIds) {
self.ids.push(ids);
}
@@ -132,17 +171,21 @@ impl ClientRequest for AuthCertRequest {
}
}
/// A request for one or more microdescriptors
#[derive(Debug, Clone)]
pub struct MicrodescRequest {
/// The SHA256 digests of the microdescriptors we want.
digests: Vec<MDDigest>,
}
impl MicrodescRequest {
/// Construct a request for no microdescriptors.
pub fn new() -> Self {
MicrodescRequest {
digests: Vec::new(),
}
}
/// Add `d` to the list of microdescriptors we want to request.
pub fn push(&mut self, d: MDDigest) {
self.digests.push(d)
}
@@ -172,6 +215,9 @@ impl ClientRequest for MicrodescRequest {
}
}
/// Add commonly used headers to the HTTP request.
///
/// (Right now, this is only Accept-Encoding.)
fn add_common_headers(req: http::request::Builder) -> http::request::Builder {
// TODO: gzip, zstd, brotli, xz2
req.header(http::header::ACCEPT_ENCODING, "deflate, identity")
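As a quick illustration (not from the commit), the sketch below checks what the returned builder advertises: only the encodings the `decompress` module can currently handle. It assumes it sits in this module so `add_common_headers` is visible; the function name is arbitrary.

```rust
fn check_common_headers() -> anyhow::Result<()> {
    // Build a throwaway GET request and pass its builder through add_common_headers.
    let req = add_common_headers(http::Request::builder().method("GET").uri("/")).body(())?;
    let accept = req
        .headers()
        .get(http::header::ACCEPT_ENCODING)
        .expect("header was just added")
        .to_str()?;
    // "deflate" is zlib as handled by the miniz_oxide decompressor;
    // "identity" means no compression at all.
    assert_eq!(accept, "deflate, identity");
    Ok(())
}
```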