Require (and write) docs in tor-dirclient.
This commit is contained in:
parent
3d4f5f261e
commit
5d32d69011
|
@ -1,27 +1,53 @@
|
|||
//! Decompression support for Tor directory connections.
|
||||
//!
|
||||
//! There are different compression algorithms that can be used on the
|
||||
//! Tor network; right now only zlib and identity decompression are
|
||||
//! supported here.
|
||||
//!
|
||||
//! This provides a single streaming API for decompression; we may
|
||||
//! want others in the future.
|
||||
|
||||
use anyhow::Result;
|
||||
|
||||
/// Possible return conditions from a decompression operation.
///
/// Deriving `Copy`, `PartialEq`, and `Eq` lets callers compare an
/// outcome directly (`status == StatusKind::Done`) instead of having
/// to `matches!` on every variant; the enum is fieldless, so all
/// three derives are free and backward compatible.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub(crate) enum StatusKind {
    /// Some data was written.
    Written,
    /// We're out of space in the output buffer.
    OutOfSpace,
    /// We finished writing.
    Done,
}
|
||||
|
||||
/// Return value from [`Decompressor::process`]. It describes how much data
/// was transferred, and what the caller needs to do next.
#[derive(Debug, Clone)]
pub(crate) struct Status {
    /// The (successful) result of the decompression step.
    pub status: StatusKind,
    /// How many bytes were consumed from `inp`.
    pub consumed: usize,
    /// How many bytes were written into `out`.
    pub written: usize,
}
|
||||
|
||||
/// An implementation of a compression algorithm, including its state.
pub(crate) trait Decompressor {
    /// Decompress data from `inp` into `out`. If `finished` is true, no
    /// more data will be provided after the current contents of `inp`.
    ///
    /// Returns a [`Status`] describing how many bytes were consumed and
    /// written, and whether the caller needs more input, more output
    /// space, or is done.
    fn process(&mut self, inp: &[u8], out: &mut [u8], finished: bool) -> Result<Status>;
}
|
||||
|
||||
/// Implementation for the identity decompressor.
|
||||
///
|
||||
/// This does more copying than Rust best practices would prefer, but
|
||||
/// we should never actually use it in practice.
|
||||
pub(crate) mod identity {
|
||||
use super::{Decompressor, Status, StatusKind};
|
||||
use anyhow::Result;
|
||||
|
||||
/// An identity decompressor
|
||||
pub struct Identity;
|
||||
|
||||
impl Decompressor for Identity {
|
||||
|
@ -50,6 +76,9 @@ pub(crate) mod identity {
|
|||
}
|
||||
}
|
||||
|
||||
/// Implementation for the [`Decompressor`] trait on [`miniz_oxide::InflateState`].
|
||||
///
|
||||
/// This implements zlib compression as used in Tor.
|
||||
mod miniz_oxide {
|
||||
use super::{Decompressor, Status, StatusKind};
|
||||
|
||||
|
|
|
@ -1,5 +1,19 @@
|
|||
//! Implements a directory client for Tor.
|
||||
//!
|
||||
//! Tor makes directory requests as HTTP/1.0 requests tunneled over Tor circuits.
|
||||
//! For most objects, Tor uses a one-hop tunnel.
|
||||
//!
|
||||
//! # Limitations
|
||||
//!
|
||||
//! Multi-hop tunnels are not supported.
|
||||
//!
|
||||
//! Only zlib compression is supported.
|
||||
|
||||
// XXXX THIS CODE IS HORRIBLE AND NEEDS REFACTORING.
|
||||
|
||||
#![deny(missing_docs)]
|
||||
#![deny(clippy::missing_docs_in_private_items)]
|
||||
|
||||
mod decompress;
|
||||
pub mod request;
|
||||
mod util;
|
||||
|
@ -11,12 +25,13 @@ use tor_circmgr::{CircMgr, DirInfo};
|
|||
use anyhow::{anyhow, Result};
|
||||
use std::sync::Arc;
|
||||
|
||||
// XXXX Remove this.
|
||||
#[allow(unused_assignments)]
|
||||
|
||||
/// Fetch the resource described by `req` over the Tor network.
|
||||
///
|
||||
/// Circuits are built or found using `circ_mgr`, using paths
|
||||
/// constructed using `dirinfo`.
|
||||
pub async fn get_resource<CR, TR>(
|
||||
req: CR,
|
||||
netdir: DirInfo<'_>,
|
||||
dirinfo: DirInfo<'_>,
|
||||
circ_mgr: Arc<CircMgr<TR>>,
|
||||
) -> Result<String>
|
||||
where
|
||||
|
@ -26,7 +41,7 @@ where
|
|||
let req = req.into_request()?;
|
||||
let encoded = util::encode_request(req);
|
||||
|
||||
let circuit = circ_mgr.get_or_launch_dir(netdir).await?;
|
||||
let circuit = circ_mgr.get_or_launch_dir(dirinfo).await?;
|
||||
let mut stream = circuit.begin_dir_stream().await?;
|
||||
|
||||
stream.write_bytes(encoded.as_bytes()).await?;
|
||||
|
@ -36,8 +51,6 @@ where
|
|||
let mut buf = vec![0; 1024];
|
||||
let mut n_in_buf = 0;
|
||||
let mut encoding: Option<String> = None;
|
||||
#[allow(unused_variables)]
|
||||
let mut length: Option<usize> = None;
|
||||
|
||||
loop {
|
||||
let n = stream.read_bytes(&mut buf[n_in_buf..]).await?;
|
||||
|
@ -72,10 +85,12 @@ where
|
|||
{
|
||||
encoding = Some(String::from_utf8(enc.value.to_vec())?);
|
||||
}
|
||||
/*
|
||||
if let Some(clen) = response.headers.iter().find(|h| h.name == "Content-Length") {
|
||||
let clen = std::str::from_utf8(clen.value)?;
|
||||
length = Some(clen.parse()?);
|
||||
}
|
||||
*/
|
||||
let n_parsed = res.unwrap();
|
||||
n_in_buf -= n_parsed;
|
||||
buf.copy_within(n_parsed.., 0);
|
||||
|
|
|
@ -1,3 +1,6 @@
|
|||
//! Descriptions objects for different kinds of directory requests
|
||||
//! that we can make.
|
||||
|
||||
use tor_llcrypto::pk::rsa::RSAIdentity;
|
||||
use tor_netdoc::doc::authcert::AuthCertKeyIds;
|
||||
use tor_netdoc::doc::microdesc::MDDigest;
|
||||
|
@ -5,19 +8,39 @@ use tor_netdoc::doc::microdesc::MDDigest;
|
|||
use anyhow::Result;
|
||||
use std::time::SystemTime;
|
||||
|
||||
/// A request for an object that can be served over the Tor directory system.
pub trait ClientRequest {
    /// Consume this ClientRequest and return an [`http::Request`] if
    /// it is well-formed.
    fn into_request(self) -> Result<http::Request<()>>;

    // TODO: add a flag to say whether partial documents would be useful.
}
|
||||
|
||||
/// A Request for a consensus directory.
#[derive(Debug, Clone)]
pub struct ConsensusRequest {
    /// What flavor of consensus are we asking for? Right now, only "microdesc"
    /// is supported.
    flavor: String,
    /// A list of the authority identities that we believe in. We tell the
    /// directory cache only to give us a consensus if it is signed by enough
    /// of these authorities.
    authority_ids: Vec<RSAIdentity>,
    /// The publication time of the most recent consensus we have. Used to
    /// generate an If-Modified-Since header so that we don't get a document
    /// we already have.
    last_consensus_published: Option<SystemTime>,
    /// A set of SHA3-256 digests of the _signed portion_ of consensuses we have.
    /// Used to declare what diffs we would accept.
    ///
    /// (Currently we don't send this, since we can't handle diffs.)
    last_consensus_sha3_256: Vec<[u8; 32]>,
}
|
||||
|
||||
impl ConsensusRequest {
|
||||
/// Create a new request for a consensus directory document.
|
||||
pub fn new() -> Self {
|
||||
ConsensusRequest {
|
||||
flavor: "microdesc".to_string(),
|
||||
|
@ -27,14 +50,20 @@ impl ConsensusRequest {
|
|||
}
|
||||
}
|
||||
|
||||
/// Add `id` to the list of authorities that this request should
/// say we believe in.
pub fn push_authority_id(&mut self, id: RSAIdentity) {
    self.authority_ids.push(id);
}
|
||||
|
||||
/// Add `d` to the list of consensus digests this request should
/// say we already have.
pub fn push_old_consensus_digest(&mut self, d: [u8; 32]) {
    self.last_consensus_sha3_256.push(d);
}
|
||||
|
||||
/// Set the publication time we should say we have for our last
/// consensus to `when`.
pub fn set_last_consensus_date(&mut self, when: SystemTime) {
    self.last_consensus_published = Some(when);
}
|
||||
|
@ -48,6 +77,7 @@ impl Default for ConsensusRequest {
|
|||
|
||||
impl ClientRequest for ConsensusRequest {
|
||||
fn into_request(mut self) -> Result<http::Request<()>> {
|
||||
// Build the URL.
|
||||
let mut uri = "/tor/status-vote/current/consensus".to_string();
|
||||
if self.flavor != "ns" {
|
||||
uri.push('-');
|
||||
|
@ -67,12 +97,16 @@ impl ClientRequest for ConsensusRequest {
|
|||
|
||||
let mut req = http::Request::builder().method("GET").uri(uri);
|
||||
req = add_common_headers(req);
|
||||
|
||||
// Possibly, add an if-modified-since header.
|
||||
if let Some(when) = self.last_consensus_published {
|
||||
req = req.header(
|
||||
http::header::IF_MODIFIED_SINCE,
|
||||
httpdate::fmt_http_date(when),
|
||||
);
|
||||
}
|
||||
|
||||
// Possibly, add an X-Or-Diff-From-Consensus header.
|
||||
if !self.last_consensus_sha3_256.is_empty() {
|
||||
self.last_consensus_sha3_256.sort_unstable();
|
||||
let digests: Vec<String> = self
|
||||
|
@ -87,15 +121,20 @@ impl ClientRequest for ConsensusRequest {
|
|||
}
|
||||
}
|
||||
|
||||
/// A request for one or more authority certificates.
#[derive(Debug, Clone)]
pub struct AuthCertRequest {
    /// The identity/signing keys of the certificates we want.
    ids: Vec<AuthCertKeyIds>,
}
|
||||
|
||||
impl AuthCertRequest {
|
||||
/// Create a new requst, asking for no authority certificates.
|
||||
pub fn new() -> Self {
|
||||
AuthCertRequest { ids: Vec::new() }
|
||||
}
|
||||
|
||||
/// Add `ids` to the list of certificates we're asking for.
pub fn push(&mut self, ids: AuthCertKeyIds) {
    self.ids.push(ids);
}
|
||||
|
@ -132,17 +171,21 @@ impl ClientRequest for AuthCertRequest {
|
|||
}
|
||||
}
|
||||
|
||||
/// A request for one or more microdescriptors.
#[derive(Debug, Clone)]
pub struct MicrodescRequest {
    /// The SHA256 digests of the microdescriptors we want.
    digests: Vec<MDDigest>,
}
|
||||
|
||||
impl MicrodescRequest {
|
||||
/// Construct a request for no microdescriptors.
|
||||
pub fn new() -> Self {
|
||||
MicrodescRequest {
|
||||
digests: Vec::new(),
|
||||
}
|
||||
}
|
||||
/// Add `d` to the list of microdescriptors we want to request.
pub fn push(&mut self, d: MDDigest) {
    self.digests.push(d)
}
|
||||
|
@ -172,6 +215,9 @@ impl ClientRequest for MicrodescRequest {
|
|||
}
|
||||
}
|
||||
|
||||
/// Add commonly used headers to the HTTP request.
|
||||
///
|
||||
/// (Right now, this is only Accept-Encoding.)
|
||||
fn add_common_headers(req: http::request::Builder) -> http::request::Builder {
|
||||
// TODO: gzip, zstd, brotli, xz2
|
||||
req.header(http::header::ACCEPT_ENCODING, "deflate, identity")
|
||||
|
|
Loading…
Reference in New Issue