From d7f2718a8bb6d4b6b11b628825fdf9be9ac38821 Mon Sep 17 00:00:00 2001
From: eta <eta@torproject.org>
Date: Wed, 20 Apr 2022 17:09:16 +0100
Subject: [PATCH] tor-dirmgr/lib.rs: move DirMgr::load_documents_into to
 DocQuery

Move the function out of DirMgr, giving it a new &Mutex<DynStore> argument
instead.
---
 crates/tor-dirmgr/src/bootstrap.rs |  2 +-
 crates/tor-dirmgr/src/docid.rs     | 55 ++++++++++++++++++++++++++++
 crates/tor-dirmgr/src/lib.rs       | 58 ++----------------------------
 3 files changed, 59 insertions(+), 56 deletions(-)

diff --git a/crates/tor-dirmgr/src/bootstrap.rs b/crates/tor-dirmgr/src/bootstrap.rs
index 246a611f5..683c8f32b 100644
--- a/crates/tor-dirmgr/src/bootstrap.rs
+++ b/crates/tor-dirmgr/src/bootstrap.rs
@@ -33,7 +33,7 @@ fn load_all<R: Runtime>(
 ) -> Result<HashMap<DocId, DocumentText>> {
     let mut loaded = HashMap::new();
     for query in docid::partition_by_type(missing.into_iter()).values() {
-        dirmgr.load_documents_into(query, &mut loaded)?;
+        query.load_documents_into(&mut loaded, &dirmgr.store)?;
     }
     Ok(loaded)
 }
diff --git a/crates/tor-dirmgr/src/docid.rs b/crates/tor-dirmgr/src/docid.rs
index c2fb4e172..cf14ee8b6 100644
--- a/crates/tor-dirmgr/src/docid.rs
+++ b/crates/tor-dirmgr/src/docid.rs
@@ -1,8 +1,11 @@
 //! Declare a general purpose "document ID type" for tracking which
 //! documents we want and which we have.
 
+use std::sync::Mutex;
 use std::{borrow::Borrow, collections::HashMap};
 
+use tracing::trace;
+use crate::{DocumentText, DynStore};
 use tor_dirclient::request;
 #[cfg(feature = "routerdesc")]
 use tor_netdoc::doc::routerdesc::RdDigest;
@@ -195,6 +198,58 @@ impl DocQuery {
             }
         }
     }
+
+    /// Load all the documents for a single DocumentQuery from the store.
+    pub(crate) fn load_documents_into(
+        &self,
+        result: &mut HashMap<DocId, DocumentText>,
+        store: &Mutex<DynStore>,
+    ) -> crate::Result<()> {
+        use DocQuery::*;
+        let store = store.lock().expect("Directory storage lock poisoned");
+        match self {
+            LatestConsensus {
+                flavor,
+                cache_usage,
+            } => {
+                if *cache_usage == CacheUsage::MustDownload {
+                    // Do nothing: we don't want a cached consensus.
+                    trace!("MustDownload is set; not checking for cached consensus.");
+                } else if let Some(c) =
+                    store.latest_consensus(*flavor, cache_usage.pending_requirement())?
+                {
+                    trace!("Found a reasonable consensus in the cache");
+                    let id = DocId::LatestConsensus {
+                        flavor: *flavor,
+                        cache_usage: *cache_usage,
+                    };
+                    result.insert(id, c.into());
+                }
+            }
+            AuthCert(ids) => result.extend(
+                store
+                    .authcerts(ids)?
+                    .into_iter()
+                    .map(|(id, c)| (DocId::AuthCert(id), DocumentText::from_string(c))),
+            ),
+            Microdesc(digests) => {
+                result.extend(
+                    store
+                        .microdescs(digests)?
+                        .into_iter()
+                        .map(|(id, md)| (DocId::Microdesc(id), DocumentText::from_string(md))),
+                );
+            }
+            #[cfg(feature = "routerdesc")]
+            RouterDesc(digests) => result.extend(
+                store
+                    .routerdescs(digests)?
+                    .into_iter()
+                    .map(|(id, rd)| (DocId::RouterDesc(id), DocumentText::from_string(rd))),
+            ),
+        }
+        Ok(())
+    }
 }
 
 impl From<DocId> for DocQuery {
diff --git a/crates/tor-dirmgr/src/lib.rs b/crates/tor-dirmgr/src/lib.rs
index 78e8e9432..d9c2b04b3 100644
--- a/crates/tor-dirmgr/src/lib.rs
+++ b/crates/tor-dirmgr/src/lib.rs
@@ -762,8 +762,8 @@ impl<R: Runtime> DirMgr<R> {
     pub fn text(&self, doc: &DocId) -> Result<Option<DocumentText>> {
         use itertools::Itertools;
         let mut result = HashMap::new();
-        let query = (*doc).into();
-        self.load_documents_into(&query, &mut result)?;
+        let query: DocQuery = (*doc).into();
+        query.load_documents_into(&mut result, &self.store)?;
         let item = result.into_iter().at_most_one().map_err(|_| {
             Error::CacheCorruption("Found more than one entry in storage for given docid")
         })?;
@@ -790,63 +790,11 @@ impl<R: Runtime> DirMgr<R> {
         let partitioned = docid::partition_by_type(docs);
         let mut result = HashMap::new();
         for (_, query) in partitioned.into_iter() {
-            self.load_documents_into(&query, &mut result)?;
+            query.load_documents_into(&mut result, &self.store)?;
         }
         Ok(result)
     }
 
-    /// Load all the documents for a single DocumentQuery from the store.
-    fn load_documents_into(
-        &self,
-        query: &DocQuery,
-        result: &mut HashMap<DocId, DocumentText>,
-    ) -> Result<()> {
-        use DocQuery::*;
-        let store = self.store.lock().expect("Directory storage lock poisoned");
-        match query {
-            LatestConsensus {
-                flavor,
-                cache_usage,
-            } => {
-                if *cache_usage == CacheUsage::MustDownload {
-                    // Do nothing: we don't want a cached consensus.
-                    trace!("MustDownload is set; not checking for cached consensus.");
-                } else if let Some(c) =
-                    store.latest_consensus(*flavor, cache_usage.pending_requirement())?
-                {
-                    trace!("Found a reasonable consensus in the cache");
-                    let id = DocId::LatestConsensus {
-                        flavor: *flavor,
-                        cache_usage: *cache_usage,
-                    };
-                    result.insert(id, c.into());
-                }
-            }
-            AuthCert(ids) => result.extend(
-                store
-                    .authcerts(ids)?
-                    .into_iter()
-                    .map(|(id, c)| (DocId::AuthCert(id), DocumentText::from_string(c))),
-            ),
-            Microdesc(digests) => {
-                result.extend(
-                    store
-                        .microdescs(digests)?
-                        .into_iter()
-                        .map(|(id, md)| (DocId::Microdesc(id), DocumentText::from_string(md))),
-                );
-            }
-            #[cfg(feature = "routerdesc")]
-            RouterDesc(digests) => result.extend(
-                store
-                    .routerdescs(digests)?
-                    .into_iter()
-                    .map(|(id, rd)| (DocId::RouterDesc(id), DocumentText::from_string(rd))),
-            ),
-        }
-        Ok(())
-    }
-
     /// Convert a DocQuery into a set of ClientRequests, suitable for sending
     /// to a directory cache.
    ///