tor-dirmgr/lib.rs: move DirMgr::load_documents_into to DocQuery

Move the function out of DirMgr, giving it a new &Mutex<DynStore>
argument instead.
This commit is contained in:
eta 2022-04-20 17:09:16 +01:00
parent b4f4b18a90
commit d7f2718a8b
3 changed files with 59 additions and 56 deletions

View File

@ -33,7 +33,7 @@ fn load_all<R: Runtime>(
) -> Result<HashMap<DocId, DocumentText>> {
let mut loaded = HashMap::new();
for query in docid::partition_by_type(missing.into_iter()).values() {
dirmgr.load_documents_into(query, &mut loaded)?;
query.load_documents_into(&mut loaded, &dirmgr.store)?;
}
Ok(loaded)
}

View File

@ -1,8 +1,11 @@
//! Declare a general purpose "document ID type" for tracking which
//! documents we want and which we have.
use std::sync::Mutex;
use std::{borrow::Borrow, collections::HashMap};
use tracing::trace;
use crate::{DocumentText, DynStore};
use tor_dirclient::request;
#[cfg(feature = "routerdesc")]
use tor_netdoc::doc::routerdesc::RdDigest;
@ -195,6 +198,58 @@ impl DocQuery {
}
}
}
/// Load all the documents for a single DocumentQuery from the store.
///
/// Every document described by `self` that is present in `store` is
/// inserted into `result`, keyed by its `DocId`. Documents not found in
/// the cache are silently skipped, so callers can compare `result`
/// against the requested set to determine what is still missing.
///
/// Returns an error if the underlying store reports one.
pub(crate) fn load_documents_into(
&self,
result: &mut HashMap<DocId, DocumentText>,
store: &Mutex<DynStore>,
) -> crate::Result<()> {
use DocQuery::*;
// A poisoned lock means another thread panicked while holding the
// storage mutex; we treat that as an unrecoverable bug.
let store = store.lock().expect("Directory storage lock poisoned");
match self {
LatestConsensus {
flavor,
cache_usage,
} => {
if *cache_usage == CacheUsage::MustDownload {
// Do nothing: we don't want a cached consensus.
trace!("MustDownload is set; not checking for cached consensus.");
} else if let Some(c) =
store.latest_consensus(*flavor, cache_usage.pending_requirement())?
{
trace!("Found a reasonable consensus in the cache");
// Re-derive the DocId so the inserted entry matches the
// query that produced it.
let id = DocId::LatestConsensus {
flavor: *flavor,
cache_usage: *cache_usage,
};
result.insert(id, c.into());
}
}
// Authority certificates: one (DocId::AuthCert, text) entry per
// certificate found for the requested ids.
AuthCert(ids) => result.extend(
store
.authcerts(ids)?
.into_iter()
.map(|(id, c)| (DocId::AuthCert(id), DocumentText::from_string(c))),
),
// Microdescriptors, looked up by digest.
Microdesc(digests) => {
result.extend(
store
.microdescs(digests)?
.into_iter()
.map(|(id, md)| (DocId::Microdesc(id), DocumentText::from_string(md))),
);
}
// Router descriptors exist only when the "routerdesc" feature is
// compiled in, matching the cfg on the DocQuery variant.
#[cfg(feature = "routerdesc")]
RouterDesc(digests) => result.extend(
store
.routerdescs(digests)?
.into_iter()
.map(|(id, rd)| (DocId::RouterDesc(id), DocumentText::from_string(rd))),
),
}
Ok(())
}
}
impl From<DocId> for DocQuery {

View File

@ -762,8 +762,8 @@ impl<R: Runtime> DirMgr<R> {
pub fn text(&self, doc: &DocId) -> Result<Option<DocumentText>> {
use itertools::Itertools;
let mut result = HashMap::new();
let query = (*doc).into();
self.load_documents_into(&query, &mut result)?;
let query: DocQuery = (*doc).into();
query.load_documents_into(&mut result, &self.store)?;
let item = result.into_iter().at_most_one().map_err(|_| {
Error::CacheCorruption("Found more than one entry in storage for given docid")
})?;
@ -790,63 +790,11 @@ impl<R: Runtime> DirMgr<R> {
let partitioned = docid::partition_by_type(docs);
let mut result = HashMap::new();
for (_, query) in partitioned.into_iter() {
self.load_documents_into(&query, &mut result)?;
query.load_documents_into(&mut result, &self.store)?;
}
Ok(result)
}
/// Load all the documents for a single DocumentQuery from the store.
///
/// Fills `result` with every document described by `query` that is
/// present in this DirMgr's store, keyed by `DocId`. Documents not
/// found in the cache are skipped rather than reported as errors.
///
/// Returns an error if the underlying store reports one.
fn load_documents_into(
&self,
query: &DocQuery,
result: &mut HashMap<DocId, DocumentText>,
) -> Result<()> {
use DocQuery::*;
// A poisoned lock means another thread panicked while holding the
// storage mutex; we treat that as an unrecoverable bug.
let store = self.store.lock().expect("Directory storage lock poisoned");
match query {
LatestConsensus {
flavor,
cache_usage,
} => {
if *cache_usage == CacheUsage::MustDownload {
// Do nothing: we don't want a cached consensus.
trace!("MustDownload is set; not checking for cached consensus.");
} else if let Some(c) =
store.latest_consensus(*flavor, cache_usage.pending_requirement())?
{
trace!("Found a reasonable consensus in the cache");
// Re-derive the DocId so the inserted entry matches the
// query that produced it.
let id = DocId::LatestConsensus {
flavor: *flavor,
cache_usage: *cache_usage,
};
result.insert(id, c.into());
}
}
// Authority certificates: one (DocId::AuthCert, text) entry per
// certificate found for the requested ids.
AuthCert(ids) => result.extend(
store
.authcerts(ids)?
.into_iter()
.map(|(id, c)| (DocId::AuthCert(id), DocumentText::from_string(c))),
),
// Microdescriptors, looked up by digest.
Microdesc(digests) => {
result.extend(
store
.microdescs(digests)?
.into_iter()
.map(|(id, md)| (DocId::Microdesc(id), DocumentText::from_string(md))),
);
}
// Router descriptors exist only when the "routerdesc" feature is
// compiled in, matching the cfg on the DocQuery variant.
#[cfg(feature = "routerdesc")]
RouterDesc(digests) => result.extend(
store
.routerdescs(digests)?
.into_iter()
.map(|(id, rd)| (DocId::RouterDesc(id), DocumentText::from_string(rd))),
),
}
Ok(())
}
/// Convert a DocQuery into a set of ClientRequests, suitable for sending
/// to a directory cache.
///