Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Refactoring fetch_bookmark to avoid extra queries #1306

Merged
merged 1 commit into from
Jun 26, 2019
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 3 additions & 3 deletions components/places/examples/places-utils.rs
Original file line number Diff line number Diff line change
Expand Up @@ -9,8 +9,8 @@ use cli_support::fxa_creds::{get_cli_fxa, get_default_fxa_config};
use places::bookmark_sync::store::BookmarksStore;
use places::history_sync::store::HistoryStore;
use places::storage::bookmarks::{
fetch_tree, insert_tree, BookmarkNode, BookmarkRootGuid, BookmarkTreeNode, FolderNode,
SeparatorNode,
fetch_tree, insert_tree, BookmarkNode, BookmarkRootGuid, BookmarkTreeNode, FetchDepth,
FolderNode, SeparatorNode,
};
use places::types::{BookmarkType, SyncGuid, Timestamp};
use places::{ConnectionType, PlacesApi, PlacesDb};
Expand Down Expand Up @@ -161,7 +161,7 @@ fn run_native_export(db: &PlacesDb, filename: String) -> Result<()> {
let file = File::create(filename)?;
let writer = BufWriter::new(file);

let tree = fetch_tree(db, &BookmarkRootGuid::Root.into())?.unwrap();
let tree = fetch_tree(db, &BookmarkRootGuid::Root.into(), &FetchDepth::Deepest)?.unwrap();
serde_json::to_writer_pretty(writer, &tree)?;
Ok(())
}
Expand Down
123 changes: 89 additions & 34 deletions components/places/src/storage/bookmarks.rs
Original file line number Diff line number Diff line change
Expand Up @@ -76,6 +76,11 @@ pub enum BookmarkPosition {
Append,
}

/// Controls how far below the starting node `fetch_tree` descends.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum FetchDepth {
    /// Fetch only this many levels of children below the starting node
    /// (`Specific(0)` returns the node and its immediate children only).
    Specific(usize),
    /// Fetch the entire subtree, regardless of depth.
    Deepest,
}

/// Helpers to deal with managing the position correctly.

/// Updates the position of existing items so that the insertion of a child in
Expand Down Expand Up @@ -1122,12 +1127,13 @@ fn inflate(
}
}

/// Fetch the tree starting at the specified folder guid.
/// Returns a `BookmarkTreeNode::Folder(_)`, its parent's guid (if any), and
/// Fetch the tree starting at the specified guid.
/// Returns a `BookmarkTreeNode`, its parent's guid (if any), and
/// position inside its parent.
pub fn fetch_tree(
db: &PlacesDb,
item_guid: &SyncGuid,
target_depth: &FetchDepth,
) -> Result<Option<(BookmarkTreeNode, Option<SyncGuid>, u32)>> {
// XXX - this needs additional work for tags - unlike desktop, there's no
// "tags" folder, but instead a couple of tables to join on.
Expand Down Expand Up @@ -1185,18 +1191,39 @@ pub fn fetch_tree(
let row = result?;
parent_guid = row.parent_guid.clone();
position = row.position;
FolderNode {
guid: Some(row.guid.clone()),
date_added: Some(row.date_added),
last_modified: Some(row.last_modified),
title: row.title.clone(),
children: Vec::new(),
match row.node_type {
BookmarkType::Folder => FolderNode {
guid: Some(row.guid.clone()),
date_added: Some(row.date_added),
last_modified: Some(row.last_modified),
title: row.title.clone(),
children: Vec::new(),
}
.into(),
BookmarkType::Bookmark => BookmarkNode {
guid: Some(row.guid.clone()),
date_added: Some(row.date_added),
last_modified: Some(row.last_modified),
title: row.title.clone(),
url: Url::parse(row.url.clone().unwrap().as_str())?,
}
.into(),
BookmarkType::Separator => SeparatorNode {
guid: Some(row.guid.clone()),
date_added: Some(row.date_added),
last_modified: Some(row.last_modified),
}
.into(),
}
.into()
}
None => return Ok(None),
};

// Skip the rest and return if root is not a folder
if let BookmarkTreeNode::Bookmark(_) | BookmarkTreeNode::Separator(_) = root {
return Ok(Some((root, parent_guid, position)));
}

scope.err_if_interrupted()?;
// For all remaining rows, build a pseudo-tree that maps parent GUIDs to
// ordered children. We need this intermediate step because SQLite returns
Expand All @@ -1206,6 +1233,12 @@ pub fn fetch_tree(
for result in results {
let row = result?;
scope.err_if_interrupted()?;
// Check whether we have finished fetching down to the requested depth.
if let FetchDepth::Specific(d) = *target_depth {
if row.level as usize > d + 1 {
break;
}
}
let node = match row.node_type {
BookmarkType::Bookmark => match &row.url {
Some(url_str) => match Url::parse(&url_str) {
Expand Down Expand Up @@ -1354,23 +1387,13 @@ fn get_raw_bookmarks_for_url(db: &PlacesDb, url: &Url) -> Result<Vec<RawBookmark
RawBookmark::from_row,
)?)
}
/// Returns the raw bookmark rows whose parent is `parent`, ordered by
/// their position within that parent.
fn get_raw_bookmarks_with_parent(db: &impl ConnExt, parent: RowId) -> Result<Vec<RawBookmark>> {
    let sql = format!(
        "{} WHERE b.parent = :parent ORDER BY b.position ASC",
        RAW_BOOKMARK_SQL
    );
    let rows = db.query_rows_into_cached(&sql, &[(":parent", &parent)], RawBookmark::from_row)?;
    Ok(rows)
}

#[cfg(test)]
mod tests {
use super::*;
use crate::api::places_api::test::new_mem_connection;
use crate::db::PlacesDb;
use crate::tests::{assert_json_tree, insert_json_tree};
use crate::tests::{assert_json_tree, assert_json_tree_with_depth, insert_json_tree};
use pretty_assertions::assert_eq;
use rusqlite::NO_PARAMS;
use serde_json::Value;
Expand Down Expand Up @@ -2078,7 +2101,8 @@ mod tests {
let conn = new_mem_connection();

// Fetch the root
let (t, _, _) = fetch_tree(&conn, &BookmarkRootGuid::Root.into())?.unwrap();
let (t, _, _) =
fetch_tree(&conn, &BookmarkRootGuid::Root.into(), &FetchDepth::Deepest)?.unwrap();
let f = match t {
BookmarkTreeNode::Folder(ref f) => f,
_ => panic!("tree root must be a folder"),
Expand All @@ -2089,7 +2113,7 @@ mod tests {
}

#[test]
fn test_insert_tree() -> Result<()> {
fn test_insert_tree_and_fetch_level() -> Result<()> {
let _ = env_logger::try_init();
let conn = new_mem_connection();

Expand Down Expand Up @@ -2140,8 +2164,45 @@ mod tests {
};
insert_tree(&conn, &tree)?;

// check it.
assert_json_tree(
let expected = json!({
"guid": &BookmarkRootGuid::Unfiled.as_guid(),
"children": [
{
"title": "the bookmark",
"url": "https://www.example.com/"
},
{
"title": "A folder",
"children": [
{
"title": "bookmark 1 in A folder",
"url": "https://www.example2.com/"
},
{
"title": "bookmark 2 in A folder",
"url": "https://www.example3.com/"
}
],
},
{
"title": "another bookmark",
"url": "https://www.example4.com/",
}
]
});
// check it with deepest fetching level.
assert_json_tree(&conn, &BookmarkRootGuid::Unfiled.into(), expected.clone());

// check it with one level deep, which should be the same as the previous
assert_json_tree_with_depth(
&conn,
&BookmarkRootGuid::Unfiled.into(),
expected.clone(),
&FetchDepth::Specific(1),
);

// check it with zero level deep, which should return root and its children only
assert_json_tree_with_depth(
&conn,
&BookmarkRootGuid::Unfiled.into(),
json!({
Expand All @@ -2153,24 +2214,18 @@ mod tests {
},
{
"title": "A folder",
"children": [
{
"title": "bookmark 1 in A folder",
"url": "https://www.example2.com/"
},
{
"title": "bookmark 2 in A folder",
"url": "https://www.example3.com/"
}
],
"children": [],
},
{
"title": "another bookmark",
"url": "https://www.example4.com/",
}
]
}),
&FetchDepth::Specific(0),
);

Ok(())
}

}
29 changes: 0 additions & 29 deletions components/places/src/storage/bookmarks/conversions.rs
Original file line number Diff line number Diff line change
Expand Up @@ -108,35 +108,6 @@ impl From<RawBookmark> for PublicNode {
}
}

impl PublicNode {
    /// Attaches child data to this node, consuming and returning it.
    ///
    /// For folders, callers should supply exactly one of `guids` (child
    /// GUIDs only) or `nodes` (fully populated child nodes). Non-folders
    /// should receive neither. These invariants are enforced with debug
    /// assertions only; release builds skip the checks.
    pub(crate) fn with_children(
        mut self,
        guids: Option<Vec<SyncGuid>>,
        nodes: Option<Vec<PublicNode>>,
    ) -> Self {
        if guids.is_some() || nodes.is_some() {
            // Children were provided, so this must be a folder.
            debug_assert_eq!(
                self.node_type,
                BookmarkType::Folder,
                "Trying to set children on non-folder"
            );
            // Bug fix: this previously asserted `guids.is_some() || nodes.is_some()`,
            // which is trivially true inside this branch. The message states the
            // real invariant — the two options are mutually exclusive.
            debug_assert!(
                guids.is_none() || nodes.is_none(),
                "Only one of guids or nodes should be provided for folders"
            );
        } else {
            // No children were provided, so this must not be a folder.
            debug_assert_ne!(
                self.node_type,
                BookmarkType::Folder,
                "Should provide children or guids to folder!"
            );
        }
        self.child_nodes = nodes;
        self.child_guids = guids;
        self
    }
}

impl From<Vec<PublicNode>> for msg_types::BookmarkNodeList {
fn from(ns: Vec<PublicNode>) -> Self {
Self {
Expand Down
Loading