Skip to content

Commit

Permalink
collect /find Info for tree and plain types consistently
Browse files Browse the repository at this point in the history
MakeTree calculated Info locally for historical reasons; the results matched
the dataService.Info call but were computed by a different code path.

This change fixes that inconsistency, ensuring that Info is requested
in the same manner for both the tree and plain formats.
  • Loading branch information
paskal committed Nov 12, 2023
1 parent cd481d4 commit 7e5fdf0
Show file tree
Hide file tree
Showing 9 changed files with 236 additions and 80 deletions.
11 changes: 11 additions & 0 deletions backend/app/rest/api/admin_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -513,6 +513,7 @@ func TestAdmin_ReadOnlyNoComments(t *testing.T) {
_, err = srv.DataService.Info(store.Locator{SiteID: "remark42", URL: "https://radio-t.com/blah"}, 0)
assert.Error(t, err)

// test format "tree"
res, code := get(t, ts.URL+"/api/v1/find?site=remark42&url=https://radio-t.com/blah&format=tree")
assert.Equal(t, http.StatusOK, code)
comments := commentsWithInfo{}
Expand All @@ -521,6 +522,16 @@ func TestAdmin_ReadOnlyNoComments(t *testing.T) {
assert.Equal(t, 0, len(comments.Comments), "should have 0 comments")
assert.True(t, comments.Info.ReadOnly)
t.Logf("%+v", comments)

// test format "plain"
res, code = get(t, ts.URL+"/api/v1/find?site=remark42&url=https://radio-t.com/blah")
assert.Equal(t, http.StatusOK, code)
comments = commentsWithInfo{}
err = json.Unmarshal([]byte(res), &comments)
assert.NoError(t, err)
assert.Equal(t, 0, len(comments.Comments), "should have 0 comments")
assert.True(t, comments.Info.ReadOnly)
t.Logf("%+v", comments)
}

func TestAdmin_ReadOnlyWithAge(t *testing.T) {
Expand Down
5 changes: 5 additions & 0 deletions backend/app/rest/api/rest.go
Original file line number Diff line number Diff line change
Expand Up @@ -97,6 +97,11 @@ type commentsWithInfo struct {
Info store.PostInfo `json:"info,omitempty"`
}

// treeWithInfo wraps the comments tree with post Info, so that the "tree" format
// response carries the same info payload as the "plain" format (commentsWithInfo).
type treeWithInfo struct {
	*service.Tree
	Info store.PostInfo `json:"info,omitempty"` // post information, omitted when empty
}

// Run the lister and request's router, activate rest server
func (s *Rest) Run(address string, port int) {
if address == "*" {
Expand Down
38 changes: 27 additions & 11 deletions backend/app/rest/api/rest_public.go
Original file line number Diff line number Diff line change
Expand Up @@ -50,6 +50,8 @@ type pubStore interface {

// GET /find?site=siteID&url=post-url&format=[tree|plain]&sort=[+/-time|+/-score|+/-controversy]&view=[user|all]&since=unix_ts_msec
// find comments for given post. Returns in tree or plain formats, sorted
//
// When `url` parameter is not set (e.g. request is for site-wide comments), does not return deleted comments.
func (s *public) findCommentsCtrl(w http.ResponseWriter, r *http.Request) {
locator := store.Locator{SiteID: r.URL.Query().Get("site"), URL: r.URL.Query().Get("url")}
sort := r.URL.Query().Get("sort")
Expand Down Expand Up @@ -77,22 +79,36 @@ func (s *public) findCommentsCtrl(w http.ResponseWriter, r *http.Request) {
comments = []store.Comment{} // error should clear comments and continue for post info
}
comments = s.applyView(comments, view)

var commentsInfo store.PostInfo
if info, ee := s.dataService.Info(locator, s.readOnlyAge); ee == nil {
commentsInfo = info
}

if !since.IsZero() { // if since is set, number of comments can be different from total in the DB
commentsInfo.Count = 0
for _, c := range comments {
if !c.Deleted {
commentsInfo.Count++
}
}
}

// post might be readonly without any comments, Info call will fail then and ReadOnly flag should be checked separately
if !commentsInfo.ReadOnly && locator.URL != "" && s.dataService.IsReadOnly(locator) {
commentsInfo.ReadOnly = true
}

var b []byte
switch format {
case "tree":
tree := service.MakeTree(comments, sort, s.readOnlyAge)
if tree.Nodes == nil { // eliminate json nil serialization
tree.Nodes = []*service.Node{}
}
if s.dataService.IsReadOnly(locator) {
tree.Info.ReadOnly = true
withInfo := treeWithInfo{Tree: service.MakeTree(comments, sort), Info: commentsInfo}
if withInfo.Nodes == nil { // eliminate json nil serialization
withInfo.Nodes = []*service.Node{}
}
b, e = encodeJSONWithHTML(tree)
b, e = encodeJSONWithHTML(withInfo)
default:
withInfo := commentsWithInfo{Comments: comments}
if info, ee := s.dataService.Info(locator, s.readOnlyAge); ee == nil {
withInfo.Info = info
}
withInfo := commentsWithInfo{Comments: comments, Info: commentsInfo}
b, e = encodeJSONWithHTML(withInfo)
}
return b, e
Expand Down
162 changes: 158 additions & 4 deletions backend/app/rest/api/rest_public_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@ import (
"net/http"
"net/http/httptest"
"os"
"strconv"
"strings"
"testing"
"time"
Expand Down Expand Up @@ -210,7 +211,7 @@ func TestRest_Find(t *testing.T) {
assert.Equal(t, id2, comments.Comments[0].ID)

// get in tree mode
tree := service.Tree{}
tree := treeWithInfo{}
res, code = get(t, ts.URL+"/api/v1/find?site=remark42&url=https://radio-t.com/blah1&format=tree")
assert.Equal(t, http.StatusOK, code)
err = json.Unmarshal([]byte(res), &tree)
Expand All @@ -236,7 +237,7 @@ func TestRest_FindAge(t *testing.T) {
_, err = srv.DataService.Create(c2)
require.NoError(t, err)

tree := service.Tree{}
tree := treeWithInfo{}

res, code := get(t, ts.URL+"/api/v1/find?site=remark42&url=https://radio-t.com/blah1&format=tree")
assert.Equal(t, http.StatusOK, code)
Expand Down Expand Up @@ -279,15 +280,15 @@ func TestRest_FindReadOnly(t *testing.T) {
require.NoError(t, err)
require.NoError(t, resp.Body.Close())

tree := service.Tree{}
tree := treeWithInfo{}
res, code := get(t, ts.URL+"/api/v1/find?site=remark42&url=https://radio-t.com/blah1&format=tree")
assert.Equal(t, http.StatusOK, code)
err = json.Unmarshal([]byte(res), &tree)
require.NoError(t, err)
assert.Equal(t, "https://radio-t.com/blah1", tree.Info.URL)
assert.True(t, tree.Info.ReadOnly, "post is ro")

tree = service.Tree{}
tree = treeWithInfo{}
res, code = get(t, ts.URL+"/api/v1/find?site=remark42&url=https://radio-t.com/blah2&format=tree")
assert.Equal(t, http.StatusOK, code)
err = json.Unmarshal([]byte(res), &tree)
Expand Down Expand Up @@ -515,6 +516,159 @@ func TestRest_FindUserComments_CWE_918(t *testing.T) {
assert.Equal(t, arbitraryServer.URL, resp.Comments[0].Locator.URL, "arbitrary URL provided by the request")
}

// TestPublic_FindCommentsCtrl_ConsistentCount verifies that the comment count reported
// in Info is consistent between tree and plain formats, with and without the
// url, sort and since query parameters.
func TestPublic_FindCommentsCtrl_ConsistentCount(t *testing.T) {
	ts, srv, teardown := startupT(t)
	defer teardown()

	commentLocator := store.Locator{URL: "test-url", SiteID: "remark42"}

	// setScore votes for a comment abs(val) times, positive or negative depending on the sign of val
	setScore := func(locator store.Locator, id string, val int) {
		abs := func(x int) int {
			if x < 0 {
				return -x
			}
			return x
		}
		for i := 0; i < abs(val); i++ {
			_, err := srv.DataService.Vote(service.VoteReq{
				Locator:   locator,
				CommentID: id,
				// unique user ID is needed for correct counting of controversial votes
				UserID: "user" + strconv.Itoa(val) + strconv.Itoa(i),
				Val:    val > 0,
			})
			require.NoError(t, err)
		}
	}

	// Add initial comments (8 to test-url and 1 to another-url) with votes, two of the
	// test-url comments are deleted afterwards.
	// Sleep between additions so that at least a few milliseconds pass between each comment,
	// which later allows using the "since" filter with millisecond precision.
	ids := make([]string, 9)
	timestamps := make([]time.Time, 9)
	c1 := store.Comment{Text: "top-level comment 1", Locator: commentLocator}
	ids[0], timestamps[0] = addCommentGetCreatedTime(t, c1, ts)
	// #3 by score
	setScore(commentLocator, ids[0], 1)
	time.Sleep(time.Millisecond * 5)

	c2 := store.Comment{Text: "top-level comment 2", Locator: commentLocator}
	ids[1], timestamps[1] = addCommentGetCreatedTime(t, c2, ts)
	// #2 by score
	setScore(commentLocator, ids[1], 2)
	time.Sleep(time.Millisecond * 5)

	c3 := store.Comment{Text: "second-level comment 1", ParentID: ids[0], Locator: commentLocator}
	ids[2], timestamps[2] = addCommentGetCreatedTime(t, c3, ts)
	// #1 by score
	setScore(commentLocator, ids[2], 10)
	time.Sleep(time.Millisecond * 5)

	c4 := store.Comment{Text: "third-level comment 1", ParentID: ids[2], Locator: commentLocator}
	ids[3], timestamps[3] = addCommentGetCreatedTime(t, c4, ts)
	// #5 by score, #1 by controversy
	setScore(commentLocator, ids[3], 4)
	setScore(commentLocator, ids[3], -4)
	time.Sleep(time.Millisecond * 5)

	c5 := store.Comment{Text: "second-level comment 2", ParentID: ids[1], Locator: commentLocator}
	ids[4], timestamps[4] = addCommentGetCreatedTime(t, c5, ts)
	// #5 by score, #2 by controversy
	setScore(commentLocator, ids[4], 2)
	setScore(commentLocator, ids[4], -3)
	time.Sleep(time.Millisecond * 5)

	c6 := store.Comment{Text: "third-level comment 2", ParentID: ids[4], Locator: commentLocator}
	ids[5], timestamps[5] = addCommentGetCreatedTime(t, c6, ts)
	// deleted later so not visible in site-wide requests
	setScore(commentLocator, ids[5], 10)
	setScore(commentLocator, ids[5], -10)
	time.Sleep(time.Millisecond * 5)

	c7 := store.Comment{Text: "top-level comment 3", Locator: commentLocator}
	ids[6], timestamps[6] = addCommentGetCreatedTime(t, c7, ts)
	// #6 by score, #4 by controversy
	setScore(commentLocator, ids[6], -3)
	setScore(commentLocator, ids[6], 1)
	time.Sleep(time.Millisecond * 5)

	c8 := store.Comment{Text: "second-level comment 3", ParentID: ids[6], Locator: commentLocator}
	ids[7], timestamps[7] = addCommentGetCreatedTime(t, c8, ts)
	// deleted later so not visible in site-wide requests
	setScore(commentLocator, ids[7], -20)

	c9 := store.Comment{Text: "comment to post 2", Locator: store.Locator{URL: "another-url", SiteID: "remark42"}}
	ids[8], timestamps[8] = addCommentGetCreatedTime(t, c9, ts)
	// #7 by score
	setScore(store.Locator{URL: "another-url", SiteID: "remark42"}, ids[8], -25)

	// delete two of the test-url comments, bringing its count from 8 to 6 (site-wide from 9 to 7)
	err := srv.DataService.Delete(commentLocator, ids[7], store.SoftDelete)
	assert.NoError(t, err)
	err = srv.DataService.Delete(commentLocator, ids[5], store.HardDelete)
	assert.NoError(t, err)
	srv.Cache.Flush(cache.FlusherRequest{})

	// UnixMilli (Go 1.17+) replaces the manual UnixNano()/1000000 conversion
	sinceTenSecondsAgo := strconv.FormatInt(time.Now().Add(-10*time.Second).UnixMilli(), 10)
	sinceTS := make([]string, 9)
	formattedTS := make([]string, 9)
	for i, created := range timestamps {
		sinceTS[i] = strconv.FormatInt(created.UnixMilli(), 10)
		formattedTS[i] = created.Format(time.RFC3339Nano)
	}
	t.Logf("last timestamp: %v", timestamps[7])

	testCases := []struct {
		params       string
		expectedBody string
	}{
		{"", fmt.Sprintf(`"info":{"count":7,"first_time":%q,"last_time":%q}`, formattedTS[0], formattedTS[8])},
		{"url=test-url", fmt.Sprintf(`"info":{"url":"test-url","count":6,"first_time":%q,"last_time":%q}`, formattedTS[0], formattedTS[7])},
		{"format=plain", fmt.Sprintf(`"info":{"count":7,"first_time":%q,"last_time":%q}`, formattedTS[0], formattedTS[8])},
		{"format=plain&url=test-url", fmt.Sprintf(`"info":{"url":"test-url","count":6,"first_time":%q,"last_time":%q}`, formattedTS[0], formattedTS[7])},
		{"since=" + sinceTenSecondsAgo, fmt.Sprintf(`"info":{"count":7,"first_time":%q,"last_time":%q}`, formattedTS[0], formattedTS[8])},
		{"url=test-url&since=" + sinceTenSecondsAgo, fmt.Sprintf(`"info":{"url":"test-url","count":6,"first_time":%q,"last_time":%q}`, formattedTS[0], formattedTS[7])},
		{"since=" + sinceTS[0], fmt.Sprintf(`"info":{"count":7,"first_time":%q,"last_time":%q}`, formattedTS[0], formattedTS[8])},
		{"url=test-url&since=" + sinceTS[0], fmt.Sprintf(`"info":{"url":"test-url","count":6,"first_time":%q,"last_time":%q}`, formattedTS[0], formattedTS[7])},
		{"since=" + sinceTS[1], fmt.Sprintf(`"info":{"count":6,"first_time":%q,"last_time":%q}`, formattedTS[0], formattedTS[8])},
		{"url=test-url&since=" + sinceTS[1], fmt.Sprintf(`"info":{"url":"test-url","count":5,"first_time":%q,"last_time":%q}`, formattedTS[0], formattedTS[7])},
		{"since=" + sinceTS[4], fmt.Sprintf(`"info":{"count":3,"first_time":%q,"last_time":%q}`, formattedTS[0], formattedTS[8])},
		{"url=test-url&since=" + sinceTS[4], fmt.Sprintf(`"info":{"url":"test-url","count":2,"first_time":%q,"last_time":%q}`, formattedTS[0], formattedTS[7])},
		{"format=tree", `"info":{"count":7`},
		{"format=tree&url=test-url", `"info":{"url":"test-url","count":6`},
		{"format=tree&sort=+time", `"info":{"count":7`},
		{"format=tree&url=test-url&sort=+time", `"info":{"url":"test-url","count":6`},
		{"format=tree&sort=-score", `"info":{"count":7`},
		{"format=tree&url=test-url&sort=-score", `"info":{"url":"test-url","count":6`},
		{"sort=+time", fmt.Sprintf(`"score":-25,"vote":0,"time":%q}],"info":{"count":7`, formattedTS[8])},
		{"sort=-time", fmt.Sprintf(`"score":1,"vote":0,"time":%q}],"info":{"count":7`, formattedTS[0])},
		{"sort=+score", fmt.Sprintf(`"score":10,"vote":0,"time":%q}],"info":{"count":7`, formattedTS[2])},
		{"sort=+score&url=test-url", fmt.Sprintf(`"score":10,"vote":0,"time":%q}],"info":{"url":"test-url","count":6`, formattedTS[2])},
		{"sort=-score", fmt.Sprintf(`"score":-25,"vote":0,"time":%q}],"info":{"count":7`, formattedTS[8])},
		{"sort=-score&url=test-url", fmt.Sprintf(`"score":-2,"vote":0,"controversy":1.5874010519681994,"time":%q}],"info":{"url":"test-url","count":6`, formattedTS[6])},
		{"sort=-time&since=" + sinceTS[4], fmt.Sprintf(`"score":-1,"vote":0,"controversy":2.924017738212866,"time":%q}],"info":{"count":3`, formattedTS[4])},
		{"sort=-score&since=" + sinceTS[3], fmt.Sprintf(`"score":-25,"vote":0,"time":%q}],"info":{"count":4`, formattedTS[8])},
		{"sort=-score&url=test-url&since=" + sinceTS[3], fmt.Sprintf(`"score":-2,"vote":0,"controversy":1.5874010519681994,"time":%q}],"info":{"url":"test-url","count":3`, formattedTS[6])},
		{"sort=+controversy&url=test-url&since=" + sinceTS[5], fmt.Sprintf(`"score":-2,"vote":0,"controversy":1.5874010519681994,"time":%q}],"info":{"url":"test-url","count":1`, formattedTS[6])},
		// three comments of which the last one is deleted and has no controversy, so it is returned last
		{"sort=-controversy&url=test-url&since=" + sinceTS[5], fmt.Sprintf(`"score":0,"vote":0,"time":%q,"delete":true}],"info":{"url":"test-url","count":1`, formattedTS[7])},
	}

	for _, tc := range testCases {
		t.Run(tc.params, func(t *testing.T) {
			// reqURL instead of url to avoid shadowing the net/url package name
			reqURL := fmt.Sprintf(ts.URL+"/api/v1/find?site=remark42&%s", tc.params)
			body, code := get(t, reqURL)
			assert.Equal(t, http.StatusOK, code)
			assert.Contains(t, body, tc.expectedBody)
			t.Log(body)
			// prevent hit limiter from engaging
			time.Sleep(50 * time.Millisecond)
		})
	}
}

func TestRest_UserInfo(t *testing.T) {
ts, _, teardown := startupT(t)
defer teardown()
Expand Down
11 changes: 9 additions & 2 deletions backend/app/rest/api/rest_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -599,7 +599,7 @@ func post(t *testing.T, url, body string) (*http.Response, error) {
return client.Do(req)
}

func addComment(t *testing.T, c store.Comment, ts *httptest.Server) string {
func addCommentGetCreatedTime(t *testing.T, c store.Comment, ts *httptest.Server) (id string, created time.Time) {
b, err := json.Marshal(c)
require.NoError(t, err, "can't marshal comment %+v", c)

Expand All @@ -619,7 +619,14 @@ func addComment(t *testing.T, c store.Comment, ts *httptest.Server) string {
err = json.Unmarshal(b, &crResp)
require.NoError(t, err)
time.Sleep(time.Nanosecond * 10)
return crResp["id"].(string)
created, err = time.Parse(time.RFC3339, crResp["time"].(string))
require.NoError(t, err)
return crResp["id"].(string), created
}

// addComment posts comment c to the test server and returns the ID of the
// created comment, discarding its creation time.
func addComment(t *testing.T, c store.Comment, ts *httptest.Server) string {
	commentID, _ := addCommentGetCreatedTime(t, c, ts)
	return commentID
}

func requireAdminOnly(t *testing.T, req *http.Request) {
Expand Down
10 changes: 2 additions & 8 deletions backend/app/store/service/testdata/tree.json
Original file line number Diff line number Diff line change
Expand Up @@ -242,11 +242,5 @@
"time": "2017-12-25T19:47:22Z"
}
}
],
"info": {
"url": "url",
"count": 12,
"first_time": "2017-12-25T19:46:01Z",
"last_time": "2017-12-25T19:47:22Z"
}
}
]
}
10 changes: 2 additions & 8 deletions backend/app/store/service/testdata/tree_del.json
Original file line number Diff line number Diff line change
Expand Up @@ -176,11 +176,5 @@
}
}]
}]
}],
"info": {
"url": "url",
"count": 8,
"first_time": "2017-12-25T19:46:01Z",
"last_time": "2017-12-25T19:47:05Z"
}
}
}]
}
Loading

0 comments on commit 7e5fdf0

Please sign in to comment.