// GoToSocial
// Copyright (C) GoToSocial Authors admin@gotosocial.org
// SPDX-License-Identifier: AGPL-3.0-or-later
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package bundb

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/superseriousbusiness/gotosocial/internal/db"
	"github.com/superseriousbusiness/gotosocial/internal/gtscontext"
	"github.com/superseriousbusiness/gotosocial/internal/gtserror"
	"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
	"github.com/superseriousbusiness/gotosocial/internal/id"
	"github.com/superseriousbusiness/gotosocial/internal/log"
	"github.com/superseriousbusiness/gotosocial/internal/state"
	"github.com/uptrace/bun"
	"golang.org/x/exp/slices"
)

type timelineDB struct {
	db    *DB
	state *state.State
}
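
// GetHomeTimeline returns a slice of statuses for the home timeline of
// accountID: statuses authored by accounts that accountID follows, plus
// accountID's own posts, paged via maxID, sinceID, minID, and limit.
// If local is true, only statuses from local accounts are returned.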
func (t *timelineDB) GetHomeTimeline(ctx context.Context, accountID string, maxID string, sinceID string, minID string, limit int, local bool) ([]*gtsmodel.Status, error) {
	// Ensure reasonable
	if limit < 0 {
		limit = 0
	}

	// Make educated guess for slice size
	var (
		statusIDs   = make([]string, 0, limit)
		frontToBack = true
	)

	q := t.db.
		NewSelect().
		TableExpr("? AS ?", bun.Ident("statuses"), bun.Ident("status")).
		// Select only IDs from table
		Column("status.id")

	if maxID == "" || maxID >= id.Highest {
		const future = 24 * time.Hour

		var err error

		// don't return statuses more than 24hr in the future
		maxID, err = id.NewULIDFromTime(time.Now().Add(future))
		if err != nil {
			return nil, err
		}
	}

	// return only statuses LOWER (ie., older) than maxID
	q = q.Where("? < ?", bun.Ident("status.id"), maxID)

	if sinceID != "" {
		// return only statuses HIGHER (ie., newer) than sinceID
		q = q.Where("? > ?", bun.Ident("status.id"), sinceID)
	}

	if minID != "" {
		// return only statuses HIGHER (ie., newer) than minID
		q = q.Where("? > ?", bun.Ident("status.id"), minID)

		// page up
		frontToBack = false
	}

	if local {
		// return only statuses posted by local account havers
		q = q.Where("? = ?", bun.Ident("status.local"), local)
	}

	if limit > 0 {
		// limit amount of statuses returned
		q = q.Limit(limit)
	}

	if frontToBack {
		// Page down.
		q = q.Order("status.id DESC")
	} else {
		// Page up.
		q = q.Order("status.id ASC")
	}

	// As this is the home timeline, it should be
	// populated by statuses from accounts followed
	// by accountID, and posts from accountID itself.
	//
	// So, begin by seeing who accountID follows.
	// It should be a little cheaper to do this in
	// a separate query like this, rather than using
	// a join, since followIDs are cached in memory.
	follows, err := t.state.DB.GetAccountFollows(
		gtscontext.SetBarebones(ctx),
		accountID,
	)
	if err != nil && !errors.Is(err, db.ErrNoEntries) {
		return nil, gtserror.Newf("db error getting follows for account %s: %w", accountID, err)
	}

	// Extract just the accountID from each follow.
	targetAccountIDs := make([]string, len(follows)+1)
	for i, f := range follows {
		targetAccountIDs[i] = f.TargetAccountID
	}

	// Add accountID itself as a pseudo follow so that
	// accountID can see its own posts in the timeline.
	targetAccountIDs[len(targetAccountIDs)-1] = accountID

	// Select only statuses authored by
	// accounts with IDs in the slice.
	q = q.Where(
		"? IN (?)",
		bun.Ident("status.account_id"),
		bun.In(targetAccountIDs),
	)

	if err := q.Scan(ctx, &statusIDs); err != nil {
		return nil, err
	}

	if len(statusIDs) == 0 {
		return nil, nil
	}

	// If we're paging up, we still want statuses
	// to be sorted by ID desc, so reverse ids slice.
	// https://zchee.github.io/golang-wiki/SliceTricks/#reversing
	if !frontToBack {
		for l, r := 0, len(statusIDs)-1; l < r; l, r = l+1, r-1 {
			statusIDs[l], statusIDs[r] = statusIDs[r], statusIDs[l]
		}
	}

	statuses := make([]*gtsmodel.Status, 0, len(statusIDs))
	for _, id := range statusIDs {
		// Fetch status from db for ID
		status, err := t.state.DB.GetStatusByID(ctx, id)
		if err != nil {
			log.Errorf(ctx, "error fetching status %q: %v", id, err)
			continue
		}

		// Append status to slice
		statuses = append(statuses, status)
	}

	return statuses, nil
}
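
// GetPublicTimeline returns a slice of public, non-boost statuses for
// the public/federated timeline, paged via maxID, sinceID, minID, and
// limit. If local is true, only statuses from local accounts are returned.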
func (t *timelineDB) GetPublicTimeline(ctx context.Context, maxID string, sinceID string, minID string, limit int, local bool) ([]*gtsmodel.Status, error) {
	// Ensure reasonable
	if limit < 0 {
		limit = 0
	}

	// Make educated guess for slice size
	statusIDs := make([]string, 0, limit)

	q := t.db.
		NewSelect().
		TableExpr("? AS ?", bun.Ident("statuses"), bun.Ident("status")).
		Column("status.id").
		// Public only.
		Where("? = ?", bun.Ident("status.visibility"), gtsmodel.VisibilityPublic).
		// Ignore boosts.
		Where("? IS NULL", bun.Ident("status.boost_of_id")).
		Order("status.id DESC")

	if maxID == "" {
		const future = 24 * time.Hour

		var err error

		// don't return statuses more than 24hr in the future
		maxID, err = id.NewULIDFromTime(time.Now().Add(future))
		if err != nil {
			return nil, err
		}
	}

	// return only statuses LOWER (ie., older) than maxID
	q = q.Where("? < ?", bun.Ident("status.id"), maxID)

	if sinceID != "" {
		q = q.Where("? > ?", bun.Ident("status.id"), sinceID)
	}

	if minID != "" {
		q = q.Where("? > ?", bun.Ident("status.id"), minID)
	}

	if local {
		q = q.Where("? = ?", bun.Ident("status.local"), local)
	}

	if limit > 0 {
		q = q.Limit(limit)
	}

	if err := q.Scan(ctx, &statusIDs); err != nil {
		return nil, err
	}

	statuses := make([]*gtsmodel.Status, 0, len(statusIDs))

	for _, id := range statusIDs {
		// Fetch status from db for ID
		status, err := t.state.DB.GetStatusByID(ctx, id)
		if err != nil {
			log.Errorf(ctx, "error fetching status %q: %v", id, err)
			continue
		}

		// Append status to slice
		statuses = append(statuses, status)
	}

	return statuses, nil
}
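
// GetFavedTimeline returns a slice of statuses faved by accountID, paged
// via maxID, minID, and limit, along with the next maxID and previous
// minID values to use for further paging.
//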
// TODO optimize this query and the logic here, because it's slow as balls -- it takes like a literal second to return with a limit of 20!
// It might be worth serving it through a timeline instead of raw DB queries, like we do for Home feeds.
func (t *timelineDB) GetFavedTimeline(ctx context.Context, accountID string, maxID string, minID string, limit int) ([]*gtsmodel.Status, string, string, error) {
	// Ensure reasonable
	if limit < 0 {
		limit = 0
	}

	// Make educated guess for slice size
	faves := make([]*gtsmodel.StatusFave, 0, limit)

	fq := t.db.
		NewSelect().
		Model(&faves).
		Where("? = ?", bun.Ident("status_fave.account_id"), accountID).
		Order("status_fave.id DESC")

	if maxID != "" {
		fq = fq.Where("? < ?", bun.Ident("status_fave.id"), maxID)
	}

	if minID != "" {
		fq = fq.Where("? > ?", bun.Ident("status_fave.id"), minID)
	}

	if limit > 0 {
		fq = fq.Limit(limit)
	}

	err := fq.Scan(ctx)
	if err != nil {
		return nil, "", "", err
	}

	if len(faves) == 0 {
		return nil, "", "", db.ErrNoEntries
	}

	// Sort by favourite ID rather than status ID
	slices.SortFunc(faves, func(a, b *gtsmodel.StatusFave) bool {
		return a.ID > b.ID
	})

	statuses := make([]*gtsmodel.Status, 0, len(faves))

	for _, fave := range faves {
		// Fetch status from db for corresponding favourite
		status, err := t.state.DB.GetStatusByID(ctx, fave.StatusID)
		if err != nil {
			log.Errorf(ctx, "error fetching status for fave %q: %v", fave.ID, err)
			continue
		}

		// Append status to slice
		statuses = append(statuses, status)
	}

	nextMaxID := faves[len(faves)-1].ID
	prevMinID := faves[0].ID
	return statuses, nextMaxID, prevMinID, nil
}
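
// GetListTimeline returns a slice of statuses authored by accounts
// followed via entries in the given list, paged via maxID, sinceID,
// minID, and limit.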
func (t *timelineDB) GetListTimeline(
	ctx context.Context,
	listID string,
	maxID string,
	sinceID string,
	minID string,
	limit int,
) ([]*gtsmodel.Status, error) {
	// Ensure reasonable
	if limit < 0 {
		limit = 0
	}

	// Make educated guess for slice size
	var (
		statusIDs   = make([]string, 0, limit)
		frontToBack = true
	)

	// Fetch all listEntries entries from the database.
	listEntries, err := t.state.DB.GetListEntries(
		// Don't need actual follows
		// for this, just the IDs.
		gtscontext.SetBarebones(ctx),
		listID,
		"", "", "", 0,
	)
	if err != nil {
		return nil, fmt.Errorf("error getting entries for list %s: %w", listID, err)
	}

	// Extract just the IDs of each follow.
	followIDs := make([]string, 0, len(listEntries))
	for _, listEntry := range listEntries {
		followIDs = append(followIDs, listEntry.FollowID)
	}

	// Select target account IDs from follows.
	subQ := t.db.
		NewSelect().
		TableExpr("? AS ?", bun.Ident("follows"), bun.Ident("follow")).
		Column("follow.target_account_id").
		Where("? IN (?)", bun.Ident("follow.id"), bun.In(followIDs))

	// Select only status IDs created
	// by one of the followed accounts.
	q := t.db.
		NewSelect().
		TableExpr("? AS ?", bun.Ident("statuses"), bun.Ident("status")).
		// Select only IDs from table
		Column("status.id").
		Where("? IN (?)", bun.Ident("status.account_id"), subQ)

	if maxID == "" || maxID >= id.Highest {
		const future = 24 * time.Hour

		var err error

		// don't return statuses more than 24hr in the future
		maxID, err = id.NewULIDFromTime(time.Now().Add(future))
		if err != nil {
			return nil, err
		}
	}

	// return only statuses LOWER (ie., older) than maxID
	q = q.Where("? < ?", bun.Ident("status.id"), maxID)

	if sinceID != "" {
		// return only statuses HIGHER (ie., newer) than sinceID
		q = q.Where("? > ?", bun.Ident("status.id"), sinceID)
	}

	if minID != "" {
		// return only statuses HIGHER (ie., newer) than minID
		q = q.Where("? > ?", bun.Ident("status.id"), minID)

		// page up
		frontToBack = false
	}

	if limit > 0 {
		// limit amount of statuses returned
		q = q.Limit(limit)
	}

	if frontToBack {
		// Page down.
		q = q.Order("status.id DESC")
	} else {
		// Page up.
		q = q.Order("status.id ASC")
	}

	if err := q.Scan(ctx, &statusIDs); err != nil {
		return nil, err
	}

	if len(statusIDs) == 0 {
		return nil, nil
	}

	// If we're paging up, we still want statuses
	// to be sorted by ID desc, so reverse ids slice.
	// https://zchee.github.io/golang-wiki/SliceTricks/#reversing
	if !frontToBack {
		for l, r := 0, len(statusIDs)-1; l < r; l, r = l+1, r-1 {
			statusIDs[l], statusIDs[r] = statusIDs[r], statusIDs[l]
		}
	}

	statuses := make([]*gtsmodel.Status, 0, len(statusIDs))
	for _, id := range statusIDs {
		// Fetch status from db for ID
		status, err := t.state.DB.GetStatusByID(ctx, id)
		if err != nil {
			log.Errorf(ctx, "error fetching status %q: %v", id, err)
			continue
		}

		// Append status to slice
		statuses = append(statuses, status)
	}

	return statuses, nil
}
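
// GetTagTimeline returns a slice of public statuses tagged with the
// given tagID, paged via maxID, sinceID, minID, and limit.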
func (t *timelineDB) GetTagTimeline(
	ctx context.Context,
	tagID string,
	maxID string,
	sinceID string,
	minID string,
	limit int,
) ([]*gtsmodel.Status, error) {
	// Ensure reasonable
	if limit < 0 {
		limit = 0
	}

	// Make educated guess for slice size
	var (
		statusIDs   = make([]string, 0, limit)
		frontToBack = true
	)

	q := t.db.
		NewSelect().
		TableExpr("? AS ?", bun.Ident("status_to_tags"), bun.Ident("status_to_tag")).
		Column("status_to_tag.status_id").
		// Join with statuses for filtering.
		Join(
			"INNER JOIN ? AS ? ON ? = ?",
			bun.Ident("statuses"), bun.Ident("status"),
			bun.Ident("status.id"), bun.Ident("status_to_tag.status_id"),
		).
		// Public only.
		Where("? = ?", bun.Ident("status.visibility"), gtsmodel.VisibilityPublic).
		// This tag only.
		Where("? = ?", bun.Ident("status_to_tag.tag_id"), tagID)

	if maxID == "" || maxID >= id.Highest {
		const future = 24 * time.Hour

		var err error

		// don't return statuses more than 24hr in the future
		maxID, err = id.NewULIDFromTime(time.Now().Add(future))
		if err != nil {
			return nil, err
		}
	}

	// return only statuses LOWER (ie., older) than maxID
	q = q.Where("? < ?", bun.Ident("status_to_tag.status_id"), maxID)

	if sinceID != "" {
		// return only statuses HIGHER (ie., newer) than sinceID
		q = q.Where("? > ?", bun.Ident("status_to_tag.status_id"), sinceID)
	}

	if minID != "" {
		// return only statuses HIGHER (ie., newer) than minID
		q = q.Where("? > ?", bun.Ident("status_to_tag.status_id"), minID)

		// page up
		frontToBack = false
	}

	if limit > 0 {
		// limit amount of statuses returned
		q = q.Limit(limit)
	}

	if frontToBack {
		// Page down.
		q = q.Order("status_to_tag.status_id DESC")
	} else {
		// Page up.
		q = q.Order("status_to_tag.status_id ASC")
	}

	if err := q.Scan(ctx, &statusIDs); err != nil {
		return nil, err
	}

	if len(statusIDs) == 0 {
		return nil, nil
	}

	// If we're paging up, we still want statuses
	// to be sorted by ID desc, so reverse ids slice.
	// https://zchee.github.io/golang-wiki/SliceTricks/#reversing
	if !frontToBack {
		for l, r := 0, len(statusIDs)-1; l < r; l, r = l+1, r-1 {
			statusIDs[l], statusIDs[r] = statusIDs[r], statusIDs[l]
		}
	}

	statuses := make([]*gtsmodel.Status, 0, len(statusIDs))
	for _, id := range statusIDs {
		// Fetch status from db for ID
		status, err := t.state.DB.GetStatusByID(ctx, id)
		if err != nil {
			log.Errorf(ctx, "error fetching status %q: %v", id, err)
			continue
		}

		// Append status to slice
		statuses = append(statuses, status)
	}

	return statuses, nil
}