// GoToSocial
// Copyright (C) GoToSocial Authors admin@gotosocial.org
// SPDX-License-Identifier: AGPL-3.0-or-later
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package workers

import (
	"context"
	"errors"

	apimodel "github.com/superseriousbusiness/gotosocial/internal/api/model"
	"github.com/superseriousbusiness/gotosocial/internal/db"
	"github.com/superseriousbusiness/gotosocial/internal/gtscontext"
	"github.com/superseriousbusiness/gotosocial/internal/gtserror"
	"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
	"github.com/superseriousbusiness/gotosocial/internal/log"
	"github.com/superseriousbusiness/gotosocial/internal/processing/account"
	"github.com/superseriousbusiness/gotosocial/internal/processing/media"
	"github.com/superseriousbusiness/gotosocial/internal/state"
	"github.com/superseriousbusiness/gotosocial/internal/typeutils"
	"github.com/superseriousbusiness/gotosocial/internal/util"
)

// util provides utility functions used by both
// the fromClientAPI and fromFediAPI functions.
type utils struct {
	state     *state.State
	media     *media.Processor
	account   *account.Processor
	surface   *Surface
	converter *typeutils.Converter
}

// wipeStatus encapsulates common logic used to
// totally delete a status + all its attachments,
// notifications, boosts, and timeline entries.
//
// If deleteAttachments is true, then any status
// attachments will also be deleted, else they
// will just be detached.
//
// If copyToSinBin is true, then a version of the
// status will be put in the `sin_bin_statuses`
// table prior to deletion.
func (u *utils) wipeStatus(
	ctx context.Context,
	status *gtsmodel.Status,
	deleteAttachments bool,
	copyToSinBin bool,
) error {
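	// Gather errors from each cleanup step as we go,
	// rather than returning early, so one failed step
	// doesn't leave the rest of the cleanup undone.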
	var errs gtserror.MultiError

	if copyToSinBin {
		// Copy this status to the sin bin before we delete it.
		sbStatus, err := u.converter.StatusToSinBinStatus(ctx, status)
		if err != nil {
			errs.Appendf("error converting status to sinBinStatus: %w", err)
		} else {
			if err := u.state.DB.PutSinBinStatus(ctx, sbStatus); err != nil {
				errs.Appendf("db error storing sinBinStatus: %w", err)
			}
		}
	}

	// Either delete all attachments for this status,
	// or simply detach + clean them separately later.
	//
	// Reason to detach rather than delete is that
	// the author might want to reattach them to another
	// status immediately (in case of delete + redraft).
	if deleteAttachments {
		// todo:u.state.DB.DeleteAttachmentsForStatus
		for _, id := range status.AttachmentIDs {
			if err := u.media.Delete(ctx, id); err != nil {
				errs.Appendf("error deleting media: %w", err)
			}
		}
	} else {
		// todo:u.state.DB.UnattachAttachmentsForStatus
		for _, id := range status.AttachmentIDs {
			if _, err := u.media.Unattach(ctx, status.Account, id); err != nil {
				errs.Appendf("error unattaching media: %w", err)
			}
		}
	}

	// Delete all mentions generated by this status.
	// todo:u.state.DB.DeleteMentionsForStatus
	for _, id := range status.MentionIDs {
		if err := u.state.DB.DeleteMentionByID(ctx, id); err != nil {
			errs.Appendf("error deleting status mention: %w", err)
		}
	}

	// Delete all notifications generated by this status.
	if err := u.state.DB.DeleteNotificationsForStatus(ctx, status.ID); err != nil {
		errs.Appendf("error deleting status notifications: %w", err)
	}

	// Delete all bookmarks of this status.
	if err := u.state.DB.DeleteStatusBookmarksForStatus(ctx, status.ID); err != nil {
		errs.Appendf("error deleting status bookmarks: %w", err)
	}

	// Delete all faves of this status.
	if err := u.state.DB.DeleteStatusFavesForStatus(ctx, status.ID); err != nil {
		errs.Appendf("error deleting status faves: %w", err)
	}

	if pollID := status.PollID; pollID != "" {
		// Delete this poll by ID from the database.
		if err := u.state.DB.DeletePollByID(ctx, pollID); err != nil {
			errs.Appendf("error deleting status poll: %w", err)
		}

		// Cancel any scheduled expiry task for poll.
		_ = u.state.Workers.Scheduler.Cancel(pollID)
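		// (The boolean return, ignored here, just reports
		// whether a pending task was actually cancelled.)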
	}

	// Get all boosts of this status so that we can
	// delete those boosts + remove them from timelines.
	boosts, err := u.state.DB.GetStatusBoosts(
		// We MUST set a barebones context here,
		// as depending on where it came from the
		// original BoostOf may already be gone.
		gtscontext.SetBarebones(ctx),
		status.ID)
	if err != nil {
		errs.Appendf("error fetching status boosts: %w", err)
	}

	for _, boost := range boosts {
		// Delete the boost itself.
		if err := u.state.DB.DeleteStatusByID(ctx, boost.ID); err != nil {
			errs.Appendf("error deleting boost: %w", err)
		}

		// Remove the boost from any and all timelines.
		if err := u.surface.deleteStatusFromTimelines(ctx, boost.ID); err != nil {
			errs.Appendf("error deleting boost from timelines: %w", err)
		}
	}

	// Delete the status itself from any and all timelines.
	if err := u.surface.deleteStatusFromTimelines(ctx, status.ID); err != nil {
		errs.Appendf("error deleting status from timelines: %w", err)
	}

	// Delete this status from any conversations it's part of.
	if err := u.state.DB.DeleteStatusFromConversations(ctx, status.ID); err != nil {
errs.Appendf("error deleting status from conversations: %w", err)
}
// Finally delete the status itself.
if err := u.state.DB.DeleteStatusByID(ctx, status.ID); err != nil {
errs.Appendf("error deleting status: %w", err)
}
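
	// Combine the gathered errors into a single
	// error, or nil if no step above failed.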
	return errs.Combine()
}

// redirectFollowers redirects all local
// followers of originAcct to targetAcct.
//
// Both accounts must be fully dereferenced
// already, and the Move must be valid.
//
// Errors are logged rather than returned; the
// returned bool will be true if all went OK.
func (u *utils) redirectFollowers(
	ctx context.Context,
	originAcct *gtsmodel.Account,
	targetAcct *gtsmodel.Account,
) bool {
	// Any local followers of originAcct should
	// send follow requests to targetAcct instead,
	// and have followers of originAcct removed.
	//
	// Select local followers with barebones, since
	// we only need follow.Account and we can get
	// that ourselves.
	followers, err := u.state.DB.GetAccountLocalFollowers(
		gtscontext.SetBarebones(ctx),
		originAcct.ID,
	)
	if err != nil && !errors.Is(err, db.ErrNoEntries) {
		log.Errorf(ctx,
			"db error getting follows targeting originAcct: %v",
			err,
		)
		return false
	}

	for _, follow := range followers {
		// Fetch the local account that
		// owns the follow targeting originAcct.
		if follow.Account, err = u.state.DB.GetAccountByID(
			gtscontext.SetBarebones(ctx),
			follow.AccountID,
		); err != nil {
			log.Errorf(ctx,
				"db error getting follow account %s: %v",
				follow.AccountID, err,
			)
			return false
		}

		// Use the account processor FollowCreate
		// function to send off the new follow,
		// carrying over the Reblogs and Notify
		// values from the old follow to the new.
		//
		// This will also handle cases where our
		// account has already followed the target
		// account, by just updating the existing
		// follow of target account.
		//
		// Also, ensure new follow wouldn't be a
		// self follow, since that will error.
		if follow.AccountID != targetAcct.ID {
			if _, err := u.account.FollowCreate(
				ctx,
				follow.Account,
				&apimodel.AccountFollowRequest{
					ID:      targetAcct.ID,
					Reblogs: follow.ShowReblogs,
					Notify:  follow.Notify,
				},
			); err != nil {
				log.Errorf(ctx,
					"error creating new follow for account %s: %v",
					follow.AccountID, err,
				)
				return false
			}
		}

		// The new follow is now in the process of
		// sending, so remove the existing follow.
		// This sends out an Undo Activity for each
		// removed Follow.
		if _, err := u.account.FollowRemove(
			ctx,
			follow.Account,
			follow.TargetAccountID,
		); err != nil {
			log.Errorf(ctx,
				"error removing old follow for account %s: %v",
				follow.AccountID, err,
			)
			return false
		}
	}

	return true
}
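
// The stats helpers below all follow the same pattern:
// take a lock on the account being updated, ensure its
// stats struct is populated, adjust the relevant
// counter(s), then persist only the changed columns.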

func (u *utils) incrementStatusesCount(
	ctx context.Context,
	account *gtsmodel.Account,
	status *gtsmodel.Status,
) error {
	// Lock on this account since we're changing stats.
	unlock := u.state.ProcessingLocks.Lock(account.URI)
	defer unlock()

	// Ensure account stats are populated.
	if err := u.state.DB.PopulateAccountStats(ctx, account); err != nil {
		return gtserror.Newf("db error getting account stats: %w", err)
	}

	// Update status meta for account.
	*account.Stats.StatusesCount++
	account.Stats.LastStatusAt = status.CreatedAt

	// Update details in the database for stats.
	if err := u.state.DB.UpdateAccountStats(ctx,
		account.Stats,
		"statuses_count",
		"last_status_at",
	); err != nil {
		return gtserror.Newf("db error updating account stats: %w", err)
	}

	return nil
}

func (u *utils) decrementStatusesCount(
	ctx context.Context,
	account *gtsmodel.Account,
	status *gtsmodel.Status,
) error {
	// Lock on this account since we're changing stats.
	unlock := u.state.ProcessingLocks.Lock(account.URI)
	defer unlock()

	// Ensure account stats are populated.
	if err := u.state.DB.PopulateAccountStats(ctx, account); err != nil {
		return gtserror.Newf("db error getting account stats: %w", err)
	}

	// Update status meta for account (safely checking for zero value).
	*account.Stats.StatusesCount = util.Decr(*account.Stats.StatusesCount)

	if !status.PinnedAt.IsZero() {
		// Update status pinned count for account (safely checking for zero value).
		*account.Stats.StatusesPinnedCount = util.Decr(*account.Stats.StatusesPinnedCount)
	}

	// Update details in the database for stats.
	if err := u.state.DB.UpdateAccountStats(ctx,
		account.Stats,
		"statuses_count",
		"statuses_pinned_count",
	); err != nil {
		return gtserror.Newf("db error updating account stats: %w", err)
	}

	return nil
}

func (u *utils) incrementFollowersCount(
	ctx context.Context,
	account *gtsmodel.Account,
) error {
	// Lock on this account since we're changing stats.
	unlock := u.state.ProcessingLocks.Lock(account.URI)
	defer unlock()

	// Ensure account stats are populated.
	if err := u.state.DB.PopulateAccountStats(ctx, account); err != nil {
		return gtserror.Newf("db error getting account stats: %w", err)
	}

	// Update stats by incrementing
	// followers count by one.
	*account.Stats.FollowersCount++

	if err := u.state.DB.UpdateAccountStats(
		ctx,
		account.Stats,
		"followers_count",
	); err != nil {
		return gtserror.Newf("db error updating account stats: %w", err)
	}

	return nil
}

func (u *utils) decrementFollowersCount(
	ctx context.Context,
	account *gtsmodel.Account,
) error {
	// Lock on this account since we're changing stats.
	unlock := u.state.ProcessingLocks.Lock(account.URI)
	defer unlock()

	// Ensure account stats are populated.
	if err := u.state.DB.PopulateAccountStats(ctx, account); err != nil {
		return gtserror.Newf("db error getting account stats: %w", err)
	}

	// Update stats by decrementing
	// followers count by one.
	//
	// Clamp to 0 to avoid funny business.
	*account.Stats.FollowersCount--
	if *account.Stats.FollowersCount < 0 {
		*account.Stats.FollowersCount = 0
	}

	if err := u.state.DB.UpdateAccountStats(
		ctx,
		account.Stats,
		"followers_count",
	); err != nil {
		return gtserror.Newf("db error updating account stats: %w", err)
	}

	return nil
}

func (u *utils) incrementFollowingCount(
	ctx context.Context,
	account *gtsmodel.Account,
) error {
	// Lock on this account since we're changing stats.
	unlock := u.state.ProcessingLocks.Lock(account.URI)
	defer unlock()

	// Ensure account stats are populated.
	if err := u.state.DB.PopulateAccountStats(ctx, account); err != nil {
		return gtserror.Newf("db error getting account stats: %w", err)
	}

	// Update stats by incrementing
	// following count by one.
	*account.Stats.FollowingCount++

	if err := u.state.DB.UpdateAccountStats(
		ctx,
		account.Stats,
		"following_count",
	); err != nil {
		return gtserror.Newf("db error updating account stats: %w", err)
	}

	return nil
}

func (u *utils) decrementFollowingCount(
	ctx context.Context,
	account *gtsmodel.Account,
) error {
	// Lock on this account since we're changing stats.
	unlock := u.state.ProcessingLocks.Lock(account.URI)
	defer unlock()

	// Ensure account stats are populated.
	if err := u.state.DB.PopulateAccountStats(ctx, account); err != nil {
		return gtserror.Newf("db error getting account stats: %w", err)
	}

	// Update stats by decrementing
	// following count by one.
	//
	// Clamp to 0 to avoid funny business.
	*account.Stats.FollowingCount--
	if *account.Stats.FollowingCount < 0 {
		*account.Stats.FollowingCount = 0
	}

	if err := u.state.DB.UpdateAccountStats(
		ctx,
		account.Stats,
		"following_count",
	); err != nil {
		return gtserror.Newf("db error updating account stats: %w", err)
	}

	return nil
}

func (u *utils) incrementFollowRequestsCount(
	ctx context.Context,
	account *gtsmodel.Account,
) error {
	// Lock on this account since we're changing stats.
	unlock := u.state.ProcessingLocks.Lock(account.URI)
	defer unlock()

	// Ensure account stats are populated.
	if err := u.state.DB.PopulateAccountStats(ctx, account); err != nil {
		return gtserror.Newf("db error getting account stats: %w", err)
	}

	// Update stats by incrementing
	// follow requests count by one.
	*account.Stats.FollowRequestsCount++

	if err := u.state.DB.UpdateAccountStats(
		ctx,
		account.Stats,
		"follow_requests_count",
	); err != nil {
		return gtserror.Newf("db error updating account stats: %w", err)
	}

	return nil
}

func (u *utils) decrementFollowRequestsCount(
	ctx context.Context,
	account *gtsmodel.Account,
) error {
	// Lock on this account since we're changing stats.
	unlock := u.state.ProcessingLocks.Lock(account.URI)
	defer unlock()

	// Ensure account stats are populated.
	if err := u.state.DB.PopulateAccountStats(ctx, account); err != nil {
		return gtserror.Newf("db error getting account stats: %w", err)
	}

	// Update stats by decrementing
	// follow requests count by one.
	//
	// Clamp to 0 to avoid funny business.
	*account.Stats.FollowRequestsCount--
	if *account.Stats.FollowRequestsCount < 0 {
		*account.Stats.FollowRequestsCount = 0
	}

	if err := u.state.DB.UpdateAccountStats(
		ctx,
		account.Stats,
		"follow_requests_count",
	); err != nil {
		return gtserror.Newf("db error updating account stats: %w", err)
	}

	return nil
}
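
// The request* helpers below share a common shape: for an
// interaction (fave / reply / announce) targeting a local
// status, they store a pending interaction request (unless
// one already exists for the same interaction URI) and
// notify the local account being interacted with.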

// requestFave stores an interaction request
// for the given fave, and notifies the interactee.
func (u *utils) requestFave(
	ctx context.Context,
	fave *gtsmodel.StatusFave,
) error {
	// Only create interaction request
	// if fave targets a local status.
	if fave.Status == nil ||
		!fave.Status.IsLocal() {
		return nil
	}

	// Lock on the interaction URI.
	unlock := u.state.ProcessingLocks.Lock(fave.URI)
	defer unlock()

	// Ensure no req with this URI exists already.
	req, err := u.state.DB.GetInteractionRequestByInteractionURI(ctx, fave.URI)
	if err != nil && !errors.Is(err, db.ErrNoEntries) {
		return gtserror.Newf("db error checking for existing interaction request: %w", err)
	}

	if req != nil {
		// Interaction req already exists,
		// no need to do anything else.
		return nil
	}

	// Create + store new interaction request.
	req, err = typeutils.StatusFaveToInteractionRequest(ctx, fave)
	if err != nil {
		return gtserror.Newf("error creating interaction request: %w", err)
	}

	if err := u.state.DB.PutInteractionRequest(ctx, req); err != nil {
		return gtserror.Newf("db error storing interaction request: %w", err)
	}

	// Notify *local* account of pending fave.
	if err := u.surface.notifyPendingFave(ctx, fave); err != nil {
		return gtserror.Newf("error notifying pending fave: %w", err)
	}

	return nil
}

// requestReply stores an interaction request
// for the given reply, and notifies the interactee.
func (u *utils) requestReply(
	ctx context.Context,
	reply *gtsmodel.Status,
) error {
	// Only create interaction request if
	// status replies to a local status.
	if reply.InReplyTo == nil ||
		!reply.InReplyTo.IsLocal() {
		return nil
	}

	// Lock on the interaction URI.
	unlock := u.state.ProcessingLocks.Lock(reply.URI)
	defer unlock()

	// Ensure no req with this URI exists already.
	req, err := u.state.DB.GetInteractionRequestByInteractionURI(ctx, reply.URI)
	if err != nil && !errors.Is(err, db.ErrNoEntries) {
		return gtserror.Newf("db error checking for existing interaction request: %w", err)
	}

	if req != nil {
		// Interaction req already exists,
		// no need to do anything else.
		return nil
	}

	// Create + store interaction request.
	req, err = typeutils.StatusToInteractionRequest(ctx, reply)
	if err != nil {
		return gtserror.Newf("error creating interaction request: %w", err)
	}

	if err := u.state.DB.PutInteractionRequest(ctx, req); err != nil {
		return gtserror.Newf("db error storing interaction request: %w", err)
	}

	// Notify *local* account of pending reply.
	if err := u.surface.notifyPendingReply(ctx, reply); err != nil {
		return gtserror.Newf("error notifying pending reply: %w", err)
	}

	return nil
}

// requestAnnounce stores an interaction request
// for the given announce, and notifies the interactee.
func (u *utils) requestAnnounce(
	ctx context.Context,
	boost *gtsmodel.Status,
) error {
	// Only create interaction request if
	// status announces a local status.
	if boost.BoostOf == nil ||
		!boost.BoostOf.IsLocal() {
		return nil
	}

	// Lock on the interaction URI.
	unlock := u.state.ProcessingLocks.Lock(boost.URI)
	defer unlock()

	// Ensure no req with this URI exists already.
	req, err := u.state.DB.GetInteractionRequestByInteractionURI(ctx, boost.URI)
	if err != nil && !errors.Is(err, db.ErrNoEntries) {
		return gtserror.Newf("db error checking for existing interaction request: %w", err)
	}

	if req != nil {
		// Interaction req already exists,
		// no need to do anything else.
		return nil
	}

	// Create + store interaction request.
	req, err = typeutils.StatusToInteractionRequest(ctx, boost)
	if err != nil {
		return gtserror.Newf("error creating interaction request: %w", err)
	}

	if err := u.state.DB.PutInteractionRequest(ctx, req); err != nil {
		return gtserror.Newf("db error storing interaction request: %w", err)
	}

	// Notify *local* account of pending announce.
	if err := u.surface.notifyPendingAnnounce(ctx, boost); err != nil {
		return gtserror.Newf("error notifying pending announce: %w", err)
	}

	return nil
}