feat(database): optimize task sorting functions and introduce bulk update capability

- Added a new SQL migration that fixes the "window functions are not allowed in UPDATE" error in the task sorting functions, replacing the CTE-wrapped UPDATE statements with direct batched updates for better performance.
- Introduced update_task_sort_orders_bulk, a function that applies multiple task sort-order (and optional status, priority, and phase) updates in a single call.
- Updated the socket command to accept the new bulk-update payload, with a fallback to the existing per-task handler, improving task sorting in the frontend.
- Simplified task update handling in the frontend to use the new bulk update path, improving performance and the reordering experience.
Author: chamikaJ
Date: 2025-07-11 14:46:07 +05:30
Parent: f2f12a2dfa
Commit: e4dfae9f1d
6 changed files with 385 additions and 107 deletions


@@ -0,0 +1,143 @@
-- Fix window function error in task sort optimized functions
-- Error: window functions are not allowed in UPDATE
-- Replace the optimized sort functions to avoid CTE usage in UPDATE statements
CREATE OR REPLACE FUNCTION handle_task_list_sort_between_groups_optimized(_from_index integer, _to_index integer, _task_id uuid, _project_id uuid, _batch_size integer DEFAULT 100) RETURNS void
LANGUAGE plpgsql
AS
$$
DECLARE
_offset INT := 0;
_affected_rows INT;
BEGIN
-- PERFORMANCE OPTIMIZATION: Use direct updates without CTE in UPDATE
IF (_to_index = -1)
THEN
_to_index = COALESCE((SELECT MAX(sort_order) + 1 FROM tasks WHERE project_id = _project_id), 0);
END IF;
-- PERFORMANCE OPTIMIZATION: Batch updates for large datasets
IF _to_index > _from_index
THEN
LOOP
UPDATE tasks
SET sort_order = sort_order - 1
WHERE project_id = _project_id
AND sort_order > _from_index
AND sort_order < _to_index
AND sort_order > _offset
AND sort_order <= _offset + _batch_size;
GET DIAGNOSTICS _affected_rows = ROW_COUNT;
EXIT WHEN _affected_rows = 0;
_offset := _offset + _batch_size;
END LOOP;
UPDATE tasks SET sort_order = _to_index - 1 WHERE id = _task_id AND project_id = _project_id;
END IF;
IF _to_index < _from_index
THEN
_offset := 0;
LOOP
UPDATE tasks
SET sort_order = sort_order + 1
WHERE project_id = _project_id
AND sort_order > _to_index
AND sort_order < _from_index
AND sort_order > _offset
AND sort_order <= _offset + _batch_size;
GET DIAGNOSTICS _affected_rows = ROW_COUNT;
EXIT WHEN _affected_rows = 0;
_offset := _offset + _batch_size;
END LOOP;
UPDATE tasks SET sort_order = _to_index + 1 WHERE id = _task_id AND project_id = _project_id;
END IF;
END
$$;
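For illustration only, a call to the function above might look like the following; the UUIDs and batch size are placeholders, not values from this commit. Per the logic above, it shifts the tasks sitting between positions 3 and 8 down by one and places the moved task at position 7 (_to_index - 1):
-- Hypothetical example call with placeholder UUIDs
SELECT handle_task_list_sort_between_groups_optimized(
    3,                                             -- _from_index
    8,                                             -- _to_index
    '11111111-1111-1111-1111-111111111111'::uuid,  -- _task_id (placeholder)
    '22222222-2222-2222-2222-222222222222'::uuid,  -- _project_id (placeholder)
    50                                             -- _batch_size (optional, defaults to 100)
);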
-- Replace the second optimized sort function
CREATE OR REPLACE FUNCTION handle_task_list_sort_inside_group_optimized(_from_index integer, _to_index integer, _task_id uuid, _project_id uuid, _batch_size integer DEFAULT 100) RETURNS void
LANGUAGE plpgsql
AS
$$
DECLARE
_offset INT := 0;
_affected_rows INT;
BEGIN
-- PERFORMANCE OPTIMIZATION: Batch updates for large datasets without CTE in UPDATE
IF _to_index > _from_index
THEN
LOOP
UPDATE tasks
SET sort_order = sort_order - 1
WHERE project_id = _project_id
AND sort_order > _from_index
AND sort_order <= _to_index
AND sort_order > _offset
AND sort_order <= _offset + _batch_size;
GET DIAGNOSTICS _affected_rows = ROW_COUNT;
EXIT WHEN _affected_rows = 0;
_offset := _offset + _batch_size;
END LOOP;
END IF;
IF _to_index < _from_index
THEN
_offset := 0;
LOOP
UPDATE tasks
SET sort_order = sort_order + 1
WHERE project_id = _project_id
AND sort_order >= _to_index
AND sort_order < _from_index
AND sort_order > _offset
AND sort_order <= _offset + _batch_size;
GET DIAGNOSTICS _affected_rows = ROW_COUNT;
EXIT WHEN _affected_rows = 0;
_offset := _offset + _batch_size;
END LOOP;
END IF;
UPDATE tasks SET sort_order = _to_index WHERE id = _task_id AND project_id = _project_id;
END
$$;
-- Add simple bulk update function as alternative
CREATE OR REPLACE FUNCTION update_task_sort_orders_bulk(_updates json) RETURNS void
LANGUAGE plpgsql
AS
$$
DECLARE
_update_record RECORD;
BEGIN
-- Simple approach: update each task's sort_order from the provided array
FOR _update_record IN
SELECT
(item->>'task_id')::uuid as task_id,
(item->>'sort_order')::int as sort_order,
(item->>'status_id')::uuid as status_id,
(item->>'priority_id')::uuid as priority_id,
(item->>'phase_id')::uuid as phase_id
FROM json_array_elements(_updates) as item
LOOP
UPDATE tasks
SET
sort_order = _update_record.sort_order,
status_id = COALESCE(_update_record.status_id, status_id),
priority_id = COALESCE(_update_record.priority_id, priority_id)
WHERE id = _update_record.task_id;
-- Handle phase updates separately since it's in a different table
IF _update_record.phase_id IS NOT NULL THEN
INSERT INTO task_phase (task_id, phase_id)
VALUES (_update_record.task_id, _update_record.phase_id)
ON CONFLICT (task_id) DO UPDATE SET phase_id = _update_record.phase_id;
END IF;
END LOOP;
END
$$;
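As a usage sketch (the UUIDs below are placeholders, not values from this commit), the frontend-computed orders can be applied in one call. Keys omitted from an element come through as NULL, so status_id and priority_id fall back to their current values via the COALESCE calls above, and the task_phase upsert runs only when phase_id is present:
-- Hypothetical example call with placeholder UUIDs
SELECT update_task_sort_orders_bulk('[
  {"task_id": "11111111-1111-1111-1111-111111111111", "sort_order": 0},
  {"task_id": "22222222-2222-2222-2222-222222222222", "sort_order": 1,
   "status_id": "33333333-3333-3333-3333-333333333333"}
]'::json);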


@@ -5498,6 +5498,7 @@ DECLARE
_iterator NUMERIC := 0;
_status_id TEXT;
_project_id UUID;
_base_sort_order NUMERIC;
BEGIN
-- Get the project_id from the first status to ensure we update all statuses in the same project
SELECT project_id INTO _project_id
@@ -5513,17 +5514,28 @@ BEGIN
_iterator := _iterator + 1;
END LOOP;
-- Ensure any remaining statuses in the project (not in the provided list) get sequential sort_order
-- This handles edge cases where not all statuses are provided
UPDATE task_statuses
SET sort_order = (
SELECT COUNT(*)
FROM task_statuses ts2
WHERE ts2.project_id = _project_id
AND ts2.id = ANY(SELECT (TRIM(BOTH '"' FROM JSON_ARRAY_ELEMENTS(_status_ids)::TEXT))::UUID)
) + ROW_NUMBER() OVER (ORDER BY sort_order) - 1
WHERE project_id = _project_id
AND id NOT IN (SELECT (TRIM(BOTH '"' FROM JSON_ARRAY_ELEMENTS(_status_ids)::TEXT))::UUID);
-- Get the base sort order for remaining statuses (simple count approach)
SELECT COUNT(*) INTO _base_sort_order
FROM task_statuses ts2
WHERE ts2.project_id = _project_id
AND ts2.id = ANY(SELECT (TRIM(BOTH '"' FROM JSON_ARRAY_ELEMENTS(_status_ids)::TEXT))::UUID);
-- Update remaining statuses with simple sequential numbering
-- Reset iterator to start from base_sort_order
_iterator := _base_sort_order;
-- Use a cursor approach to avoid window functions
FOR _status_id IN
SELECT id::TEXT FROM task_statuses
WHERE project_id = _project_id
AND id NOT IN (SELECT (TRIM(BOTH '"' FROM JSON_ARRAY_ELEMENTS(_status_ids)::TEXT))::UUID)
ORDER BY sort_order
LOOP
UPDATE task_statuses
SET sort_order = _iterator
WHERE id = _status_id::UUID;
_iterator := _iterator + 1;
END LOOP;
RETURN;
END
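The rewrite above drops the ROW_NUMBER() OVER (...) expression, which PostgreSQL rejects inside an UPDATE ("window functions are not allowed in UPDATE"), in favor of a plain FOR loop with a counter. A minimal standalone sketch of the same pattern, with a placeholder project id, would be:
-- Illustrative sketch only; the project id is a placeholder
DO $$
DECLARE
    _i  INT := 0;   -- next sequential sort_order value
    _id UUID;
BEGIN
    -- Walk the statuses in their current order and renumber them 0..n-1
    FOR _id IN
        SELECT id FROM task_statuses
        WHERE project_id = '22222222-2222-2222-2222-222222222222'::uuid
        ORDER BY sort_order
    LOOP
        UPDATE task_statuses SET sort_order = _i WHERE id = _id;
        _i := _i + 1;
    END LOOP;
END
$$;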
@@ -6412,7 +6424,7 @@ DECLARE
_offset INT := 0;
_affected_rows INT;
BEGIN
-- PERFORMANCE OPTIMIZATION: Use CTE for better query planning
-- PERFORMANCE OPTIMIZATION: Use direct updates without CTE in UPDATE
IF (_to_index = -1)
THEN
_to_index = COALESCE((SELECT MAX(sort_order) + 1 FROM tasks WHERE project_id = _project_id), 0);
@@ -6422,18 +6434,15 @@ BEGIN
IF _to_index > _from_index
THEN
LOOP
WITH batch_update AS (
UPDATE tasks
SET sort_order = sort_order - 1
WHERE project_id = _project_id
AND sort_order > _from_index
AND sort_order < _to_index
AND sort_order > _offset
AND sort_order <= _offset + _batch_size
RETURNING 1
)
SELECT COUNT(*) INTO _affected_rows FROM batch_update;
UPDATE tasks
SET sort_order = sort_order - 1
WHERE project_id = _project_id
AND sort_order > _from_index
AND sort_order < _to_index
AND sort_order > _offset
AND sort_order <= _offset + _batch_size;
GET DIAGNOSTICS _affected_rows = ROW_COUNT;
EXIT WHEN _affected_rows = 0;
_offset := _offset + _batch_size;
END LOOP;
@@ -6445,18 +6454,15 @@ BEGIN
THEN
_offset := 0;
LOOP
WITH batch_update AS (
UPDATE tasks
SET sort_order = sort_order + 1
WHERE project_id = _project_id
AND sort_order > _to_index
AND sort_order < _from_index
AND sort_order > _offset
AND sort_order <= _offset + _batch_size
RETURNING 1
)
SELECT COUNT(*) INTO _affected_rows FROM batch_update;
UPDATE tasks
SET sort_order = sort_order + 1
WHERE project_id = _project_id
AND sort_order > _to_index
AND sort_order < _from_index
AND sort_order > _offset
AND sort_order <= _offset + _batch_size;
GET DIAGNOSTICS _affected_rows = ROW_COUNT;
EXIT WHEN _affected_rows = 0;
_offset := _offset + _batch_size;
END LOOP;
@@ -6475,22 +6481,19 @@ DECLARE
_offset INT := 0;
_affected_rows INT;
BEGIN
-- PERFORMANCE OPTIMIZATION: Batch updates for large datasets
-- PERFORMANCE OPTIMIZATION: Batch updates for large datasets without CTE in UPDATE
IF _to_index > _from_index
THEN
LOOP
WITH batch_update AS (
UPDATE tasks
SET sort_order = sort_order - 1
WHERE project_id = _project_id
AND sort_order > _from_index
AND sort_order <= _to_index
AND sort_order > _offset
AND sort_order <= _offset + _batch_size
RETURNING 1
)
SELECT COUNT(*) INTO _affected_rows FROM batch_update;
UPDATE tasks
SET sort_order = sort_order - 1
WHERE project_id = _project_id
AND sort_order > _from_index
AND sort_order <= _to_index
AND sort_order > _offset
AND sort_order <= _offset + _batch_size;
GET DIAGNOSTICS _affected_rows = ROW_COUNT;
EXIT WHEN _affected_rows = 0;
_offset := _offset + _batch_size;
END LOOP;
@@ -6500,18 +6503,15 @@ BEGIN
THEN
_offset := 0;
LOOP
WITH batch_update AS (
UPDATE tasks
SET sort_order = sort_order + 1
WHERE project_id = _project_id
AND sort_order >= _to_index
AND sort_order < _from_index
AND sort_order > _offset
AND sort_order <= _offset + _batch_size
RETURNING 1
)
SELECT COUNT(*) INTO _affected_rows FROM batch_update;
UPDATE tasks
SET sort_order = sort_order + 1
WHERE project_id = _project_id
AND sort_order >= _to_index
AND sort_order < _from_index
AND sort_order > _offset
AND sort_order <= _offset + _batch_size;
GET DIAGNOSTICS _affected_rows = ROW_COUNT;
EXIT WHEN _affected_rows = 0;
_offset := _offset + _batch_size;
END LOOP;
@@ -6520,3 +6520,38 @@ BEGIN
UPDATE tasks SET sort_order = _to_index WHERE id = _task_id AND project_id = _project_id;
END
$$;
-- Simple function to update task sort orders in bulk
CREATE OR REPLACE FUNCTION update_task_sort_orders_bulk(_updates json) RETURNS void
LANGUAGE plpgsql
AS
$$
DECLARE
_update_record RECORD;
BEGIN
-- Simple approach: update each task's sort_order from the provided array
FOR _update_record IN
SELECT
(item->>'task_id')::uuid as task_id,
(item->>'sort_order')::int as sort_order,
(item->>'status_id')::uuid as status_id,
(item->>'priority_id')::uuid as priority_id,
(item->>'phase_id')::uuid as phase_id
FROM json_array_elements(_updates) as item
LOOP
UPDATE tasks
SET
sort_order = _update_record.sort_order,
status_id = COALESCE(_update_record.status_id, status_id),
priority_id = COALESCE(_update_record.priority_id, priority_id)
WHERE id = _update_record.task_id;
-- Handle phase updates separately since it's in a different table
IF _update_record.phase_id IS NOT NULL THEN
INSERT INTO task_phase (task_id, phase_id)
VALUES (_update_record.task_id, _update_record.phase_id)
ON CONFLICT (task_id) DO UPDATE SET phase_id = _update_record.phase_id;
END IF;
END LOOP;
END
$$;


@@ -24,6 +24,14 @@ interface ChangeRequest {
priority: string;
};
team_id: string;
// New simplified approach
task_updates?: Array<{
task_id: string;
sort_order: number;
status_id?: string;
priority_id?: string;
phase_id?: string;
}>;
}
interface Config {
@@ -64,38 +72,72 @@ function updateUnmappedStatus(config: Config) {
export async function on_task_sort_order_change(_io: Server, socket: Socket, data: ChangeRequest) {
try {
const q = `SELECT handle_task_list_sort_order_change($1);`;
const config: Config = {
from_index: data.from_index,
to_index: data.to_index,
task_id: data.task.id,
from_group: data.from_group,
to_group: data.to_group,
project_id: data.project_id,
group_by: data.group_by,
to_last_index: Boolean(data.to_last_index)
};
if ((config.group_by === GroupBy.STATUS) && config.to_group) {
const canContinue = await TasksControllerV2.checkForCompletedDependencies(config.task_id, config?.to_group);
if (!canContinue) {
return socket.emit(SocketEvents.TASK_SORT_ORDER_CHANGE.toString(), {
completed_deps: canContinue
});
// New simplified approach - use bulk updates if provided
if (data.task_updates && data.task_updates.length > 0) {
// Check dependencies for status changes
if (data.group_by === GroupBy.STATUS && data.to_group) {
const canContinue = await TasksControllerV2.checkForCompletedDependencies(data.task.id, data.to_group);
if (!canContinue) {
return socket.emit(SocketEvents.TASK_SORT_ORDER_CHANGE.toString(), {
completed_deps: canContinue
});
}
}
notifyStatusChange(socket, config);
// Use the simple bulk update function
const q = `SELECT update_task_sort_orders_bulk($1);`;
await db.query(q, [JSON.stringify(data.task_updates)]);
await emitSortOrderChange(data, socket);
// Handle notifications and logging
if (data.group_by === GroupBy.STATUS && data.to_group) {
notifyStatusChange(socket, {
task_id: data.task.id,
to_group: data.to_group,
from_group: data.from_group,
from_index: data.from_index,
to_index: data.to_index,
project_id: data.project_id,
group_by: data.group_by,
to_last_index: data.to_last_index
});
}
} else {
// Fallback to old complex method
const q = `SELECT handle_task_list_sort_order_change($1);`;
const config: Config = {
from_index: data.from_index,
to_index: data.to_index,
task_id: data.task.id,
from_group: data.from_group,
to_group: data.to_group,
project_id: data.project_id,
group_by: data.group_by,
to_last_index: Boolean(data.to_last_index)
};
if ((config.group_by === GroupBy.STATUS) && config.to_group) {
const canContinue = await TasksControllerV2.checkForCompletedDependencies(config.task_id, config?.to_group);
if (!canContinue) {
return socket.emit(SocketEvents.TASK_SORT_ORDER_CHANGE.toString(), {
completed_deps: canContinue
});
}
notifyStatusChange(socket, config);
}
if (config.group_by === GroupBy.PHASE) {
updateUnmappedStatus(config);
}
await db.query(q, [JSON.stringify(config)]);
await emitSortOrderChange(data, socket);
}
if (config.group_by === GroupBy.PHASE) {
updateUnmappedStatus(config);
}
await db.query(q, [JSON.stringify(config)]);
await emitSortOrderChange(data, socket);
if (config.group_by === GroupBy.STATUS) {
// Common post-processing logic for both approaches
if (data.group_by === GroupBy.STATUS) {
const userId = getLoggedInUserIdFromSocket(socket);
const isAlreadyAssigned = await TasksControllerV2.checkUserAssignedToTask(data.task.id, userId as string, data.team_id);
@@ -104,7 +146,7 @@ export async function on_task_sort_order_change(_io: Server, socket: Socket, dat
}
}
if (config.group_by === GroupBy.PHASE) {
if (data.group_by === GroupBy.PHASE) {
void logPhaseChange({
task_id: data.task.id,
socket,
@@ -113,7 +155,7 @@ export async function on_task_sort_order_change(_io: Server, socket: Socket, dat
});
}
if (config.group_by === GroupBy.STATUS) {
if (data.group_by === GroupBy.STATUS) {
void logStatusChange({
task_id: data.task.id,
socket,
@@ -122,7 +164,7 @@ export async function on_task_sort_order_change(_io: Server, socket: Socket, dat
});
}
if (config.group_by === GroupBy.PRIORITY) {
if (data.group_by === GroupBy.PRIORITY) {
void logPriorityChange({
task_id: data.task.id,
socket,
@@ -131,7 +173,7 @@ export async function on_task_sort_order_change(_io: Server, socket: Socket, dat
});
}
void notifyProjectUpdates(socket, config.task_id);
void notifyProjectUpdates(socket, data.task.id);
return;
} catch (error) {
log_error(error);