feat(database): optimize task sorting functions and introduce bulk update capability

- Added a new SQL migration that fixes the "window functions are not allowed in UPDATE" error in the task sorting functions, replacing CTE-wrapped UPDATE statements with direct batched updates.
- Introduced update_task_sort_orders_bulk, a function that applies multiple task sort-order changes (with optional status/priority/phase changes) in a single call.
- Updated the task sort-order socket command to use the bulk function when a task_updates payload is provided, falling back to the existing handler otherwise.
- Simplified the frontend drag-and-drop handling to compute the new sort orders for all affected tasks and send them as a single task_updates payload.
chamikaJ
2025-07-11 14:46:07 +05:30
parent f2f12a2dfa
commit e4dfae9f1d
6 changed files with 385 additions and 107 deletions

View File

@@ -0,0 +1,143 @@
-- Fix window function error in task sort optimized functions
-- Error: window functions are not allowed in UPDATE
-- Replace the optimized sort functions to avoid CTE usage in UPDATE statements
CREATE OR REPLACE FUNCTION handle_task_list_sort_between_groups_optimized(_from_index integer, _to_index integer, _task_id uuid, _project_id uuid, _batch_size integer DEFAULT 100) RETURNS void
LANGUAGE plpgsql
AS
$$
DECLARE
_offset INT := 0;
_affected_rows INT;
BEGIN
-- PERFORMANCE OPTIMIZATION: Use direct updates without CTE in UPDATE
IF (_to_index = -1)
THEN
_to_index = COALESCE((SELECT MAX(sort_order) + 1 FROM tasks WHERE project_id = _project_id), 0);
END IF;
-- PERFORMANCE OPTIMIZATION: Batch updates for large datasets
IF _to_index > _from_index
THEN
LOOP
UPDATE tasks
SET sort_order = sort_order - 1
WHERE project_id = _project_id
AND sort_order > _from_index
AND sort_order < _to_index
AND sort_order > _offset
AND sort_order <= _offset + _batch_size;
GET DIAGNOSTICS _affected_rows = ROW_COUNT;
EXIT WHEN _affected_rows = 0;
_offset := _offset + _batch_size;
END LOOP;
UPDATE tasks SET sort_order = _to_index - 1 WHERE id = _task_id AND project_id = _project_id;
END IF;
IF _to_index < _from_index
THEN
_offset := 0;
LOOP
UPDATE tasks
SET sort_order = sort_order + 1
WHERE project_id = _project_id
AND sort_order > _to_index
AND sort_order < _from_index
AND sort_order > _offset
AND sort_order <= _offset + _batch_size;
GET DIAGNOSTICS _affected_rows = ROW_COUNT;
EXIT WHEN _affected_rows = 0;
_offset := _offset + _batch_size;
END LOOP;
UPDATE tasks SET sort_order = _to_index + 1 WHERE id = _task_id AND project_id = _project_id;
END IF;
END
$$;
-- Replace the second optimized sort function
CREATE OR REPLACE FUNCTION handle_task_list_sort_inside_group_optimized(_from_index integer, _to_index integer, _task_id uuid, _project_id uuid, _batch_size integer DEFAULT 100) RETURNS void
LANGUAGE plpgsql
AS
$$
DECLARE
_offset INT := 0;
_affected_rows INT;
BEGIN
-- PERFORMANCE OPTIMIZATION: Batch updates for large datasets without CTE in UPDATE
IF _to_index > _from_index
THEN
LOOP
UPDATE tasks
SET sort_order = sort_order - 1
WHERE project_id = _project_id
AND sort_order > _from_index
AND sort_order <= _to_index
AND sort_order > _offset
AND sort_order <= _offset + _batch_size;
GET DIAGNOSTICS _affected_rows = ROW_COUNT;
EXIT WHEN _affected_rows = 0;
_offset := _offset + _batch_size;
END LOOP;
END IF;
IF _to_index < _from_index
THEN
_offset := 0;
LOOP
UPDATE tasks
SET sort_order = sort_order + 1
WHERE project_id = _project_id
AND sort_order >= _to_index
AND sort_order < _from_index
AND sort_order > _offset
AND sort_order <= _offset + _batch_size;
GET DIAGNOSTICS _affected_rows = ROW_COUNT;
EXIT WHEN _affected_rows = 0;
_offset := _offset + _batch_size;
END LOOP;
END IF;
UPDATE tasks SET sort_order = _to_index WHERE id = _task_id AND project_id = _project_id;
END
$$;
-- Add simple bulk update function as alternative
CREATE OR REPLACE FUNCTION update_task_sort_orders_bulk(_updates json) RETURNS void
LANGUAGE plpgsql
AS
$$
DECLARE
_update_record RECORD;
BEGIN
-- Simple approach: update each task's sort_order from the provided array
FOR _update_record IN
SELECT
(item->>'task_id')::uuid as task_id,
(item->>'sort_order')::int as sort_order,
(item->>'status_id')::uuid as status_id,
(item->>'priority_id')::uuid as priority_id,
(item->>'phase_id')::uuid as phase_id
FROM json_array_elements(_updates) as item
LOOP
UPDATE tasks
SET
sort_order = _update_record.sort_order,
status_id = COALESCE(_update_record.status_id, status_id),
priority_id = COALESCE(_update_record.priority_id, priority_id)
WHERE id = _update_record.task_id;
-- Handle phase updates separately since it's in a different table
IF _update_record.phase_id IS NOT NULL THEN
INSERT INTO task_phase (task_id, phase_id)
VALUES (_update_record.task_id, _update_record.phase_id)
ON CONFLICT (task_id) DO UPDATE SET phase_id = _update_record.phase_id;
END IF;
END LOOP;
END
$$;
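
The bulk function takes one json argument: an array of per-task updates. Below is a minimal TypeScript sketch of calling it from a Node backend, assuming a node-postgres Pool; the pool variable, connection string, and UUIDs are hypothetical, and the socket handler later in this commit passes JSON.stringify(data.task_updates) the same way.

import { Pool } from "pg";

// Hypothetical connection; the real app uses its own db wrapper.
const pool = new Pool({ connectionString: process.env.DATABASE_URL });

interface TaskSortUpdate {
  task_id: string;
  sort_order: number;
  status_id?: string;
  priority_id?: string;
  phase_id?: string;
}

async function applyBulkSortOrder(updates: TaskSortUpdate[]): Promise<void> {
  // One call applies every item in the array; status_id / priority_id /
  // phase_id are optional and only overwrite the task when provided.
  await pool.query("SELECT update_task_sort_orders_bulk($1);", [
    JSON.stringify(updates),
  ]);
}

// Example payload (hypothetical UUIDs): the first task moves to position 1
// and changes status, the second task only gets a new sort_order, the third
// is re-phased via the task_phase upsert inside the function.
void applyBulkSortOrder([
  { task_id: "11111111-1111-1111-1111-111111111111", sort_order: 1, status_id: "22222222-2222-2222-2222-222222222222" },
  { task_id: "33333333-3333-3333-3333-333333333333", sort_order: 2 },
  { task_id: "44444444-4444-4444-4444-444444444444", sort_order: 3, phase_id: "55555555-5555-5555-5555-555555555555" },
]);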

View File

@@ -5498,6 +5498,7 @@ DECLARE
_iterator NUMERIC := 0;
_status_id TEXT;
_project_id UUID;
_base_sort_order NUMERIC;
BEGIN
-- Get the project_id from the first status to ensure we update all statuses in the same project
SELECT project_id INTO _project_id
@@ -5513,17 +5514,28 @@ BEGIN
_iterator := _iterator + 1;
END LOOP;
-- Ensure any remaining statuses in the project (not in the provided list) get sequential sort_order
-- This handles edge cases where not all statuses are provided
UPDATE task_statuses
SET sort_order = (
SELECT COUNT(*)
FROM task_statuses ts2
WHERE ts2.project_id = _project_id
AND ts2.id = ANY(SELECT (TRIM(BOTH '"' FROM JSON_ARRAY_ELEMENTS(_status_ids)::TEXT))::UUID)
) + ROW_NUMBER() OVER (ORDER BY sort_order) - 1
WHERE project_id = _project_id
AND id NOT IN (SELECT (TRIM(BOTH '"' FROM JSON_ARRAY_ELEMENTS(_status_ids)::TEXT))::UUID);
-- Get the base sort order for remaining statuses (simple count approach)
SELECT COUNT(*) INTO _base_sort_order
FROM task_statuses ts2
WHERE ts2.project_id = _project_id
AND ts2.id = ANY(SELECT (TRIM(BOTH '"' FROM JSON_ARRAY_ELEMENTS(_status_ids)::TEXT))::UUID);
-- Update remaining statuses with simple sequential numbering
-- Reset iterator to start from base_sort_order
_iterator := _base_sort_order;
-- Use a cursor approach to avoid window functions
FOR _status_id IN
SELECT id::TEXT FROM task_statuses
WHERE project_id = _project_id
AND id NOT IN (SELECT (TRIM(BOTH '"' FROM JSON_ARRAY_ELEMENTS(_status_ids)::TEXT))::UUID)
ORDER BY sort_order
LOOP
UPDATE task_statuses
SET sort_order = _iterator
WHERE id = _status_id::UUID;
_iterator := _iterator + 1;
END LOOP;
RETURN;
END
@@ -6412,7 +6424,7 @@ DECLARE
_offset INT := 0;
_affected_rows INT;
BEGIN
-- PERFORMANCE OPTIMIZATION: Use CTE for better query planning
-- PERFORMANCE OPTIMIZATION: Use direct updates without CTE in UPDATE
IF (_to_index = -1)
THEN
_to_index = COALESCE((SELECT MAX(sort_order) + 1 FROM tasks WHERE project_id = _project_id), 0);
@@ -6422,18 +6434,15 @@ BEGIN
IF _to_index > _from_index
THEN
LOOP
WITH batch_update AS (
UPDATE tasks
SET sort_order = sort_order - 1
WHERE project_id = _project_id
AND sort_order > _from_index
AND sort_order < _to_index
AND sort_order > _offset
AND sort_order <= _offset + _batch_size
RETURNING 1
)
SELECT COUNT(*) INTO _affected_rows FROM batch_update;
UPDATE tasks
SET sort_order = sort_order - 1
WHERE project_id = _project_id
AND sort_order > _from_index
AND sort_order < _to_index
AND sort_order > _offset
AND sort_order <= _offset + _batch_size;
GET DIAGNOSTICS _affected_rows = ROW_COUNT;
EXIT WHEN _affected_rows = 0;
_offset := _offset + _batch_size;
END LOOP;
@@ -6445,18 +6454,15 @@ BEGIN
THEN
_offset := 0;
LOOP
WITH batch_update AS (
UPDATE tasks
SET sort_order = sort_order + 1
WHERE project_id = _project_id
AND sort_order > _to_index
AND sort_order < _from_index
AND sort_order > _offset
AND sort_order <= _offset + _batch_size
RETURNING 1
)
SELECT COUNT(*) INTO _affected_rows FROM batch_update;
UPDATE tasks
SET sort_order = sort_order + 1
WHERE project_id = _project_id
AND sort_order > _to_index
AND sort_order < _from_index
AND sort_order > _offset
AND sort_order <= _offset + _batch_size;
GET DIAGNOSTICS _affected_rows = ROW_COUNT;
EXIT WHEN _affected_rows = 0;
_offset := _offset + _batch_size;
END LOOP;
@@ -6475,22 +6481,19 @@ DECLARE
_offset INT := 0;
_affected_rows INT;
BEGIN
-- PERFORMANCE OPTIMIZATION: Batch updates for large datasets
-- PERFORMANCE OPTIMIZATION: Batch updates for large datasets without CTE in UPDATE
IF _to_index > _from_index
THEN
LOOP
WITH batch_update AS (
UPDATE tasks
SET sort_order = sort_order - 1
WHERE project_id = _project_id
AND sort_order > _from_index
AND sort_order <= _to_index
AND sort_order > _offset
AND sort_order <= _offset + _batch_size
RETURNING 1
)
SELECT COUNT(*) INTO _affected_rows FROM batch_update;
UPDATE tasks
SET sort_order = sort_order - 1
WHERE project_id = _project_id
AND sort_order > _from_index
AND sort_order <= _to_index
AND sort_order > _offset
AND sort_order <= _offset + _batch_size;
GET DIAGNOSTICS _affected_rows = ROW_COUNT;
EXIT WHEN _affected_rows = 0;
_offset := _offset + _batch_size;
END LOOP;
@@ -6500,18 +6503,15 @@ BEGIN
THEN
_offset := 0;
LOOP
WITH batch_update AS (
UPDATE tasks
SET sort_order = sort_order + 1
WHERE project_id = _project_id
AND sort_order >= _to_index
AND sort_order < _from_index
AND sort_order > _offset
AND sort_order <= _offset + _batch_size
RETURNING 1
)
SELECT COUNT(*) INTO _affected_rows FROM batch_update;
UPDATE tasks
SET sort_order = sort_order + 1
WHERE project_id = _project_id
AND sort_order >= _to_index
AND sort_order < _from_index
AND sort_order > _offset
AND sort_order <= _offset + _batch_size;
GET DIAGNOSTICS _affected_rows = ROW_COUNT;
EXIT WHEN _affected_rows = 0;
_offset := _offset + _batch_size;
END LOOP;
@@ -6520,3 +6520,38 @@ BEGIN
UPDATE tasks SET sort_order = _to_index WHERE id = _task_id AND project_id = _project_id;
END
$$;
-- Simple function to update task sort orders in bulk
CREATE OR REPLACE FUNCTION update_task_sort_orders_bulk(_updates json) RETURNS void
LANGUAGE plpgsql
AS
$$
DECLARE
_update_record RECORD;
BEGIN
-- Simple approach: update each task's sort_order from the provided array
FOR _update_record IN
SELECT
(item->>'task_id')::uuid as task_id,
(item->>'sort_order')::int as sort_order,
(item->>'status_id')::uuid as status_id,
(item->>'priority_id')::uuid as priority_id,
(item->>'phase_id')::uuid as phase_id
FROM json_array_elements(_updates) as item
LOOP
UPDATE tasks
SET
sort_order = _update_record.sort_order,
status_id = COALESCE(_update_record.status_id, status_id),
priority_id = COALESCE(_update_record.priority_id, priority_id)
WHERE id = _update_record.task_id;
-- Handle phase updates separately since it's in a different table
IF _update_record.phase_id IS NOT NULL THEN
INSERT INTO task_phase (task_id, phase_id)
VALUES (_update_record.task_id, _update_record.phase_id)
ON CONFLICT (task_id) DO UPDATE SET phase_id = _update_record.phase_id;
END IF;
END LOOP;
END
$$;

View File

@@ -24,6 +24,14 @@ interface ChangeRequest {
priority: string;
};
team_id: string;
// New simplified approach
task_updates?: Array<{
task_id: string;
sort_order: number;
status_id?: string;
priority_id?: string;
phase_id?: string;
}>;
}
interface Config {
@@ -64,38 +72,72 @@ function updateUnmappedStatus(config: Config) {
export async function on_task_sort_order_change(_io: Server, socket: Socket, data: ChangeRequest) {
try {
const q = `SELECT handle_task_list_sort_order_change($1);`;
const config: Config = {
from_index: data.from_index,
to_index: data.to_index,
task_id: data.task.id,
from_group: data.from_group,
to_group: data.to_group,
project_id: data.project_id,
group_by: data.group_by,
to_last_index: Boolean(data.to_last_index)
};
if ((config.group_by === GroupBy.STATUS) && config.to_group) {
const canContinue = await TasksControllerV2.checkForCompletedDependencies(config.task_id, config?.to_group);
if (!canContinue) {
return socket.emit(SocketEvents.TASK_SORT_ORDER_CHANGE.toString(), {
completed_deps: canContinue
});
// New simplified approach - use bulk updates if provided
if (data.task_updates && data.task_updates.length > 0) {
// Check dependencies for status changes
if (data.group_by === GroupBy.STATUS && data.to_group) {
const canContinue = await TasksControllerV2.checkForCompletedDependencies(data.task.id, data.to_group);
if (!canContinue) {
return socket.emit(SocketEvents.TASK_SORT_ORDER_CHANGE.toString(), {
completed_deps: canContinue
});
}
}
notifyStatusChange(socket, config);
// Use the simple bulk update function
const q = `SELECT update_task_sort_orders_bulk($1);`;
await db.query(q, [JSON.stringify(data.task_updates)]);
await emitSortOrderChange(data, socket);
// Handle notifications and logging
if (data.group_by === GroupBy.STATUS && data.to_group) {
notifyStatusChange(socket, {
task_id: data.task.id,
to_group: data.to_group,
from_group: data.from_group,
from_index: data.from_index,
to_index: data.to_index,
project_id: data.project_id,
group_by: data.group_by,
to_last_index: data.to_last_index
});
}
} else {
// Fallback to old complex method
const q = `SELECT handle_task_list_sort_order_change($1);`;
const config: Config = {
from_index: data.from_index,
to_index: data.to_index,
task_id: data.task.id,
from_group: data.from_group,
to_group: data.to_group,
project_id: data.project_id,
group_by: data.group_by,
to_last_index: Boolean(data.to_last_index)
};
if ((config.group_by === GroupBy.STATUS) && config.to_group) {
const canContinue = await TasksControllerV2.checkForCompletedDependencies(config.task_id, config?.to_group);
if (!canContinue) {
return socket.emit(SocketEvents.TASK_SORT_ORDER_CHANGE.toString(), {
completed_deps: canContinue
});
}
notifyStatusChange(socket, config);
}
if (config.group_by === GroupBy.PHASE) {
updateUnmappedStatus(config);
}
await db.query(q, [JSON.stringify(config)]);
await emitSortOrderChange(data, socket);
}
if (config.group_by === GroupBy.PHASE) {
updateUnmappedStatus(config);
}
await db.query(q, [JSON.stringify(config)]);
await emitSortOrderChange(data, socket);
if (config.group_by === GroupBy.STATUS) {
// Common post-processing logic for both approaches
if (data.group_by === GroupBy.STATUS) {
const userId = getLoggedInUserIdFromSocket(socket);
const isAlreadyAssigned = await TasksControllerV2.checkUserAssignedToTask(data.task.id, userId as string, data.team_id);
@@ -104,7 +146,7 @@ export async function on_task_sort_order_change(_io: Server, socket: Socket, dat
}
}
if (config.group_by === GroupBy.PHASE) {
if (data.group_by === GroupBy.PHASE) {
void logPhaseChange({
task_id: data.task.id,
socket,
@@ -113,7 +155,7 @@ export async function on_task_sort_order_change(_io: Server, socket: Socket, dat
});
}
if (config.group_by === GroupBy.STATUS) {
if (data.group_by === GroupBy.STATUS) {
void logStatusChange({
task_id: data.task.id,
socket,
@@ -122,7 +164,7 @@ export async function on_task_sort_order_change(_io: Server, socket: Socket, dat
});
}
if (config.group_by === GroupBy.PRIORITY) {
if (data.group_by === GroupBy.PRIORITY) {
void logPriorityChange({
task_id: data.task.id,
socket,
@@ -131,7 +173,7 @@ export async function on_task_sort_order_change(_io: Server, socket: Socket, dat
});
}
void notifyProjectUpdates(socket, config.task_id);
void notifyProjectUpdates(socket, data.task.id);
return;
} catch (error) {
log_error(error);
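
Distilled from the handler above, the branching comes down to which SQL call is made for a sort-order change: the bulk function when task_updates is present, the legacy per-move function otherwise. The sketch below is illustrative, not the actual module; the helper name, the trimmed ChangeRequest type, and the Query shape are assumptions.

// Sketch: choose the DB call for a sort-order change request.
interface ChangeRequest {
  project_id: string;
  task: { id: string };
  task_updates?: Array<{
    task_id: string;
    sort_order: number;
    status_id?: string;
    priority_id?: string;
    phase_id?: string;
  }>;
}

type Query = { text: string; params: unknown[] };

function buildSortOrderQuery(data: ChangeRequest, legacyConfig: object): Query {
  if (data.task_updates && data.task_updates.length > 0) {
    // Fast path: one call applies every affected task's new sort_order.
    return {
      text: "SELECT update_task_sort_orders_bulk($1);",
      params: [JSON.stringify(data.task_updates)],
    };
  }
  // Fallback: the original per-move function driven by the full config object.
  return {
    text: "SELECT handle_task_list_sort_order_change($1);",
    params: [JSON.stringify(legacyConfig)],
  };
}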

View File

@@ -15,7 +15,6 @@ import { IKanbanTaskStatus } from '@/types/tasks/taskStatus.types';
import { Modal as AntModal } from 'antd';
import { fetchTasksV3 } from '@/features/task-management/task-management.slice';
import { fetchEnhancedKanbanGroups } from '@/features/enhanced-kanban/enhanced-kanban.slice';
import { fetchTaskGroups } from '@/features/tasks/tasks.slice';
import './ManageStatusModal.css';
const { Title, Text } = Typography;
@@ -594,7 +593,6 @@ const ManageStatusModal: React.FC<ManageStatusModalProps> = ({
// Refresh from server to ensure consistency
dispatch(fetchStatuses(finalProjectId));
dispatch(fetchTasksV3(finalProjectId));
dispatch(fetchTaskGroups(finalProjectId));
dispatch(fetchEnhancedKanbanGroups(finalProjectId));
} catch (error) {
console.error('Error changing status category:', error);
@@ -736,7 +734,6 @@ const ManageStatusModal: React.FC<ManageStatusModalProps> = ({
statusApiService.updateStatusOrder(requestBody, finalProjectId).then(() => {
// Refresh task lists after status order change
dispatch(fetchTasksV3(finalProjectId));
dispatch(fetchTaskGroups(finalProjectId));
dispatch(fetchEnhancedKanbanGroups(finalProjectId));
}).catch(error => {
console.error('Error updating status order:', error);
@@ -767,7 +764,6 @@ const ManageStatusModal: React.FC<ManageStatusModalProps> = ({
if (res.done) {
dispatch(fetchStatuses(finalProjectId));
dispatch(fetchTasksV3(finalProjectId));
dispatch(fetchTaskGroups(finalProjectId));
dispatch(fetchEnhancedKanbanGroups(finalProjectId));
}
} catch (error) {
@@ -791,7 +787,6 @@ const ManageStatusModal: React.FC<ManageStatusModalProps> = ({
await statusApiService.updateNameOfStatus(id, body, finalProjectId);
dispatch(fetchStatuses(finalProjectId));
dispatch(fetchTasksV3(finalProjectId));
dispatch(fetchTaskGroups(finalProjectId));
dispatch(fetchEnhancedKanbanGroups(finalProjectId));
} catch (error) {
console.error('Error renaming status:', error);
@@ -813,7 +808,6 @@ const ManageStatusModal: React.FC<ManageStatusModalProps> = ({
await statusApiService.deleteStatus(id, finalProjectId, replacingStatusId);
dispatch(fetchStatuses(finalProjectId));
dispatch(fetchTasksV3(finalProjectId));
dispatch(fetchTaskGroups(finalProjectId));
dispatch(fetchEnhancedKanbanGroups(finalProjectId));
} catch (error) {
console.error('Error deleting status:', error);

View File

@@ -524,6 +524,69 @@ const TaskGroupWrapper = ({ taskGroups, groupBy }: TaskGroupWrapperProps) => {
});
}
// NEW SIMPLIFIED APPROACH: Calculate all affected task updates and send them
const taskUpdates: Array<{
task_id: string;
sort_order: number;
status_id?: string;
priority_id?: string;
phase_id?: string;
}> = [];
// Add updates for all tasks in affected groups
if (activeGroupId === overGroupId) {
// Same group - just reorder
const updatedTasks = [...sourceGroup.tasks];
updatedTasks.splice(fromIndex, 1);
updatedTasks.splice(toIndex, 0, task);
updatedTasks.forEach((task, index) => {
taskUpdates.push({
task_id: task.id,
sort_order: index + 1, // 1-based indexing
});
});
} else {
// Different groups - update both source and target
const updatedSourceTasks = sourceGroup.tasks.filter((_, i) => i !== fromIndex);
const updatedTargetTasks = [...targetGroup.tasks];
if (isTargetGroupEmpty) {
updatedTargetTasks.push(task);
} else if (toIndex >= 0 && toIndex <= updatedTargetTasks.length) {
updatedTargetTasks.splice(toIndex, 0, task);
} else {
updatedTargetTasks.push(task);
}
// Add updates for source group
updatedSourceTasks.forEach((task, index) => {
taskUpdates.push({
task_id: task.id,
sort_order: index + 1,
});
});
// Add updates for target group (including the moved task)
updatedTargetTasks.forEach((task, index) => {
const update: any = {
task_id: task.id,
sort_order: index + 1,
};
// Add group-specific updates
if (groupBy === IGroupBy.STATUS) {
update.status_id = targetGroup.id;
} else if (groupBy === IGroupBy.PRIORITY) {
update.priority_id = targetGroup.id;
} else if (groupBy === IGroupBy.PHASE) {
update.phase_id = targetGroup.id;
}
taskUpdates.push(update);
});
}
socket?.emit(SocketEvents.TASK_SORT_ORDER_CHANGE.toString(), {
project_id: projectId,
from_index: sourceGroup.tasks[fromIndex].sort_order,
@@ -534,6 +597,7 @@ const TaskGroupWrapper = ({ taskGroups, groupBy }: TaskGroupWrapperProps) => {
group_by: groupBy,
task: sourceGroup.tasks[fromIndex],
team_id: currentSession?.team_id,
task_updates: taskUpdates, // NEW: Send calculated updates
});
setTimeout(resetTaskRowStyles, 0);
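
The reorder math above can be read as a pure function from (source group, target group, indices, grouping) to the task_updates array: same-group moves just renumber the group, cross-group moves renumber both groups and stamp the target group's id onto every task in it, keyed by the active grouping. A sketch under the same assumptions (1-based sort_order); buildTaskUpdates and the trimmed Task/Group types are illustrative, not the project's real ones.

type GroupByKey = "status" | "priority" | "phase";

interface Task { id: string; }
interface Group { id: string; tasks: Task[]; }

interface TaskUpdate {
  task_id: string;
  sort_order: number;
  status_id?: string;
  priority_id?: string;
  phase_id?: string;
}

function buildTaskUpdates(
  source: Group,
  target: Group,
  fromIndex: number,
  toIndex: number,
  groupBy: GroupByKey
): TaskUpdate[] {
  const moved = source.tasks[fromIndex];
  const updates: TaskUpdate[] = [];

  if (source.id === target.id) {
    // Same group: remove the task and re-insert it, then renumber everyone.
    const reordered = [...source.tasks];
    reordered.splice(fromIndex, 1);
    reordered.splice(toIndex, 0, moved);
    reordered.forEach((t, i) => updates.push({ task_id: t.id, sort_order: i + 1 }));
    return updates;
  }

  // Cross-group: renumber the shrunken source and the grown target.
  const remaining = source.tasks.filter((_, i) => i !== fromIndex);
  const targetTasks = [...target.tasks];
  if (toIndex >= 0 && toIndex <= targetTasks.length) {
    targetTasks.splice(toIndex, 0, moved);
  } else {
    targetTasks.push(moved);
  }

  remaining.forEach((t, i) => updates.push({ task_id: t.id, sort_order: i + 1 }));
  targetTasks.forEach((t, i) => {
    const update: TaskUpdate = { task_id: t.id, sort_order: i + 1 };
    // Every task in the target group carries the group id for the active grouping.
    if (groupBy === "status") update.status_id = target.id;
    else if (groupBy === "priority") update.priority_id = target.id;
    else update.phase_id = target.id;
    updates.push(update);
  });
  return updates;
}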

View File

@@ -208,6 +208,18 @@ const TaskListTableWrapper = ({
>
<Flex vertical>
<Flex style={{ transform: 'translateY(6px)' }}>
{groupBy !== IGroupBy.PRIORITY &&
!showRenameInput &&
isEditable &&
name !== 'Unmapped' && (
<Dropdown menu={{ items }}>
<Button
icon={<EllipsisOutlined />}
className="borderless-icon-btn"
title={isEditable ? undefined : t('noPermission')}
/>
</Dropdown>
)}
<Button
className="custom-collapse-button"
style={{
@@ -243,18 +255,6 @@ const TaskListTableWrapper = ({
</Typography.Text>
)}
</Button>
{groupBy !== IGroupBy.PRIORITY &&
!showRenameInput &&
isEditable &&
name !== 'Unmapped' && (
<Dropdown menu={{ items }}>
<Button
icon={<EllipsisOutlined />}
className="borderless-icon-btn"
title={isEditable ? undefined : t('noPermission')}
/>
</Dropdown>
)}
</Flex>
<Collapsible
isOpen={isExpanded}