Maximo Database Table Data Purging involves removing unused data, indexes, and attachments that may slow down database read operations and consume unnecessary storage.
Performing regular data purging helps improve database performance and reduce the cost of owning the cloud database service for clients.
Some of the cleanup activities that can be performed include:
- Removing non-critical business data from out-of-the-box Maximo tables
-- Capture fixed cutoff timestamps once, so paired parent/child deletes use the
-- same boundary. (Calling GETDATE() separately in each statement lets the
-- cutoff drift between statements, which can leave orphaned child rows.)
DECLARE @Now DATETIME = GETDATE();
DECLARE @Cutoff2Y DATETIME = DATEADD(YEAR, -2, @Now);
DECLARE @Cutoff8Y DATETIME = DATEADD(YEAR, -8, @Now);
DECLARE @Cutoff90D DATETIME = DATEADD(DAY, -90, @Now);

-- Delete cron task history records that are more than 2 years old (by start time).
DELETE FROM crontaskhistory WHERE starttime < @Cutoff2Y;

-- Delete escalation repeat-tracking records whose owner record has been
-- closed to history (historyflag = 1) or has otherwise expired.
DELETE FROM escrepeattrack
WHERE escalation = 'POCLOSE'
  AND ownerid IN (SELECT poid FROM po WHERE historyflag = 1);

DELETE FROM escrepeattrack
WHERE escalation = 'SRCLOSED'
  AND ownerid IN (SELECT ticketuid FROM sr WHERE historyflag = 1);

DELETE FROM escrepeattrack
WHERE escalation = 'ESCBLTNEXP'
  AND ownerid IN (SELECT bulletinboarduid FROM bulletinboard
                  WHERE expiredate < DATEADD(DAY, -1, @Now));

DELETE FROM escrepeattrack
WHERE escalation IN ('WOCAN', 'WOCLOSE')
  AND ownerid IN (SELECT workorderid FROM workorder WHERE historyflag = 1);

-- Also drop tracking rows for escalations that are no longer active.
DELETE FROM escrepeattrack
WHERE escalation IN (SELECT escalation FROM escalation WHERE active = 0);

-- Delete communication log entries (includes escalation mail) more than
-- 8 years old, based on created date.
DELETE FROM commlog WHERE createdate < @Cutoff8Y;

-- Delete message tracking records more than 90 days old (by initial date/time).
-- Detail rows are removed first, against the same cutoff, so no orphaned
-- children are left behind.
DELETE FROM maxintmsgtrkdtl
WHERE meamsgid IN (SELECT meamsgid FROM maxintmsgtrk
                   WHERE initialdatetime < @Cutoff90D);
DELETE FROM maxintmsgtrk WHERE initialdatetime < @Cutoff90D;

-- Delete integration error messages more than 2 years old (by status date),
-- child table first.
DELETE FROM maxinterrormsg
WHERE messageid IN (SELECT messageid FROM maxinterror
                    WHERE statusdate < @Cutoff2Y);
DELETE FROM maxinterror WHERE statusdate < @Cutoff2Y;
- Batch Deletions
SET NOCOUNT ON;

DECLARE @DeletedRows  INT      = 0;
DECLARE @TotalDeleted BIGINT   = 0;
DECLARE @BatchStart   DATETIME;
DECLARE @StartTime    DATETIME = GETDATE();

PRINT CONCAT('Cleanup started at ', CONVERT(VARCHAR(19), @StartTime, 120), '.');

-- Delete orphaned WOCLOSE tracking rows in batches of 50,000 to keep each
-- transaction (and its log usage) small and to limit blocking on escrepeattrack.
WHILE 1 = 1
BEGIN
    SET @BatchStart = GETDATE();

    -- NOTE(review): the original read workorder with (NOLOCK). Dirty reads
    -- during the existence check could make NOT EXISTS true for a work order
    -- that actually exists, deleting tracking rows that should stay, so the
    -- hint has been removed.
    DELETE TOP (50000) FROM escrepeattrack
    WHERE escalation = 'WOCLOSE'
      AND objectname = 'WORKORDER'
      AND NOT EXISTS (SELECT 1 FROM workorder
                      WHERE workorder.workorderid = escrepeattrack.ownerid);

    SET @DeletedRows = @@ROWCOUNT;
    SET @TotalDeleted += @DeletedRows;

    IF @DeletedRows = 0
        BREAK;

    PRINT CONCAT('Deleted ', @DeletedRows, ' rows in this batch (started at ',
                 CONVERT(VARCHAR(19), @BatchStart, 120),
                 '). Total deleted so far: ', @TotalDeleted, '.');

    -- Checkpoint after each batch to flush dirty pages and, under SIMPLE
    -- recovery, allow log space to be reused between batches.
    CHECKPOINT;

    -- Brief pause to reduce sustained load on the server.
    WAITFOR DELAY '00:00:04';
END;
- Rebuilding Indexes After Large Deletions
-- Template: rebuild a fragmented index after large deletions. Replace
-- [indexname] / [tablename] with real names before running.
-- NOTE(review): ONLINE = ON requires Enterprise (or Azure SQL) edition;
-- switch to ONLINE = OFF on Standard edition.
ALTER INDEX [indexname] ON [dbo].[tablename]
REBUILD PARTITION = ALL
WITH (
    PAD_INDEX = OFF,
    STATISTICS_NORECOMPUTE = OFF,
    SORT_IN_TEMPDB = OFF,
    IGNORE_DUP_KEY = OFF,
    ONLINE = ON (WAIT_AT_LOW_PRIORITY (MAX_DURATION = 0 MINUTES, ABORT_AFTER_WAIT = NONE)),
    ALLOW_ROW_LOCKS = ON,
    ALLOW_PAGE_LOCKS = OFF,
    DATA_COMPRESSION = PAGE
);
- Identifying Unused Tables and Indexes
- Cleaning Up Unlinked Attachments
-- Find attachment link records whose owning work order no longer exists
-- (candidates for unlinked-attachment cleanup). The original wrapped this in
-- a redundant self-referencing doclinksid IN (...) subquery; filtering
-- doclinks directly with NOT EXISTS is equivalent and simpler, and avoids
-- the NOT IN null-handling trap.
SELECT d.*
FROM doclinks d
WHERE d.ownertable = 'WORKORDER'
  AND NOT EXISTS (SELECT 1 FROM workorder w
                  WHERE w.workorderid = d.ownerid);










