-- ================================================================================================
--
-- Subject: iUG July 2021 - Dueling SQL – An IBM ACS and IBM i Services Master Class.sql
-- Author: Scott Forstie
-- Date : July, 2021
--
-- Resources:
-- ==========
-- ibm.biz/Db2foriSQLTutor
-- ibm.biz/Db2foriServices
-- ================================================================================================
stop;

-- ===========================================================================================
--
-- description: History log (today and yesterday)
--
-- ===========================================================================================
select *
  from table (
      qsys2.history_log_info()
    );
stop;

-- ===========================================================================================
--
-- description: Job start (CPF1124) and job end (CPF1164) messages (today and yesterday)
--
-- ===========================================================================================
SELECT *
  FROM TABLE (qsys2.history_log_info())
  where message_id in ('CPF1164', 'CPF1124');
stop;

-- ===========================================================================================
--
-- description: Jobs (today and previous 6 days)
--
-- ===========================================================================================
SELECT message_timestamp as time, message_id, from_user, from_job, message_tokens
  FROM TABLE (qsys2.history_log_info(START_TIME => CURRENT date - 6 days,
                                     END_TIME => CURRENT TIMESTAMP));
stop;

-- ===========================================================================================
--
-- description: Batch jobs (today and previous 6 days)
--
-- ===========================================================================================
SELECT message_timestamp as time, message_id, from_user,
       substr(message_tokens, 59, 10) as subsystem, from_job, message_tokens
  FROM TABLE (qsys2.history_log_info(START_TIME => CURRENT date - 6 days,
                                     END_TIME => CURRENT TIMESTAMP))
  where substr(message_tokens, 59, 10) = 'QBATCH';
stop;

-- ===========================================================================================
--
-- description: Batch jobs extended (today and previous 6 days)
--
-- ===========================================================================================
WITH JOB_START(start_time, from_user, sbs, from_job, tokens) AS (
    SELECT message_timestamp as time, from_user, substr(message_tokens, 59, 10) as subsystem,
           from_job, message_tokens
      FROM TABLE (qsys2.history_log_info(START_TIME => CURRENT date - 6 days,
                                         END_TIME => CURRENT TIMESTAMP)) x
      WHERE message_id = 'CPF1124' and substr(message_tokens, 59, 10) = 'QBATCH'
      ORDER BY ORDINAL_POSITION DESC
  )
SELECT b.message_timestamp AS end_time, b.FROM_USER, b.FROM_JOB
  FROM JOB_START A
       INNER JOIN TABLE (qsys2.history_log_info(START_TIME => CURRENT date - 6 days,
                                                END_TIME => CURRENT TIMESTAMP)) b
         ON b.from_job = a.from_job
  WHERE b.message_id = 'CPF1164'
  order by end_time desc;
stop;
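--
-- Variation (a sketch, not part of the original deck): elapsed minutes per batch job, pairing
-- each CPF1124 start message with its CPF1164 end message. Reuses the timestampdiff idiom
-- from the "local copy" example later in this script.
--
WITH JOB_START(start_time, from_user, sbs, from_job, tokens) AS (
    SELECT message_timestamp, from_user, substr(message_tokens, 59, 10), from_job, message_tokens
      FROM TABLE (qsys2.history_log_info(START_TIME => CURRENT date - 6 days,
                                         END_TIME => CURRENT TIMESTAMP))
      WHERE message_id = 'CPF1124' and substr(message_tokens, 59, 10) = 'QBATCH'
  )
SELECT b.from_job, a.from_user,
       timestampdiff(4, cast(b.message_timestamp - a.start_time as char(22))) as elapsed_minutes
  FROM JOB_START a
       INNER JOIN TABLE (qsys2.history_log_info(START_TIME => CURRENT date - 6 days,
                                                END_TIME => CURRENT TIMESTAMP)) b
         ON b.from_job = a.from_job
  WHERE b.message_id = 'CPF1164'
  ORDER BY elapsed_minutes desc;
stop;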
-- ===========================================================================================
--
-- description: How many batch jobs are processed each day? (today and previous 6 days)
--
-- ===========================================================================================
WITH JOB_START(start_time, from_user, sbs, from_job, tokens) AS (
    SELECT message_timestamp as time, from_user, substr(message_tokens, 59, 10) as subsystem,
           from_job, message_tokens
      FROM TABLE (qsys2.history_log_info(START_TIME => CURRENT date - 6 days,
                                         END_TIME => CURRENT TIMESTAMP)) x
      WHERE message_id = 'CPF1124' and substr(message_tokens, 59, 10) = 'QBATCH'
      ORDER BY ORDINAL_POSITION DESC
  )
SELECT DATE(b.message_timestamp) AS DATE, count(*) as job_count -- <<<====
  FROM JOB_START A
       INNER JOIN TABLE (qsys2.history_log_info(START_TIME => CURRENT date - 6 days,
                                                END_TIME => CURRENT TIMESTAMP)) b
         ON b.from_job = a.from_job
  WHERE b.message_id = 'CPF1164'
  Group by DATE(b.message_timestamp) -- <<<====
  order by date;
stop;

-- ===========================================================================================
--
-- description: Break down by day, user, with counts (today and previous 6 days)
--
-- ===========================================================================================
with JOB_START (start_time, from_user, sbs, from_job, tokens) as (
    select message_timestamp as time, from_user, substr(message_tokens, 59, 10) as subsystem,
           from_job, message_tokens
      from table (
          qsys2.history_log_info(
            START_TIME => current date - 6 days, END_TIME => current timestamp)
        ) x
      where message_id = 'CPF1124' and substr(message_tokens, 59, 10) = 'QBATCH'
      order by ORDINAL_POSITION desc),
  batch_jobs (date, user_name) as (
    select date(b.message_timestamp) as DATE, a.from_user
      from JOB_START A
           inner join table (
               qsys2.history_log_info(
                 START_TIME => current date - 6 days, END_TIME => current timestamp)
             ) b
             on b.from_job = a.from_job
      where b.message_id = 'CPF1164'
  )
select date, user_name, count(*) as total_jobs
  from batch_jobs
  group by date, user_name -- <<<====
  order by 1, 3 desc;
stop;
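--
-- Variation (a sketch, not part of the original deck): drop the QBATCH filter and break the
-- ended jobs down by day and subsystem. This assumes the subsystem name starts at position 59
-- of the CPF1124 message tokens, as the queries above do.
--
with JOB_START (start_time, from_user, sbs, from_job, tokens) as (
    select message_timestamp, from_user, substr(message_tokens, 59, 10), from_job, message_tokens
      from table (
          qsys2.history_log_info(
            START_TIME => current date - 6 days, END_TIME => current timestamp)
        ) x
      where message_id = 'CPF1124'),
  ended_jobs (date, subsystem) as (
    select date(b.message_timestamp), a.sbs
      from JOB_START A
           inner join table (
               qsys2.history_log_info(
                 START_TIME => current date - 6 days, END_TIME => current timestamp)
             ) b
             on b.from_job = a.from_job
      where b.message_id = 'CPF1164'
  )
select date, subsystem, count(*) as total_jobs
  from ended_jobs
  group by date, subsystem
  order by 1, 3 desc;
stop;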
--
-- author: Scott Forstie
-- date : November 1, 2020
-- updated: November 3, 2020
-- title : Job End and the CPF1164 message
-- When a job is started, the CPF1124 message is sent to the history log. When the job ends, the CPF1164 message is sent to the history log.
-- With SQL built-in functions, this example shows how SQL can extract and transform the CPF1164 job end message tokens into a useful form.
-- minvrm: V7R3M0
--
-- Enabling PTF Group: SF99704 Level 7 & SF99703 Level 18 (or higher)
-- Interpret: https://www.ibm.com/support/pages/node/6172749
--
create or replace function systools.job_end_info (
    start_time timestamp default (current_date - 1 day),
    end_time timestamp default '9999-12-30-00.00.00.000000'
  )
  returns table (
    ordinal_position integer,
    job_end timestamp,
    user_name varchar(10) for sbcs data,
    job_name varchar(28) for sbcs data,
    from_program varchar(10) for sbcs data,
    cpu_time integer,
    number_of_steps smallint,
    job_end_code smallint,
    job_end_detail varchar(100) for sbcs data,
    secondary_ending_code smallint,
    secondary_ending_code_detail varchar(100) for sbcs data,
    cpu_time_precise decimal(15, 3),
    job_entry timestamp(0),
    job_start timestamp(0),
    total_response_time integer,
    transaction_count integer,
    sync_aux_io_count integer,
    job_type varchar(11) for sbcs data,
    peak_temp_storage integer
  )
  specific systools.jobendinfo
  not deterministic
  not fenced
  no external action
  set option usrprf = *user, dynusrprf = *user
  return
    select ordinal_position,
           message_timestamp,
           from_user,
           from_job,
           from_program,
           interpret(substr(message_tokens, 43, 4) as integer) as cpu_time,
           interpret(substr(message_tokens, 47, 2) as smallint) as number_of_steps,
           interpret(substr(message_tokens, 49, 2) as smallint) as job_end_code,
           case interpret(substr(message_tokens, 49, 2) as smallint)
             when 0 then 'The job completed normally'
             when 10 then 'The job completed normally during controlled ending or controlled subsystem ending'
             when 20 then 'The job exceeded end severity (ENDSEV job attribute)'
             when 30 then 'The job ended abnormally'
             when 40 then 'The job ended before becoming active'
             when 50 then 'The job ended while the job was active'
             when 60 then 'The subsystem ended abnormally while the job was active'
             when 70 then 'The system ended abnormally while the job was active'
             when 80 then 'The job ended (ENDJOBABN command)'
             when 90 then 'The job was forced to end after the time limit ended (ENDJOBABN command)'
           end as job_end_detail,
           interpret(substr(message_tokens, 51, 2) as smallint) as secondary_ending_code,
           case interpret(substr(message_tokens, 51, 2) as smallint)
             when 0 then 'No secondary ending code'
             when 100 then 'Disconnect time interval exceeded'
             when 101 then 'Session device deleted'
             when 102 then 'Error calling Disconnect Job (DSCJOB)'
             when 300 then 'Device error and DEVRCYACN set to *ENDJOB'
             when 301 then 'Job ended due to looping on device errors'
           end as secondary_ending_code_detail,
           interpret(substr(message_tokens, 53, 8) as decimal(15, 3)) as cpu_time_precise,
           case
             when hex(substr(message_tokens, 81, 1)) = '00' then null
             else timestamp_format(
                    interpret(substr(message_tokens, 81, 8) as char(8) ccsid 37) concat ' ' concat
                    interpret(substr(message_tokens, 73, 8) as char(8) ccsid 37),
                    'MM/DD/YY HH24:MI:SS')
           end as job_entry,
           case
             when hex(substr(message_tokens, 97, 1)) = '00' then null
             else timestamp_format(
                    interpret(substr(message_tokens, 97, 8) as char(8) ccsid 37) concat ' ' concat
                    interpret(substr(message_tokens, 89, 8) as char(8) ccsid 37),
                    'MM/DD/YY HH24:MI:SS')
           end as job_start,
           interpret(substr(message_tokens, 105, 4) as integer) as total_response_time,
           interpret(substr(message_tokens, 109, 4) as integer) as transaction_count,
           interpret(substr(message_tokens, 113, 4) as integer) as sync_aux_io_count,
           case interpret(substr(message_tokens, 117, 1) as char(1) ccsid 37)
             when 'B' then 'Batch'
             when 'I' then 'Interactive'
             else interpret(substr(message_tokens, 117, 1) as char(1) ccsid 37)
           end as job_type,
           interpret(substr(message_tokens, 284, 4) as integer) as peak_temp_storage
      from table (
          qsys2.history_log_info(start_time => start_time, end_time => end_time)
        )
      where message_id = 'CPF1164';
stop;

--
-- View all CPF1164 messages, from today and yesterday
--
select *
  from table (
      systools.job_end_info()
    );
stop;

--
-- Count the ways jobs ended, from today and yesterday
--
select job_end_detail, count(*) as job_end_count
  from table (
      systools.job_end_info()
    )
  group by job_end_detail
  order by 2 desc;
stop;

--
-- top 10 CPU consumers, from today and yesterday
--
select user_name, job_name, cpu_time, cpu_time_precise, job_start
  from table (
      systools.job_end_info()
    )
  order by cpu_time_precise desc
  limit 10;
stop;

--
-- top 10 peak temp storage consumers, from today and yesterday
--
select user_name, job_name, peak_temp_storage, job_start
  from table (
      systools.job_end_info()
    )
  order by peak_temp_storage desc
  limit 10;
stop;

--
-- top 10 transaction counts, from today and yesterday
--
select user_name, job_name, transaction_count, job_start
  from table (
      systools.job_end_info()
    )
  order by transaction_count desc
  limit 10;
stop;

--
-- top 10 synchronous auxiliary I/O counts, from today and yesterday
--
select user_name, job_name, sync_aux_io_count, job_start
  from table (
      systools.job_end_info()
    )
  order by sync_aux_io_count desc
  limit 10;
stop;

--
-- top 10 longest run jobs, from today and yesterday
--
select user_name, job_name, job_start, job_end
  from table (
      systools.job_end_info()
    )
  order by job_end - job_start desc
  limit 10;
stop;

-- =======================================================================================================
--
-- description: Instead of going to history log over and over for the same data, establish a local copy
--
-- =======================================================================================================
declare global temporary table SESSION.batch_jobs (job_end, user_name, job_minutes, max_temp_stg_mb) as (
    with JOB_START (start_time, from_user, sbs, from_job, tokens) as (
        select message_timestamp as time, from_user, substr(message_tokens, 59, 10) as subsystem,
               from_job, message_tokens
          from table (
              qsys2.history_log_info(
                START_TIME => current date - 6 days, END_TIME => current timestamp)
            ) x
          where message_id = 'CPF1124' and substr(message_tokens, 59, 10) = 'QBATCH'
          order by ORDINAL_POSITION desc)
    select date(b.message_timestamp) as DATE, a.from_user,
           timestampdiff(4, cast(b.message_timestamp - a.start_time as char(22))),
           interpret(binary(right(message_tokens, 4), 4) as integer)
      from JOB_START A
           inner join table (
               qsys2.history_log_info(
                 START_TIME => current date - 6 days, END_TIME => current timestamp)
             ) b
             on b.from_job = a.from_job
      where b.message_id = 'CPF1164')
  with data with REPLACE;
stop;
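--
-- Query the local copy instead of re-reading the history log. A minimal sketch against the
-- temporary table created above (SESSION.batch_jobs lives only for this connection).
--
select job_end as date, user_name, count(*) as total_jobs,
       sum(job_minutes) as total_minutes, max(max_temp_stg_mb) as peak_temp_stg_mb
  from session.batch_jobs
  group by job_end, user_name
  order by 1, 3 desc;
stop;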
-- =======================================================================================================
--
-- description: Do we need to apply PTF Groups from IBM?
--
-- =======================================================================================================
with ilevel (iversion, irelease) as (
    select os_version, os_release
      from sysibmadm.env_sys_info
  )
select p.*
  from ilevel, systools.group_ptf_currency p
  where ptf_group_release = 'R' concat iversion concat irelease concat '0'
  order by ptf_group_level_available - ptf_group_level_installed desc;
stop;

-- =======================================================================================================
--
-- description: Search the IFS
--
-- =======================================================================================================
--
-- 3 ways to read the contents of an IFS stream file:
-- ======
--
-- 1) Character data (CCSID of the job) - CLOB(2G)
--
select *
  from table (
      qsys2.ifs_read(path_name => '/home/SCOTTF/ALLOBJusers09122019.xlsx',
                     END_OF_LINE => 'NONE')
    );
stop;

--
-- 2) UTF8 data (UNICODE) - CLOB(2G) CCSID 1208
--
select *
  from table (
      qsys2.ifs_read_utf8(path_name => '/home/SCOTTF/ALLOBJusers09122019.xlsx')
    );
stop;

--
-- 3) Binary data - BLOB(2G)
--
select *
  from table (
      qsys2.ifs_read_binary(
        path_name => '/home/SCOTTF/ALLOBJusers09122019.xlsx',
        maximum_line_length => 100)
    );
stop;

-- =======================================================================================================
--
-- Discover stream files and read their contents looking for a string
--
-- =======================================================================================================
select distinct (o.path_name), i.*
  from table (
      qsys2.ifs_object_statistics(
        start_path_name => '/home/SCOTTF',
        subtree_directories => 'YES',
        object_type_list => '*ALLSTMF')
    ) o,
    lateral (
      select *
        from table (
            qsys2.ifs_read(path_name => path_name, end_of_line => 'ANY')
          )
    ) i
  where o.path_name not like '%.xlsx%' and
        upper(line) like '%IFS%'
  order by 1, 2;
stop;

-- =======================================================================================================
--
-- Where does TIM appear in these scripts?
--
-- =======================================================================================================
select distinct (o.path_name), i.*
  from table (
      qsys2.ifs_object_statistics(
        start_path_name => '/home/SCOTTF',
        subtree_directories => 'YES',
        object_type_list => '*ALLSTMF')
    ) o,
    lateral (
      select *
        from table (
            qsys2.ifs_read(path_name => path_name, end_of_line => 'ANY')
          )
    ) i
  where o.path_name not like '%.xlsx%' and
        upper(line) like '%TIMMR%'
  order by 1, 2;
stop;
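--
-- Variation (a sketch, not part of the original deck): count how many lines mention TIM in
-- each stream file, instead of listing every matching line.
--
select o.path_name, count(*) as matching_lines
  from table (
      qsys2.ifs_object_statistics(
        start_path_name => '/home/SCOTTF',
        subtree_directories => 'YES',
        object_type_list => '*ALLSTMF')
    ) o,
    lateral (
      select *
        from table (
            qsys2.ifs_read(path_name => o.path_name, end_of_line => 'ANY')
          )
    ) i
  where o.path_name not like '%.xlsx%' and
        upper(i.line) like '%TIMMR%'
  group by o.path_name
  order by matching_lines desc;
stop;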
select *
  from table (
      qsys2.ifs_read(path_name => '/home/SCOTTF/ACS session with TIm.sql',
                     end_of_line => 'ANY')
    );

--
-- Subject: IFS stream file transformation
-- Author: Scott Forstie (thanks to Sue Romano for helping with this Gist)
-- Date : June 28, 2021
-- Features Used : This Gist uses ifs_read, ifs_write, regexp_replace, and the previously unknown fx designation for a unicode literal.
--
-- Function - Point this routine at an existing IFS stream file, which contains some character data that you want to globally replace.
-- The function extracts (reads) the contents, replaces the search string occurrences with the replacement string, and then
-- writes everything to the target IFS stream file.
--
-- cl: crtlib coolstuff;
create or replace procedure coolstuff.IFS_search_replace_and_create (
    Source_IFS_file DBCLOB(16M) ccsid 1200,
    Target_IFS_file DBCLOB(16M) ccsid 1200,
    search_string CLOB(100) ccsid 1208,
    replace_string CLOB(100) ccsid 1208
  )
begin
  declare a_line CLOB(2G) ccsid 1208;
  declare not_found condition for '02000';
  declare at_end integer default 0;
  declare local_sqlcode integer;
  declare local_sqlstate char(5);
  declare v_message_text varchar(70) for sbcs data;
  declare ifs_cursor_stmt_text clob(10k) ccsid 37;
  declare ifs_cursor cursor with hold for ifs_cursor_stmt;
  declare continue handler for sqlexception
    begin
      get diagnostics condition 1
        local_sqlcode = db2_returned_sqlcode,
        local_sqlstate = returned_sqlstate;
      set v_message_text = 'coolstuff.IFS_search_replace_and_create() failed with: ' concat
                           local_sqlcode concat ' AND ' concat local_sqlstate;
      signal sqlstate 'QZZ01' set message_text = v_message_text;
      set at_end = 1;
    end;
  declare continue handler for not_found
    set at_end = 1;

  set ifs_cursor_stmt_text =
    'select regexp_replace(line, fx''' concat hex(search_string) concat
    ''', fx''' concat hex(replace_string) concat
    ''') from table(QSYS2.IFS_READ_UTF8(?, END_OF_LINE => ''NONE'') )';

  -- In case debug is needed...
  -- call systools.lprintf('ifs_cursor_stmt_text: ' concat ifs_cursor_stmt_text);

  --
  -- Remove the target IFS stream file, should it exist
  --
  begin
    declare ignore_error integer default 0;
    declare continue handler for sqlexception
      set ignore_error = 1;
    call qsys2.qcmdexc('RMVLNK OBJLNK(''' concat Target_IFS_file concat ''')');
  end;

  prepare ifs_cursor_stmt from ifs_cursor_stmt_text;
  open ifs_cursor using Source_IFS_file;

  --
  -- Each fetch will bring back up to 2GB of data
  -- An IFS stream file could be as large as 1TB, so we need to loop until all the data is processed
  --
  fetch from ifs_cursor into a_line;
  while (at_end = 0) do
    CALL QSYS2.IFS_WRITE_UTF8(Target_IFS_file, a_line, END_OF_LINE => 'NONE');
    fetch from ifs_cursor into a_line;
  end while;
  close ifs_cursor;
end;
stop;

--
-- Example usage:
--
call coolstuff.IFS_search_replace_and_create(
  Source_IFS_file => '/home/SCOTTF/ACS session with TIm.sql',
  Target_IFS_file => '/home/SCOTTF/ACS session with the King.sql',
  search_string => 'TIMMR',
  replace_string => 'THEKING');
stop;

select *
  from table (
      QSYS2.IFS_READ('/home/SCOTTF/ACS session with the King.sql')
    );
stop;

-- Session: How to manage spooled files with SQL and CL
-- Author : Scott Forstie
--
--
-- Preview deleting spooled files older than 3 months
--
CALL SYSTOOLS.DELETE_OLD_SPOOLED_FILES(
  DELETE_OLDER_THAN => (CURRENT_TIMESTAMP - 3 MONTHS),
  P_OUTPUT_QUEUE_LIBRARY_NAME => '*ALL',
  P_OUTPUT_QUEUE_NAME => '*ALL',
  P_USER_NAME => '*ALL',
  PREVIEW => 'YES');
stop;

--
-- Preview deleting SCOTTF's spooled files older than 3 months
--
CALL SYSTOOLS.DELETE_OLD_SPOOLED_FILES(
  DELETE_OLDER_THAN => (CURRENT_TIMESTAMP - 3 MONTHS),
  P_OUTPUT_QUEUE_LIBRARY_NAME => '*ALL',
  P_OUTPUT_QUEUE_NAME => '*ALL',
  P_USER_NAME => 'SCOTTF',
  PREVIEW => 'YES');
stop;

--
-- Prove to myself that the preview looks good
--
select *
  from QSYS2.OUTPUT_QUEUE_ENTRIES_BASIC
  where USER_NAME = 'SCOTTF' and
        CREATE_TIMESTAMP < CURRENT_TIMESTAMP - 3 months;
stop;
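--
-- Variation (a sketch, not part of the original deck): which output queues hold SCOTTF's
-- old spooled files?
--
select output_queue_library_name, output_queue_name, count(*) as old_spooled_files
  from QSYS2.OUTPUT_QUEUE_ENTRIES_BASIC
  where USER_NAME = 'SCOTTF' and
        CREATE_TIMESTAMP < CURRENT_TIMESTAMP - 3 months
  group by output_queue_library_name, output_queue_name
  order by old_spooled_files desc;
stop;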
--
-- Create an SQL script in the IFS to delete SCOTTF's spooled files older than 3 months
--
call qsys2.ifs_write(
  path_name => '/home/SCOTTF/dltsplf_3_months.sql',
  file_ccsid => 37,
  overwrite => 'REPLACE',
  line => 'call systools.delete_old_spooled_files( delete_older_than => current timestamp - 3 months, p_user_name => ''SCOTTF'', preview => ''NO'');');
stop;

--
-- Review the script contents
--
select *
  from table (
      qsys2.ifs_read(path_name => '/home/SCOTTF/dltsplf_3_months.sql')
    );
stop;

--
-- Execute the script now, in a batch job
--
cl: SBMJOB CMD(RUNSQLSTM SRCSTMF('/home/SCOTTF/dltsplf_3_months.sql') COMMIT(*NONE) NAMING(*SQL)) JOB(SPOOLSCOTT) INLASPGRP(*CURRENT) JOBMSGQFL(*PRTWRAP) LOG(4 0 *SECLVL);
-- ==========
--
-- Watch the execution of the job
--
select count(*)
  from table (
      qsys2.active_job_info(subsystem_list_filter => 'QBATCH', JOB_NAME_FILTER => 'SPOOLSCOTT')
    );
stop;

--
-- Schedule the spool cleanup to occur once per week, in a batch job
--
cl: ADDJOBSCDE JOB(SPOOLSCOTT) CMD(RUNSQLSTM SRCSTMF('/home/SCOTTF/dltsplf_3_months.sql') COMMIT(*NONE) NAMING(*SQL)) FRQ(*WEEKLY) SCDDATE(*NONE) SCDDAY(*FRI) SCDTIME(235500);
stop;

--
-- Review the Scheduled job
--
select *
  from QSYS2.SCHEDULED_JOB_INFO
  where SCHEDULED_JOB_NAME = 'SPOOLSCOTT';
stop;

--
-- Read Tim's spooled files
--
WITH Tims_spooled_files ( job, FILE, file_number, user_data, create_timestamp ) AS (
    SELECT job_name, spooled_file_name, file_number, user_data, create_timestamp
      FROM qsys2.output_queue_entries_basic
      WHERE user_name = 'TIMMR'
      ORDER BY create_timestamp DESC
  )
SELECT job, FILE, file_number, spooled_data
  FROM tims_spooled_files,
       TABLE ( systools.spooled_file_data(
                 job_name => job,
                 spooled_file_name => FILE,
                 spooled_file_number => file_number) );
stop;

--
-- Read MY spooled files
--
WITH my_spooled_files ( job, FILE, file_number, user_data, create_timestamp ) AS (
    SELECT job_name, spooled_file_name, file_number, user_data, create_timestamp
      FROM qsys2.output_queue_entries_basic
      WHERE user_name = USER
      ORDER BY create_timestamp DESC
  )
SELECT job, FILE, file_number, spooled_data
  FROM my_spooled_files,
       TABLE ( systools.spooled_file_data(
                 job_name => job,
                 spooled_file_name => FILE,
                 spooled_file_number => file_number) );
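stop;

--
-- Variation (a sketch, not part of the original deck): search the content of MY spooled files
-- for a specific string.
--
WITH my_spooled_files ( job, FILE, file_number ) AS (
    SELECT job_name, spooled_file_name, file_number
      FROM qsys2.output_queue_entries_basic
      WHERE user_name = USER
  )
SELECT job, FILE, file_number, spooled_data
  FROM my_spooled_files,
       TABLE ( systools.spooled_file_data(
                 job_name => job,
                 spooled_file_name => FILE,
                 spooled_file_number => file_number) )
  WHERE upper(spooled_data) like '%SQL%';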