SQL Query: Writing update statement in a new way – Matthew Jeffries

/* Copy the mapped NNumber from the lookup tables onto every row of #El.
   OUTER APPLY keeps every #El row: when no lookup row matches the PID,
   [P].[NNumber] is NULL and the target NNumber is overwritten with NULL.
   NOTE(review): TOP 1 has no ORDER BY, so if a PID maps to more than one
   NNumber the row chosen is nondeterministic — confirm PID is unique in
   [NumberMAP] or add an ORDER BY. */
UPDATE [ED] SET [ED].[NNumber] = [P].[NNumber]

FROM #El AS [ED]

OUTER APPLY

(

SELECT TOP 1 [N].[NNumber]

FROM [In].[temp].[NumberMAP] AS [SK]

INNER JOIN [In].[temp].[NumberMap2] AS [N]

ON [SK].GUID_ID=[N].GUID_ID

WHERE [SK].[PID] = [ED].[PID]

) AS [P];

Advertisements

SQL Query: Schedule multiple jobs to run over night

I had about 40 SQL Agent jobs. I had to schedule them to run one time only, overnight. It was a bit hectic to open every job, change the ownership, modify the schedule time, then change the ownership back to the service account. In order to minimize the effort, I have written a script based on a CURSOR. I have a control table that defines the job name, schedule name, schedule start date, and schedule start time. I read this from the control table and then change the schedule accordingly.

 

USE msdb;
GO
/* Create the control table and insert some dummy data.
   One row per SQL Agent job to reschedule; each listed job must already
   exist on the server, otherwise the loop below will fail for that row. */
CREATE TABLE [ETL].[SqlJobControl]
(
    [SqlJobControlID] int IDENTITY(1,1) NOT NULL,
    [job_id] int NULL,
    [job_name] nvarchar(200) NULL,
    [schedule_name] nvarchar(50) NULL,
    [job_description] nvarchar(100) NULL,
    [active_start_date] int NULL,   -- yyyymmdd
    [active_start_time] int NULL,   -- hhmmss (24h)
    [owner_login_name_user] nvarchar(50) NULL,
    [owner_login_name_service_account] nvarchar(50) NULL,
    [enabled] bit NULL,
    CONSTRAINT [PK__SqlJobCo__D32BB6AA03DE9823] PRIMARY KEY CLUSTERED ([SqlJobControlID] ASC)
        WITH (PAD_INDEX = OFF, STATISTICS_NORECOMPUTE = OFF, IGNORE_DUP_KEY = OFF,
              ALLOW_ROW_LOCKS = ON, ALLOW_PAGE_LOCKS = ON) ON [PRIMARY]
) ON [PRIMARY];
GO

/* Dummy data. */
SET IDENTITY_INSERT [ETL].[SqlJobControl] ON;
INSERT [ETL].[SqlJobControl] ([SqlJobControlID], [job_id], [job_name], [schedule_name], [job_description], [active_start_date], [active_start_time], [owner_login_name_user], [owner_login_name_service_account], [enabled])
VALUES (1, NULL, N'Developer_Test_Job1', N'OneOff', N'Dummy job', 20170613, 175500, N'blue\ahmedma', N'ELC\srvc.DWHSQLAgent', 1);
INSERT [ETL].[SqlJobControl] ([SqlJobControlID], [job_id], [job_name], [schedule_name], [job_description], [active_start_date], [active_start_time], [owner_login_name_user], [owner_login_name_service_account], [enabled])
VALUES (2, NULL, N'Developer_Test_Job2', N'OneOff', N'Dummy job', 20170614, 180000, N'blue\ahmedma', N'ELC\srvc.DWHSQLAgent', 1);
SET IDENTITY_INSERT [ETL].[SqlJobControl] OFF;

DECLARE @job_id int,
        @job_name nvarchar(200),
        @schedule_name nvarchar(50),
        @job_description nvarchar(100),
        @active_start_date int,
        @active_start_time int,
        @owner_login_name_user nvarchar(50),
        @owner_login_name_service_account nvarchar(50),
        @enabled bit;

DECLARE @oneoff_sqljob_cursor AS CURSOR;
SET @oneoff_sqljob_cursor = CURSOR FAST_FORWARD FOR
    SELECT [job_id], [job_name], [schedule_name], [job_description],
           [active_start_date], [active_start_time],
           [owner_login_name_user], [owner_login_name_service_account], [enabled]
    FROM [UNIFY2].[ETL].[SqlJobControl]
    WHERE [enabled] = 1;

OPEN @oneoff_sqljob_cursor;
FETCH NEXT FROM @oneoff_sqljob_cursor
    INTO @job_id, @job_name, @schedule_name, @job_description,
         @active_start_date, @active_start_time,
         @owner_login_name_user, @owner_login_name_service_account, @enabled;
WHILE @@FETCH_STATUS = 0
BEGIN
    /* Temporarily hand the job to the developer login so its schedule can be changed. */
    EXEC [dbo].[sp_update_job_for_developers]
        @job_name = @job_name,
        @owner_login_name = @owner_login_name_user;
    /* Remove the old schedule (and delete it if no other job uses it). */
    EXEC msdb.dbo.sp_detach_schedule
        @job_name = @job_name,
        @schedule_name = @schedule_name,
        @delete_unused_schedule = 1;
    /* One-off schedule: @freq_type = 1 runs the job exactly once at the given date/time. */
    EXEC msdb.dbo.sp_add_jobschedule
        @job_name = @job_name,
        @name = @schedule_name,
        @enabled = 1,
        @freq_type = 1,
        @freq_interval = 1,
        @freq_subday_type = 0,
        @freq_subday_interval = 0,
        @freq_relative_interval = 0,
        @freq_recurrence_factor = 1,
        @active_start_date = @active_start_date,
        @active_start_time = @active_start_time;
    /* Hand ownership back to the service account. */
    EXEC [dbo].[sp_update_job_for_developers]
        @job_name = @job_name,
        @owner_login_name = @owner_login_name_service_account;
    FETCH NEXT FROM @oneoff_sqljob_cursor
        INTO @job_id, @job_name, @schedule_name, @job_description,
             @active_start_date, @active_start_time,
             @owner_login_name_user, @owner_login_name_service_account, @enabled;
END;
/* Release the cursor — the original leaked it. */
CLOSE @oneoff_sqljob_cursor;
DEALLOCATE @oneoff_sqljob_cursor;

SQL Query: Find all indexes, fragmentation, and rebuild in the database

-- Find all the indexes in the database, find the index fragmentation size in percentage, reorganize or rebuild indexes

-- Find the average fragmentation percentage of all indexes
-- in the HumanResources.Employee table.

-- avg_fragmentation_in_percent = (number of records * record size) / (total pages used * page size)

Generate Scripts for database objects with SMO for SQL Server

USE [DBName];
GO

-- List every user-table index with its row count, size, and fragmentation
-- percentage, from sys.dm_db_index_physical_stats.
-- Replace DBNAME below with the database to inspect.
SELECT
    f.object_id,
    s.name AS SchemaName,
    o.name AS TableName,
    i.index_id,
    i.name AS IndexName,
    p.rows AS TableRowCount,
    page_count,
    fragment_count,
    avg_fragment_size_in_pages,
    avg_fragmentation_in_percent
FROM sys.dm_db_index_physical_stats (DB_ID(N'DBNAME'), NULL, NULL, NULL, NULL) AS f
INNER JOIN sys.indexes AS i
    ON f.object_id = i.object_id
    AND f.index_id = i.index_id
INNER JOIN sys.objects AS o
    ON i.object_id = o.object_id
INNER JOIN sys.schemas AS s
    ON o.schema_id = s.schema_id
INNER JOIN sys.partitions AS p
    ON i.object_id = p.object_id
    AND i.index_id = p.index_id
WHERE o.type = 'U'      -- include only user table indexes
  AND i.index_id > 0    -- exclude heaps (index_id 0)
ORDER BY SchemaName, TableName, IndexName;
GO

 

-- Rebuild the AK_Employee_LoginID index on HumanResources.Employee.
-- (The original comment said "Reorganize", but the statement REBUILDs;
-- ONLINE = ON requires Enterprise Edition.)
ALTER INDEX AK_Employee_LoginID ON HumanResources.Employee
REBUILD WITH (ONLINE = ON);
GO

 

Defragmenting compacts as well as changes physical order

Index maintenance compacts a structure *and* changes physical order– both can be important!

Editors Note: this post was updated in June 2014 to link to an online index rebuild bug that can cause corruption.

Once upon a time, there was a database server with 500GB of data and a heavy read workload of dynamic queries. Data was updated frequently throughout the day and index tuning was a serious challenge. At the best of times, performance was dicey.

Then things went bad

Application performance plummeted. Lots of code changes had been released recently, data was growing rapidly, and the hardware wasn’t the absolute freshest. There was no single smoking gun– there were 20 smoking guns!

A team was formed of developers and IT staff to tackle the performance issue. Early in the process they reviewed maintenance on the database server. Someone asked about index fragmentation. The DBA manager said, “Of course we’re handling fragmentation!” But a few queries were run and some large, seriously fragmented indexes were discovered in production.

The DBA explained that fragmentation wasn’t the problem. She didn’t have automated index maintenance set up, but she periodically manually defragmented indexes that were more than 75% fragmented.

Bad, meet ugly

At this point the whole performance team flipped out. Trust disappeared. Managers squirmed. More managers were called in. The DBA tried to change the subject, but it was just too late. More than a week was wasted over Fragmentation-Gate. It was a huge, embarrassing distraction, and it solved nothing.

Here’s the deal– the DBA was right. Fragmentation wasn’t the root cause of the performance problem. The DBA was a super smart person and very talented at performance tuning, too! (And no, I’m not secretly talking about myself here– this is not the “royal she”.) But she made a strategic miscalculation: she should have set up occasional automated index maintenance to align with her team’s normal practices and standards.

Why you need automated index maintenance

When performance gets bad, one of the very first things people look at is whether systems involved are configured according to best practices. If you’re not following a best practice, you need to have a really good reason for it.

Regular index maintenance still has a lot of merit: even in Shangri-La, where your data all fits into memory and your storage system is a rockstar with random IO, index maintenance can help make sure that you don’t have a lot of empty space wasting loads of memory.

It’s still a good idea to automate index maintenance. Absolutely don’t go too crazy with it– monitor the runtime and IO use and run it only at low volume times to make sure it helps more than it hurts. Be careful, but don’t skip it.

How much downtime can you spare?

Before you implement index maintenance, find out how much time tables can be offline in each of your databases. Then, figure out what operations you want to use.

To Rebuild or Reorganize: That is the Question

First off: ‘Reorganize’ and ‘Rebuild’ are two different operations that each reduce fragmentation in an index. They work differently toward the same end. You don’t need to run both against the same index. (I sometimes find that people are doing both against every index in a maintenance plan. That’s just double the work and NOT double the fun.)

Rebuild: An index ‘rebuild’ creates a fresh, sparkling new structure for the index. If the index is disabled, rebuilding brings it back to life. You can apply a new fillfactor when you rebuild an index. If you cancel a rebuild operation midway, it must roll back (and if it’s being done offline, that can take a while).

Reorganize: This option is more lightweight. It runs through the leaf level of the index, and as it goes it fixes physical ordering of pages and also compacts pages to apply any previously set fillfactor settings. This operation is always online, and if you cancel it then it’s able to just stop where it is (it doesn’t have a giant operation to rollback).

Factors to consider:

  • Standard Edition rebuilds ain’t awesome. If you’ve got SQL Server Standard Edition, index rebuilds are always an offline operation. Bad news: they’re also single-threaded. (Ouch!)
  • Enterprise Edition rebuilds have gotchas. With SQL Server Enterprise Edition, you can specify an online rebuild — unless the index contains large object types. (This restriction is relaxed somewhat in SQL Server 2012). You can also use parallelism when creating or rebuilding an index— and that can save a whole lot of time. Even with an online rebuild, a schema modification lock (SCH-M) is needed at the time the fresh new index is put in place. This is an exclusive lock and in highly concurrent environments, getting it can be a big (blocking) problem.
  • There’s a bug in SQL Server 2012 Enterprise Edition Rebuilds that can cause corruption. If you’re running SQL Server 2012 SP1 – SP2, parallel online index rebuilds can cause corruption. Read about your options here.
  • Rebuilding partitioned tables is especially tricky. You can rebuild an entire partitioned index online– but nobody really wants to do that because they’re huge! The whole idea behind horizontal partitioning is to break data into more manageable chunks, right? Unfortunately, partition level rebuilds are offline until SQL Server 2014.
  • Reorganizing can be pretty cool. ‘Reorganizing’ an index is always an online op, no matter what edition of SQL Server you’re using. It doesn’t require a schema mod lock, so it can provide better concurrency. Reorganizing only defragments the leaf level of the index. On large tables it can take longer than a rebuild would take, too. But as I said above, it’s nice that you can reorganize for a while and then stop without facing a massive rollback.

SQL SERVER 2014: WAIT_AT_LOW_PRIORITY, MAX_DURATION, AND ABORT_AFTER_WAIT

I’m really excited about new index rebuild options that are shipping in SQL Server 2014. Check *this* out:

  1. ALTER INDEX OhSoFragmented ON dbo.MyTable REBUILD
  2. WITH (
  3. ONLINE = ON
  4. (WAIT_AT_LOW_PRIORITY
  5. (MAX_DURATION= 5, ABORT_AFTER_WAIT=BLOCKERS)
  6. )
  7. );

So we’ve got new tools for those concurrency problems I was talking about with online rebuilds. Now, we can say how long we’re willing to wait to get that schema modification lock (in minutes), and if we have to keep waiting what should happen. (Options: kill those who are blocking us, kill ourself, or do nothing.) Obviously there are some situations where just killing the blockers could be a terrible problem, but I’m interested to experiment with this.

You Didn’t Answer the Question: Do I Use Rebuild or Reorganize?

Yeah, I totally dodged that question, didn’t I?

If you have a regularly scheduled downtime every weekend, you’re probably fine with straight up index rebuilds, even if you have Standard Edition. Single threaded offline index maintenance may not be the hottest thing in the world, but hey, if you’ve got time for it then embrace the simplicity.

If you have Enterprise Edition, embrace parallel index rebuilds– and use the ONLINE option for indexes that allow it if people need to access the database during your maintenance window.

If you have database mirroring or AlwaysOn Availability Groups, tread with caution– particularly with rebuilds. It’s easy to generate a ton of IO with index maintenance, and it could mean putting your secondaries or mirror so far behind that they can’t catch up.

Maintenance plans or custom scripts?

You can go the easy way and use SQL Server Maintenance Plans, but unfortunately they’re very simplistic: you can only say “rebuild all the indexes” or “reorganize all the indexes”. You cannot say, “If the index is 45% or more fragmented, rebuild it– otherwise do nothing.” If you don’t spend much time with SQL Server and you’ve got downtime available every weekend, this can be a decent option.

If you need to minimize downtime, custom index maintenance scripts are the way to go. Our favorite: Ola Hallengren’s maintenance scripts. These are super flexible, well documented, and … free! The scripts have all sorts of cool options like time boxing and statistics maintenance.

Some tips for using Ola Hallengren’s index maintenance scripts:

  1. Download and configure them on a test instance first. There’s a lot of options on parameters, and you’ll need to play with them.
  2. Get used to the 'cmdexec' job step types. When you install the scripts you'll see that the SQL Server Agent jobs run index maintenance using a call to sqlcmd.exe in an MSDOS style step. That's by design!
  3. Use the examples on the website. If you scroll to the bottom of the index maintenance page you’ll find all sorts of examples showing how to get the procedure to do different useful things.

Find out when maintenance fails

Don’t forget to make sure that your maintenance jobs are successfully logging their progress. Set up Database Mail and operators so jobs let you know if they fail.

Tell your boss you did a good thing

Finally, write up a quick summary of what you did, why you chose custom scripts or maintenance plans, and why. Share it with your manager and explain that you’ve set up automated index maintenance as a proactive step.

Having your manager know you’re taking the time to follow best practices certainly won’t hurt– and one of these days, it just might help you out. (Even if it’s just by keeping everyone from following a red herring.)

SQL Query: Find working days between two dates

---- Scalar function: returns the date @workingDays working days (Mon-Fri)
---- after @tempDate. Weekend days are skipped, not counted.
---- NOTE(review): DATENAME(WEEKDAY, ...) is session-language dependent;
---- 'Saturday'/'Sunday' only match on an English session.
ALTER FUNCTION [dbo].[sfWorkingDate](@tempDate date, @workingDays int)
RETURNS DATE
AS
BEGIN
    DECLARE @i int = 0;
    WHILE (@i < @workingDays)
    BEGIN
        SET @tempDate = DATEADD(DAY, 1, @tempDate);
        -- Only count the day when it is not a weekend.
        SET @i = CASE
                     WHEN DATENAME(WEEKDAY, @tempDate) IN ('Saturday', 'Sunday') THEN @i
                     ELSE @i + 1
                 END;
    END;
    RETURN (@tempDate);
END

---- Table function: for each month between @startDate and @endDate, take day
---- @processDays of the month (rolled forward to Monday if it falls on a
---- weekend) as StartDate, and compute EndDate @numWorkingDays working days
---- later via dbo.sfWorkingDate.
ALTER FUNCTION tfnWorkingDays(@startDate DATE, @endDate DATE, @processDays VARCHAR(2), @numWorkingDays int)
RETURNS @retTempProcessDate TABLE
(
    StartDate date
    ,StartDay varchar(20)
    ,EndDate date
    ,EndDay varchar(20)
)
AS
BEGIN
    DECLARE @processDate DATE;
    -- First process date: year/month of @startDate combined with day @processDays.
    SET @processDate = CONVERT(DATE, CONVERT(VARCHAR(20), (CONVERT(VARCHAR(4), DATEPART(YEAR, @startDate)) + '-' + CONVERT(VARCHAR(2), DATEPART(MONTH, @startDate)) + '-' + @processDays)));

    DECLARE @tempProcessDate TABLE
    (
        StartDate date
        ,EndDate date
    );
    -- One candidate start date per month, weekend dates rolled to the next Monday.
    WHILE (@processDate <= @endDate)
    BEGIN
        INSERT INTO @tempProcessDate (StartDate)
        VALUES
        (
            CASE
                WHEN DATENAME(WEEKDAY, @processDate) = 'Saturday' THEN (DATEADD(DAY, 2, @processDate))
                WHEN DATENAME(WEEKDAY, @processDate) = 'Sunday' THEN (DATEADD(DAY, 1, @processDate))
                ELSE @processDate
            END
        );
        SET @processDate = DATEADD(MONTH, 1, @processDate);
    END;
    -- NOTE(review): ORDER BY on an INSERT into a table variable does not
    -- guarantee retrieval order; callers should ORDER BY themselves.
    INSERT INTO @retTempProcessDate (StartDate, StartDay, EndDate, EndDay)
    SELECT
        StartDate,
        DATENAME(WEEKDAY, StartDate) AS StartDay,
        dbo.sfWorkingDate(StartDate, @numWorkingDays) AS EndDate,
        DATENAME(WEEKDAY, dbo.sfWorkingDate(StartDate, @numWorkingDays)) AS EndDay
    FROM @tempProcessDate
    ORDER BY DATEPART(WEEKDAY, StartDate);
    RETURN;
END

---- Example: day 12 of each month from Dec 2016 to Dec 2018, 3 working days.
SELECT *
FROM dbo.tfnWorkingDays('01-dec-2016', '01-dec-2018', '12', 3)
ORDER BY DATEPART(WEEKDAY, StartDate);

String functions: Remove non-alpha characters, remove multiple spaces

Search hint:

remove non-alpha characters from a string

replace multiple spaces with single space

remove multiple spaces

convert to title case

convert to name case

=============================

DECLARE @inputString nvarchar(1000);
/* Remove non-alpha characters from a string. */
SET @inputString = ' (My name is): MR. [Masud AHMED]. \Some unknown /characters- ÄÄ ÖÖ ÜÜ ÉÉ ØØ ?? ÆÆ ';
PRINT 'input string: ' + @inputString;

DECLARE @NonAlphaList AS VARCHAR(50);
/* Match any character that is not a letter, a space, or a dot —
   spaces between words and full stops are preserved. */
SET @NonAlphaList = '%[^a-z .]%';
/* Delete each non-alpha character, one at a time, until none remain. */
WHILE PATINDEX(@NonAlphaList, @inputString) > 0
    SET @inputString = STUFF(@inputString, PATINDEX(@NonAlphaList, @inputString), 1, '');
PRINT 'removed non-alpha: ' + @inputString;

/* Collapse runs of spaces into a single space using the classic
   space -> '<>' -> '><' -> '<>' replace trick, then trim the ends. */
SET @inputString = REPLACE(@inputString, ' ', '<>');
SET @inputString = REPLACE(@inputString, '><', '');
SET @inputString = REPLACE(@inputString, '<>', ' ');
SET @inputString = LTRIM(RTRIM(@inputString));
PRINT 'removed multiple spaces: ' + @inputString;

/* Title Case: upper-case the first character and every character after a space. */
DECLARE @index int = 1;  -- int, not tinyint: the input may exceed 255 characters
DECLARE @currentChar CHAR(1);
SET @inputString = LOWER(@inputString);
WHILE @index < LEN(@inputString)
BEGIN
    SET @currentChar = SUBSTRING(@inputString, @index, 1);
    IF (@index = 1)
        SET @inputString = STUFF(@inputString, @index, 1, UPPER(@currentChar));
    IF (@currentChar = ' ')
        SET @inputString = STUFF(@inputString, (@index + 1), 1, UPPER(SUBSTRING(@inputString, @index + 1, 1)));
    SET @index += 1;
END

PRINT 'convert to Title Case: ' + @inputString;

C# SQL: Query to find a specific column in each tables and all databases.

Scenario:

The CustomerID column contains some customer account information which we do not want. So we have to find the CustomerID column, which is used in almost all databases and tables, and replace the CustomerID value with CustomerIDPseudonymisedNumber.

Plan:

  1. SQL update query: Write a query which will look for CustomerID in a table; if the CustomerID column exists, it will return this information: DatabaseName, SchemaName, TableName, ColumnName. It will also return a dynamically built SQL update query. We want to store the result of the query in a table.
  2. ETL tool: Now we want to create an ETL tool which will get the update query from the table and execute it. We also want to log the result of the executed query: whether it completed successfully or failed. If it failed, also log the error message.

 

Steps 1 – SQL Update query:

USE SUS_SEM;
GO

DECLARE
    @RETURN_VALUE INT
    ,@command1 nvarchar(MAX);

IF OBJECT_ID('tempdb..#temp', 'U') IS NOT NULL DROP TABLE tempdb..#temp;
IF OBJECT_ID('tempdb..#temp2', 'U') IS NOT NULL DROP TABLE tempdb..#temp2;

/* Collector: one row per (database, table, column) matching the search list below. */
CREATE TABLE #temp
(
    ServerName nvarchar(100)
    ,Table_Catalog nvarchar(256)
    ,Table_Schema nvarchar(256)
    ,Table_Name SYSNAME
    ,Column_Name SYSNAME
    ,Row_Count int
    ,Table_Size_MB decimal(10,2)
    ,Table_Size_GB decimal(10,2)
);

/* Command executed once per database by sp_MSforeachdb.
   [?] / ''?'' is the placeholder sp_MSforeachdb replaces with each database name. */
SET @command1 = '
USE [?];
DECLARE @dbName nvarchar(100);
SET @dbName=''?'';
INSERT INTO #temp
SELECT
@@SERVERNAME AS ServerName
,@dbName AS DatabaseName
,s.Name AS SchemaName
,t.NAME AS TableName
,c.name AS ColumnName
,p.rows AS RowCounts
,CONVERT(decimal(10, 2), (SUM(a.used_pages) * 8))/(1024) AS SizeInMB
,CONVERT(decimal(10, 2), (SUM(a.used_pages) * 8))/(1024*1024) AS SizeInGB
FROM
sys.tables t
INNER JOIN sys.columns AS c
ON t.object_id=c.object_id
INNER JOIN
sys.indexes i ON t.OBJECT_ID = i.object_id
INNER JOIN
sys.partitions p ON i.object_id = p.OBJECT_ID AND i.index_id = p.index_id
INNER JOIN
sys.allocation_units a ON p.partition_id = a.container_id
LEFT OUTER JOIN
sys.schemas s ON t.schema_id = s.schema_id
WHERE
t.NAME NOT LIKE ''dt%''
AND t.is_ms_shipped = 0
AND i.OBJECT_ID > 255
AND @dbName NOT IN (''RefDB'', ''master'', ''msdb'', ''temp'', ''model'', ''distribution'')
AND @dbName IN ('''')      -- fill in: databases to scan
AND t.NAME NOT IN ('''')   -- fill in: tables to exclude
AND t.NAME IN ('''')       -- fill in: tables to scan
AND c.name IN (''Number'', ''No'', ''Num'', '' Number'', ''_Number'', ''_No'', ''_Num'', '' No#'', ''xNumber'')
AND p.rows>0
GROUP BY
t.Name, s.Name, p.Rows, c.name
ORDER BY
t.Name';

/* Run @command1 against every database on the instance. */
EXEC @RETURN_VALUE = sp_MSforeachdb @command1;

/* Collected rows plus the generated SQL statements. */
CREATE TABLE #temp2
(
    TableID INT IDENTITY
    ,ServerName nvarchar(100)
    ,Table_Catalog nvarchar(256)
    ,Table_Schema nvarchar(256)
    ,Table_Name SYSNAME
    ,Column_Name SYSNAME
    ,Row_Count int
    ,Table_Size_MB decimal(10,2)
    ,Table_Size_GB decimal(10,2)
    ,TableFullName nvarchar(200)
    ,SQLStatement nvarchar(1000)
    ,InsertColumn_Statement nvarchar(4000)
    ,UpdateColumn_Statement nvarchar(4000)
    ,IsNulled_Statement nvarchar(4000)
    ,IsNulled_Result nvarchar(100)
);

INSERT INTO #temp2 (ServerName, Table_Catalog, Table_Schema, Table_Name, Column_Name, TableFullName, [Row_Count], Table_Size_MB, Table_Size_GB, SQLStatement, InsertColumn_Statement, UpdateColumn_Statement, IsNulled_Statement, IsNulled_Result)
SELECT
    ServerName
    ,Table_Catalog
    ,Table_Schema
    ,Table_Name
    ,Column_Name
    ,'[' + Table_Catalog + '].' + '[' + Table_Schema + '].' + '[' + Table_Name + ']'
    ,[Row_Count]
    ,[Table_Size_MB]
    ,[Table_Size_GB]
    /* Preview statement: sample up to 10 non-null values of the column. */
    ,'SELECT TOP 10 [' + Column_Name + '] From [' + Table_Catalog + '].[' + Table_Schema + '].[' + Table_Name + '] WHERE [' + Column_Name + '] IS NOT NULL;'
    /* Add an SK_CustomerID column unless it already exists. */
    ,'BEGIN TRY ALTER TABLE [' + Table_Catalog + '].' + '[' + Table_Schema + '].' + '[' + Table_Name + ']' + ' ADD SK_CustomerID int; RAISERROR(''SK_CustomerID Column ADDED'', 0, 1); END TRY BEGIN CATCH RAISERROR(''SK_CustomerID Column already exist'', 0, 1); END CATCH;' AS InsertColumn_Statement
    /* Replace the identifier with the pseudonymised number from the reference table. */
    ,'UPDATE target
SET target.[' + Column_Name + ']=[Customer].Number
FROM [' + Table_Catalog + '].[' + Table_Schema + '].[' + Table_Name + '] AS target
INNER JOIN [RefDB].[dbo].[Customer] AS Customer
ON target.[' + Column_Name + ']=convert(varchar(100), Customer.SK_CustomerID);' AS UpdateColumn_Statement
    /* Verification statement: reports whether any non-null values remain. */
    ,'SELECT CASE WHEN COUNT(*)=0 THEN ''NULLED (Number)'' ELSE ''EXIST (Number)'' END AS IsNulled_Statement
FROM (SELECT TOP 10 [' + Column_Name + '] From [' + Table_Catalog + '].[' + Table_Schema + '].[' + Table_Name + '] WHERE [' + Column_Name + '] IS NOT NULL) AS C'
    ,'' AS IsNulled_Result
FROM #temp AS t;

IF OBJECT_ID('[temp].[Statement]', 'U') IS NOT NULL DROP TABLE [temp].[Statement];

/* Persist the distinct generated statements for the ETL tool to consume. */
SELECT DISTINCT
    ServerName, Table_Catalog, Table_Schema, Table_Name, Column_Name
    ,TableFullName
    ,[Row_Count]
    ,[Table_Size_MB]
    ,[Table_Size_GB]
    ,UpdateColumn_Statement
INTO [temp].[Statement]
FROM #temp2
ORDER BY Table_Catalog, Table_Schema, Table_Name;

/* Empty copy of the statement table, keeping only its structure (WHERE 1=2). */
IF OBJECT_ID('[temp].previewStatement', 'U') IS NOT NULL DROP TABLE [temp].previewStatement;
SELECT *
INTO [temp].previewStatement
FROM [temp].[Statement]
WHERE 1 = 2;

SELECT * FROM [temp].[Statement];

 

Steps 2 – ETL Tool:

ETL tool will have 3 elements:

  1. Execute SQL: Execute the sql query written in steps 1
  2. Data flow task – select * from result table and flow to another table. Enable data viewer. The purpose of this step is just to see what table and databases will be affected.
  3. Script task:
    1. Read the update query from the table
    2. Execute the update query
    3. Log the execution time, status, table name, table row count, table size

public void Main()
{
    /* SSIS script task: reads generated UPDATE statements from [temp].[Statement],
       executes each one, and appends start/completed/failed entries to a log file. */
    String fileName = "H:\\Log\\Log_NHSNumber.txt";
    String rowCount = "", tableSizeInMB = "", tableFullName = "", status = "";
    DateTime today;
    ConnectionManager cm;
    SqlConnection sqlConn;

    /* Connect to the data source. */
    cm = Dts.Connections["ADONetSUS_SEM"];
    sqlConn = (SqlConnection)cm.AcquireConnection(Dts.Transaction);
    /* Ensure the connection is open. (The original only reopened a connection
       that was already open and never opened a closed one, so ExecuteNonQuery
       below would fail on a closed connection.) */
    if (sqlConn.State == ConnectionState.Open)
    {
        sqlConn.Close();
    }
    sqlConn.Open();

    /* Get the statements from the [temp].[Statement] table. */
    var da = new SqlDataAdapter();
    var ds = new DataSet();
    var dtStatement = new DataTable();
    /* Choose the source database based on the server we are connected to. */
    String serverName = sqlConn.DataSource.ToString();
    if (serverName == "PSFADWHPIT01\\nelcsu")
    {
        da.SelectCommand = new SqlCommand("SELECT [Row_Count], [Table_Size_MB], [TableFullName], [UpdateColumn_Statement] FROM [Unify2].[temp].[Statement]", sqlConn);
    }
    else
    {
        da.SelectCommand = new SqlCommand("SELECT [Row_Count], [Table_Size_MB], [TableFullName], [UpdateColumn_Statement] FROM [Unify2.1].[temp].[Statement]", sqlConn);
    }
    da.Fill(ds, "dsTableStatement");
    dtStatement = ds.Tables["dsTableStatement"];

    String statement = "";
    foreach (DataRow dr in dtStatement.Rows)
    {
        rowCount = dr[0].ToString();
        tableSizeInMB = dr[1].ToString();
        tableFullName = dr[2].ToString();
        statement = dr[3].ToString();
        var sqlComm = new SqlCommand(statement, sqlConn);
        sqlComm.CommandTimeout = 60000;  // seconds; the generated UPDATEs can be long-running
        String line = "";
        try
        {
            today = DateTime.Now;
            status = "Started";
            line = string.Format("Status: {0}, Time: {1}, Table: {2}, RowCount: {3}, TableSizeInMB: {4}", status, today, tableFullName, rowCount, tableSizeInMB);
            /* Append the start entry. File.AppendText creates the file if it is
               missing, so no existence check is needed (the original silently
               skipped logging when the file did not exist). */
            using (StreamWriter writer = File.AppendText(fileName))
            {
                writer.WriteLine(line);
            }
            /* Execute the generated UPDATE statement. */
            sqlComm.ExecuteNonQuery();
        }
        catch (Exception e)
        {
            /* Record the failure; the finally block writes the line. */
            today = DateTime.Now;
            status = "Failed";
            line = string.Format("Status: {0}, Time: {1}, Table: {2}, RowCount: {3}, TableSizeInMB: {4}, Message:{5}", status, today, tableFullName, rowCount, tableSizeInMB, e.Message);
            /* "throw;" (not "throw e;") preserves the original stack trace. */
            throw;
        }
        finally
        {
            sqlComm.Dispose();
            today = DateTime.Now;
            if (status != "Failed")
            {
                status = "Completed";
                line = string.Format("Status: {0}, Time: {1}, Table: {2}, RowCount: {3}, TableSizeInMB: {4}", status, today, tableFullName, rowCount, tableSizeInMB);
            }
            /* Append the completed/failed entry to the log file. */
            using (StreamWriter writer = File.AppendText(fileName))
            {
                writer.WriteLine(line);
            }
        }
    }

    da.Dispose();
    ds.Dispose();
}