sql - How can I increase the performance of a Stored Procedure?


I have a stored procedure that takes around 2 minutes to execute. It uses a few temp tables and a while loop, and I am not able to figure out the best way to increase the speed of the stored procedure. The stored procedure follows:

    alter procedure _sp_get_patentassignment_mail_test
    (
        @compname nvarchar(max) = 'canadian spirit,connexus corporation'
    )
    as
    begin
        set nocount on

        create table #temp
        (
            id int identity(1,1),
            assigneename nvarchar(100)
        )

        create table #tmpmainresult
        (
            title nvarchar(1000),
            pat_pubnum varchar(30),
            assigneename nvarchar(100)
        )

        if (@compname is not null and @compname <> '')
        begin
            insert into #temp
            select * from dbo.uf_split(@compname, ',')
        end

        declare @maxrownum int
        set @maxrownum = (select max(id) from #temp)

        declare @iter int
        set @iter = (select min(id) from #temp)

        while @iter <= @maxrownum
        begin
            declare @assigneename nvarchar(100)
            set @assigneename = (select assigneename from #temp where id = @iter)
            print @assigneename

            insert into #tmpmainresult
            select p.title,
                   case when p.patentnum is null or p.patentnum = '' then p.publicationnum else p.patentnum end as 'pat_pubname',
                   pa.assigneename
            from patent p
            inner join patentproperty pp on p.patentid = pp.patentid
            inner join patentassignee pa on pp.patentassignmentid = pa.patentassignmentid
            where pa.assigneename like '%' + @assigneename + '%'

            set @iter = @iter + 1
        end

        select * from #tmpmainresult

        drop table #temp
        drop table #tmpmainresult
    end

Please give suggestions to reduce the execution time.

The function used above follows:

    alter function [dbo].[uf_split] (@string varchar(max), @delimiter char(1))
    returns @temptable table (items varchar(max))
    as
    begin
        declare @idx int
        declare @slice varchar(8000)

        select @idx = 1

        if len(@string) < 1 or @string is null return

        while @idx != 0
        begin
            set @idx = charindex(@delimiter, @string)

            if @idx != 0
                set @slice = left(@string, @idx - 1)
            else
                set @slice = @string

            if (len(@slice) > 0)
                insert into @temptable(items) values (@slice)

            set @string = right(@string, len(@string) - @idx)

            if len(@string) = 0 break
        end

        return
    end;

Since we don't have access to the execution plan, your mileage with these recommendations may vary.

If you are using a split function, it's easy to get it wrong by using a WHILE loop, so post the code of your split function. Alternatively, use DelimitedSplit8K by Jeff Moden.
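
For reference, here is a minimal sketch of how such a set-based splitter could load the temp table in one statement. The insert target and the trimming are assumptions based on your procedure, and note that DelimitedSplit8K takes varchar(8000), so a very long @compname would be truncated:

    -- sketch only: load #temp without a WHILE loop, assuming the #temp table
    -- and @compname parameter from the original procedure
    insert into #temp (assigneename)
    select ltrim(rtrim(split.Item))   -- Item / ItemNumber are the columns returned by DelimitedSplit8K
    from dbo.DelimitedSplit8K(@compname, ',') split;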

Once you have the split string in a temp table, you don't need the WHILE loop at all.

What you are doing is effectively a cross join with the filter pa.assigneename like '%' + @assigneename + '%'.

Change the insert to this:

    insert into #tmpmainresult
    select p.title,
           case when p.patentnum is null or p.patentnum = '' then p.publicationnum else p.patentnum end as 'pat_pubname',
           pa.assigneename
    from patent p
    inner join patentproperty pp on p.patentid = pp.patentid
    inner join patentassignee pa on pp.patentassignmentid = pa.patentassignmentid
    cross join #temp t
    where pa.assigneename like '%' + t.assigneename + '%'

Since you are filtering using '%' + t.assigneename + '%', an index on assigneename (or one that includes assigneename) might not help.
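
If an exact or prefix match on the assignee name would be acceptable (that is an assumption; your requirement may genuinely need a contains-style search), dropping the leading wildcard makes the predicate sargable, so an index on assigneename can actually be seeked:

    -- sketch, valid only under the assumption that a prefix match is acceptable:
    -- without the leading '%', the optimizer can seek an index on
    -- patentassignee(assigneename) instead of scanning the whole table
    select pa.assigneename
    from patentassignee pa
    cross join #temp t
    where pa.assigneename like t.assigneename + '%'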

Also check whether you have appropriate indexes on patentid and patentassignmentid on both tables.
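
For example, something along these lines; the index names are made up, and whether these indexes are actually needed (or already covered by existing keys) should be verified against the execution plan first:

    -- hypothetical supporting indexes for the joins; verify against the actual
    -- execution plan before creating them
    create nonclustered index ix_patentproperty_patentid
        on patentproperty (patentid)
        include (patentassignmentid);

    create nonclustered index ix_patentassignee_patentassignmentid
        on patentassignee (patentassignmentid)
        include (assigneename);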

EDIT:

The split function by Jeff Moden, [dbo].[DelimitedSplit8K]:

    CREATE FUNCTION [dbo].[DelimitedSplit8K]
    /**********************************************************************************************************************
     Purpose:
     Split a given string at a given delimiter and return a list of the split elements (items).

     Notes:
     1.  Leading and trailing delimiters are treated as if an empty string element were present.
     2.  Consecutive delimiters are treated as if an empty string element were present between them.
     3.  Except when spaces are used as a delimiter, all spaces present in each element are preserved.

     Returns:
     iTVF containing the following:
     ItemNumber = Element position of Item as a BIGINT (not converted to INT to eliminate a CAST)
     Item       = Element value as a VARCHAR(8000)

     Statistics on this function may be found at the following URL:
     http://www.sqlservercentral.com/forums/topic1101315-203-4.aspx

     CROSS APPLY Usage Examples and Tests:
    --=====================================================================================================================
    -- TEST 1:
    -- This tests for various possible conditions in a string using a comma as the delimiter.  The expected results are
    -- laid out in the comments.
    --=====================================================================================================================
    --===== Conditionally drop the test tables to make reruns easier for testing.
         -- (this is NOT a part of the solution)
         IF OBJECT_ID('tempdb..#JBMTest') IS NOT NULL DROP TABLE #JBMTest
    ;
    --===== Create and populate a test table on the fly (this is NOT a part of the solution).
         -- In the following comments, "b" is a blank and "e" is an element in the left to right order.
         -- Double quotes are used to encapsulate the output of "Item" so that you can see that all blanks
         -- are preserved no matter where they may appear.
     SELECT *
       INTO #JBMTest
       FROM (                                               --# & type of return row(s)
             SELECT  0, NULL                      UNION ALL --1 NULL
             SELECT  1, SPACE(0)                  UNION ALL --1 b (empty string)
             SELECT  2, SPACE(1)                  UNION ALL --1 b (1 space)
             SELECT  3, SPACE(5)                  UNION ALL --1 b (5 spaces)
             SELECT  4, ','                       UNION ALL --2 b b (both are empty strings)
             SELECT  5, '55555'                   UNION ALL --1 e
             SELECT  6, ',55555'                  UNION ALL --2 b e
             SELECT  7, ',55555,'                 UNION ALL --3 b e b
             SELECT  8, '55555,'                  UNION ALL --2 e b
             SELECT  9, '55555,1'                 UNION ALL --2 e e
             SELECT 10, '1,55555'                 UNION ALL --2 e e
             SELECT 11, '55555,4444,333,22,1'     UNION ALL --5 e e e e e
             SELECT 12, '55555,4444,,333,22,1'    UNION ALL --6 e e b e e e
             SELECT 13, ',55555,4444,,333,22,1,'  UNION ALL --8 b e e b e e e b
             SELECT 14, ',55555,4444,,,333,22,1,' UNION ALL --9 b e e b b e e e b
             SELECT 15, ' 4444,55555 '            UNION ALL --2 e (w/leading space) e (w/trailing space)
             SELECT 16, 'This,is,a,test.'                   --4 e e e e
            ) d (SomeID, SomeValue)
    ;
    --===== Split the CSV column for the whole table using CROSS APPLY (this is the solution)
     SELECT test.SomeID, test.SomeValue, split.ItemNumber, Item = QUOTENAME(split.Item,'"')
       FROM #JBMTest test
      CROSS APPLY dbo.DelimitedSplit8K(test.SomeValue,',') split
    ;
    --=====================================================================================================================
    -- TEST 2:
    -- This tests for various "alpha" splits and collation using all ASCII characters from 0 to 255 as the delimiter
    -- against a given string.  Note that not all of the delimiters will be visible; some will show up as tiny squares
    -- because they are "control" characters.  More specifically, this test will show what happens to various
    -- non-accented letters for your given collation depending on the delimiter you chose.
    --=====================================================================================================================
    WITH
    cteBuildAllCharacters (String, Delimiter) AS
    (
     SELECT TOP 256
            'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789',
            CHAR(ROW_NUMBER() OVER (ORDER BY (SELECT NULL))-1)
       FROM master.sys.all_columns
    )
     SELECT ASCII_Value = ASCII(c.Delimiter), c.Delimiter, split.ItemNumber, Item = QUOTENAME(split.Item,'"')
       FROM cteBuildAllCharacters c
      CROSS APPLY dbo.DelimitedSplit8K(c.String, c.Delimiter) split
      ORDER BY ASCII_Value, split.ItemNumber
    ;
    -----------------------------------------------------------------------------------------------------------------------
     Other Notes:
     1. Optimized for VARCHAR(8000) or less.  No testing or error reporting for truncation at 8000 characters is done.
     2. Optimized for a single character delimiter.  Multi-character delimiters should be resolved externally from this
        function.
     3. Optimized for use with CROSS APPLY.
     4. Does not "trim" elements just in case leading or trailing blanks are intended.
     5. If you don't know how a Tally table can be used to replace loops, please see the following...
        http://www.sqlservercentral.com/articles/t-sql/62867/
     6. Changing this function to use NVARCHAR(MAX) will cause it to run twice as slow.  It's just the nature of
        VARCHAR(MAX) whether it fits in-row or not.
     7. Multi-machine testing for the method of using UNPIVOT instead of 10 SELECT/UNION ALLs shows that the UNPIVOT
        method is quite machine dependent and can slow things down quite a bit.
    -----------------------------------------------------------------------------------------------------------------------
     Credits:
     This code is the product of many people's efforts including but not limited to the following:
     cteTally concept originally by Iztek Ben Gan and "decimalized" by Lynn Pettis (and others) for a bit of extra speed,
     and redacted by Jeff Moden for a different slant on readability and compactness.  Hat's off to Paul White for his
     simple explanations of CROSS APPLY and for his detailed testing efforts.  Last but not least, thanks to
     Ron "BitBucket" McCullough and Wayne Sheffield for the extreme performance testing across multiple machines and
     versions of SQL Server.  The latest improvement brought an additional 15-20% improvement over Rev 05.  Special
     thanks to "Nadrek" and "peter-757102" (aka Peter de Heer) for bringing such improvements to light.  Nadrek's
     original improvement brought about a 10% performance gain and Peter followed up with the content of Rev 07.

     I also thank whoever wrote the first article I ever saw on "numbers tables", which is located at the following URL,
     and Adam Machanic for leading me to it many years ago.
     http://sqlserver2000.databases.aspfaq.com/why-should-i-consider-using-an-auxiliary-numbers-table.html
    -----------------------------------------------------------------------------------------------------------------------
     Revision History:
     Rev 00 - 20 Jan 2010 - Concept for inline cteTally: Lynn Pettis and others.
                            Redaction/Implementation: Jeff Moden
            - Base 10 redaction and reduction for CTE.  (Total rewrite)

     Rev 01 - 13 Mar 2010 - Jeff Moden
            - Removed one additional concatenation and one subtraction from the SUBSTRING in the SELECT list for that
              tiny bit of extra speed.

     Rev 02 - 14 Apr 2010 - Jeff Moden
            - No code changes.  Added CROSS APPLY usage example to the header, some additional credits, and extra
              documentation.

     Rev 03 - 18 Apr 2010 - Jeff Moden
            - No code changes.  Added notes 7, 8, and 9 about certain "optimizations" that don't work for this type of
              function.

     Rev 04 - 29 Jun 2010 - Jeff Moden
            - Added SCHEMABINDING thanks to a note by Paul White.  This prevents an unnecessary "Table Spool" when the
              function is used in an UPDATE statement even though the function makes no external references.

     Rev 05 - 02 Apr 2011 - Jeff Moden
            - Rewritten for extreme performance improvement especially for larger strings approaching the 8K boundary
              and for strings that have wider elements.  The redaction of this code involved removing all concatenation
              of delimiters, optimization of the maximum "N" value by using TOP instead of including it in the WHERE
              clause, and the reduction of previous calculations (thanks to the switch to a "zero based" cteTally) to
              just one instance of one add and one instance of a subtract.  The length calculation for the final
              element (not followed by a delimiter) in the string to be split has been simplified by using the
              ISNULL/NULLIF combination to determine when CHARINDEX returned 0, which indicates there are no more
              delimiters to be had or to start with.  Depending on the width of the elements, this code is between 4
              and 8 times faster on a single CPU box than the original code, especially near the 8K boundary.
            - Modified comments to include more sanity checks on the usage example, etc.
            - Removed "other" notes 8 and 9 as they are no longer applicable.

     Rev 06 - 12 Apr 2011 - Jeff Moden
            - Based on a suggestion by Ron "BitBucket" McCullough, additional test rows were added to the sample code
              and the code was changed to encapsulate the output in pipes so that spaces and empty strings could be
              perceived in the output.  The first "Notes" section was added.  Finally, an extra test was added to the
              comments above.

     Rev 07 - 06 May 2011 - Peter de Heer: a further 15-20% performance enhancement has been discovered and incorporated
              into this code, which also eliminated the need for a "zero" position in the cteTally table.
    **********************************************************************************************************************/
    --===== Define I/O parameters
            (@pString VARCHAR(8000), @pDelimiter CHAR(1))
    RETURNS TABLE WITH SCHEMABINDING AS
     RETURN
    --===== "Inline" CTE driven "Tally Table" produces values from 0 up to 10,000...
         -- enough to cover NVARCHAR(4000)
      WITH E1(N) AS (
                     SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL
                     SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL
                     SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1
                    ),                          --10E+1 or 10 rows
           E2(N) AS (SELECT 1 FROM E1 a, E1 b), --10E+2 or 100 rows
           E4(N) AS (SELECT 1 FROM E2 a, E2 b), --10E+4 or 10,000 rows max
     cteTally(N) AS (--==== This provides the "base" CTE and limits the number of rows right up front
                         -- for both a performance gain and prevention of accidental "overruns"
                     SELECT TOP (ISNULL(DATALENGTH(@pString),0)) ROW_NUMBER() OVER (ORDER BY (SELECT NULL)) FROM E4
                    ),
    cteStart(N1) AS (--==== This returns N+1 (starting position of each "element" just once for each delimiter)
                     SELECT 1 UNION ALL
                     SELECT t.N+1 FROM cteTally t WHERE SUBSTRING(@pString,t.N,1) = @pDelimiter
                    ),
    cteLen(N1,L1) AS(--==== Return the start and length (for use in SUBSTRING)
                     SELECT s.N1,
                            ISNULL(NULLIF(CHARINDEX(@pDelimiter,@pString,s.N1),0)-s.N1,8000)
                       FROM cteStart s
                    )
    --===== Do the actual split.  The ISNULL/NULLIF combo handles the length for the final element when no delimiter is found.
     SELECT ItemNumber = ROW_NUMBER() OVER(ORDER BY l.N1),
            Item       = SUBSTRING(@pString, l.N1, l.L1)
       FROM cteLen l
    ;
    GO
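
Putting it together, and assuming the table and column names from your procedure are as shown above, the temp tables and the WHILE loop could even be reduced to a single set-based statement along these lines (a sketch, untested against your data; DelimitedSplit8K takes varchar(8000), so a very long @compname would be truncated):

    -- sketch: replaces the #temp/#tmpmainresult tables and the WHILE loop
    select p.title,
           case when p.patentnum is null or p.patentnum = '' then p.publicationnum else p.patentnum end as 'pat_pubname',
           pa.assigneename
    from patent p
    inner join patentproperty pp on p.patentid = pp.patentid
    inner join patentassignee pa on pp.patentassignmentid = pa.patentassignmentid
    cross apply dbo.DelimitedSplit8K(@compname, ',') s
    where pa.assigneename like '%' + ltrim(rtrim(s.Item)) + '%'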
