
bulk collect into forall insert: committing in batches

It is tempting at first to assume that committing many small batches will be faster than one large transaction with a single commit at the end, but Tom Kyte has stressed again and again that this idea is wrong. If you really must commit in stages, the best approach is to commit at a frequency dictated by the business process, and to size undo accordingly.
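Before settling on a commit frequency it helps to see how much undo the work is actually holding. A minimal sketch of that check (my addition, not part of the test below), joining v$transaction to v$session:

-- Undo currently held by active transactions; run this while the load is in flight.
select s.sid, t.used_ublk undo_blocks, t.used_urec undo_records
  from v$transaction t, v$session s
 where t.addr = s.taddr;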

A reader happened to ask me about exactly this today, so here is my example.

SQL> create table obj as select * from all_objects;

Table created.

Elapsed: 00:00:04.15
SQL> select count(*) from obj;

  COUNT(*)
----------
     40696

Elapsed: 00:00:00.05

Method 1
SQL> l
  1  declare
  2   v_cmt_cnt number:=0;
  3   cursor c is select * from obj;
  4   type v_rows is table of c%rowtype;
  5   v_t v_rows;
  6   begin
  7   open c;
  8   loop
  9     v_cmt_cnt:=v_cmt_cnt+1;
 10     fetch c bulk collect into v_t limit 1000;
 11       forall i in 1..v_t.count
 12         insert into obj values v_t(i);
 13      commit;
 14     exit when c%notfound;
 15   end loop;
 16   close c;
 17   dbms_output.put_line('commited times:'||v_cmt_cnt);
 18*  end;

commited times:41

PL/SQL procedure successfully completed.

Elapsed: 00:00:00.49
SQL> select count(*) from obj;

  COUNT(*)
----------
     81392

Elapsed: 00:00:00.02
SQL> select 40696*2 from dual;

   40696*2
----------
     81392

Elapsed: 00:00:00.01
SQL> rollback;

Rollback complete.

Elapsed: 00:00:00.01
SQL> select count(*) from obj;

  COUNT(*)
----------
     81392
--the rollback changes nothing because every batch was already committed inside the loop
--you can also add the append hint; in archivelog mode make the table nologging as well
declare
 v_cmt_cnt number:=0;
 cursor c is select * from obj;
 type v_rows is table of c%rowtype;
 v_t v_rows;
 begin
 open c;
 loop
   v_cmt_cnt:=v_cmt_cnt+1;
   fetch c bulk collect into v_t limit 1000;
     forall i in 1..v_t.count
       insert /*+ append */ into obj values v_t(i);
    commit;
   exit when c%notfound;
 end loop;
 close c;
 dbms_output.put_line('commited times:'||v_cmt_cnt);
 end;
 
Elapsed: 00:00:00.53

--Comparing run times, logging without the append hint was a few milliseconds faster; with append plus nologging, slightly less redo was generated
--To check redo: select a.name, b.value from v$statname a, v$sesstat b where a.statistic# = b.statistic# and a.name = 'redo size' and b.sid = :sid; (use v$mystat for the current session)
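Formatted, and run from a second session with :sid bound to the SID of the loading session, the check looks like this:

select a.name, b.value
  from v$statname a, v$sesstat b
 where a.statistic# = b.statistic#
   and a.name = 'redo size'
   and b.sid = :sid;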

 SQL> select 13991040-9331292 nolog_append,9331292-4659380 log_noappend from dual;

NOLOG_APPE LOG_NOAPPE
---------- ----------
   4659748    4671912
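The two figures are nearly identical, which is expected: the plain append hint is ignored for INSERT ... VALUES, so both runs used conventional-path inserts, and a nologging table attribute only pays off for direct-path operations. From 11.2 onward the append_values hint does enable direct-path inserts inside a FORALL. A sketch of that variant (my addition, not something timed above):

declare
 cursor c is select * from obj;
 type v_rows is table of c%rowtype;
 v_t v_rows;
 begin
 open c;
 loop
   fetch c bulk collect into v_t limit 1000;
     forall i in 1..v_t.count
       insert /*+ append_values */ into obj values v_t(i);
    commit; -- a direct-path insert must be committed before the session touches obj again
   exit when c%notfound;
 end loop;
 close c;
 end;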

Method 2


SQL> l
  1  declare
  2   v_cmt_cnt number:=0;
  3   begin
  4   for c in(select rowid rid,rownum r from obj)
  5   loop
  6    insert into obj select * from obj where rowid=c.rid;
  7    if(mod(c.r,1000)=0)then
  8      commit;
  9      v_cmt_cnt:=v_cmt_cnt+1;
 10     end if;
 11     end loop;
 12     commit;
 13     dbms_output.put_line('commited times:'||v_cmt_cnt);
 14*  end;

commited times:40

PL/SQL procedure successfully completed.

Elapsed: 00:00:14.98

Redo generated (after minus before):
SQL> select 40867092-27459428
  2  from dual;

40867092-27459428
-----------------
         13407664

As you can see, this method is about 14 seconds slower than Method 1, and it generated roughly three times as much redo (about 13.4 MB versus about 4.7 MB).

SQL> insert /*+append*/ into obj select * from obj;

40696 rows created.

Elapsed: 00:00:00.30
SQL> commit;

Commit complete.

Elapsed: 00:00:00.02

redo:4709556

--for each test the table was dropped and recreated first
SQL> insert  into obj select * from obj;

40696 rows created.

Elapsed: 00:00:00.36
SQL> commit;

Commit complete.

Elapsed: 00:00:00.02

redo:4643984

--to keep the redo query itself from inflating the numbers, all of the redo figures above were queried from a separate session

note: that is as far as the tests go. In general a single transaction is the fastest; do not assume that many small transactions beat one big one. The bulk collect into / forall approach is faster than a row-by-row loop, and committing in stages leaves you exposed to the problem of the job failing halfway through.
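If you do batch anyway, one common way to soften a mid-run failure is FORALL ... SAVE EXCEPTIONS, which lets the rest of a batch go through and reports the rows that failed; it does not make the job restartable, but it limits what one bad row can cost. A sketch along the lines of Method 1 (my addition, not part of the tests above):

declare
 dml_errors exception;
 pragma exception_init(dml_errors, -24381); -- ORA-24381: error(s) in array DML
 cursor c is select * from obj;
 type v_rows is table of c%rowtype;
 v_t v_rows;
 begin
 open c;
 loop
   fetch c bulk collect into v_t limit 1000;
   begin
     forall i in 1..v_t.count save exceptions
       insert into obj values v_t(i);
   exception
     when dml_errors then
       for j in 1..sql%bulk_exceptions.count loop
         dbms_output.put_line('row '||sql%bulk_exceptions(j).error_index
           ||' failed: '||sqlerrm(-sql%bulk_exceptions(j).error_code));
       end loop;
   end;
   commit;
   exit when c%notfound;
 end loop;
 close c;
 end;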

— batch update —

DECLARE
  v_cmt_cnt NUMBER := 0;
  TYPE tRowid IS TABLE OF ROWID INDEX BY BINARY_INTEGER;
  TYPE tName  IS TABLE OF userinfo.name%TYPE INDEX BY BINARY_INTEGER;

  vRowId tRowid;
  vName  tName;

  CURSOR c IS
    SELECT rowid, name FROM userinfo WHERE 1 = 1;
BEGIN
  OPEN c;
  LOOP
    FETCH c BULK COLLECT INTO vRowId, vName LIMIT 1000;
    v_cmt_cnt := v_cmt_cnt + 1;
    -- update 1000 rows at a time by rowid, committing after each batch
    FORALL i IN 1 .. vRowId.COUNT
      UPDATE userinfo
         SET cdate = SYSDATE, name = vName(i) || 't'
       WHERE rowid = vRowId(i);
    COMMIT;
    EXIT WHEN c%NOTFOUND;
  END LOOP;
  CLOSE c;
  dbms_output.put_line('Commits:' || v_cmt_cnt);
EXCEPTION
  WHEN OTHERS THEN
    IF c%ISOPEN THEN
      CLOSE c;
    END IF;
    RAISE;  -- re-raise instead of silently swallowing the error
END;
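For comparison, and in line with the note above that a single transaction is usually fastest, the same change can often be written as one set-based statement against userinfo:

update userinfo set cdate = sysdate, name = name || 't';
commit;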