J2ME: Test the RMS listener methods

import java.io.*;
import javax.microedition.midlet.*;
import javax.microedition.rms.*;

public class RmsListener extends MIDlet
{
  private RecordStore rs = null;
  static final String REC_STORE = “db_8”;

  public RmsListener()
  {
    // Open record store and add listener
    openRecStore();
    rs.addRecordListener(new TestRecordListener());

    // Initiate actions that will wake up the listener    
    writeRecord(“J2ME and MIDP”);
    updateRecord(“MIDP and J2ME”);
    deleteRecord();
    
    closeRecStore();  // Close record store
    deleteRecStore()// Remove the record store
  }

  public void destroyAppboolean unconditional )
  {
  }

  public void startApp()
  {
    // There is no user interface, go ahead and shutdown
    destroyApp(false);
    notifyDestroyed();
  }

  public void pauseApp()
  {
  }

  public void openRecStore()
  {
    try
    {
      // The second parameter indicates that the record store
      // should be created if it does not exist
      rs = RecordStore.openRecordStore(REC_STORE, true);
    }
    catch (Exception e)
    {
      db(e.toString());
    }
  }    
  
  public void closeRecStore()
  {
    try
    {
      rs.closeRecordStore();
    }
    catch (Exception e)
    {
      db(e.toString());
    }
  }

  public void deleteRecStore()
  {
    if (RecordStore.listRecordStores() != null)
    {
      try
      {
        RecordStore.deleteRecordStore(REC_STORE);
      }
      catch (Exception e)
      {
        db(e.toString());
      }
    }      
  }

  public void writeRecord(String str)
  {
    byte[] rec = str.getBytes();

    try
    {
      rs.addRecord(rec, 0, rec.length);
    }
    catch (Exception e)
    {
      db(e.toString());
    }
  }

  public void updateRecord(String str)
  {
    try
    {
      rs.setRecord(1, str.getBytes()0, str.length());
    }
    catch (Exception e)
    {
      db(e.toString());
    }
  }

  public void deleteRecord()
  {
    try
    {
      rs.deleteRecord(1);
    }
    catch (Exception e)
    {
      db(e.toString());
    }
  }

  /*————————————————–
  * Simple message to console for debug/errors
  * When used with Exceptions we should handle the 
  * error in a more appropriate manner.
  *————————————————-*/
  public void db(String str)
  {
    System.err.println(“Msg: ” + str);
  }

}


/*————————————————–
* Listen for updates to the record store
*————————————————-*/
class TestRecordListener implements RecordListener
{
  public void recordAdded(RecordStore recordStore, int recordId
  
    try
    {
      System.out.println(“Record with ID#: ” + recordId + 
                         ” added to RecordStore: ” + recordStore.getName())
    }
    catch (Exception e)
    {
      System.err.println(e);
    
  

  public void recordDeleted(RecordStore recordStore, int recordId
  {
    try
    {
      System.out.println(“Record with ID#: ” + recordId + 
                         ” deleted from RecordStore: ” + recordStore.getName())
    
    catch (Exception e)
    {
      System.err.println(e);
    }
  }

  public void recordChanged(RecordStore recordStore, int recordId
  {
    try
    {
      System.out.println(“Record with ID#: ” + recordId + 
                         ” changed in RecordStore: ” + recordStore.getName())
    
    catch (Exception e)
    {
      System.err.println(e);
    }
  
}


J2ME : Record Enumeration Example

import javax.microedition.rms.*;
import javax.microedition.midlet.*;
import javax.microedition.lcdui.*;
import java.io.*;

public class RecordEnumerationExample 
             extends MIDlet implements CommandListener
{
  private Display display;
  private Alert alert;
  private Form form
  private Command exit; 
  private Command start;
  private RecordStore recordstore = null;
  private RecordEnumeration recordEnumeration = null;
  public RecordEnumerationExample ()
  {
    display = Display.getDisplay(this);
    exit = new Command(“Exit”, Command.SCREEN, 1);
    start = new Command(“Start”, Command.SCREEN, 1);
    form new Form(“RecordEnumeration”);
    form.addCommand(exit);
    form.addCommand(start);
    form.setCommandListener(this);
  }
  public void startApp()
  {
    display.setCurrent(form);
  }
  public void pauseApp()
  {
  }
  public void destroyAppboolean unconditional )
  {
  }
  public void commandAction(Command command, 
              Displayable displayable)
  {
    if (command == exit)
    {
      destroyApp(true);
      notifyDestroyed();
    }
    else if (command == start)
    {
      try
      {
        recordstore = RecordStore.openRecordStore(
                      “myRecordStore”true );
      }
      catch (Exception error)
      {
        alert = new Alert(“Error Creating”
                error.toString(), null, AlertType.WARNING)
        alert.setTimeout(Alert.FOREVER)
        display.setCurrent(alert);
      }
      try
      {
        String outputData[] {“First Record”
                 “Second Record”“Third Record”};
        for (int x = 0; x < 3; x++)
        {
          byte[] byteOutputData = outputData[x].getBytes();
          recordstore.addRecord(byteOutputData, 
                     0, byteOutputData.length);
        }
      }
      catch Exception error)
      {
        alert = new Alert(“Error Writing”
                error.toString(), null, AlertType.WARNING)
        alert.setTimeout(Alert.FOREVER)
        display.setCurrent(alert);
      }
      try
      {
         StringBuffer buffer = new StringBuffer();
         recordEnumeration = 
                recordstore.enumerateRecords(null, null, false);
         while (recordEnumeration.hasNextElement())
         {
           buffer.append(new String(recordEnumeration.nextRecord()));
           buffer.append(“\n”);
         }
           alert = new Alert(“Reading”
                buffer.toString(), null, AlertType.WARNING)
           alert.setTimeout(Alert.FOREVER)
           display.setCurrent(alert)
      }
      catch (Exception error)
      {
        alert = new Alert(“Error Reading”
                  error.toString(), null, AlertType.WARNING)
        alert.setTimeout(Alert.FOREVER)
        display.setCurrent(alert);
      }
      try
      {
        recordstore.closeRecordStore();
      }
      catch (Exception error)
      {
        alert = new Alert(“Error Closing”
               error.toString(), null, AlertType.WARNING)
        alert.setTimeout(Alert.FOREVER)
        display.setCurrent(alert);
      }
      if (RecordStore.listRecordStores() != null)
      {
        try
        {
          RecordStore.deleteRecordStore(“myRecordStore”);
          recordEnumeration.destroy();
        }
        catch (Exception error)
        {
         alert = new Alert(“Error Removing”
               error.toString(), null, AlertType.WARNING)
         alert.setTimeout(Alert.FOREVER)
         display.setCurrent(alert);
        }
      }      
    }
  }     
}

J2ME : Store Database

import java.io.*;
import javax.microedition.midlet.*;
import javax.microedition.rms.*;

public class TestStore extends MIDlet {

    static final String DBNAME = “mydata”;

    public TestStore() {

        RecordStore rs = null;

        // Data is persistent across MIDlet invocations.
        // So, first clear out the old record store…

        try {
            RecordStore.deleteRecordStoreDBNAME );
        }
        catchException e ){
            // ignore any errors…
        }

        // Now create a new one and dump
        // each element out….

        try {
            rs = RecordStore.openRecordStoreDBNAME,
                                              true );

            byte[] data1 = “Here is the first record”.getBytes();
            byte[] data2 = “And here is the second”.getBytes();
            byte[] data3 = “And then the third”.getBytes();

            data3[00;
            data3[data3.length-1(byte1;

            rs.addRecorddata1, 0, data1.length );
            rs.addRecorddata2, 0, data2.length );
            rs.addRecorddata3, 0, data3.length );

            dumpRecordStorers, System.out );


            rs.closeRecordStore();
        }
        catchRecordStoreException e ){
            System.out.println);
        }

        notifyDestroyed();
    }

    public void dumpRecordStoreRecordStore rs,
                                 PrintStream out )
    {
        ifrs == null return;

        StringBuffer hexLine = new StringBuffer();
        StringBuffer charLine = new StringBuffer();

        try {
            int    lastID = rs.getNextRecordID();
            byte[] data = new byte[100];
            int    size;

            forint i = 1; i < lastID; ++i ){
                try {
                    size = rs.getRecordSize);
                    ifsize > data.length ){
                        data = new bytesize * ];
                    }

                    out.println“Record ” + i +
                              ” of size ” + size );

                    rs.getRecordi, data, );
                    dumpRecorddata, size, out,
                           hexLine, charLine, 16 );

                    out.println“” );
                }
                catchInvalidRecordIDException e ){
                    continue;
                }
            }
        }

        catchRecordStoreException e ){
            out.println“Exception reading record store: ” + e );
        }
    }

    private void dumpRecordbyte[] data, int size,
                             PrintStream out,
                             StringBuffer hexLine,
                             StringBuffer charLine,
                             int maxLen )
    {
        ifsize == return;

        hexLine.setLength);
        charLine.setLength);

        int count = 0;

        forint i = 0; i < size; ++i ){
            char b = (char) ( data[i0xFF );

            ifb < 0x10 ){
                hexLine.append‘0’ );
            }

            hexLine.appendInteger.toHexString) );
            hexLine.append‘ ‘ );

            if( ( b >= 32 && b <= 127 ||
                Character.isDigit||
                Character.isLowerCase||
                Character.isUpperCase) ){
                charLine.append( (char);
            else {
                charLine.append‘.’ );
            }

            if++count >= maxLen || i == size-){
                whilecount++ < maxLen ){
                hexLine.append”   ” );
                }

                hexLine.append‘ ‘ );
                hexLine.appendcharLine.toString() );

                out.printlnhexLine.toString() );
 
                hexLine.setLength);
                charLine.setLength);
                count = 0;
            }
        }
    }

    public void destroyAppboolean unconditional ) {
    }

    public void startApp() {
    }

    public void pauseApp() {
    }
}


J2ME : Sort Record Example

import javax.microedition.rms.*;
import javax.microedition.midlet.*;
import javax.microedition.lcdui.*;
import java.io.*;

public class SortExample extends MIDlet implements CommandListener
{
  private Display display;
  private Alert alert;
  private Form form
  private Command exit; 
  private Command start;
  private RecordStore recordstore = null;
  private RecordEnumeration recordEnumeration = null;
  private Comparator comparator = null;
  public SortExample ()
  {
    display = Display.getDisplay(this);
    exit = new Command(“Exit”, Command.SCREEN, 1);
    start = new Command(“Start”, Command.SCREEN, 1);
    form new Form(“Mixed RecordEnumeration”null);
    form.addCommand(exit);
    form.addCommand(start);
    form.setCommandListener(this);
  }
  public void startApp()
  {
    display.setCurrent(form);
  }
  public void pauseApp()
  {
  }
  public void destroyAppboolean unconditional )
  {
  }
  public void commandAction(Command command, Displayable displayable)
  {
    if (command == exit)
    {
      destroyApp(true);
      notifyDestroyed();
    }
    else if (command == start)
    {
      try
      {
        recordstore = RecordStore.openRecordStore(
                      “myRecordStore”true );
      }
      catch (Exception error)
      {
        alert = new Alert(“Error Creating”
             error.toString(), null, AlertType.WARNING)
        alert.setTimeout(Alert.FOREVER)
        display.setCurrent(alert);
      }
      try
      {
        String outputData[] {“Mary”“Bob”“Adam”};
        for (int x = 0; x < 3; x++)
        {
          byte[] byteOutputData = outputData[x].getBytes();
          recordstore.addRecord(byteOutputData, 0
                   byteOutputData.length);
        }
      }
      catch Exception error)
      {
       alert = new Alert(“Error Writing”
                error.toString(), null, AlertType.WARNING)
       alert.setTimeout(Alert.FOREVER)
       display.setCurrent(alert);
      }
      try
      {
        StringBuffer buffer = new StringBuffer();
        Comparator comparator = new Comparator();
        recordEnumeration = recordstore.enumerateRecords(
                          null, comparator, false);
        while (recordEnumeration.hasNextElement())
        {
          buffer.append(new String(recordEnumeration.nextRecord()));
          buffer.append(“\n”);
        }
        alert = new Alert(“Reading”, buffer.toString() 
                 null, AlertType.WARNING)
        alert.setTimeout(Alert.FOREVER)
        display.setCurrent(alert);
      }
      catch (Exception error)
      {
        alert = new Alert(“Error Reading”
                  error.toString(), null, AlertType.WARNING)
        alert.setTimeout(Alert.FOREVER)
        display.setCurrent(alert);
      }
      try
      {
       recordstore.closeRecordStore();
      }
      catch (Exception error)
      {
        alert = new Alert(“Error Closing”
                    error.toString(), null, AlertType.WARNING)
        alert.setTimeout(Alert.FOREVER)
        display.setCurrent(alert);
      }
      if (RecordStore.listRecordStores() != null)
      {
        try
        {
          RecordStore.deleteRecordStore(“myRecordStore”);
          recordEnumeration.destroy();
        }
        catch (Exception error)
        {
         alert = new Alert(“Error Removing”
                error.toString(), null, AlertType.WARNING)
         alert.setTimeout(Alert.FOREVER)
         display.setCurrent(alert);
        }
      }      
    }
  }     
}
class Comparator implements RecordComparator
{
  public int compare(byte[] record1, byte[] record2)
  {
    String string1 = new String(record1)
               string2= new String(record2);
    int comparison = string1.compareTo(string2);
    if (comparison == 0)
      return RecordComparator.EQUIVALENT;
    else if (comparison < 0)
      return RecordComparator.PRECEDES;
    else
      return RecordComparator.FOLLOWS;
  }
}

oracle里的常用命令

oracle里的常用命令
第一章:日志管理


1.forcing log switches
sql> alter system switch logfile;


2.forcing checkpoints
sql> alter system checkpoint;


3.adding online redo log groups
sql> alter database add logfile [group 4]
sql> (‘/disk3/log4a.rdo’,’/disk4/log4b.rdo’) size 1m;


4.adding online redo log members
sql> alter database add logfile member
sql> ‘/disk3/log1b.rdo’ to group 1,
sql> ‘/disk4/log2b.rdo’ to group 2;


5.changes the name of the online redo logfile
sql> alter database rename file ‘c:/oracle/oradata/oradb/redo01.log’
sql> to ‘c:/oracle/oradata/redo01.log’;


6.drop online redo log groups
sql> alter database drop logfile group 3;


7.drop online redo log members
sql> alter database drop logfile member ‘c:/oracle/oradata/redo01.log’;


8.clearing online redo log files
sql> alter database clear [unarchived] logfile ‘c:/oracle/log2a.rdo’;


9.using logminer analyzing redo logfiles


a. in the init.ora specify utl_file_dir = ‘ ‘
b. sql> execute dbms_logmnr_d.build(‘oradb.ora’,’c:\oracle\oradb\log’);
c. sql> execute dbms_logmnr.add_logfile('c:\oracle\oradata\oradb\redo01.log',
sql> dbms_logmnr.new);
d. sql> execute dbms_logmnr.add_logfile(‘c:\oracle\oradata\oradb\redo02.log’,
sql> dbms_logmnr.addfile);
e. sql> execute dbms_logmnr.start_logmnr(dictfilename=>’c:\oracle\oradb\log\oradb.ora’);
f. sql> select * from v$logmnr_contents(v$logmnr_dictionary,v$logmnr_parameters
sql> v$logmnr_logs);
g. sql> execute dbms_logmnr.end_logmnr;


第二章:表空间管理
1.create tablespaces
sql> create tablespace tablespace_name datafile ‘c:\oracle\oradata\file1.dbf’ size 100m,
sql> ‘c:\oracle\oradata\file2.dbf’ size 100m minimum extent 550k [logging/nologging]
sql> default storage (initial 500k next 500k maxextents 500 pctincrease 0)
sql> [online/offline] [permanent/temporary] [extent_management_clause]


2.locally managed tablespace
sql> create tablespace user_data datafile ‘c:\oracle\oradata\user_data01.dbf’
sql> size 500m extent management local uniform size 10m;


3.temporary tablespace
sql> create temporary tablespace temp tempfile ‘c:\oracle\oradata\temp01.dbf’
sql> size 500m extent management local uniform size 10m;


4.change the storage setting
sql> alter tablespace app_data minimum extent 2m;
sql> alter tablespace app_data default storage(initial 2m next 2m maxextents 999);


5.taking tablespace offline or online
sql> alter tablespace app_data offline;
sql> alter tablespace app_data online;


6.read_only tablespace
sql> alter tablespace app_data read only|write;


7.droping tablespace
sql> drop tablespace app_data including contents;


8.enableing automatic extension of data files
sql> alter tablespace app_data add datafile ‘c:\oracle\oradata\app_data01.dbf’ size 200m
sql> autoextend on next 10m maxsize 500m;


9.change the size fo data files manually
sql> alter database datafile ‘c:\oracle\oradata\app_data.dbf’ resize 200m;


10.Moving data files: alter tablespace
sql> alter tablespace app_data rename datafile ‘c:\oracle\oradata\app_data.dbf’
sql> to ‘c:\oracle\app_data.dbf’;


11.moving data files:alter database
sql> alter database rename file ‘c:\oracle\oradata\app_data.dbf’
sql> to ‘c:\oracle\app_data.dbf’;


第三章:表


1.create a table
sql> create table table_name (column datatype,column datatype]….)
sql> tablespace tablespace_name [pctfree integer] [pctused integer]
sql> [initrans integer] [maxtrans integer]
sql> storage(initial 200k next 200k pctincrease 0 maxextents 50)
sql> [logging|nologging] [cache|nocache]


2.copy an existing table
sql> create table table_name [logging|nologging] as subquery


3.create temporary table
sql> create global temporary table xay_temp as select * from xay;
on commit preserve rows/on commit delete rows


4.pctfree = (average row size – initial row size) *100 /average row size
pctused = 100-pctfree- (average row size*100/available data space)


5.change storage and block utilization parameter
sql> alter table table_name pctfree=30 pctused=50 storage(next 500k
sql> minextents 2 maxextents 100);


6.manually allocating extents
sql> alter table table_name allocate extent(size 500k datafile ‘c:/oracle/data.dbf’);


7.move tablespace
sql> alter table employee move tablespace users;


8.deallocate of unused space
sql> alter table table_name deallocate unused [keep integer]


9.truncate a table
sql> truncate table table_name;


10.drop a table
sql> drop table table_name [cascade constraints];


11.drop a column
sql> alter table table_name drop column comments cascade constraints checkpoint 1000;
alter table table_name drop columns continue;


12.mark a column as unused
sql> alter table table_name set unused column comments cascade constraints;
alter table table_name drop unused columns checkpoint 1000;
alter table orders drop columns continue checkpoint 1000
data_dictionary : dba_unused_col_tabs


第四章:索引


1.creating function-based indexes
sql> create index summit.item_quantity on summit.item(quantity-quantity_shipped);


2.create a B-tree index
sql> create [unique] index index_name on table_name(column,.. asc/desc) tablespace
sql> tablespace_name [pctfree integer] [initrans integer] [maxtrans integer]
sql> [logging | nologging] [nosort] storage(initial 200k next 200k pctincrease 0
sql> maxextents 50);


3.pctfree(index)=(maximum number of rows-initial number of rows)*100/maximum number of rows


4.creating reverse key indexes
sql> create unique index xay_id on xay(a) reverse pctfree 30 storage(initial 200k
sql> next 200k pctincrease 0 maxextents 50) tablespace indx;


5.create bitmap index
sql> create bitmap index xay_id on xay(a) pctfree 30 storage( initial 200k next 200k
sql> pctincrease 0 maxextents 50) tablespace indx;


6.change storage parameter of index
sql> alter index xay_id storage (next 400k maxextents 100);


7.allocating index space
sql> alter index xay_id allocate extent(size 200k datafile ‘c:/oracle/index.dbf’);


8.alter index xay_id deallocate unused;


第五章:约束


1.define constraints as immediate or deferred
sql> alter session set constraint[s] = immediate/deferred/default;
set constraint[s] constraint_name/all immediate/deferred;


2. sql> drop table table_name cascade constraints
sql> drop tablespace tablespace_name including contents cascade constraints


3. define constraints while create a table
sql> create table xay(id number(7) constraint xay_id primary key deferrable
sql> using index storage(initial 100k next 100k) tablespace indx);
primary key/unique/references table(column)/check


4.enable constraints
sql> alter table xay enable novalidate constraint xay_id;


5.enable constraints
sql> alter table xay enable validate constraint xay_id;


第六章:LOAD数据


1.loading data using direct_load insert
sql> insert /*+append */ into emp nologging
sql> select * from emp_old;


2.parallel direct-load insert
sql> alter session enable parallel dml;
sql> insert /*+parallel(emp,2) */ into emp nologging
sql> select * from emp_old;


3.using sql*loader
sql> sqlldr scott/tiger \
sql> control = ulcase6.ctl \
sql> log = ulcase6.log direct=true


第七章:reorganizing data


1.using export
$exp scott/tiger tables(dept,emp) file=c:\emp.dmp log=exp.log compress=n direct=y


2.using import
$imp scott/tiger tables(dept,emp) file=emp.dmp log=imp.log ignore=y


3.transporting a tablespace
sql>alter tablespace sales_ts read only;
$exp sys/.. file=xay.dmp transport_tablespace=y tablespace=sales_ts
triggers=n constraints=n
$copy datafile
$imp sys/.. file=xay.dmp transport_tablespace=y datafiles=(/disk1/sles01.dbf,/disk2
/sles02.dbf)
sql> alter tablespace sales_ts read write;


4.checking transport set
sql> DBMS_tts.transport_set_check(ts_list =>’sales_ts’ ..,incl_constraints=>true);
在表transport_set_violations 中查看
sql> dbms_tts.isselfcontained 为true 是, 表示自包含


第八章: managing password security and resources


1.controlling account lock and password
sql> alter user juncky identified by oracle account unlock;


2.user_provided password function
sql> function_name(userid in varchar2(30),password in varchar2(30),
old_password in varchar2(30)) return boolean


3.create a profile : password setting
sql> create profile grace_5 limit failed_login_attempts 3
sql> password_lock_time unlimited password_life_time 30
sql>password_reuse_time 30 password_verify_function verify_function
sql> password_grace_time 5;


4.altering a profile
sql> alter profile default failed_login_attempts 3
sql> password_life_time 60 password_grace_time 10;


5.drop a profile
sql> drop profile grace_5 [cascade];


6.create a profile : resource limit
sql> create profile developer_prof limit sessions_per_user 2
sql> cpu_per_session 10000 idle_time 60 connect_time 480;


7. view => resource_cost : alter resource cost
dba_Users,dba_profiles


8. enable resource limits
sql> alter system set resource_limit=true;


第九章:Managing users


1.create a user: database authentication
sql> create user juncky identified by oracle default tablespace users
sql> temporary tablespace temp quota 10m on data password expire
sql> [account lock|unlock] [profile profilename|default];


2.change user quota on tablespace
sql> alter user juncky quota 0 on users;


3.drop a user
sql> drop user juncky [cascade];


4. monitor user
view: dba_users , dba_ts_quotas


第十章:managing privileges


1.system privileges: view => system_privilege_map ,dba_sys_privs,session_privs


2.grant system privilege
sql> grant create session,create table to managers;
sql> grant create session to scott with admin option;
with admin option can grant or revoke privilege from any user or role;


3.sysdba and sysoper privileges:
sysoper: startup,shutdown,alter database open|mount,alter database backup controlfile,
alter tablespace begin/end backup,recover database
alter database archivelog,restricted session
sysdba: sysoper privileges with admin option,create database,recover database until


4.password file members: view:=> v$pwfile_users


5.O7_dictionary_accessibility =true restriction access to view or tables in other schema


6.revoke system privilege
sql> revoke create table from karen;
sql> revoke create session from scott;


7.grant object privilege
sql> grant execute on dbms_pipe to public;
sql> grant update(first_name,salary) on employee to karen with grant option;


8.display object privilege : view => dba_tab_privs, dba_col_privs


9.revoke object privilege
sql> revoke execute on dbms_pipe from scott [cascade constraints];


10.audit record view :=> sys.aud$


11. protecting the audit trail
sql> audit delete on sys.aud$ by access;


12.statement auditing
sql> audit user;


13.privilege auditing
sql> audit select any table by summit by access;


14.schema object auditing
sql> audit lock on summit.employee by access whenever successful;


15.view audit option : view=> all_def_audit_opts,dba_stmt_audit_opts,dba_priv_audit_opts,dba_obj_audit_opts


16.view audit result: view=> dba_audit_trail,dba_audit_exists,dba_audit_object,dba_audit_session,dba_audit_statement


第十一章: manager role


1.create roles
sql> create role sales_clerk;
sql> create role hr_clerk identified by bonus;
sql> create role hr_manager identified externally;


2.modify role
sql> alter role sales_clerk identified by commission;
sql> alter role hr_clerk identified externally;
sql> alter role hr_manager not identified;


3.assigning roles
sql> grant sales_clerk to scott;
sql> grant hr_clerk to hr_manager;
sql> grant hr_manager to scott with admin option;


4.establish default role
sql> alter user scott default role hr_clerk,sales_clerk;
sql> alter user scott default role all;
sql> alter user scott default role all except hr_clerk;
sql> alter user scott default role none;


5.enable and disable roles
sql> set role hr_clerk;
sql> set role sales_clerk identified by commission;
sql> set role all except sales_clerk;
sql> set role none;


6.remove role from user
sql> revoke sales_clerk from scott;
sql> revoke hr_manager from public;


7.remove role
sql> drop role hr_manager;


8.display role information
view: =>dba_roles,dba_role_privs,role_role_privs,dba_sys_privs,role_sys_privs,role_tab_privs,session_roles


第十二章: BACKUP and RECOVERY


1. v$sga,v$instance,v$process,v$bgprocess,v$database,v$datafile,v$sgastat


2. Rman need set dbwr_IO_slaves or backup_tape_IO_slaves and large_pool_size


3. Monitoring Parallel Rollback
> v$fast_start_servers , v$fast_start_transactions


4.perform a closed database backup (noarchivelog)
> shutdown immediate
> cp files /backup/
> startup


5.restore to a different location
> connect system/manager as sysdba
> startup mount
> alter database rename file ‘/disk1/../user.dbf’ to ‘/disk2/../user.dbf’;
> alter database open;


6.recover syntax
–recover a mounted database
>recover database;
>recover datafile ‘/disk1/data/df2.dbf’;
>alter database recover database;
–recover an opened database
>recover tablespace user_data;
>recover datafile 2;
>alter database recover datafile 2;


7.how to apply redo log files automatically
>set autorecovery on
>recover automatic datafile 4;


8.complete recovery:
–method 1(mounted databae)
>copy c:\backup\user.dbf c:\oradata\user.dbf
>startup mount
>recover datafile 'c:\oradata\user.dbf';
>alter database open;
–method 2(opened database,initially opened,not system or rollback datafile)
>copy c:\backup\user.dbf c:\oradata\user.dbf (alter tablespace offline)
>recover datafile ‘c:\oradata\user.dbf’ or
>recover tablespace user_data;
>alter database datafile ‘c:\oradata\user.dbf’ online or
>alter tablespace user_data online;
–method 3(opened database,initially closed not system or rollback datafile)
>startup mount
>alter database datafile ‘c:\oradata\user.dbf’ offline;
>alter database open
>copy c:\backup\user.dbf d:\oradata\user.dbf
>alter database rename file ‘c:\oradata\user.dbf’ to ‘d:\oradata\user.dbf’
>recover datafile ‘e:\oradata\user.dbf’ or recover tablespace user_data;
>alter tablespace user_data online;
–method 4(loss of data file with no backup and have all archive log)
>alter tablespace user_data offline immediate;
>alter database create datafile 'd:\oradata\user.dbf' as 'c:\oradata\user.dbf';
>recover tablespace user_data;
>alter tablespace user_data online
5.perform an open database backup
> alter tablespace user_data begin backup;
> copy files /backup/
> alter database datafile ‘/c:/../data.dbf’ end backup;
> alter system switch logfile;
6.backup a control file
> alter database backup controlfile to ‘control1.bkp’;
> alter database backup controlfile to trace;
7.recovery (noarchivelog mode)
> shutdown abort
> cp files
> startup
8.recovery of file in backup mode
>alter database datafile 2 end backup;


9.clearing redo log file
>alter database clear unarchived logfile group 1;
>alter database clear unarchived logfile group 1 unrecoverable datafile;


10.redo log recovery
>alter database add logfile group 3 ‘c:\oradata\redo03.log’ size 1000k;
>alter database drop logfile group 1;
>alter database open;
or >cp c:\oradata\redo02.log’ c:\oradata\redo01.log
>alter database clear logfile ‘c:\oradata\log01.log’;


Oracle 的入门心得

oracle的体系太庞大了,对于初学者来说,难免会有些无从下手的感觉,什么都想学,结果什么都学不好,所以把学习经验共享一下,希望让刚刚入门的人对oracle有一个总体的认识,少走一些弯路。
 
一、定位
oracle分两大块,一块是开发,一块是管理。开发主要是写写存储过程、触发器什么的,还有就是用Oracle的Develop工具做form。有点类似于程序员,需要有较强的逻辑思维和创造能力,个人觉得会比较辛苦,是青春饭J;管理则需要对oracle数据库的原理有深刻的认识,有全局操纵的能力和紧密的思维,责任较大,因为一个小的失误就会down掉整个数据库,相对前者来说,后者更看重经验。
 
因为数据库管理的责任重大,很少公司愿意请一个刚刚接触oracle的人去管理数据库。对于刚刚毕业的年轻人来说,可以先选择做开发,有一定经验后转型,去做数据库的管理。当然,这个还是要看人个的实际情况来定。


二、学习方法 
我的方法很简单,就是:看书、思考、写笔记、做实验、再思考、再写笔记
 
     看完理论的东西,自己静下心来想想,多问自己几个为什么,然后把所学和所想的知识点做个笔记;在想不通或有疑问的时候,就做做实验,想想怎么会这样,同样的,把实验的结果记下来。思考和做实验是为了深入的了解这个知识点。而做笔记的过程,也是理清自己思路的过程。
 
     学习的过程是使一个问题由模糊到清晰,再由清晰到模糊的过程。而每次的改变都代表着你又学到了一个新的知识点。
 
     学习的过程也是从点到线,从线到网,从网到面的过程。当点变成线的时候,你会有总豁然开朗的感觉。当网到面的时候,你就是高手了
 
     很多网友,特别是初学的人,一碰到问题就拿到论坛上来问,在问前,你有没有查过书,自己有没有研究过,有没有搜索一下论坛?这就叫思维惰性。由别人来回答你的问题,会让你在短时间内不费劲地弄懂这个知识点,然而通过自己的努力去研究它,不但会更深入的了解这个知识点,更重要的是在研究的过程会提高你解决问题和分析问题的能力。总的来说,没有钻研的学习态度,不管学什么东西,都不会成功的。
 
     当然,初学的人很多时候是因为遇到问题时,无从下手,也不知道去哪里找资料,才会到论坛上提问题的。但我认为,在提问的时候,是不是可以问别人是如何分析这个问题?从哪里可以找到相关的资料?而不是这个问题的答案是什么?授人以鱼不如授人以渔。


   下面我讲下我处理问题的过程


   首先要知道oracle的官方网站:www.oracle.com 这里有oracle的各种版本的数据库、应用工具和权威的官方文档。其次,还要知道http://metalink.oracle.com/这里是买了oracle服务或是oracle的合作伙伴才可以进去的,里面有很多权威的解决方案和补丁。然后就是一些著名网站:asktom.oracle.com www.orafaq.net, www.dbazine.com。这里有很多经验之谈。


    遇到问题了。如果是概念上的问题,第一时间可以找tahiti.oracle.com,这里会给你最详细的解释。如果在运行的过程中出了什么错误。可以去metalink看看。如果是想知道事务的处理的经验之谈。可以去asktom。当然。这里只是相对而言。


三、oracle的体系
oracle的体系很庞大,要学习它,首先要了解oracle的框架。在这里,简要的讲一下oracle的架构,让初学者对oracle有一个整体的认识。
 
1、物理结构(由控制文件、数据文件、重做日志文件、参数文件、归档文件、密码文件组成)
控制文件:包含维护和验证数据库完整性的必要信息、例如,控制文件用于识别数据文件和重做日志文件,一个数据库至少需要一个控制文件
数据文件:存储数据的文件
重做日志文件:含对数据库所做的更改记录,这样万一出现故障可以启用数据恢复。一个数据库至少需要两个重做日志文件
参数文件:定义Oracle 例程的特性,例如它包含调整SGA 中一些内存结构大小的参数
归档文件:是重做日志文件的脱机副本,这些副本可能对于从介质失败中进行恢复很必要。
密码文件:认证哪些用户有权限启动和关闭Oracle例程
 
2、逻辑结构(表空间、段、区、块)
表空间:是数据库中的基本逻辑结构,一系列数据文件的集合。
段:是对象在数据库中占用的空间
区:是为数据一次性预留的一个较大的存储空间
块:ORACLE最基本的存储单位,在建立数据库的时候指定
 
3、内存分配(SGA和PGA)
SGA:是用于存储数据库信息的内存区,该信息为数据库进程所共享。它包含Oracle 服务器的数据和控制信息, 它是在Oracle 服务器所驻留的计算机的实际内存中得以分配,如果实际内存不够再往虚拟内存中写。
PGA:包含单个服务器进程或单个后台进程的数据和控制信息,与几个进程共享的SGA 正相反PGA 是只被一个进程使用的区域,PGA 在创建进程时分配在终止进程时回收
 
4、后台进程(数据写进程、日志写进程、系统监控、进程监控、检查点进程、归档进程、服务进程、用户进程)
数据写进程:负责将更改的数据从数据库缓冲区高速缓存写入数据文件
日志写进程:将重做日志缓冲区中的更改写入在线重做日志文件
系统监控:检查数据库的一致性如有必要还会在数据库打开时启动数据库的恢复
进程监控:负责在一个Oracle 进程失败时清理资源
检查点进程:负责在每当缓冲区高速缓存中的更改永久地记录在数据库中时,更新控制文件和数据文件中的数据库状态信息。
归档进程:在每次日志切换时把已满的日志组进行备份或归档
服务进程:为用户进程服务。
用户进程:在客户端,负责将用户的SQL 语句传递给服务进程,并从服务器端拿回查询数据。
 
5、oracle例程:Oracle 例程由SGA 内存结构和用于管理数据库的后台进程组成。例程一次只能打开和使用一个数据库。
 
6、SCN(System Change Number):系统改变号,一个由系统内部维护的序列号。当系统需要更新的时候自动增加,他是系统中维持数据的一致性和顺序恢复的重要标志。


四、深入学习 
管理:可以考OCP证书,对oracle先有一个系统的学习,然后看Oracle Concepts、oracle online document,对oracle的原理会有更深入的了解,同时可以开始进行一些专题的研究如:RMAN、RAC、STATSPACK、DATAGUARD、TUNING、BACKUP&RECOVER等等。
 
开发:对于想做Oracle开发的,在了解完Oracle基本的体系结构之后,可以重点关注PL/SQL及Oracle的开发工具这一部分。 PL/SQL主要是包括怎么写SQL语句,怎么使用Oracle本身的函数,怎么写存储过程、存储函数、触发器等。 Oracle的开发工具主要就是Oracle自己的Developer Suite(Oracle Forms Developer and Reports Developer这些),学会如何熟练使用这些工具。



介绍几本oracle入门的好书


oracle官方文档:《concept》上面讲了oracle的体系和概念,很适合初学者看。


OCP的教学用书,也就是STUDY GUIDE(SG)。
Oracle8i 备份恢复手册
Oracle8高级管理与优化
Oracle8i PLSQL程序设计
Oracle8数据库管理员手册
以上书本都是机械工业出版社出版。
 
介绍几个网站
http://tahiti.oracle.com oracle的官方文档
现在http://www.oracle.com.cn/onlinedoc/index.htm也有官方文档,速度奇快
http://metalink.oracle.com/ oracle的技术支持网站。需要购买Oracle服务才能有一个帐号,才能登陆,有大量的Knowledge Base,大量问题解决经验。
http://www.oracle.com oracle的官方网站,可以在这里down oracle的软件、官方文档和获得最新的消息
http://www.dbazine.com/ Oracle的杂志
http://asktom.oracle.com 
http://www.orafaq.net/ 
http://www.ixora.com.au/
http://www.oracle-base.com
http://www.dba-oracle.com/oracle_links.htm


Joost - 網路電視不打烊

Joost - 網路電視不打烊

軟體:Joost(版本:0.10.3)
類別:網路多媒體
性質:Freeware(9.7 M)

【編輯/王國淵】

近年來,由於網路建設發展迅速,使用者們家中所使用的網路已經由原本的數據機撥接、ADSL 、Cable 進步到目前最新的 FTTB 了,隨著所使用的頻寬持續成長,許多的多媒體應用就能夠被透過網路來實現,而網路電視就是很好的一個例子。但是,要欣賞網路電視前,你當然要有一套合適的播放軟體囉,Joost 就是一款值得你一試的選擇。

Joost 是一款免費的網路電視軟體,使用者只要安裝之後,就可以透過它內建的選台器功能,選擇觀看數百個線上頻道,這麼多頻道數,可是沒有一家第四台業者比得上的喔。舉凡新聞、體育、電影、卡通,都可能在這包羅萬象的網路電視上找到,讓你想看什麼、就看什麼,再也不用擔心電視上播的都是老舊又重複的節目或影片了。

網路是無遠弗屆的,也因此,網路電視台的種類也是千奇百怪。而使用 Joost 的好處就是,它內建的節目頻道功能會不斷地擴充,讓你每天都有不同的選擇,永遠不會嫌膩。如果想要看看多元化的節目,甚至是想聽聽外語新聞,那麼 Joost 可是一個免費又方便的選擇喔!


下載:http://www.joost.com/getjoost.html

Concurrency Testing in JAVA Applications


Testing and optimizing Java code to handle concurrent activities is difficult without test automation. Even with test automation, being able to correlate the test activity from the client side to observations of thread, memory, object and database connection use on the server side is difficult at best. In this article, Frank Cohen describes methods for concurrency testing in Java applications and shows a new technique to correlate what a Java application server is doing on the server side while a load test automation tool drives a test on the client side.


Introduction


This article picks up where Brian Goetz’s article Testing Concurrent Programs ends. Goetz describes the need to use concurrency mechanisms in Java programs and gives tips to test concurrency mechanisms. For instance, Goetz’s article introduces a technique to avoid Java’s inherent concurrency problems – mutable and static objects – by shielding the mutable objects by wrapping them in immutable objects. Goetz says “all this sounds like a lot of work, and it is.” At this point in your concurrency testing, an investigation into test automation tools may help your effort.


Concurrency testing is well known, but hard to do


Most IT managers agree that concurrency testing is the appropriate way to determine many performance bottlenecks, resource contention issues, and service interruptions. However, in my experience few ever do concurrency testing because the available test tools are not satisfactory.


Consider the minimum functions a test automation tool needs to support to expose concurrency issues:



  • Unit test of a specific function.

  • Variations on the input data the unit test sends to a function.

  • Business flows through a system function using sequences of functional unit tests.

  • Operating the business flows in concurrently running threads. Additionally, in large-scale concurrency tests the flows may need to be operated across multiple test machines.

  • Variations in the mix of business flows in the concurrent running threads. For instance, the first mix may test at 20% of the business flows operating a create-a-new-user flow while 80% operate a sign-in flow and a second test reverses the mix to 80% create and 20% sign-in.

  • Variations in the test parameters based on usage factors. For instance, changing the test parameters based on the time-of-day or end-of-quarter.

There is no concurrency testing heaven here. You can never create a deterministic test to uncover a non-deterministic concurrency issue. The goal instead is to get close to the problem and to get lucky. Operating a concurrency test with as many of the above operational parameters as possible might not guarantee that the test will surface concurrency issues, but it will work the odds in your favor to run across the right conditions to trigger a failure.


From Unit Tests to Concurrency Tests


There are some things you should expect in today’s test automation tools. For instance, many test automation tools are now using unit tests in ways that aid in surfacing concurrency problems. Each unit test operates a particular method or class. Stringing unit tests together into a sequence forms a functional test of a business flow.



You should also expect the test tool to provide libraries that embellish unit tests to use protocol handlers to speak the native protocols of the application or service under test, including HTTP, SOAP, Ajax, REST, Email, and custom network protocols. Additionally, you should expect the test tool embellishes functional unit tests with a Data Production Library (DPL) to provide test data as input to the unit test and data to validate the service or class response.


It is reasonable to find a test automation framework wrapping unit tests into functional tests that may be operated concurrently to identify concurrency issues in an application. I coined the term TestScenario in my open-source test tool (TestMaker) to name this wrapper.



A TestScenario operates the unit tests as a functional test by operating a sequence of unit tests once, as a load test by operating sequences of the unit tests in concurrently running threads, and as service monitors by running the unit tests periodically. The TestScenario gives us the flexibility to try multiple sequences of business flows concurrently.


The TestScenario defines which TestNodes will operate the test. I coined the term TestNode for the distributed test environment operating on a remote machine or a rack of machines in a QA lab. When the TestScenario begins it moves the unit test and any other resource files to the defined TestNodes. The unit test speaks the native protocols of the application under test. The TestNodes provide the unit test data through the DPL, operates the unit tests, and logs the results.


As the TestScenario operates the test, a monitor watches the application host and logs usage and resource statistics. The test tools then correlate the monitored data to the unit test operation to identify performance bottlenecks and available optimizations.


The key to using a test automation platform for concurrency testing is to identify what is happening on the server side. I find it valuable to observe at least the following:



  • Java Virtual Machine statistics (heap, memory, thread, object instantiations, and garbage collection)

  • Service call statistics (time to serve request, external service and database query times, properties settings, and service interface request values and schema)

  • Subsystem statistics (if the system uses a database then I want to see the transaction logs, input parameters, and query operations)

Eventually all of these observations are going to come with the manual expense of the time to correlate the observed data to what the test automation tool is operating on the client-side. Recently I began finding agent and agentless tools to monitor server-side activity in a way that can be snapshot correlated to the test operation. One such agent-based tool is Glassbox.


Tools like Glassbox watch an application server receive requests, operate objects and EJBs, communicate with databases, and interact with message buses. These tools provide a service interface that test environments can use to automate the correlations. For instance, Glassbox uses the Java Management Extensions (JMX) interface to communicate performance observations.


Glassbox installs an agent named the Glassbox Inspector to monitor traffic through the Java virtual machine and send data to an analysis application called Glassbox Troubleshooter. The system diagnoses problems by constructing a virtual application model describing the application components and how they connect to each other using aspects. In this singly directed graph, each node represents a component or resource within the application and each edge a usage relationship. Using aspects, Glassbox discovers the components dynamically as the application runs and constructs the model automatically.


The model allows Glassbox to go beyond simple correlation analysis to trace causality through the system. For example, a correlation analysis might reveal that a slow operation occurred at the same time the database was running slowly overall, suggesting a database tuning problem. Because the virtual application model allows walking the usage relationships in both directions, the model reveals that the slow database was in fact caused by a long-running report query and identifies that as the cause.


I recently used Glassbox to observe the Sun Blueprint’s Java Petstore and correlated the observations to a load test. Some things became clear. While I had thought the browser-to-server communication would lead to performance bottlenecks at parsing the incoming requests and marshalling the responses, I found that more often communication between the EJBs and the database caused the biggest hotspots in performance.



I was able to operate a load test of the PetStore and identify the slowest performing functions by correlating the load test to an alert for slow database operations.


Integration gives correlation capability


Some of the test automation tools and server-side monitors are going further to provide correlations between the test activity and observations at the Thread and object level on what happens in the backend.


For instance, most test automation tools produce a Scalability Index chart that shows Transaction Per Second (TPS) ratings for increasing levels of concurrent requests to a service. The following chart shows the TPS values for two use cases.



In the above chart the red bar shows the service operating at 0.17 TPS when the test use case operates for 10 minutes at 20 concurrent virtual users (threads). The blue bar shows the service operating at 0.08 TPS at 40 concurrent virtual users. I modified the chart to also show a Performance Alert when Glassbox observes an operation that falls outside of a normal time to complete the operation. (Glassbox uses properties files to set the parameters for normal time operation.) Clicking on the Performance Alert button brings you directly to the Glassbox Inspector for an explanation of the problem and details needed to understand and solve the performance problem.


While concurrency testing is often arduous, difficult to stage, and lengthy there are a new crop of test automation tools that make development and operation of concurrency tests relatively easy. Combining these test tools with monitoring software enables correlations of load testing activity on the front-end to thread, object, and service activity to surface concurrency issues. This is not guaranteed to surface concurrency issues but it will work the odds in your favor to run across the right conditions to trigger a failure and remediate a solution.


Resources


Brian Goetz article Testing Concurrent Programs


PushToTest TestMaker – open-source test automation


Glassbox – open-source troubleshooting agent for Java applications


Toward a Benchmark for Multi-Threaded Testing Tools