AWS DOCUMENTATION

Downloading files from an Amazon S3 bucket to an Oracle DB instance

1. Add the AWS S3 integration option: create a new option group with the S3_INTEGRATION option and attach it to the DB instance (no access to do so ourselves; see the CLI sketch below).
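A minimal sketch of that setup with the AWS CLI; the option group name, engine edition/version, instance identifier, account number, and role name below are all placeholders, not values from this environment:

# create an option group and add the S3_INTEGRATION option to it
aws rds create-option-group \
    --option-group-name oracle-s3-og \
    --engine-name oracle-ee \
    --major-engine-version 19 \
    --option-group-description "S3 integration for Oracle"

aws rds add-option-to-option-group \
    --option-group-name oracle-s3-og \
    --options OptionName=S3_INTEGRATION,OptionVersion=1.0 \
    --apply-immediately

# attach the option group and an IAM role that can read the bucket
aws rds modify-db-instance \
    --db-instance-identifier mydbinstance \
    --option-group-name oracle-s3-og \
    --apply-immediately

aws rds add-role-to-db-instance \
    --db-instance-identifier mydbinstance \
    --role-arn arn:aws:iam::123456789012:role/s3-int-role \
    --feature-name S3_INTEGRATION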
2. Download the dump files from S3 to the RDS directory:
SELECT rdsadmin.rdsadmin_s3_tasks.download_from_s3(
         p_bucket_name    => 's3-dbbackup',
         p_s3_prefix      => 'sqlserver/Non_prod_test/',
         p_directory_name => 'DATA_PUMP_DIR')
       AS task_id
FROM DUAL;
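The query returns a task ID (for example, 1610467532223-648), which names the task log read in the next step.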

3. Check the status of the transfer:

-- the log filename embeds the task ID returned in step 2
SELECT text FROM table(rdsadmin.rds_file_util.read_text_file('BDUMP','dbtask-<task_id>.log'));

-- for example:
SELECT text FROM table(rdsadmin.rds_file_util.read_text_file('BDUMP','dbtask-1610467532223-648.log'));
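If the task ID was not captured in step 2, listing the BDUMP directory locates the task logs (same listdir utility as in step 5):

SELECT filename, mtime
  FROM TABLE(rdsadmin.rds_file_util.listdir('BDUMP'))
 WHERE filename LIKE 'dbtask%'
 ORDER BY mtime;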

4. Use DBMS_DATAPUMP to import the dump file on the target DB instance:

DECLARE
  v_hdnl NUMBER;
BEGIN
  -- open a schema-mode import job
  v_hdnl := DBMS_DATAPUMP.OPEN(
    operation => 'IMPORT',
    job_mode  => 'SCHEMA',
    job_name  => null);
  -- the dump file downloaded to DATA_PUMP_DIR in step 2
  DBMS_DATAPUMP.ADD_FILE(
    handle    => v_hdnl,
    filename  => 'sample_copied.dmp',
    directory => 'DATA_PUMP_DIR',
    filetype  => dbms_datapump.ku$_file_type_dump_file);
  -- log file for the import
  DBMS_DATAPUMP.ADD_FILE(
    handle    => v_hdnl,
    filename  => 'sample_imp.log',
    directory => 'DATA_PUMP_DIR',
    filetype  => dbms_datapump.ku$_file_type_log_file);
  -- import only the SCHEMA_1 schema
  DBMS_DATAPUMP.METADATA_FILTER(v_hdnl, 'SCHEMA_EXPR', 'IN (''SCHEMA_1'')');
  DBMS_DATAPUMP.START_JOB(v_hdnl);
END;
/
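If the data must land in a schema other than the one it was exported from, a METADATA_REMAP call can be added before START_JOB. A minimal sketch, assuming a hypothetical target schema SCHEMA_2:

-- inside the same block, after METADATA_FILTER and before START_JOB
DBMS_DATAPUMP.METADATA_REMAP(
  handle    => v_hdnl,
  name      => 'REMAP_SCHEMA',
  old_value => 'SCHEMA_1',
  value     => 'SCHEMA_2');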

5. Check the status

-- view the import log file

SELECT * FROM TABLE(rdsadmin.rds_file_util.read_text_file(
  p_directory => 'DATA_PUMP_DIR',
  p_filename  => 'sample_imp.log'));

-- observe the status of the job

SELECT owner_name, job_name, operation, job_mode, degree, state
  FROM dba_datapump_jobs
 WHERE state = 'EXECUTING';

-- list the contents of DATA_PUMP_DIR

SELECT * FROM TABLE(rdsadmin.rds_file_util.listdir('DATA_PUMP_DIR')) ORDER BY mtime;

-- remove the dump files once the import is done
EXEC UTL_FILE.FREMOVE('DATA_PUMP_DIR','<file_name>');
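To clear several files at once, the listdir output can drive the removal. A short sketch, assuming every *.dmp file in DATA_PUMP_DIR is safe to delete:

BEGIN
  FOR f IN (SELECT filename
              FROM TABLE(rdsadmin.rds_file_util.listdir('DATA_PUMP_DIR'))
             WHERE type = 'file'
               AND filename LIKE '%.dmp')
  LOOP
    UTL_FILE.FREMOVE('DATA_PUMP_DIR', f.filename);  -- delete each dump file
  END LOOP;
END;
/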
