This page collects typical usage examples of the PHP method AmazonS3::initiate_multipart_upload. If you are unsure what AmazonS3::initiate_multipart_upload does, how to call it, or want to see it used in real code, the hand-picked method examples below should help. You can also explore further usage examples of the containing class, AmazonS3.
Three code examples of AmazonS3::initiate_multipart_upload are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better PHP code examples.
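Before the full examples, here is a minimal, hedged sketch of the call itself, assuming the legacy AWS SDK for PHP 1.x (the sdk.class.php AmazonS3 class the examples below rely on); the bucket name and object key are hypothetical placeholders:
// Minimal sketch: initiate a multipart upload and capture the upload ID.
require_once 'sdk.class.php'; // legacy AWS SDK for PHP 1.x (assumption)
$s3 = new AmazonS3(); // credentials read from the SDK config, or pass key/secret/token as in the examples below
$response = $s3->initiate_multipart_upload(
    'my-bucket',                     // hypothetical bucket name
    'backups/site-backup.zip',       // hypothetical object key
    array('encryption' => 'AES256')  // optional server-side encryption, as used in the examples below
);
if ($response->isOK()) {
    $upload_id = (string) $response->body->UploadId; // needed for every later part upload
}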
Example 1: send
//......... portion of this code omitted .........
pb_backupbuddy::status( 'details', 'Stash: Zip file. Detecting backup type if possible.' );
$serial = pb_backupbuddy::$classes['core']->get_serial_from_file( $file );
if ( isset( pb_backupbuddy::$options['backups'][$serial]['integrity']['detected_type'] ) ) {
pb_backupbuddy::status( 'details', 'Stash: Detected backup type as `' . pb_backupbuddy::$options['backups'][$serial]['integrity']['detected_type'] . '` via integrity check data.' );
$meta_array['backup_type'] = pb_backupbuddy::$options['backups'][$serial]['integrity']['detected_type'];
} else {
if ( stristr( $file, '-db-' ) !== false ) {
pb_backupbuddy::status( 'details', 'Stash: Detected backup type as `db` via filename.' );
$meta_array['backup_type'] = 'db';
} elseif ( stristr( $file, '-full-' ) !== false ) {
pb_backupbuddy::status( 'details', 'Stash: Detected backup type as `full` via filename.' );
$meta_array['backup_type'] = 'full';
} else {
pb_backupbuddy::status( 'details', 'Stash: Could not detect backup type via integrity details nor filename.' );
}
}
}
*/
// Create S3 instance.
pb_backupbuddy::status('details', 'Creating Stash S3 instance.');
$s3 = new AmazonS3($upload_data['credentials']); // the key, secret, token
if ($disable_ssl === true) {
@$s3->disable_ssl(true);
}
pb_backupbuddy::status('details', 'Stash S3 instance created.');
// Handle chunking of file into a multipart upload (if applicable).
$file_size = filesize($file);
if ($max_chunk_size >= 5 && $file_size / 1024 / 1024 > $max_chunk_size) {
// Minimum chunk size is 5 MB; anything under 5 MB is not chunked.
pb_backupbuddy::status('details', 'Stash file size of ' . $file_size / 1024 / 1024 . 'MB exceeds max chunk size of ' . $max_chunk_size . 'MB set in settings for sending file as multipart upload.');
// Initiate multipart upload with S3.
pb_backupbuddy::status('details', 'Initiating Stash multipart upload.');
$response = $s3->initiate_multipart_upload($upload_data['bucket'], $upload_data['object'], array('encryption' => 'AES256'));
if (!$response->isOK()) {
$this_error = 'Stash was unable to initiate multipart upload.';
$pb_backupbuddy_destination_errors[] = $this_error;
pb_backupbuddy::status('error', $this_error);
return false;
} else {
$upload_id = (string) $response->body->UploadId;
pb_backupbuddy::status('details', 'Stash initiated multipart upload with ID `' . $upload_id . '`.');
}
// Get chunk parts for multipart transfer.
pb_backupbuddy::status('details', 'Stash getting multipart counts.');
$parts = $s3->get_multipart_counts($file_size, $max_chunk_size * 1024 * 1024);
// Size of chunks expected to be in bytes.
$multipart_destination_settings = $settings;
$multipart_destination_settings['_multipart_id'] = $upload_id;
$multipart_destination_settings['_multipart_partnumber'] = 0;
$multipart_destination_settings['_multipart_file'] = $file;
$multipart_destination_settings['_multipart_counts'] = $parts;
$multipart_destination_settings['_multipart_upload_data'] = $upload_data;
$multipart_destination_settings['_multipart_backup_type_dir'] = $backup_type_dir;
pb_backupbuddy::status('details', 'Stash multipart settings to pass:' . print_r($multipart_destination_settings, true));
unset($files[$file_id]);
// Remove this file from queue of files to send as it is now passed off to be handled in multipart upload.
// Schedule to process the parts.
pb_backupbuddy::status('details', 'Stash scheduling send of next part(s).');
wp_schedule_single_event(time(), pb_backupbuddy::cron_tag('destination_send'), array($multipart_destination_settings, $files, 'multipart', false));
spawn_cron(time() + 150); // Adds > 60 seconds to get around once per minute cron running limit.
update_option('_transient_doing_cron', 0); // Prevent cron-blocking for next item.
pb_backupbuddy::status('details', 'Stash scheduled send of next part(s). Done for this cycle.');
return array($upload_id, 'Starting send of ' . count($multipart_destination_settings['_multipart_counts']) . ' parts.');
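The example stops after scheduling the part sends. As a hedged sketch (not part of the plugin code above), the scheduled job would typically walk the counts returned by get_multipart_counts(), upload each slice, and then complete the upload, roughly like this (variable names reuse the ones above):
foreach ($parts as $i => $part) {
    $response = $s3->upload_part($upload_data['bucket'], $upload_data['object'], $upload_id, array(
        'fileUpload' => $file,                  // local file the chunk is read from
        'partNumber' => $i + 1,                 // S3 part numbers start at 1
        'seekTo'     => (int) $part['seekTo'],  // byte offset of this chunk
        'length'     => (int) $part['length'],  // chunk size in bytes
    ));
    if (!$response->isOK()) {
        return false; // a real sender would log the failure and reschedule
    }
}
// Ask S3 to assemble the parts; list_parts() provides the part-number/ETag pairs.
$s3->complete_multipart_upload($upload_data['bucket'], $upload_data['object'], $upload_id, $s3->list_parts($upload_data['bucket'], $upload_data['object'], $upload_id));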
Example 2: send
//......... portion of this code omitted .........
if (!$response->isOK()) {
$this_error = 'Stash request for upload credentials failed.';
$pb_backupbuddy_destination_errors[] = $this_error;
pb_backupbuddy::status('error', $this_error);
return false;
}
if (!($upload_data = json_decode($response->body, true))) {
$this_error = 'Stash API did not give a valid JSON response.';
$pb_backupbuddy_destination_errors[] = $this_error;
pb_backupbuddy::status('error', $this_error);
return false;
}
if (isset($upload_data['error'])) {
$this_error = 'Stash error(s): `' . implode(' - ', $upload_data['error']) . '`.';
$pb_backupbuddy_destination_errors[] = $this_error;
pb_backupbuddy::status('error', $this_error);
return false;
}
// Create S3 instance.
pb_backupbuddy::status('details', 'Creating Stash S3 instance.');
$s3 = new AmazonS3($upload_data['credentials']); // the key, secret, token
if ($disable_ssl === true) {
@$s3->disable_ssl(true);
}
pb_backupbuddy::status('details', 'Stash S3 instance created.');
// Handle chunking of file into a multipart upload (if applicable).
$file_size = filesize($file);
if ($max_chunk_size >= self::MINIMUM_CHUNK_SIZE && $file_size / 1024 / 1024 > $max_chunk_size) {
// Minimum chunk size is 5 MB; anything under 5 MB is not chunked.
pb_backupbuddy::status('details', 'Stash file size of ' . pb_backupbuddy::$format->file_size($file_size) . ' exceeds max chunk size of ' . $max_chunk_size . 'MB set in settings for sending file as multipart upload.');
// Initiate multipart upload with S3.
pb_backupbuddy::status('details', 'Initiating Stash multipart upload.');
$response = $s3->initiate_multipart_upload($upload_data['bucket'], $upload_data['object'], array('encryption' => 'AES256'));
if (!$response->isOK()) {
$this_error = 'Stash was unable to initiate multipart upload.';
$pb_backupbuddy_destination_errors[] = $this_error;
pb_backupbuddy::status('error', $this_error);
return false;
} else {
$upload_id = (string) $response->body->UploadId;
pb_backupbuddy::status('details', 'Stash initiated multipart upload with ID `' . $upload_id . '`.');
}
// Get chunk parts for multipart transfer.
pb_backupbuddy::status('details', 'Stash getting multipart counts.');
$parts = $s3->get_multipart_counts($file_size, $max_chunk_size * 1024 * 1024);
// Size of chunks expected to be in bytes.
$multipart_destination_settings = $settings;
$multipart_destination_settings['_multipart_id'] = $upload_id;
$multipart_destination_settings['_multipart_partnumber'] = 0;
$multipart_destination_settings['_multipart_file'] = $file;
$multipart_destination_settings['_multipart_counts'] = $parts;
$multipart_destination_settings['_multipart_upload_data'] = $upload_data;
$multipart_destination_settings['_multipart_backup_type_dir'] = $backup_type_dir;
pb_backupbuddy::status('details', 'Stash multipart settings to pass:' . print_r($multipart_destination_settings, true));
unset($files[$file_id]);
// Remove this file from queue of files to send as it is now passed off to be handled in multipart upload.
// Schedule to process the parts.
pb_backupbuddy::status('details', 'Stash scheduling send of next part(s).');
backupbuddy_core::schedule_single_event(time(), pb_backupbuddy::cron_tag('destination_send'), array($multipart_destination_settings, $files, $send_id));
spawn_cron(time() + 150); // Adds > 60 seconds to get around once per minute cron running limit.
update_option('_transient_doing_cron', 0); // Prevent cron-blocking for next item.
pb_backupbuddy::status('details', 'Stash scheduled send of next part(s). Done for this cycle.');
return array($upload_id, 'Starting send of ' . count($multipart_destination_settings['_multipart_counts']) . ' parts.');
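One follow-up worth noting (a hedged sketch, not shown in the example): if a chunked send fails for good, the initiated upload should be aborted so the already-uploaded parts stop accruing storage charges. Bucket, object, and upload ID reuse the placeholders from the example above:
$abort = $s3->abort_multipart_upload($upload_data['bucket'], $upload_data['object'], $upload_id);
if (!$abort->isOK()) {
    pb_backupbuddy::status('error', 'Stash was unable to abort multipart upload `' . $upload_id . '`.');
}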
Example 3: array
//......... portion of this code omitted .........
$result_arr = array();
$result_arr['status'] = 'completed';
$result_arr['nextFunc'] = 'amazons3_backup_over';
$result_arr['s3Args'] = $tempArgs;
$result_arr['current_file_num'] = $current_file_num;
$result_arr['dont_retrace'] = true;
$task_result['task_results'][$historyID]['amazons3'][$current_file_num - 1] = basename($backup_file);
$task_result['amazons3'][$current_file_num - 1] = basename($backup_file);
if ($current_file_num >= $backup_files_count) {
unset($task_result['task_results'][$historyID]['server']);
@unlink($backup_file);
} else {
//to continue zip split parts
$resArray['status'] = 'partiallyCompleted';
$chunkResult = array();
$chunkResult['partsArray'] = array();
$chunkResult['nextPart'] = 1;
$chunkResult['upload_id'] = 'start';
$result_arr['response_data'] = $chunkResult;
$result_arr['nextFunc'] = 'amazons3_backup';
$result_arr['status'] = 'partiallyCompleted';
$result_arr['start_new_backup'] = true;
@unlink($backup_file);
}
$this->statusLog($this->hisID, array('stage' => 's3MultiCall', 'status' => 'completed', 'statusMsg' => 'nextCall', 'nextFunc' => 'amazons3_backup', 'task_result' => $task_result, 'responseParams' => $result_arr));
return $resArray;
} else {
return array('error' => 'Failed to upload to Amazon S3.');
}
}
if ($upload_id == 'start') {
echo "initiating multiCall upload";
//initiate the multiPartUpload to get the uploadID from its response
$response = $s3->initiate_multipart_upload($as3_bucket, $as3_file); // createMultipartUpload
//convert the response into an array
$response_array = $cfu_obj->convert_response_to_array($response);
//get the uploadID
$upload_id = $response_array['body']['UploadId'];
//storing the uploadID in DB
$backup_settings_values['s3_upload_id'][$historyID] = $upload_id;
$backup_settings_values['backup_file'] = $backup_file;
update_option('iwp_client_multi_backup_temp_values', $backup_settings_values);
}
//get the parts of the big file
$parts = $s3->get_multipart_counts(iwp_mmb_get_file_size($backup_file), $upload_file_block_size);
//1 MB chunks
if ($retrace == 'set') {
$list_parts_response = $s3->list_parts($as3_bucket, $as3_file, $upload_id);
$partsArray = CFUtilities::convert_response_to_array($list_parts_response);
$nextPart = count($partsArray) + 1;
$this->statusLog($this->hisID, array('stage' => 's3MultiCall', 'status' => 'partiallyCompleted', 'statusMsg' => 'retracingValues', 'nextFunc' => 'amazons3_backup', 'task_result' => $task_result, 'responseParams' => $result_arr));
$retrace = 'unset';
}
//this is the main upload loop break it on when the timeLimit is reached
//chunk upload loop
$partsArraySize = count($parts);
$s3ChunkTimeTaken = 0;
$s3ChunkCount = 0;
$reloop = false;
$reloopCount = 0;
$status = '';
do {
$uploadLoopCount = 0;
if ($reloopCount == 0) {
$s3ChunkStartTime = $s3StartTime;