This article collects typical usage examples of ROUND_UP in C++. If you are wondering what ROUND_UP does, how it is used, or what real-world examples look like, the hand-picked code samples below may help.
The following shows 15 code examples of ROUND_UP, ordered by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better C++ code examples.
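Before the examples, here is a minimal sketch of a common ROUND_UP/ROUND_DOWN definition, assuming the alignment is a power of two. It is not the definition used by any particular project below: as the examples show, some projects pass a byte alignment, some a power-of-two exponent, and some bake the alignment into the macro and take no second argument at all.
#include <stddef.h>
/* Round x up or down to a multiple of align; align must be a power of two.
 * Projects that allow arbitrary alignments use
 * (((x) + (align) - 1) / (align)) * (align) instead. */
#define ROUND_UP(x, align)   (((x) + (align) - 1) & ~((align) - 1))
#define ROUND_DOWN(x, align) ((x) & ~((align) - 1))
int main(void)
{
    size_t padded = ROUND_UP(13, 8);    /* 16: pad a 13-byte record to an 8-byte boundary */
    size_t base   = ROUND_DOWN(13, 8);  /* 8 */
    return (padded == 16 && base == 8) ? 0 : 1;
}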
Example 1: x_render_char
/* Render a character. */
static int
x_render_char(gx_xfont * xf, gx_xglyph xg, gx_device * dev,
int xo, int yo, gx_color_index color, int required)
{
x_xfont *xxf = (x_xfont *) xf;
char chr = (char)xg;
gs_point wxy;
gs_int_rect bbox;
int x, y, w, h;
int code;
if (dev->dname == gs_x11_device.dname && !((gx_device_X *)dev)->is_buffered) {
gx_device_X *xdev = (gx_device_X *)dev;
code = (*xf->common.procs->char_metrics) (xf, xg, 0, &wxy, &bbox);
if (code < 0)
return code;
/* Buffer text for more efficient X interaction. */
if (xdev->text.item_count == MAX_TEXT_ITEMS ||
xdev->text.char_count == MAX_TEXT_CHARS ||
(IN_TEXT(xdev) &&
(yo != xdev->text.origin.y || color != xdev->fore_color ||
xxf->font->fid != xdev->fid))
) {
DRAW_TEXT(xdev);
xdev->text.item_count = xdev->text.char_count = 0;
}
if (xdev->text.item_count == 0) {
X_SET_FILL_STYLE(xdev, FillSolid);
X_SET_FORE_COLOR(xdev, color);
X_SET_FUNCTION(xdev, GXcopy);
xdev->text.origin.x = xdev->text.x = xo;
xdev->text.origin.y = yo;
xdev->text.items[0].font = xdev->fid = xxf->font->fid;
}
/*
* The following is wrong for rotated text, but it doesn't matter,
* because the next call of x_render_char will have a different Y.
*/
{
int index = xdev->text.item_count;
XTextItem *item = &xdev->text.items[index];
char *pchar = &xdev->text.chars[xdev->text.char_count++];
int delta = xo - xdev->text.x;
*pchar = chr;
if (index > 0 && delta == 0) {
/* Continue the same item. */
item[-1].nchars++;
} else {
/* Start a new item. */
item->chars = pchar;
item->nchars = 1;
item->delta = delta;
if (index > 0)
item->font = None;
xdev->text.item_count++;
}
xdev->text.x = xo + wxy.x;
}
if (xdev->bpixmap != (Pixmap) 0) {
x = xo + bbox.p.x;
y = yo + bbox.p.y;
w = bbox.q.x - bbox.p.x;
h = bbox.q.y - bbox.p.y;
fit_fill(dev, x, y, w, h);
x_update_add(xdev, x, y, w, h);
}
return 0;
} else if (!required)
return -1; /* too hard */
else {
/* Display on an intermediate bitmap, then copy the bits. */
gx_device_X *xdev = xxf->xdev;
int wbm, raster;
int i;
XImage *xim;
Pixmap xpm;
GC fgc;
byte *bits;
dev_proc_copy_mono((*copy_mono)) = dev_proc(dev, copy_mono);
code = (*xf->common.procs->char_metrics) (xf, xg, 0, &wxy, &bbox);
if (code < 0)
return code;
w = bbox.q.x - bbox.p.x;
h = bbox.q.y - bbox.p.y;
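/* Round the glyph width in bits up to a multiple of align_bitmap_mod bytes,
 * so each scan line of the temporary bitmap is a whole number of aligned bytes (raster). */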
wbm = ROUND_UP(w, align_bitmap_mod * 8);
raster = wbm >> 3;
bits = (byte *) gs_malloc(xdev->memory, h, raster, "x_render_char");
if (bits == 0)
return gs_error_limitcheck;
xpm = XCreatePixmap(xdev->dpy, xdev->win, w, h, 1);
fgc = XCreateGC(xdev->dpy, xpm, None, NULL);
XSetForeground(xdev->dpy, fgc, 0);
XFillRectangle(xdev->dpy, xpm, fgc, 0, 0, w, h);
XSetForeground(xdev->dpy, fgc, 1);
XSetFont(xdev->dpy, fgc, xxf->font->fid);
//......... part of the code omitted here .........
Example 2: op_x86_res_calc
void op_x86_res_calc(
int blockIdx,
float *ind_arg0, int *ind_arg0_maps,
float *ind_arg1, int *ind_arg1_maps,
float *ind_arg2, int *ind_arg2_maps,
float *ind_arg3, int *ind_arg3_maps,
short *arg0_maps,
short *arg1_maps,
short *arg2_maps,
short *arg3_maps,
short *arg4_maps,
short *arg5_maps,
short *arg6_maps,
short *arg7_maps,
int *ind_arg_sizes,
int *ind_arg_offs,
int block_offset,
int *blkmap,
int *offset,
int *nelems,
int *ncolors,
int *colors) {
float arg6_l[4];
float arg7_l[4];
int *ind_arg0_map, ind_arg0_size;
int *ind_arg1_map, ind_arg1_size;
int *ind_arg2_map, ind_arg2_size;
int *ind_arg3_map, ind_arg3_size;
float *ind_arg0_s;
float *ind_arg1_s;
float *ind_arg2_s;
float *ind_arg3_s;
int nelems2, ncolor;
int nelem, offset_b;
char shared[64000];
if (0==0) {
// get sizes and shift pointers and direct-mapped data
int blockId = blkmap[blockIdx + block_offset];
nelem = nelems[blockId];
offset_b = offset[blockId];
nelems2 = nelem;
ncolor = ncolors[blockId];
ind_arg0_size = ind_arg_sizes[0+blockId*4];
ind_arg1_size = ind_arg_sizes[1+blockId*4];
ind_arg2_size = ind_arg_sizes[2+blockId*4];
ind_arg3_size = ind_arg_sizes[3+blockId*4];
ind_arg0_map = ind_arg0_maps + ind_arg_offs[0+blockId*4];
ind_arg1_map = ind_arg1_maps + ind_arg_offs[1+blockId*4];
ind_arg2_map = ind_arg2_maps + ind_arg_offs[2+blockId*4];
ind_arg3_map = ind_arg3_maps + ind_arg_offs[3+blockId*4];
// set shared memory pointers
int nbytes = 0;
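// carve one slice of the shared buffer per indirectly accessed dataset;
// ROUND_UP pads each slice so the next one starts at an aligned offset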
ind_arg0_s = (float *) &shared[nbytes];
nbytes += ROUND_UP(ind_arg0_size*sizeof(float)*2);
ind_arg1_s = (float *) &shared[nbytes];
nbytes += ROUND_UP(ind_arg1_size*sizeof(float)*4);
ind_arg2_s = (float *) &shared[nbytes];
nbytes += ROUND_UP(ind_arg2_size*sizeof(float)*1);
ind_arg3_s = (float *) &shared[nbytes];
}
__syncthreads(); // make sure all of above completed
// copy indirect datasets into shared memory or zero increment
for (int n=0; n<ind_arg0_size; n++)
for (int d=0; d<2; d++)
ind_arg0_s[d+n*2] = ind_arg0[d+ind_arg0_map[n]*2];
for (int n=0; n<ind_arg1_size; n++)
for (int d=0; d<4; d++)
ind_arg1_s[d+n*4] = ind_arg1[d+ind_arg1_map[n]*4];
for (int n=0; n<ind_arg2_size; n++)
for (int d=0; d<1; d++)
ind_arg2_s[d+n*1] = ind_arg2[d+ind_arg2_map[n]*1];
for (int n=0; n<ind_arg3_size; n++)
for (int d=0; d<4; d++)
ind_arg3_s[d+n*4] = ZERO_float;
__syncthreads();
// process set elements
for (int n=0; n<nelems2; n++) {
int col2 = -1;
if (n<nelem) {
//......... part of the code omitted here .........
Example 3: ops_par_loop_update_halo_kernel3_minus_2_a
// host stub function
void ops_par_loop_update_halo_kernel3_minus_2_a(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_arg args[3] = { arg0, arg1, arg2};
ops_timing_realloc(94,"update_halo_kernel3_minus_2_a");
OPS_kernels[94].count++;
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif //OPS_MPI
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0]*args[0].dat->dim;
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0]*args[1].dat->dim;
int ydim1 = args[1].dat->size[1];
//build opencl kernel if not already built
buildOpenCLKernels_update_halo_kernel3_minus_2_a(
xdim0,ydim0,xdim1,ydim1);
//Timing
double t1,t2,c1,c2;
ops_timers_core(&c2,&t2);
//set up OpenCL thread blocks
size_t globalWorkSize[3] = {((x_size-1)/OPS_block_size_x+ 1)*OPS_block_size_x, ((y_size-1)/OPS_block_size_y + 1)*OPS_block_size_y, MAX(1,end[2]-start[2])};
size_t localWorkSize[3] = {OPS_block_size_x,OPS_block_size_y,1};
int *arg2h = (int *)arg2.data;
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
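// arg2.data / arg2.data_d now point at the staged copy inside the host/device
// constants buffers; ROUND_UP pads consts_bytes so each staged constant starts at an aligned offset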
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
//set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d];
#endif //OPS_MPI
int base0 = 1 *
(start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]);
base0 = base0 + args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]);
base0 = base0 + args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]);
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d];
#endif //OPS_MPI
int base1 = 1 *
//......... part of the code omitted here .........
Example 4: RtlpCreateStack
NTSTATUS
RtlpCreateStack(
IN HANDLE Process,
IN SIZE_T MaximumStackSize OPTIONAL,
IN SIZE_T CommittedStackSize OPTIONAL,
IN ULONG ZeroBits OPTIONAL,
OUT PINITIAL_TEB InitialTeb
)
{
NTSTATUS Status;
PCH Stack;
SYSTEM_BASIC_INFORMATION SysInfo;
BOOLEAN GuardPage;
SIZE_T RegionSize;
ULONG OldProtect;
Status = ZwQuerySystemInformation( SystemBasicInformation,
(PVOID)&SysInfo,
sizeof( SysInfo ),
NULL
);
if ( !NT_SUCCESS( Status ) ) {
return( Status );
}
//
// if stack is in the current process, then default to
// the parameters from the image
//
if ( Process == NtCurrentProcess() ) {
PPEB Peb;
PIMAGE_NT_HEADERS NtHeaders;
Peb = NtCurrentPeb();
NtHeaders = RtlImageNtHeader(Peb->ImageBaseAddress);
if (!NtHeaders) {
return STATUS_INVALID_IMAGE_FORMAT;
}
if (!MaximumStackSize) {
MaximumStackSize = NtHeaders->OptionalHeader.SizeOfStackReserve;
}
if (!CommittedStackSize) {
CommittedStackSize = NtHeaders->OptionalHeader.SizeOfStackCommit;
}
}
else {
if (!CommittedStackSize) {
CommittedStackSize = SysInfo.PageSize;
}
if (!MaximumStackSize) {
MaximumStackSize = SysInfo.AllocationGranularity;
}
}
//
// Enforce a minimal stack commit if there is a PEB setting
// for this.
//
if ( CommittedStackSize >= MaximumStackSize ) {
MaximumStackSize = ROUND_UP(CommittedStackSize, (1024*1024));
}
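// the commit size is rounded up to whole pages and the reserve size up to the
// allocation granularity before the stack is reserved below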
CommittedStackSize = ROUND_UP( CommittedStackSize, SysInfo.PageSize );
MaximumStackSize = ROUND_UP( MaximumStackSize,
SysInfo.AllocationGranularity
);
Stack = NULL;
Status = ZwAllocateVirtualMemory( Process,
(PVOID *)&Stack,
ZeroBits,
&MaximumStackSize,
MEM_RESERVE,
PAGE_READWRITE
);
if ( !NT_SUCCESS( Status ) ) {
#if DBG
DbgPrint( "NTRTL: RtlpCreateStack( %lx ) failed. Stack Reservation Status == %X\n",
Process,
Status
);
#endif // DBG
return( Status );
}
//......... part of the code omitted here .........
Example 5: reiserfs_symlink
static int reiserfs_symlink(struct inode *parent_dir,
struct dentry *dentry, const char *symname)
{
int retval;
struct inode *inode;
char *name;
int item_len;
struct reiserfs_transaction_handle th;
struct reiserfs_security_handle security;
int mode = S_IFLNK | S_IRWXUGO;
/* We need blocks for transaction + (user+group)*(quotas for new inode + update of quota for directory owner) */
int jbegin_count =
JOURNAL_PER_BALANCE_CNT * 3 +
2 * (REISERFS_QUOTA_INIT_BLOCKS(parent_dir->i_sb) +
REISERFS_QUOTA_TRANS_BLOCKS(parent_dir->i_sb));
if (!(inode = new_inode(parent_dir->i_sb))) {
return -ENOMEM;
}
new_inode_init(inode, parent_dir, mode);
retval = reiserfs_security_init(parent_dir, inode, &security);
if (retval < 0) {
drop_new_inode(inode);
return retval;
}
jbegin_count += retval;
reiserfs_write_lock(parent_dir->i_sb);
item_len = ROUND_UP(strlen(symname));
if (item_len > MAX_DIRECT_ITEM_LEN(parent_dir->i_sb->s_blocksize)) {
retval = -ENAMETOOLONG;
drop_new_inode(inode);
goto out_failed;
}
name = kmalloc(item_len, GFP_NOFS);
if (!name) {
drop_new_inode(inode);
retval = -ENOMEM;
goto out_failed;
}
memcpy(name, symname, strlen(symname));
padd_item(name, item_len, strlen(symname));
retval = journal_begin(&th, parent_dir->i_sb, jbegin_count);
if (retval) {
drop_new_inode(inode);
kfree(name);
goto out_failed;
}
retval =
reiserfs_new_inode(&th, parent_dir, mode, name, strlen(symname),
dentry, inode, &security);
kfree(name);
if (retval) { /* reiserfs_new_inode iputs for us */
goto out_failed;
}
reiserfs_update_inode_transaction(inode);
reiserfs_update_inode_transaction(parent_dir);
inode->i_op = &reiserfs_symlink_inode_operations;
inode->i_mapping->a_ops = &reiserfs_address_space_operations;
// must be sure this inode is written with this transaction
//
//reiserfs_update_sd (&th, inode, READ_BLOCKS);
retval = reiserfs_add_entry(&th, parent_dir, dentry->d_name.name,
dentry->d_name.len, inode, 1 /*visible */ );
if (retval) {
int err;
inode->i_nlink--;
reiserfs_update_sd(&th, inode);
err = journal_end(&th, parent_dir->i_sb, jbegin_count);
if (err)
retval = err;
unlock_new_inode(inode);
iput(inode);
goto out_failed;
}
d_instantiate(dentry, inode);
unlock_new_inode(inode);
retval = journal_end(&th, parent_dir->i_sb, jbegin_count);
out_failed:
reiserfs_write_unlock(parent_dir->i_sb);
return retval;
}
Example 6: ofl_structs_table_properties_pack
size_t
ofl_structs_table_properties_pack(struct ofl_table_feature_prop_header * src, struct ofp_table_feature_prop_header *dst, uint8_t *data, struct ofl_exp *exp){
dst->type = htons(src->type);
switch (src->type){
case OFPTFPT_INSTRUCTIONS:
case OFPTFPT_INSTRUCTIONS_MISS:{
int i;
struct ofl_table_feature_prop_instructions *sp = (struct ofl_table_feature_prop_instructions*) src;
struct ofp_table_feature_prop_instructions *dp = (struct ofp_table_feature_prop_instructions*) dst;
uint8_t *ptr;
dp->length = htons(sp->header.length);
ptr = (uint8_t*) data + (sizeof(struct ofp_table_feature_prop_header));
for(i = 0; i < sp->ids_num; i++){
if(sp->instruction_ids[i].type == OFPIT_EXPERIMENTER){
struct ofp_instruction inst;
inst.type = sp->instruction_ids[i].type;
if (exp == NULL || exp->inst == NULL || exp->inst->unpack == NULL) {
OFL_LOG_WARN(LOG_MODULE, "Received EXPERIMENTER instruction, but no callback was given.");
return ofl_error(OFPET_BAD_INSTRUCTION, OFPBIC_UNSUP_INST);
}
inst.len = ROUND_UP(sizeof(struct ofp_instruction) + exp->inst->ofp_len(&sp->instruction_ids[i]),8);
memcpy(ptr, &inst, sizeof(struct ofp_instruction) - 4);
ptr += sizeof(struct ofp_instruction) - 4;
}
else {
struct ofp_instruction inst;
inst.type = htons(sp->instruction_ids[i].type);
inst.len = htons(sizeof(struct ofp_instruction) - 4);
memcpy(ptr, &inst, sizeof(struct ofp_instruction) - 4);
ptr += sizeof(struct ofp_instruction) - 4;
}
}
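/* table feature properties are padded to a multiple of 8 bytes:
 * zero the padding and return the padded length */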
memset(ptr, 0x0, ROUND_UP(sp->header.length,8) - sp->header.length);
return ROUND_UP(ntohs(dp->length),8);
}
case OFPTFPT_NEXT_TABLES:
case OFPTFPT_NEXT_TABLES_MISS:{
int i;
uint8_t *ptr;
struct ofl_table_feature_prop_next_tables *sp = (struct ofl_table_feature_prop_next_tables*) src;
struct ofp_table_feature_prop_next_tables *dp = (struct ofp_table_feature_prop_next_tables*) dst;
dp->length = htons(sp->header.length);
ptr = data + (sizeof(struct ofp_table_feature_prop_header));
for(i = 0; i < sp->table_num; i++){
memcpy(ptr, &sp->next_table_ids[i], sizeof(uint8_t));
ptr += sizeof(uint8_t);
}
memset(ptr, 0x0, ROUND_UP(sp->header.length,8)-sp->header.length);
return ROUND_UP(ntohs(dp->length),8);
}
case OFPTFPT_WRITE_ACTIONS:
case OFPTFPT_WRITE_ACTIONS_MISS:
case OFPTFPT_APPLY_ACTIONS:
case OFPTFPT_APPLY_ACTIONS_MISS:{
int i;
uint8_t *ptr;
struct ofl_table_feature_prop_actions *sp = (struct ofl_table_feature_prop_actions*) src;
struct ofp_table_feature_prop_actions *dp = (struct ofp_table_feature_prop_actions*) dst;
dp->length = htons(sp->header.length);
ptr = data + (sizeof(struct ofp_table_feature_prop_header));
for(i = 0; i < sp->actions_num; i++){
if(sp->action_ids[i].type == OFPAT_EXPERIMENTER){
memcpy(ptr, &sp->action_ids[i], sizeof(struct ofp_action_header));
ptr += sizeof(struct ofp_action_header);
}
else {
struct ofp_action_header action;
action.type = htons(sp->action_ids[i].type);
action.len = htons(sp->action_ids[i].len);
memcpy(ptr, &action, sizeof(struct ofp_action_header) -4);
ptr += sizeof(struct ofp_action_header) -4;
}
}
memset(ptr, 0x0, ROUND_UP(sp->header.length,8)- sp->header.length);
return ROUND_UP(ntohs(dp->length),8);
}
case OFPTFPT_MATCH:
case OFPTFPT_WILDCARDS:
case OFPTFPT_WRITE_SETFIELD:
case OFPTFPT_WRITE_SETFIELD_MISS:
case OFPTFPT_APPLY_SETFIELD:
case OFPTFPT_APPLY_SETFIELD_MISS:{
int i;
struct ofl_table_feature_prop_oxm *sp = (struct ofl_table_feature_prop_oxm*) src;
struct ofp_table_feature_prop_oxm *dp = (struct ofp_table_feature_prop_oxm*) dst;
dp->length = htons(sp->header.length);
data += sizeof(struct ofp_table_feature_prop_header);
for(i = 0; i < sp->oxm_num; i++){
uint32_t header = htonl(sp->oxm_ids[i]);
memcpy(data, &header, sizeof(uint32_t));
data += sizeof(uint32_t);
}
memset(data, 0x0, ROUND_UP(sp->header.length,8)- sp->header.length);
//......... part of the code omitted here .........
Example 7: ops_par_loop_update_halo_kernel2_zvel_minus_2_back
// host stub function
void ops_par_loop_update_halo_kernel2_zvel_minus_2_back(
char const *name, ops_block block, int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 3, range, 57))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(57, "update_halo_kernel2_zvel_minus_2_back");
OPS_kernels[57].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
#endif // OPS_MPI
int arg_idx[3];
int arg_idx_base[3];
#ifdef OPS_MPI
if (compute_ranges(args, 3, block, range, start, end, arg_idx) < 0)
return;
#else // OPS_MPI
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
arg_idx[n] = start[n];
}
#endif
for (int n = 0; n < 3; n++) {
arg_idx_base[n] = arg_idx[n];
}
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
int *arg2h = (int *)arg2.data;
// Upload large globals
#ifdef OPS_GPU
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
args[2].data = OPS_consts_h + consts_bytes;
args[2].data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)args[2].data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
#endif // OPS_GPU
// set up initial pointers
int base0 = args[0].dat->base_offset +
(OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size) *
start[0] * args[0].stencil->stride[0];
base0 = base0 +
(OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size) *
args[0].dat->size[0] * start[1] * args[0].stencil->stride[1];
base0 = base0 +
(OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size) *
args[0].dat->size[0] * args[0].dat->size[1] * start[2] *
args[0].stencil->stride[2];
#ifdef OPS_GPU
double *p_a0 = (double *)((char *)args[0].data_d + base0);
#else
double *p_a0 = (double *)((char *)args[0].data + base0);
#endif
int base1 = args[1].dat->base_offset +
(OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size) *
start[0] * args[1].stencil->stride[0];
base1 = base1 +
(OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size) *
args[1].dat->size[0] * start[1] * args[1].stencil->stride[1];
base1 = base1 +
(OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size) *
args[1].dat->size[0] * args[1].dat->size[1] * start[2] *
args[1].stencil->stride[2];
#ifdef OPS_GPU
double *p_a1 = (double *)((char *)args[1].data_d + base1);
#else
double *p_a1 = (double *)((char *)args[1].data + base1);
#endif
#ifdef OPS_GPU
int *p_a2 = (int *)args[2].data_d;
#else
int *p_a2 = arg2h;
#endif
//......... part of the code omitted here .........
Example 8: alloc_region
BOOT_CODE pptr_t
alloc_region(uint32_t size_bits)
{
unsigned int i;
unsigned int reg_index = 0; /* gcc cannot work out that this will not be used uninitialized */
region_t reg = REG_EMPTY;
region_t rem_small = REG_EMPTY;
region_t rem_large = REG_EMPTY;
region_t new_reg;
region_t new_rem_small;
region_t new_rem_large;
/* Search for a freemem region that will be the best fit for an allocation. We favour allocations
* that are aligned to either end of the region. If an allocation must split a region we favour
* an unbalanced split. In both cases we attempt to use the smallest region possible. In general
* this means we aim to make the size of the smallest remaining region smaller (ideally zero)
* followed by making the size of the largest remaining region smaller */
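/* ROUND_UP and ROUND_DOWN below give the lowest and highest size_bits-aligned
 * placements inside each free region; the placement that leaves the smaller
 * alignment gap at its own end is the one tried. */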
for (i = 0; i < MAX_NUM_FREEMEM_REG; i++) {
/* Determine whether placing the region at the start or the end will create a bigger left over region */
if (ROUND_UP(ndks_boot.freemem[i].start, size_bits) - ndks_boot.freemem[i].start <
ndks_boot.freemem[i].end - ROUND_DOWN(ndks_boot.freemem[i].end, size_bits)) {
new_reg.start = ROUND_UP(ndks_boot.freemem[i].start, size_bits);
new_reg.end = new_reg.start + BIT(size_bits);
} else {
new_reg.end = ROUND_DOWN(ndks_boot.freemem[i].end, size_bits);
new_reg.start = new_reg.end - BIT(size_bits);
}
if (new_reg.end > new_reg.start &&
new_reg.start >= ndks_boot.freemem[i].start &&
new_reg.end <= ndks_boot.freemem[i].end) {
if (new_reg.start - ndks_boot.freemem[i].start < ndks_boot.freemem[i].end - new_reg.end) {
new_rem_small.start = ndks_boot.freemem[i].start;
new_rem_small.end = new_reg.start;
new_rem_large.start = new_reg.end;
new_rem_large.end = ndks_boot.freemem[i].end;
} else {
new_rem_large.start = ndks_boot.freemem[i].start;
new_rem_large.end = new_reg.start;
new_rem_small.start = new_reg.end;
new_rem_small.end = ndks_boot.freemem[i].end;
}
if ( is_reg_empty(reg) ||
(reg_size(new_rem_small) < reg_size(rem_small)) ||
(reg_size(new_rem_small) == reg_size(rem_small) && reg_size(new_rem_large) < reg_size(rem_large)) ) {
reg = new_reg;
rem_small = new_rem_small;
rem_large = new_rem_large;
reg_index = i;
}
}
}
if (is_reg_empty(reg)) {
printf("Kernel init failing: not enough memory\n");
return 0;
}
/* Remove the region in question */
ndks_boot.freemem[reg_index] = REG_EMPTY;
/* Add the remaining regions in largest to smallest order */
insert_region(rem_large);
if (!insert_region(rem_small)) {
printf("alloc_region(): wasted 0x%x bytes due to alignment, try to increase MAX_NUM_FREEMEM_REG\n",
(unsigned int)(rem_small.end - rem_small.start));
}
return reg.start;
}
Example 9: ROUND_UP
*
* Clearly, this structure is only needed if the CPU has an MMU!
*
* The following are not the smallest areas that could be allocated for a
* working system. If the amount of memory used by the page tables is
* critical, they could be reduced.
*/
PHYS_MEM_DESC sysPhysMemDesc [] =
{
/* DRAM - Always the first entry */
{
DDR_MCORE_ADDR, /* virtual address */
DDR_MCORE_ADDR, /* physical address */
ROUND_UP (DDR_MCORE_SIZE, PAGE_SIZE),
MMU_ATTR_VALID_MSK | MMU_ATTR_PROT_MSK | MMU_ATTR_WRITEALLOCATE_MSK,
#ifdef _WRS_CONFIG_SMP /* needs to be shared */
MMU_ATTR_VALID | MMU_ATTR_SUP_RWX | MMU_ATTR_WRITEALLOCATE_SHARED
#else
MMU_ATTR_VALID | MMU_ATTR_SUP_RWX | MMU_ATTR_WRITEALLOCATE
#endif /* _WRS_CONFIG_SMP */
},
/**************************************GU memmap begin**************************/
{/*GU reserved mem 1xxx*/
DDR_GU_ADDR, /* virtual address */
DDR_GU_ADDR, /* physical address */
ROUND_UP (ECS_BBPHAC_BASE_ADDR - DDR_GU_ADDR, PAGE_SIZE),
MMU_ATTR_VALID_MSK | MMU_ATTR_PROT_MSK | MMU_ATTR_NORMAL_NONCACHEABLE_MSK,
MMU_ATTR_VALID | MMU_ATTR_SUP_RWX | MMU_ATTR_NORMAL_NONCACHEABLE
},
Example 10: ops_par_loop_update_halo_kernel1_fr2
// host stub function
void ops_par_loop_update_halo_kernel1_fr2(char const *name, ops_block block,
int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2,
ops_arg arg3, ops_arg arg4,
ops_arg arg5, ops_arg arg6,
ops_arg arg7) {
// Timing
double t1, t2, c1, c2;
ops_arg args[8] = {arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 8, range, 22))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(22, "update_halo_kernel1_fr2");
OPS_kernels[22].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0];
int ydim4 = args[4].dat->size[1];
int xdim5 = args[5].dat->size[0];
int ydim5 = args[5].dat->size[1];
int xdim6 = args[6].dat->size[0];
int ydim6 = args[6].dat->size[1];
// build opencl kernel if not already built
buildOpenCLKernels_update_halo_kernel1_fr2(xdim0, ydim0, xdim1, ydim1, xdim2,
ydim2, xdim3, ydim3, xdim4, ydim4,
xdim5, ydim5, xdim6, ydim6);
// set up OpenCL thread blocks
size_t globalWorkSize[3] = {
((x_size - 1) / OPS_block_size_x + 1) * OPS_block_size_x,
((y_size - 1) / OPS_block_size_y + 1) * OPS_block_size_y,
((z_size - 1) / OPS_block_size_z + 1) * OPS_block_size_z};
size_t localWorkSize[3] = {OPS_block_size_x, OPS_block_size_y,
OPS_block_size_z};
int *arg7h = (int *)arg7.data;
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg7.data = OPS_consts_h + consts_bytes;
//......... part of the code omitted here .........
Example 11: pyramid_build
/* Build a pyramid.
*
* width/height is the size of this layer, real_* the subsection of the layer
* which is real pixels (as opposed to background).
*/
static Layer *
pyramid_build( VipsForeignSaveDz *dz, Layer *above,
int width, int height, VipsRect *real_pixels )
{
VipsForeignSave *save = VIPS_FOREIGN_SAVE( dz );
Layer *layer = VIPS_NEW( dz, Layer );
VipsRect strip;
int limit;
layer->dz = dz;
layer->width = width;
layer->height = height;
layer->tiles_across = ROUND_UP( width, dz->tile_size ) / dz->tile_size;
layer->tiles_down = ROUND_UP( height, dz->tile_size ) / dz->tile_size;
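/* ROUND_UP followed by the division is a ceiling division: the number of
 * tiles needed to cover the layer in each direction. */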
layer->real_pixels = *real_pixels;
layer->image = NULL;
layer->strip = NULL;
layer->copy = NULL;
if( !above )
/* Top of pyramid.
*/
layer->sub = 1;
else
layer->sub = above->sub * 2;
layer->below = NULL;
layer->above = above;
/* We round the image size up to an even number to make x2 shrink
* easy.
*/
layer->image = vips_image_new();
if( vips_image_pipelinev( layer->image,
VIPS_DEMAND_STYLE_ANY, save->ready, NULL ) ) {
layer_free( layer );
return( NULL );
}
layer->image->Xsize = width + (width & 1);
layer->image->Ysize = height + (height & 1);
layer->strip = vips_region_new( layer->image );
layer->copy = vips_region_new( layer->image );
/* The regions will get used in the bg thread callback, so make sure
* we don't own them.
*/
vips__region_no_ownership( layer->strip );
vips__region_no_ownership( layer->copy );
/* Build a line of tiles here. Normally strips are height + 2 *
* overlap, but the first row is missing the top edge.
*
* Expand the strip if necessary to make sure we have an even
* number of lines.
*/
layer->y = 0;
layer->write_y = 0;
strip.left = 0;
strip.top = 0;
strip.width = layer->image->Xsize;
strip.height = dz->tile_size + dz->overlap;
if( (strip.height & 1) == 1 )
strip.height += 1;
if( vips_region_buffer( layer->strip, &strip ) ) {
layer_free( layer );
return( NULL );
}
switch( dz->depth ) {
case VIPS_FOREIGN_DZ_DEPTH_ONEPIXEL:
limit = 1;
break;
case VIPS_FOREIGN_DZ_DEPTH_ONETILE:
limit = dz->tile_size;
break;
case VIPS_FOREIGN_DZ_DEPTH_ONE:
limit = VIPS_MAX( width, height );
break;
default:
g_assert( 0 );
limit = dz->tile_size;
break;
}
if( width > limit ||
height > limit ) {
/* Round up, so eg. a 5 pixel wide image becomes 3 a layer
//......... part of the code omitted here .........
Example 12: paging_init
/**
* Create initial (temporary) page tables.
*
* We use 1MB (ARM_L1_SECTION_BYTES) pages (sections) with a single-level table.
* This allows 1MB*4k (ARM_L1_MAX_ENTRIES) = 4G per pagetable.
*
* Hardware details can be found in:
* ARM Architecture Reference Manual, ARMv7-A and ARMv7-R edition
* B3: Virtual Memory System Architecture (VMSA)
*/
void paging_init(void)
{
/**
* Make sure our page tables are correctly aligned in memory
*/
assert(ROUND_UP((lpaddr_t)l1_low, ARM_L1_ALIGN) == (lpaddr_t)l1_low);
assert(ROUND_UP((lpaddr_t)l1_high, ARM_L1_ALIGN) == (lpaddr_t)l1_high);
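/* ROUND_UP doubles as an alignment check here: an address is
 * ARM_L1_ALIGN-aligned exactly when rounding it up leaves it unchanged. */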
/**
* On ARMv7-A, physical RAM (PHYS_MEMORY_START) is the same with the
* offset of mapped physical memory within virtual address space
* (PHYS_MEMORY_START).
*/
STATIC_ASSERT(MEMORY_OFFSET == PHYS_MEMORY_START, "");
/**
* Zero the page tables: this has the effect of marking every PTE
* as invalid.
*/
memset(&l1_low, 0, sizeof(l1_low));
memset(&l1_high, 0, sizeof(l1_high));
memset(&l2_vec, 0, sizeof(l2_vec));
/**
* Now we lay out the kernel's virtual address space.
*
* 00000000-7FFFFFFF: 1-1 mappings (hardware we have not mapped
* into high kernel space yet)
* 80000000-BFFFFFFF: 1-1 mappings (this is 1GB of RAM)
* C0000000-FEFFFFFF: On-demand mappings of hardware devices,
* allocated descending from DEVICE_OFFSET.
* FF000000-FFEFFFFF: Unallocated.
* FFF00000-FFFFFFFF: L2 table, containing:
* FFF00000-FFFEFFFF: Unallocated
* FFFF0000-FFFFFFFF: Exception vectors
*/
lvaddr_t base = 0;
size_t i;
for (i=0, base = 0; i < ARM_L1_MAX_ENTRIES/2; i++) {
map_kernel_section_lo(base, make_dev_section(base));
base += ARM_L1_SECTION_BYTES;
}
for (i=0, base = MEMORY_OFFSET; i < ARM_L1_MAX_ENTRIES/4; i++) {
map_kernel_section_hi(base, make_ram_section(base));
base += ARM_L1_SECTION_BYTES;
}
/* Map the exception vectors. */
map_vectors();
/**
* TTBCR: Translation Table Base Control register.
* TTBCR.N is bits[2:0]
* In a TLB miss TTBCR.N determines whether TTBR0 or TTBR1 is used as the
* base address for the translation table walk in memory:
* N == 0 -> always use TTBR0
* N > 0 -> if VA[31:32-N] > 0 use TTBR1 else use TTBR0
*
* TTBR0 is typically used for processes-specific addresses
* TTBR1 is typically used for OS addresses that do not change on context
* switch
*
* set TTBCR.N = 1 to use TTBR1 for VAs >= MEMORY_OFFSET (=2GB)
*/
assert(mmu_enabled == false);
cp15_invalidate_i_and_d_caches_fast();
cp15_invalidate_tlb();
cp15_write_ttbr1((lpaddr_t)l1_high);
cp15_write_ttbr0((lpaddr_t)l1_low);
#define TTBCR_N 1
uint32_t ttbcr = cp15_read_ttbcr();
ttbcr = (ttbcr & ~7) | TTBCR_N;
cp15_write_ttbcr(ttbcr);
STATIC_ASSERT(1UL<<(32-TTBCR_N) == MEMORY_OFFSET, "");
#undef TTBCR_N
cp15_enable_mmu();
cp15_enable_alignment();
cp15_invalidate_i_and_d_caches_fast();
cp15_invalidate_tlb();
mmu_enabled = true;
}
Example 13: virtex_init
static void virtex_init(MachineState *machine)
{
ram_addr_t ram_size = machine->ram_size;
const char *kernel_filename = machine->kernel_filename;
const char *kernel_cmdline = machine->kernel_cmdline;
hwaddr initrd_base = 0;
int initrd_size = 0;
MemoryRegion *address_space_mem = get_system_memory();
DeviceState *dev;
PowerPCCPU *cpu;
CPUPPCState *env;
hwaddr ram_base = 0;
DriveInfo *dinfo;
MemoryRegion *phys_ram = g_new(MemoryRegion, 1);
qemu_irq irq[32], *cpu_irq;
int kernel_size;
int i;
/* init CPUs */
if (machine->cpu_model == NULL) {
machine->cpu_model = "440-Xilinx";
}
cpu = ppc440_init_xilinx(&ram_size, 1, machine->cpu_model, 400000000);
env = &cpu->env;
qemu_register_reset(main_cpu_reset, cpu);
memory_region_allocate_system_memory(phys_ram, NULL, "ram", ram_size);
memory_region_add_subregion(address_space_mem, ram_base, phys_ram);
dinfo = drive_get(IF_PFLASH, 0, 0);
pflash_cfi01_register(PFLASH_BASEADDR, NULL, "virtex.flash", FLASH_SIZE,
dinfo ? blk_by_legacy_dinfo(dinfo) : NULL,
(64 * 1024), FLASH_SIZE >> 16,
1, 0x89, 0x18, 0x0000, 0x0, 1);
cpu_irq = (qemu_irq *) &env->irq_inputs[PPC40x_INPUT_INT];
dev = qdev_create(NULL, "xlnx.xps-intc");
qdev_prop_set_uint32(dev, "kind-of-intr", 0);
qdev_init_nofail(dev);
sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, INTC_BASEADDR);
sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, cpu_irq[0]);
for (i = 0; i < 32; i++) {
irq[i] = qdev_get_gpio_in(dev, i);
}
serial_mm_init(address_space_mem, UART16550_BASEADDR, 2, irq[UART16550_IRQ],
115200, serial_hds[0], DEVICE_LITTLE_ENDIAN);
/* 2 timers at irq 2 @ 62 MHz. */
dev = qdev_create(NULL, "xlnx.xps-timer");
qdev_prop_set_uint32(dev, "one-timer-only", 0);
qdev_prop_set_uint32(dev, "clock-frequency", 62 * 1000000);
qdev_init_nofail(dev);
sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, TIMER_BASEADDR);
sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq[TIMER_IRQ]);
if (kernel_filename) {
uint64_t entry, low, high;
hwaddr boot_offset;
/* Boots a kernel elf binary. */
kernel_size = load_elf(kernel_filename, NULL, NULL,
&entry, &low, &high, 1, ELF_MACHINE, 0);
boot_info.bootstrap_pc = entry & 0x00ffffff;
if (kernel_size < 0) {
boot_offset = 0x1200000;
/* If we failed loading ELF's try a raw image. */
kernel_size = load_image_targphys(kernel_filename,
boot_offset,
ram_size);
boot_info.bootstrap_pc = boot_offset;
high = boot_info.bootstrap_pc + kernel_size + 8192;
}
boot_info.ima_size = kernel_size;
/* Load initrd. */
if (machine->initrd_filename) {
initrd_base = high = ROUND_UP(high, 4);
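/* the initrd goes at the next 4-byte-aligned address after the kernel image */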
initrd_size = load_image_targphys(machine->initrd_filename,
high, ram_size - high);
if (initrd_size < 0) {
error_report("couldn't load ram disk '%s'",
machine->initrd_filename);
exit(1);
}
high = ROUND_UP(high + initrd_size, 4);
}
/* Provide a device-tree. */
boot_info.fdt = high + (8192 * 2);
boot_info.fdt &= ~8191;
xilinx_load_device_tree(boot_info.fdt, ram_size,
initrd_base, initrd_size,
kernel_cmdline);
}
//......... part of the code omitted here .........
Example 14: _new_thread
void _new_thread(struct k_thread *thread, k_thread_stack_t *stack,
size_t stackSize, k_thread_entry_t pEntry,
void *p1, void *p2, void *p3,
int priority, unsigned int options)
{
char *pStack = K_THREAD_STACK_BUFFER(stack);
/* Align stack end to maximum alignment requirement. */
char *stackEnd = (char *)ROUND_DOWN(pStack + stackSize, 16);
#if XCHAL_CP_NUM > 0
u32_t *cpSA;
char *cpStack;
#endif
_new_thread_init(thread, pStack, stackSize, priority, options);
#ifdef CONFIG_DEBUG
printk("\nstackPtr = %p, stackSize = %d\n", pStack, stackSize);
printk("stackEnd = %p\n", stackEnd);
#endif
#if XCHAL_CP_NUM > 0
/* Ensure CP state descriptor is correctly initialized */
cpStack = thread->arch.preempCoprocReg.cpStack; /* short hand alias */
memset(cpStack, 0, XT_CP_ASA); /* Set to zero to avoid bad surprises */
/* Coprocessor's stack is allocated just after the k_thread */
cpSA = (u32_t *)(thread->arch.preempCoprocReg.cpStack + XT_CP_ASA);
/* Coprocessor's save area alignment is at least 16 bytes */
*cpSA = ROUND_UP(cpSA + 1,
(XCHAL_TOTAL_SA_ALIGN < 16 ? 16 : XCHAL_TOTAL_SA_ALIGN));
#ifdef CONFIG_DEBUG
printk("cpStack = %p\n", thread->arch.preempCoprocReg.cpStack);
printk("cpAsa = %p\n",
*(void **)(thread->arch.preempCoprocReg.cpStack + XT_CP_ASA));
#endif
#endif
/* Thread's first frame alignment is granted as both operands are
* aligned
*/
XtExcFrame *pInitCtx =
(XtExcFrame *)(stackEnd - (XT_XTRA_SIZE - XT_CP_SIZE));
#ifdef CONFIG_DEBUG
printk("pInitCtx = %p\n", pInitCtx);
#endif
/* Explicitly initialize certain saved registers */
/* task entrypoint */
pInitCtx->pc = (u32_t)_thread_entry;
/* physical top of stack frame */
pInitCtx->a1 = (u32_t)pInitCtx + XT_STK_FRMSZ;
/* user exception exit dispatcher */
pInitCtx->exit = (u32_t)_xt_user_exit;
/* Set initial PS to int level 0, EXCM disabled, user mode.
* Also set entry point argument arg.
*/
#ifdef __XTENSA_CALL0_ABI__
pInitCtx->a2 = (u32_t)pEntry;
pInitCtx->a3 = (u32_t)p1;
pInitCtx->a4 = (u32_t)p2;
pInitCtx->a5 = (u32_t)p3;
pInitCtx->ps = PS_UM | PS_EXCM;
#else
/* For windowed ABI set also WOE and CALLINC
* (pretend task is 'call4')
*/
pInitCtx->a6 = (u32_t)pEntry;
pInitCtx->a7 = (u32_t)p1;
pInitCtx->a8 = (u32_t)p2;
pInitCtx->a9 = (u32_t)p3;
pInitCtx->ps = PS_UM | PS_EXCM | PS_WOE | PS_CALLINC(1);
#endif
thread->callee_saved.topOfStack = pInitCtx;
thread->arch.flags = 0;
/* initial values in all other registers/k_thread entries are
* irrelevant
*/
}
Example 15: _start
// this is the start point of execution at 0xBFC4A000
//
// it loads the IOPBTCONF module list from rom0 and compiles a
// list of modules and their addresses.
//
// this list is then passed to loadcore as it is executed in order
// to then load the rest of the modules
//
// args: total size of IOP ram in MegaBytes
// bootinfo flags
// string containing the reboot image filepath
// ? doesn't seem to be used
void _start(int ramMBSize, int bootInfo, char* udnlString, int unk)
{
ROMFS ri;
void *(*sysmem_entry)(u32 iopmemsize);
void (*loadcore_entry)(BOOT_PARAMS *init);
int i;
ROMDIR_INFO romdir_info;
ROMFILE_INFO romfile_info;
char conf_filename[10];
int ram_byte_size, num_lines;
u32 module_load_addr;
u32** modules_ptr;
char* file_data_ptr, *file_data_end;
void* psysmemstart;
BOOT_PARAMS* boot_params;
if( ramMBSize <= 2 )
ram_byte_size = 2;
else
ram_byte_size = ramMBSize;
ram_byte_size <<= 20;
// compile module list to send to loadcore
boot_params = (BOOT_PARAMS*)0x30000; // random address, has to be clear before loadcore call
boot_params->ramMBSize = ramMBSize;
boot_params->bootInfo = bootInfo;
boot_params->udnlString = NULL;
boot_params->moduleAddrs = (u32**)((u32)boot_params + sizeof(BOOT_PARAMS)); // right after
// if a undl string is specified, get a copy of it and store a pointer to it
if(udnlString)
{
boot_params->udnlString = (char*)boot_params->moduleAddrs;
kstrcpy(boot_params->udnlString, udnlString);
boot_params->moduleAddrs = (u32**)((u32)boot_params->udnlString + ROUND_UP(kstrlen(udnlString) + 8, 4));
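// place the module address list just past the copied string, rounded up so it stays 4-byte aligned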
}
// find the romdir table in the rom
if( searchRomDir((u32*)0xBFC00000, (u32*)0xBFC10000, &romdir_info) == NULL )
{
__printf("IOPBOOT: failed to find start of rom!\n");
// error - cant find romdir!
while(1) *(u8*)0x80000000 = 0;
}
// find the bootconf file in the romdir table
kstrcpy(conf_filename, "IOPBTCONF");
conf_filename[8] = '0' + bootInfo;
if( !searchFileInRom(&romdir_info, conf_filename, &romfile_info) )
{
kstrcpy(conf_filename, "IOPBTCONF");
if( !searchFileInRom(&romdir_info, conf_filename, &romfile_info) )
{
__printf("IOPBTCONF file not found!\n");
// error - cant find conf file!
while(1) *(u8*)0x80000000 = 1;
}
}
// count the number of lines in conf file
file_data_ptr = (char*)romfile_info.fileData;
file_data_end = (char*)romfile_info.fileData + romfile_info.entry->fileSize;
{
num_lines = 0;
while( file_data_ptr < file_data_end ) {
// loop until a "newline" character is found
while(file_data_ptr < file_data_end) {
if(*file_data_ptr++ < ' ')
break;
}
// loop until a "non-newline" character is found
while(file_data_ptr < file_data_end) {
if(*file_data_ptr++ >= ' ')
break;
}
num_lines++;
}
num_lines++;
}
// get the addresses of each module
{
module_load_addr = 0;
boot_params->numConfLines = num_lines-1;
modules_ptr = boot_params->moduleAddrs;
char* file_data_ptr = (char*)romfile_info.fileData;
//......... part of the code omitted here .........