本文整理汇总了C++中smb_panic函数的典型用法代码示例。如果您正苦于以下问题:C++ smb_panic函数的具体用法?C++ smb_panic怎么用?C++ smb_panic使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了smb_panic函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: smbc_free_context
/*
* Free a context
*
* Returns 0 on success. Otherwise returns 1, the SMBCCTX is _not_ freed
* and thus you'll be leaking memory if not handled properly.
*
*/
int
smbc_free_context(SMBCCTX *context,
int shutdown_ctx)
{
if (!context) {
errno = EBADF;
return 1;
}
if (shutdown_ctx) {
SMBCFILE * f;
DEBUG(1,("Performing aggressive shutdown.\n"));
f = context->internal->files;
while (f) {
smbc_getFunctionClose(context)(context, f);
f = f->next;
}
context->internal->files = NULL;
/* First try to remove the servers the nice way. */
if (smbc_getFunctionPurgeCachedServers(context)(context)) {
SMBCSRV * s;
SMBCSRV * next;
DEBUG(1, ("Could not purge all servers, "
"Nice way shutdown failed.\n"));
s = context->internal->servers;
while (s) {
DEBUG(1, ("Forced shutdown: %p (fd=%d)\n",
s, s->cli->fd));
cli_shutdown(s->cli);
smbc_getFunctionRemoveCachedServer(context)(context,
s);
next = s->next;
DLIST_REMOVE(context->internal->servers, s);
SAFE_FREE(s);
s = next;
}
context->internal->servers = NULL;
}
}
else {
/* This is the polite way */
if (smbc_getFunctionPurgeCachedServers(context)(context)) {
DEBUG(1, ("Could not purge all servers, "
"free_context failed.\n"));
errno = EBUSY;
return 1;
}
if (context->internal->servers) {
DEBUG(1, ("Active servers in context, "
"free_context failed.\n"));
errno = EBUSY;
return 1;
}
if (context->internal->files) {
DEBUG(1, ("Active files in context, "
"free_context failed.\n"));
errno = EBUSY;
return 1;
}
}
/* Things we have to clean up */
free(smbc_getWorkgroup(context));
smbc_setWorkgroup(context, NULL);
free(smbc_getNetbiosName(context));
smbc_setNetbiosName(context, NULL);
free(smbc_getUser(context));
smbc_setUser(context, NULL);
DEBUG(3, ("Context %p successfully freed\n", context));
/* Free any DFS auth context. */
TALLOC_FREE(context->internal->auth_info);
SAFE_FREE(context->internal);
SAFE_FREE(context);
/* Protect access to the count of contexts in use */
if (SMB_THREAD_LOCK(initialized_ctx_count_mutex) != 0) {
smb_panic("error locking 'initialized_ctx_count'");
}
if (initialized_ctx_count) {
initialized_ctx_count--;
}
if (initialized_ctx_count == 0) {
SMBC_module_terminate();
}
//.........这里部分代码省略.........
示例2: parse_user_quota_record
/*
 * Parse one user quota record out of an NT_TRANSACT_GET_USER_QUOTA
 * reply buffer.
 *
 * rdata/rdata_count: raw reply bytes (untrusted, straight off the wire).
 * offset:            out - offset of the next record (0 terminates the list).
 * pqt:               out - the parsed quota entry.
 *
 * Returns True on success, False if the buffer is too short or the
 * record is malformed.  Panics on NULL pointers (programmer error).
 */
static bool parse_user_quota_record(const char *rdata, unsigned int rdata_count, unsigned int *offset, SMB_NTQUOTA_STRUCT *pqt)
{
	unsigned int sid_len;
	SMB_NTQUOTA_STRUCT qt;
	ZERO_STRUCT(qt);
	if (!rdata||!offset||!pqt) {
		smb_panic("parse_user_quota_record: called with NULL POINTER!");
	}
	if (rdata_count < 40) {
		return False;
	}
	/* offset to next quota record.
	 * 4 bytes IVAL(rdata,0)
	 * unused here...
	 */
	*offset = IVAL(rdata,0);
	/* sid len - comes from the wire, so keep it unsigned and bound it
	 * against the bytes remaining after the 40-byte fixed header.
	 * The old signed "rdata_count < 40+sid_len" check could be defeated
	 * by a huge value (signed overflow / negative length), leading to
	 * an out-of-bounds read in sid_parse() below. */
	sid_len = IVAL(rdata,4);
	if (sid_len > rdata_count - 40) {
		return False;
	}
	/* unknown 8 bytes in pdata
	 * maybe its the change time in NTTIME
	 */
	/* the used space 8 bytes (SMB_BIG_UINT)*/
	qt.usedspace = (SMB_BIG_UINT)IVAL(rdata,16);
#ifdef LARGE_SMB_OFF_T
	qt.usedspace |= (((SMB_BIG_UINT)IVAL(rdata,20)) << 32);
#else /* LARGE_SMB_OFF_T */
	/* Without 64-bit offsets, accept the high word only when it is the
	 * all-ones "no limit" marker together with an all-ones low word. */
	if ((IVAL(rdata,20) != 0)&&
	    ((qt.usedspace != 0xFFFFFFFF)||
	     (IVAL(rdata,20)!=0xFFFFFFFF))) {
		/* more than 32 bits? */
		return False;
	}
#endif /* LARGE_SMB_OFF_T */
	/* the soft quotas 8 bytes (SMB_BIG_UINT)*/
	qt.softlim = (SMB_BIG_UINT)IVAL(rdata,24);
#ifdef LARGE_SMB_OFF_T
	qt.softlim |= (((SMB_BIG_UINT)IVAL(rdata,28)) << 32);
#else /* LARGE_SMB_OFF_T */
	if ((IVAL(rdata,28) != 0)&&
	    ((qt.softlim != 0xFFFFFFFF)||
	     (IVAL(rdata,28)!=0xFFFFFFFF))) {
		/* more than 32 bits? */
		return False;
	}
#endif /* LARGE_SMB_OFF_T */
	/* the hard quotas 8 bytes (SMB_BIG_UINT)*/
	qt.hardlim = (SMB_BIG_UINT)IVAL(rdata,32);
#ifdef LARGE_SMB_OFF_T
	qt.hardlim |= (((SMB_BIG_UINT)IVAL(rdata,36)) << 32);
#else /* LARGE_SMB_OFF_T */
	if ((IVAL(rdata,36) != 0)&&
	    ((qt.hardlim != 0xFFFFFFFF)||
	     (IVAL(rdata,36)!=0xFFFFFFFF))) {
		/* more than 32 bits? */
		return False;
	}
#endif /* LARGE_SMB_OFF_T */
	/* SID follows the fixed header; sid_len was validated above. */
	sid_parse(rdata+40,sid_len,&qt.sid);
	qt.qtype = SMB_USER_QUOTA_TYPE;
	*pqt = qt;
	return True;
}
示例3: start_epmd
/*
 * Fork and run the RPC Endpoint Mapper Daemon (epmd).
 *
 * The parent process returns immediately after a successful fork().
 * The child re-initialises itself after the fork, reopens its logs,
 * installs SIGTERM/SIGHUP handlers, registers its server id and a
 * config-update message handler, registers the epmapper RPC interface
 * and opens its transports (tcp/ip port 135, the EPMAPPER ncalrpc
 * socket and the "epmapper" named pipe), then runs the tevent loop
 * forever.  Any setup failure in the child exits the process.
 */
void start_epmd(struct tevent_context *ev_ctx,
struct messaging_context *msg_ctx)
{
struct rpc_srv_callbacks epmapper_cb;
NTSTATUS status;
pid_t pid;
bool ok;
int rc;
/* Callbacks passed to rpc_epmapper_init() below; only shutdown is used. */
epmapper_cb.init = NULL;
epmapper_cb.shutdown = epmapper_shutdown_cb;
epmapper_cb.private_data = NULL;
DEBUG(1, ("Forking Endpoint Mapper Daemon\n"));
pid = fork();
if (pid == -1) {
DEBUG(0, ("Failed to fork Endpoint Mapper [%s], aborting ...\n",
strerror(errno)));
exit(1);
}
if (pid) {
/* parent */
return;
}
/* Child from here on. */
status = reinit_after_fork(msg_ctx,
ev_ctx,
true);
if (!NT_STATUS_IS_OK(status)) {
DEBUG(0,("reinit_after_fork() failed\n"));
smb_panic("reinit_after_fork() failed");
}
epmd_reopen_logs();
epmd_setup_sig_term_handler(ev_ctx);
epmd_setup_sig_hup_handler(ev_ctx, msg_ctx);
ok = serverid_register(procid_self(),
FLAG_MSG_GENERAL |
FLAG_MSG_PRINT_GENERAL);
if (!ok) {
DEBUG(0, ("Failed to register serverid in epmd!\n"));
exit(1);
}
/* React to smb.conf update notifications via epmd_smb_conf_updated(). */
messaging_register(msg_ctx,
ev_ctx,
MSG_SMB_CONF_UPDATED,
epmd_smb_conf_updated);
status = rpc_epmapper_init(&epmapper_cb);
if (!NT_STATUS_IS_OK(status)) {
DEBUG(0, ("Failed to register epmd rpc inteface! (%s)\n",
nt_errstr(status)));
exit(1);
}
/* Transport 1: TCP/IP on the well-known endpoint mapper port 135. */
status = rpc_setup_tcpip_sockets(ev_ctx,
msg_ctx,
&ndr_table_epmapper,
NULL,
135);
if (!NT_STATUS_IS_OK(status)) {
DEBUG(0, ("Failed to open epmd tcpip sockets!\n"));
exit(1);
}
/* Transport 2: local ncalrpc socket. */
ok = setup_dcerpc_ncalrpc_socket(ev_ctx,
msg_ctx,
"EPMAPPER",
srv_epmapper_delete_endpoints);
if (!ok) {
DEBUG(0, ("Failed to open epmd ncalrpc pipe!\n"));
exit(1);
}
/* Transport 3: the \pipe\epmapper named pipe. */
ok = setup_named_pipe_socket("epmapper", ev_ctx, msg_ctx);
if (!ok) {
DEBUG(0, ("Failed to open epmd named pipe!\n"));
exit(1);
}
DEBUG(1, ("Endpoint Mapper Daemon Started (%d)\n", getpid()));
/* loop forever */
rc = tevent_loop_wait(ev_ctx);
/* should not be reached */
DEBUG(0,("background_queue: tevent_loop_wait() exited with %d - %s\n",
rc, (rc == 0) ? "out of events" : strerror(errno)));
exit(1);
}
示例4: cli_list_user_quota
bool cli_list_user_quota(struct cli_state *cli, int quota_fnum, SMB_NTQUOTA_LIST **pqt_list)
{
bool ret = False;
uint16 setup;
char params[16];
char *rparam=NULL, *rdata=NULL;
unsigned int rparam_count=0, rdata_count=0;
unsigned int offset;
const char *curdata = NULL;
unsigned int curdata_count = 0;
TALLOC_CTX *mem_ctx = NULL;
SMB_NTQUOTA_STRUCT qt;
SMB_NTQUOTA_LIST *tmp_list_ent;
if (!cli||!pqt_list) {
smb_panic("cli_list_user_quota() called with NULL Pointer!");
}
setup = NT_TRANSACT_GET_USER_QUOTA;
SSVAL(params, 0,quota_fnum);
SSVAL(params, 2,TRANSACT_GET_USER_QUOTA_LIST_START);
SIVAL(params, 4,0x00000000);
SIVAL(params, 8,0x00000000);
SIVAL(params,12,0x00000000);
if (!cli_send_nt_trans(cli,
NT_TRANSACT_GET_USER_QUOTA,
0,
&setup, 1, 0,
params, 16, 4,
NULL, 0, 2048)) {
DEBUG(1,("Failed to send NT_TRANSACT_GET_USER_QUOTA\n"));
goto cleanup;
}
if (!cli_receive_nt_trans(cli,
&rparam, &rparam_count,
&rdata, &rdata_count)) {
DEBUG(1,("Failed to recv NT_TRANSACT_GET_USER_QUOTA\n"));
goto cleanup;
}
if (cli_is_error(cli)) {
ret = False;
goto cleanup;
} else {
ret = True;
}
if (rdata_count == 0) {
*pqt_list = NULL;
return True;
}
if ((mem_ctx=talloc_init("SMB_USER_QUOTA_LIST"))==NULL) {
DEBUG(0,("talloc_init() failed\n"));
return (-1);
}
offset = 1;
for (curdata=rdata,curdata_count=rdata_count;
((curdata)&&(curdata_count>=8)&&(offset>0));
curdata +=offset,curdata_count -= offset) {
ZERO_STRUCT(qt);
if (!parse_user_quota_record(curdata, curdata_count, &offset, &qt)) {
DEBUG(1,("Failed to parse the quota record\n"));
goto cleanup;
}
if ((tmp_list_ent=TALLOC_ZERO_P(mem_ctx,SMB_NTQUOTA_LIST))==NULL) {
DEBUG(0,("TALLOC_ZERO() failed\n"));
talloc_destroy(mem_ctx);
return (-1);
}
if ((tmp_list_ent->quotas=TALLOC_ZERO_P(mem_ctx,SMB_NTQUOTA_STRUCT))==NULL) {
DEBUG(0,("TALLOC_ZERO() failed\n"));
talloc_destroy(mem_ctx);
return (-1);
}
memcpy(tmp_list_ent->quotas,&qt,sizeof(qt));
tmp_list_ent->mem_ctx = mem_ctx;
DLIST_ADD((*pqt_list),tmp_list_ent);
}
SSVAL(params, 2,TRANSACT_GET_USER_QUOTA_LIST_CONTINUE);
while(1) {
if (!cli_send_nt_trans(cli,
NT_TRANSACT_GET_USER_QUOTA,
0,
&setup, 1, 0,
params, 16, 4,
NULL, 0, 2048)) {
DEBUG(1,("Failed to send NT_TRANSACT_GET_USER_QUOTA\n"));
goto cleanup;
}
//.........这里部分代码省略.........
示例5: close_remove_share_mode
static NTSTATUS close_remove_share_mode(files_struct *fsp,
enum file_close_type close_type)
{
connection_struct *conn = fsp->conn;
BOOL delete_file = False;
struct share_mode_lock *lck;
SMB_STRUCT_STAT sbuf;
NTSTATUS status = NT_STATUS_OK;
int ret;
/*
* Lock the share entries, and determine if we should delete
* on close. If so delete whilst the lock is still in effect.
* This prevents race conditions with the file being created. JRA.
*/
lck = get_share_mode_lock(NULL, fsp->dev, fsp->inode, NULL, NULL);
if (lck == NULL) {
DEBUG(0, ("close_remove_share_mode: Could not get share mode "
"lock for file %s\n", fsp->fsp_name));
return NT_STATUS_INVALID_PARAMETER;
}
if (!del_share_mode(lck, fsp)) {
DEBUG(0, ("close_remove_share_mode: Could not delete share "
"entry for file %s\n", fsp->fsp_name));
}
if (fsp->initial_delete_on_close && (lck->delete_token == NULL)) {
BOOL became_user = False;
/* Initial delete on close was set and no one else
* wrote a real delete on close. */
if (current_user.vuid != fsp->vuid) {
become_user(conn, fsp->vuid);
became_user = True;
}
set_delete_on_close_lck(lck, True, ¤t_user.ut);
if (became_user) {
unbecome_user();
}
}
delete_file = lck->delete_on_close;
if (delete_file) {
int i;
/* See if others still have the file open. If this is the
* case, then don't delete. If all opens are POSIX delete now. */
for (i=0; i<lck->num_share_modes; i++) {
struct share_mode_entry *e = &lck->share_modes[i];
if (is_valid_share_mode_entry(e)) {
if (fsp->posix_open && (e->flags & SHARE_MODE_FLAG_POSIX_OPEN)) {
continue;
}
delete_file = False;
break;
}
}
}
/* Notify any deferred opens waiting on this close. */
notify_deferred_opens(lck);
reply_to_oplock_break_requests(fsp);
/*
* NT can set delete_on_close of the last open
* reference to a file.
*/
if (!(close_type == NORMAL_CLOSE || close_type == SHUTDOWN_CLOSE)
|| !delete_file
|| (lck->delete_token == NULL)) {
TALLOC_FREE(lck);
return NT_STATUS_OK;
}
/*
* Ok, we have to delete the file
*/
DEBUG(5,("close_remove_share_mode: file %s. Delete on close was set "
"- deleting file.\n", fsp->fsp_name));
/* Become the user who requested the delete. */
if (!push_sec_ctx()) {
smb_panic("close_remove_share_mode: file %s. failed to push "
"sec_ctx.\n");
}
set_sec_ctx(lck->delete_token->uid,
lck->delete_token->gid,
lck->delete_token->ngroups,
lck->delete_token->groups,
NULL);
/* We can only delete the file if the name we have is still valid and
//.........这里部分代码省略.........
示例6: exit_server_common
//.........这里部分代码省略.........
*/
status = smb1srv_tcon_disconnect_all(xconn);
if (!NT_STATUS_IS_OK(status)) {
DEBUG(0,("Server exit (%s)\n",
(reason ? reason : "normal exit")));
DEBUG(0, ("exit_server_common: "
"smb1srv_tcon_disconnect_all() failed (%s) - "
"triggering cleanup\n", nt_errstr(status)));
}
status = smbXsrv_session_logoff_all(xconn);
if (!NT_STATUS_IS_OK(status)) {
DEBUG(0,("Server exit (%s)\n",
(reason ? reason : "normal exit")));
DEBUG(0, ("exit_server_common: "
"smbXsrv_session_logoff_all() failed (%s) - "
"triggering cleanup\n", nt_errstr(status)));
}
}
change_to_root_user();
/* 3 second timeout. */
print_notify_send_messages(msg_ctx, 3);
#ifdef USE_DMAPI
/* Destroy Samba DMAPI session only if we are master smbd process */
if (am_parent) {
if (!dmapi_destroy_session()) {
DEBUG(0,("Unable to close Samba DMAPI session\n"));
}
}
#endif
if (am_parent) {
rpc_wkssvc_shutdown();
rpc_dssetup_shutdown();
#ifdef DEVELOPER
rpc_rpcecho_shutdown();
#endif
rpc_netdfs_shutdown();
rpc_initshutdown_shutdown();
rpc_eventlog_shutdown();
rpc_ntsvcs_shutdown();
rpc_svcctl_shutdown();
rpc_spoolss_shutdown();
rpc_srvsvc_shutdown();
rpc_winreg_shutdown();
rpc_netlogon_shutdown();
rpc_samr_shutdown();
rpc_lsarpc_shutdown();
}
/*
* we need to force the order of freeing the following,
* because smbd_msg_ctx is not a talloc child of smbd_server_conn.
*/
if (client != NULL) {
struct smbXsrv_connection *next;
for (; xconn != NULL; xconn = next) {
next = xconn->next;
DLIST_REMOVE(client->connections, xconn);
talloc_free(xconn);
DO_PROFILE_INC(disconnect);
}
TALLOC_FREE(client->sconn);
}
sconn = NULL;
xconn = NULL;
client = NULL;
netlogon_creds_cli_close_global_db();
TALLOC_FREE(global_smbXsrv_client);
smbprofile_dump();
server_messaging_context_free();
server_event_context_free();
TALLOC_FREE(smbd_memcache_ctx);
locking_end();
printing_end();
if (how != SERVER_EXIT_NORMAL) {
smb_panic(reason);
/* Notreached. */
exit(1);
} else {
DEBUG(3,("Server exit (%s)\n",
(reason ? reason : "normal exit")));
if (am_parent) {
pidfile_unlink(lp_pid_directory(), "smbd");
}
gencache_stabilize();
}
exit(0);
}
示例7: smbsrv_push_passthru_fileinfo
//.........这里部分代码省略.........
SSVAL(blob->data, 62, 0); /* padding */
SIVAL(blob->data, 64, st->all_info.out.ea_size);
BLOB_CHECK(smbsrv_blob_append_string(mem_ctx, blob,
st->all_info.out.fname.s,
68, default_str_flags,
STR_UNICODE));
return NT_STATUS_OK;
case RAW_FILEINFO_NAME_INFORMATION:
BLOB_CHECK(smbsrv_blob_grow_data(mem_ctx, blob, 4));
BLOB_CHECK(smbsrv_blob_append_string(mem_ctx, blob,
st->name_info.out.fname.s,
0, default_str_flags,
STR_UNICODE));
return NT_STATUS_OK;
case RAW_FILEINFO_ALT_NAME_INFORMATION:
BLOB_CHECK(smbsrv_blob_grow_data(mem_ctx, blob, 4));
BLOB_CHECK(smbsrv_blob_append_string(mem_ctx, blob,
st->alt_name_info.out.fname.s,
0, default_str_flags,
STR_UNICODE));
return NT_STATUS_OK;
case RAW_FILEINFO_STREAM_INFORMATION:
for (i=0;i<st->stream_info.out.num_streams;i++) {
uint32_t data_size = blob->length;
uint8_t *data;
BLOB_CHECK(smbsrv_blob_grow_data(mem_ctx, blob, data_size + 24));
data = blob->data + data_size;
SBVAL(data, 8, st->stream_info.out.streams[i].size);
SBVAL(data, 16, st->stream_info.out.streams[i].alloc_size);
BLOB_CHECK(smbsrv_blob_append_string(mem_ctx, blob,
st->stream_info.out.streams[i].stream_name.s,
data_size + 4, default_str_flags,
STR_UNICODE));
if (i == st->stream_info.out.num_streams - 1) {
SIVAL(blob->data, data_size, 0);
} else {
BLOB_CHECK(smbsrv_blob_fill_data(mem_ctx, blob, (blob->length+7)&~7));
SIVAL(blob->data, data_size,
blob->length - data_size);
}
}
return NT_STATUS_OK;
case RAW_FILEINFO_SMB2_ALL_EAS:
/* if no eas are returned the backend should
* have returned NO_EAS_ON_FILE or NO_MORE_EAS
*
* so it's a programmer error if num_eas == 0
*/
if (st->all_eas.out.num_eas == 0) {
smb_panic("0 eas for SMB2_ALL_EAS - programmer error in ntvfs backend");
}
list_size = ea_list_size_chained(st->all_eas.out.num_eas,
st->all_eas.out.eas);
BLOB_CHECK(smbsrv_blob_grow_data(mem_ctx, blob, list_size));
ea_put_list_chained(blob->data,
st->all_eas.out.num_eas,
st->all_eas.out.eas);
return NT_STATUS_OK;
case RAW_FILEINFO_SMB2_ALL_INFORMATION:
BLOB_CHECK(smbsrv_blob_grow_data(mem_ctx, blob, 0x64));
push_nttime(blob->data, 0x00, st->all_info2.out.create_time);
push_nttime(blob->data, 0x08, st->all_info2.out.access_time);
push_nttime(blob->data, 0x10, st->all_info2.out.write_time);
push_nttime(blob->data, 0x18, st->all_info2.out.change_time);
SIVAL(blob->data, 0x20, st->all_info2.out.attrib);
SIVAL(blob->data, 0x24, st->all_info2.out.unknown1);
SBVAL(blob->data, 0x28, st->all_info2.out.alloc_size);
SBVAL(blob->data, 0x30, st->all_info2.out.size);
SIVAL(blob->data, 0x38, st->all_info2.out.nlink);
SCVAL(blob->data, 0x3C, st->all_info2.out.delete_pending);
SCVAL(blob->data, 0x3D, st->all_info2.out.directory);
SSVAL(blob->data, 0x3E, 0); /* padding */
SBVAL(blob->data, 0x40, st->all_info2.out.file_id);
SIVAL(blob->data, 0x48, st->all_info2.out.ea_size);
SIVAL(blob->data, 0x4C, st->all_info2.out.access_mask);
SBVAL(blob->data, 0x50, st->all_info2.out.position);
SBVAL(blob->data, 0x58, st->all_info2.out.mode);
BLOB_CHECK(smbsrv_blob_append_string(mem_ctx, blob,
st->all_info2.out.fname.s,
0x60, default_str_flags,
STR_UNICODE));
return NT_STATUS_OK;
default:
return NT_STATUS_INVALID_LEVEL;
}
return NT_STATUS_INVALID_LEVEL;
}
示例8: fstrcpy
connection_struct *make_connection(struct smb_request *req,
NTTIME now,
const char *service_in,
const char *pdev, uint64_t vuid,
NTSTATUS *status)
{
struct smbd_server_connection *sconn = req->sconn;
uid_t euid;
struct user_struct *vuser = NULL;
char *service = NULL;
fstring dev;
int snum = -1;
fstrcpy(dev, pdev);
/* This must ONLY BE CALLED AS ROOT. As it exits this function as
* root. */
if (!non_root_mode() && (euid = geteuid()) != 0) {
DEBUG(0,("make_connection: PANIC ERROR. Called as nonroot "
"(%u)\n", (unsigned int)euid ));
smb_panic("make_connection: PANIC ERROR. Called as nonroot\n");
}
if (conn_num_open(sconn) > 2047) {
*status = NT_STATUS_INSUFF_SERVER_RESOURCES;
return NULL;
}
vuser = get_valid_user_struct(sconn, vuid);
if (!vuser) {
DEBUG(1,("make_connection: refusing to connect with "
"no session setup\n"));
*status = NT_STATUS_ACCESS_DENIED;
return NULL;
}
/* Logic to try and connect to the correct [homes] share, preferably
without too many getpwnam() lookups. This is particulary nasty for
winbind usernames, where the share name isn't the same as unix
username.
The snum of the homes share is stored on the vuser at session setup
time.
*/
if (strequal(service_in,HOMES_NAME)) {
if (vuser->homes_snum == -1) {
DEBUG(2, ("[homes] share not available for "
"this user because it was not found "
"or created at session setup "
"time\n"));
*status = NT_STATUS_BAD_NETWORK_NAME;
return NULL;
}
DEBUG(5, ("making a connection to [homes] service "
"created at session setup time\n"));
return make_connection_smb1(req, now,
vuser->homes_snum,
vuser,
dev, status);
} else if ((vuser->homes_snum != -1)
&& strequal(service_in,
lp_servicename(talloc_tos(), vuser->homes_snum))) {
DEBUG(5, ("making a connection to 'homes' service [%s] "
"created at session setup time\n", service_in));
return make_connection_smb1(req, now,
vuser->homes_snum,
vuser,
dev, status);
}
service = talloc_strdup(talloc_tos(), service_in);
if (!service) {
*status = NT_STATUS_NO_MEMORY;
return NULL;
}
if (!strlower_m(service)) {
DEBUG(2, ("strlower_m %s failed\n", service));
*status = NT_STATUS_INVALID_PARAMETER;
return NULL;
}
snum = find_service(talloc_tos(), service, &service);
if (!service) {
*status = NT_STATUS_NO_MEMORY;
return NULL;
}
if (snum < 0) {
if (strequal(service,"IPC$") ||
(lp_enable_asu_support() && strequal(service,"ADMIN$"))) {
DEBUG(3,("refusing IPC connection to %s\n", service));
*status = NT_STATUS_ACCESS_DENIED;
return NULL;
}
DEBUG(3,("%s (%s) couldn't find service %s\n",
get_remote_machine_name(),
tsocket_address_string(
//.........这里部分代码省略.........
示例9: sys_get_xfs_quota
/****************************************************************************
 Abstract out the XFS Quota Manager quota get call.
****************************************************************************/
/*
 * Query XFS quota information for "id" of type "qtype" on block device
 * "bdev" and fill in *dp.
 *
 * SMB_USER_QUOTA_TYPE / SMB_GROUP_QUOTA_TYPE: fetch the per-id limits
 * and usage with Q_XGETQUOTA.
 *
 * SMB_USER_FS_QUOTA_TYPE / SMB_GROUP_FS_QUOTA_TYPE: only the global
 * quota state is queried (Q_XGETQSTAT) and mapped to dp->qflags
 * (enforcement on -> QUOTAS_DENY_DISK, accounting only ->
 * QUOTAS_ENABLED); the per-id fields stay zero because D is untouched.
 *
 * Returns 0 on success, the failing quotactl() result on error, or
 * -1 with errno = ENOSYS for an unknown qtype.  Panics on NULL
 * bdev/dp (programmer error).
 */
int sys_get_xfs_quota(const char *path, const char *bdev, enum SMB_QUOTA_TYPE qtype, unid_t id, SMB_DISK_QUOTA *dp)
{
int ret = -1;
uint32 qflags = 0;
SMB_BIG_UINT bsize = (SMB_BIG_UINT)BBSIZE;
struct fs_disk_quota D;
struct fs_quota_stat F;
ZERO_STRUCT(D);
ZERO_STRUCT(F);
if (!bdev||!dp)
smb_panic("sys_get_xfs_quota: called with NULL pointer");
ZERO_STRUCT(*dp);
dp->qtype = qtype;
switch (qtype) {
case SMB_USER_QUOTA_TYPE:
DEBUG(10,("sys_get_xfs_quota: path[%s] bdev[%s] SMB_USER_QUOTA_TYPE uid[%u]\n",
path, bdev, (unsigned)id.uid));
if ((ret=quotactl(QCMD(Q_XGETQUOTA,USRQUOTA), bdev, id.uid, (caddr_t)&D)))
return ret;
break;
#ifdef HAVE_GROUP_QUOTA
case SMB_GROUP_QUOTA_TYPE:
DEBUG(10,("sys_get_xfs_quota: path[%s] bdev[%s] SMB_GROUP_QUOTA_TYPE gid[%u]\n",
path, bdev, (unsigned)id.gid));
if ((ret=quotactl(QCMD(Q_XGETQUOTA,GRPQUOTA), bdev, id.gid, (caddr_t)&D)))
return ret;
break;
#endif /* HAVE_GROUP_QUOTA */
case SMB_USER_FS_QUOTA_TYPE:
DEBUG(10,("sys_get_xfs_quota: path[%s] bdev[%s] SMB_USER_FS_QUOTA_TYPE (uid[%u])\n",
path, bdev, (unsigned)id.uid));
/* Failure of Q_XGETQSTAT is tolerated: F was zeroed, so no flag
 * bits are reported in that case. */
quotactl(QCMD(Q_XGETQSTAT,USRQUOTA), bdev, -1, (caddr_t)&F);
if (F.qs_flags & XFS_QUOTA_UDQ_ENFD) {
qflags |= QUOTAS_DENY_DISK;
}
else if (F.qs_flags & XFS_QUOTA_UDQ_ACCT) {
qflags |= QUOTAS_ENABLED;
}
ret = 0;
break;
#ifdef HAVE_GROUP_QUOTA
case SMB_GROUP_FS_QUOTA_TYPE:
DEBUG(10,("sys_get_xfs_quota: path[%s] bdev[%s] SMB_GROUP_FS_QUOTA_TYPE (gid[%u])\n",
path, bdev, (unsigned)id.gid));
quotactl(QCMD(Q_XGETQSTAT,GRPQUOTA), bdev, -1, (caddr_t)&F);
if (F.qs_flags & XFS_QUOTA_GDQ_ENFD) {
qflags |= QUOTAS_DENY_DISK;
}
else if (F.qs_flags & XFS_QUOTA_GDQ_ACCT) {
qflags |= QUOTAS_ENABLED;
}
ret = 0;
break;
#endif /* HAVE_GROUP_QUOTA */
default:
errno = ENOSYS;
return -1;
}
/* Copy the (possibly still zeroed) kernel reply into the caller's
 * structure.  bsize is the XFS basic block size (BBSIZE). */
dp->bsize = bsize;
dp->softlimit = (SMB_BIG_UINT)D.d_blk_softlimit;
dp->hardlimit = (SMB_BIG_UINT)D.d_blk_hardlimit;
dp->ihardlimit = (SMB_BIG_UINT)D.d_ino_hardlimit;
dp->isoftlimit = (SMB_BIG_UINT)D.d_ino_softlimit;
dp->curinodes = (SMB_BIG_UINT)D.d_icount;
dp->curblocks = (SMB_BIG_UINT)D.d_bcount;
dp->qflags = qflags;
return ret;
}
示例10: sys_set_xfs_quota
/****************************************************************************
Abstract out the XFS Quota Manager quota set call.
****************************************************************************/
int sys_set_xfs_quota(const char *path, const char *bdev, enum SMB_QUOTA_TYPE qtype, unid_t id, SMB_DISK_QUOTA *dp)
{
int ret = -1;
uint32 qflags = 0;
SMB_BIG_UINT bsize = (SMB_BIG_UINT)BBSIZE;
struct fs_disk_quota D;
struct fs_quota_stat F;
int q_on = 0;
int q_off = 0;
ZERO_STRUCT(D);
ZERO_STRUCT(F);
if (!bdev||!dp)
smb_panic("sys_set_xfs_quota: called with NULL pointer");
if (bsize == dp->bsize) {
D.d_blk_softlimit = dp->softlimit;
D.d_blk_hardlimit = dp->hardlimit;
D.d_ino_hardlimit = dp->ihardlimit;
D.d_ino_softlimit = dp->isoftlimit;
} else {
D.d_blk_softlimit = (dp->softlimit*dp->bsize)/bsize;
D.d_blk_hardlimit = (dp->hardlimit*dp->bsize)/bsize;
D.d_ino_hardlimit = (dp->ihardlimit*dp->bsize)/bsize;
D.d_ino_softlimit = (dp->isoftlimit*dp->bsize)/bsize;
}
qflags = dp->qflags;
switch (qtype) {
case SMB_USER_QUOTA_TYPE:
DEBUG(10,("sys_set_xfs_quota: path[%s] bdev[%s] SMB_USER_QUOTA_TYPE uid[%u]\n",
path, bdev, (unsigned)id.uid));
D.d_fieldmask |= FS_DQ_LIMIT_MASK;
ret = quotactl(QCMD(Q_XSETQLIM,USRQUOTA), bdev, id.uid, (caddr_t)&D);
break;
#ifdef HAVE_GROUP_QUOTA
case SMB_GROUP_QUOTA_TYPE:
DEBUG(10,("sys_set_xfs_quota: path[%s] bdev[%s] SMB_GROUP_QUOTA_TYPE gid[%u]\n",
path, bdev, (unsigned)id.gid));
D.d_fieldmask |= FS_DQ_LIMIT_MASK;
ret = quotactl(QCMD(Q_XSETQLIM,GRPQUOTA), bdev, id.gid, (caddr_t)&D);
break;
#endif /* HAVE_GROUP_QUOTA */
case SMB_USER_FS_QUOTA_TYPE:
DEBUG(10,("sys_set_xfs_quota: path[%s] bdev[%s] SMB_USER_FS_QUOTA_TYPE (uid[%u])\n",
path, bdev, (unsigned)id.uid));
quotactl(QCMD(Q_XGETQSTAT,USRQUOTA), bdev, -1, (caddr_t)&F);
if (qflags & QUOTAS_DENY_DISK) {
if (!(F.qs_flags & XFS_QUOTA_UDQ_ENFD))
q_on |= XFS_QUOTA_UDQ_ENFD;
if (!(F.qs_flags & XFS_QUOTA_UDQ_ACCT))
q_on |= XFS_QUOTA_UDQ_ACCT;
if (q_on != 0) {
ret = quotactl(QCMD(Q_XQUOTAON,USRQUOTA),bdev, -1, (caddr_t)&q_on);
} else {
ret = 0;
}
} else if (qflags & QUOTAS_ENABLED) {
if (F.qs_flags & XFS_QUOTA_UDQ_ENFD)
q_off |= XFS_QUOTA_UDQ_ENFD;
if (q_off != 0) {
ret = quotactl(QCMD(Q_XQUOTAOFF,USRQUOTA),bdev, -1, (caddr_t)&q_off);
} else {
ret = 0;
}
if (!(F.qs_flags & XFS_QUOTA_UDQ_ACCT))
q_on |= XFS_QUOTA_UDQ_ACCT;
if (q_on != 0) {
ret = quotactl(QCMD(Q_XQUOTAON,USRQUOTA),bdev, -1, (caddr_t)&q_on);
} else {
ret = 0;
}
} else {
#if 0
/* Switch on XFS_QUOTA_UDQ_ACCT didn't work!
* only swittching off XFS_QUOTA_UDQ_ACCT work
*/
if (F.qs_flags & XFS_QUOTA_UDQ_ENFD)
q_off |= XFS_QUOTA_UDQ_ENFD;
if (F.qs_flags & XFS_QUOTA_UDQ_ACCT)
q_off |= XFS_QUOTA_UDQ_ACCT;
if (q_off !=0) {
ret = quotactl(QCMD(Q_XQUOTAOFF,USRQUOTA),bdev, -1, (caddr_t)&q_off);
} else {
ret = 0;
}
//.........这里部分代码省略.........
示例11: SMBC_module_init
/*
 * Do some module- and library-wide initializations
 * (one-time setup for libsmbclient).
 *
 * Loads the case tables, sets up logging, loads smb.conf - preferring
 * $HOME/.smb/smb.conf, then falling back to get_dyn_CONFIGFILE() plus
 * an optional $HOME/.smb/smb.conf.append overlay - loads the network
 * interface list, blocks SIGPIPE, and creates the mutex protecting
 * initialized_ctx_count.  Panics if that mutex cannot be created.
 * The "punused" parameter is ignored.
 */
static void
SMBC_module_init(void * punused)
{
bool conf_loaded = False;
char *home = NULL;
TALLOC_CTX *frame = talloc_stackframe();
load_case_tables();
setup_logging("libsmbclient", True);
/* Here we would open the smb.conf file if needed ... */
lp_set_in_client(True);
/* First choice: the user's private config in $HOME/.smb/smb.conf. */
home = getenv("HOME");
if (home) {
char *conf = NULL;
if (asprintf(&conf, "%s/.smb/smb.conf", home) > 0) {
if (lp_load(conf, True, False, False, True)) {
conf_loaded = True;
} else {
DEBUG(5, ("Could not load config file: %s\n",
conf));
}
SAFE_FREE(conf);
}
}
if (!conf_loaded) {
/*
* Well, if that failed, try the get_dyn_CONFIGFILE
* Which points to the standard locn, and if that
* fails, silently ignore it and use the internal
* defaults ...
*/
if (!lp_load(get_dyn_CONFIGFILE(), True, False, False, False)) {
DEBUG(5, ("Could not load config file: %s\n",
get_dyn_CONFIGFILE()));
} else if (home) {
char *conf;
/*
* We loaded the global config file. Now lets
* load user-specific modifications to the
* global config.
*/
if (asprintf(&conf,
"%s/.smb/smb.conf.append",
home) > 0) {
if (!lp_load(conf, True, False, False, False)) {
DEBUG(10,
("Could not append config file: "
"%s\n",
conf));
}
SAFE_FREE(conf);
}
}
}
load_interfaces(); /* Load the list of interfaces ... */
reopen_logs(); /* Get logging working ... */
/*
* Block SIGPIPE (from lib/util_sock.c: write())
* It is not needed and should not stop execution
*/
BlockSignals(True, SIGPIPE);
/* Create the mutex we'll use to protect initialized_ctx_count */
if (SMB_THREAD_CREATE_MUTEX("initialized_ctx_count_mutex",
initialized_ctx_count_mutex) != 0) {
smb_panic("SMBC_module_init: "
"failed to create 'initialized_ctx_count' mutex");
}
TALLOC_FREE(frame);
}
示例12: sys_select
/*
 * Wrapper around select() that also watches the internal signal pipe
 * (select_pipe).  When the pipe becomes readable we consume one byte,
 * bump pipe_read, and always return -1 with errno = EINTR so the
 * caller re-checks for pending signals - returning a positive count
 * here could eat the pipe byte and lose a signal (see JRA's comment
 * below).
 *
 * Returns the select() result, adjusted as described above.
 */
int sys_select(int maxfd, fd_set *readfds, fd_set *writefds, fd_set *errorfds, struct timeval *tval)
{
	int ret, saved_errno;
	fd_set *readfds2, readfds_buf;

	/* One-time (per process) setup of the signal pipe.  "initialised"
	 * stores the pid so a forked child re-initialises its own pipe. */
	if (initialised != sys_getpid()) {
#ifdef _XBOX
		OutputDebugString("SMB -> Todo: sys_select\n");
#else /* _XBOX */
		/* BUGFIX: this line used to read "#elif //_XBOX".  After
		 * comment stripping that leaves #elif with an empty
		 * expression - a preprocessor error on every non-Xbox
		 * build.  A plain #else is what was intended. */
		pipe(select_pipe);
		/*
		 * These next two lines seem to fix a bug with the Linux
		 * 2.0.x kernel (and probably other UNIXes as well) where
		 * the one byte read below can block even though the
		 * select returned that there is data in the pipe and
		 * the pipe_written variable was incremented. Thanks to
		 * HP for finding this one. JRA.
		 */
		if(set_blocking(select_pipe[0],0)==-1)
			smb_panic("select_pipe[0]: O_NONBLOCK failed.\n");
		if(set_blocking(select_pipe[1],0)==-1)
			smb_panic("select_pipe[1]: O_NONBLOCK failed.\n");
#endif /* _XBOX */
		initialised = sys_getpid();
	}

	maxfd = MAX(select_pipe[0]+1, maxfd);

	/* If readfds is NULL we need to provide our own set. */
	if (readfds) {
		readfds2 = readfds;
	} else {
		readfds2 = &readfds_buf;
		FD_ZERO(readfds2);
	}
#ifndef _XBOX
	FD_SET(select_pipe[0], readfds2);
#endif

	errno = 0;
	ret = select(maxfd,readfds2,writefds,errorfds,tval);

	if (ret <= 0) {
		/* Timeout or error: hand back empty sets. */
		FD_ZERO(readfds2);
		if (writefds)
			FD_ZERO(writefds);
		if (errorfds)
			FD_ZERO(errorfds);
	} else if (FD_ISSET(select_pipe[0], readfds2)) {
		char c;
		saved_errno = errno;
		if (read(select_pipe[0], &c, 1) == 1) {
			pipe_read++;
			/* Mark Weaver <[email protected]> pointed out a critical
			   fix to ensure we don't lose signals. We must always
			   return -1 when the select pipe is set, otherwise if another
			   fd is also ready (so ret == 2) then we used to eat the
			   byte in the pipe and lose the signal. JRA.
			*/
			ret = -1;
#if 0
			/* JRA - we can use this to debug the signal messaging... */
			DEBUG(0,("select got %u signal\n", (unsigned int)c));
#endif
			errno = EINTR;
		} else {
			/* Spurious readiness: drop the pipe fd from the result
			 * set and restore errno from before the read(). */
			FD_CLR(select_pipe[0], readfds2);
			ret--;
			errno = saved_errno;
		}
	}

	return ret;
}
示例13: token_contains_name
/*
 * Check whether "name" (e.g. an entry from a share's access list)
 * matches the given NT user token.
 *
 * "name" first has substitutions applied (talloc_sub_basic with
 * username/domain, then "%S" replaced by sharename).  The result is
 * then interpreted as:
 *   - a literal SID string (checked directly against the token), or
 *   - a plain user name when do_group_checks() reports no prefix, or
 *   - a prefixed name, one check per prefix character:
 *       '+'  the named group's SID must be in the token
 *       '&'  the user must be a member of the named netgroup
 *
 * Returns True on a match, False otherwise.  Panics if substitution
 * fails or do_group_checks() hands back an unknown prefix character.
 */
static bool token_contains_name(TALLOC_CTX *mem_ctx,
const char *username,
const char *domain,
const char *sharename,
const struct nt_user_token *token,
const char *name)
{
const char *prefix;
DOM_SID sid;
enum lsa_SidType type;
struct smbd_server_connection *sconn = smbd_server_conn;
if (username != NULL) {
name = talloc_sub_basic(mem_ctx, username, domain, name);
}
if (sharename != NULL) {
name = talloc_string_sub(mem_ctx, name, "%S", sharename);
}
if (name == NULL) {
/* This is too security sensitive, better panic than return a
* result that might be interpreted in a wrong way. */
smb_panic("substitutions failed");
}
/* check to see is we already have a SID */
if ( string_to_sid( &sid, name ) ) {
DEBUG(5,("token_contains_name: Checking for SID [%s] in token\n", name));
return nt_token_check_sid( &sid, token );
}
if (!do_group_checks(&name, &prefix)) {
/* No group prefix: the name must resolve to a user SID that is
 * present in the token. */
if (!lookup_name_smbconf(mem_ctx, name, LOOKUP_NAME_ALL,
NULL, NULL, &sid, &type)) {
DEBUG(5, ("lookup_name %s failed\n", name));
return False;
}
if (type != SID_NAME_USER) {
DEBUG(5, ("%s is a %s, expected a user\n",
name, sid_type_lookup(type)));
return False;
}
return nt_token_check_sid(&sid, token);
}
/* Walk the prefix characters returned by do_group_checks(); any one
 * matching check grants access. */
for (/* initialized above */ ; *prefix != '\0'; prefix++) {
if (*prefix == '+') {
/* '+': name must be a group whose SID is in the token. */
if (!lookup_name_smbconf(mem_ctx, name,
LOOKUP_NAME_ALL|LOOKUP_NAME_GROUP,
NULL, NULL, &sid, &type)) {
DEBUG(5, ("lookup_name %s failed\n", name));
return False;
}
if ((type != SID_NAME_DOM_GRP) &&
(type != SID_NAME_ALIAS) &&
(type != SID_NAME_WKN_GRP)) {
DEBUG(5, ("%s is a %s, expected a group\n",
name, sid_type_lookup(type)));
return False;
}
if (nt_token_check_sid(&sid, token)) {
return True;
}
continue;
}
if (*prefix == '&') {
/* '&': netgroup membership check (only possible when we
 * have a username). */
if (username) {
if (user_in_netgroup(sconn, username, name)) {
return True;
}
}
continue;
}
smb_panic("got invalid prefix from do_groups_check");
}
return False;
}
示例14: sys_set_quota
/*
 * Set a disk quota for the filesystem containing "path".
 *
 * Resolution order:
 *   1. the external "set quota command" (command_set_quota); errno
 *      ENOSYS means "not configured" and falls through, any other
 *      failure is final,
 *   2. a filesystem-specific backend from sys_quota_backends[]
 *      matching the detected fs type,
 *   3. the generic vfs backend (sys_set_vfs_quota).
 *
 * Returns 0 on success, -1 (or the backend's error) on failure.
 * EDQUOT from a backend is deliberately reported as success: being
 * over quota is not a failure of the set call itself.
 */
int sys_set_quota(const char *path, enum SMB_QUOTA_TYPE qtype, unid_t id, SMB_DISK_QUOTA *dp)
{
	int ret = -1;
	int i;
	BOOL ready = False;
	char *mntpath = NULL;
	char *bdev = NULL;
	char *fs = NULL;

	/* find the block device file */
	if (!path||!dp)
		/* BUGFIX: the panic message used to say "get_smb_quota",
		 * a copy-paste from the getter, misdirecting crash triage. */
		smb_panic("sys_set_quota: called with NULL pointer");

	/* Step 1: user-configured external quota command. */
	if (command_set_quota(path, qtype, id, dp)==0) {
		return 0;
	} else if (errno != ENOSYS) {
		return -1;
	}

	if ((ret=sys_path_to_bdev(path,&mntpath,&bdev,&fs))!=0) {
		DEBUG(0,("sys_path_to_bdev() failed for path [%s]!\n",path));
		return ret;
	}

	errno = 0;
	DEBUG(10,("sys_set_quota() uid(%u, %u)\n", (unsigned)getuid(), (unsigned)geteuid()));

	/* Step 2: filesystem-specific backend keyed on the fs type name. */
	for (i=0;(fs && sys_quota_backends[i].name && sys_quota_backends[i].set_quota);i++) {
		if (strcmp(fs,sys_quota_backends[i].name)==0) {
			ret = sys_quota_backends[i].set_quota(mntpath, bdev, qtype, id, dp);
			if (ret!=0) {
				DEBUG(3,("sys_set_%s_quota() failed for mntpath[%s] bdev[%s] qtype[%d] id[%d]: %s.\n",
					fs,mntpath,bdev,qtype,(qtype==SMB_GROUP_QUOTA_TYPE?id.gid:id.uid),strerror(errno)));
			} else {
				DEBUG(10,("sys_set_%s_quota() called for mntpath[%s] bdev[%s] qtype[%d] id[%d].\n",
					fs,mntpath,bdev,qtype,(qtype==SMB_GROUP_QUOTA_TYPE?id.gid:id.uid)));
			}
			ready = True;
			break;
		}
	}

	if (!ready) {
		/* use the default vfs quota functions */
		ret=sys_set_vfs_quota(mntpath, bdev, qtype, id, dp);
		if (ret!=0) {
			DEBUG(3,("sys_set_%s_quota() failed for mntpath[%s] bdev[%s] qtype[%d] id[%d]: %s.\n",
				"vfs",mntpath,bdev,qtype,(qtype==SMB_GROUP_QUOTA_TYPE?id.gid:id.uid),strerror(errno)));
		} else {
			DEBUG(10,("sys_set_%s_quota() called for mntpath[%s] bdev[%s] qtype[%d] id[%d].\n",
				"vfs",mntpath,bdev,qtype,(qtype==SMB_GROUP_QUOTA_TYPE?id.gid:id.uid)));
		}
	}

	SAFE_FREE(mntpath);
	SAFE_FREE(bdev);
	SAFE_FREE(fs);

	/* Over-quota from the backend is not a failure of the set call. */
	if ((ret!=0)&& (errno == EDQUOT)) {
		DEBUG(10,("sys_set_quota() warning over quota!\n"));
		return 0;
	}

	return ret;
}
示例15: close_directory
static NTSTATUS close_directory(files_struct *fsp, enum file_close_type close_type)
{
struct share_mode_lock *lck = 0;
BOOL delete_dir = False;
NTSTATUS status = NT_STATUS_OK;
/*
* NT can set delete_on_close of the last open
* reference to a directory also.
*/
lck = get_share_mode_lock(NULL, fsp->dev, fsp->inode, NULL, NULL);
if (lck == NULL) {
DEBUG(0, ("close_directory: Could not get share mode lock for %s\n", fsp->fsp_name));
return NT_STATUS_INVALID_PARAMETER;
}
if (!del_share_mode(lck, fsp)) {
DEBUG(0, ("close_directory: Could not delete share entry for %s\n", fsp->fsp_name));
}
if (fsp->initial_delete_on_close) {
BOOL became_user = False;
/* Initial delete on close was set - for
* directories we don't care if anyone else
* wrote a real delete on close. */
if (current_user.vuid != fsp->vuid) {
become_user(fsp->conn, fsp->vuid);
became_user = True;
}
send_stat_cache_delete_message(fsp->fsp_name);
set_delete_on_close_lck(lck, True, ¤t_user.ut);
if (became_user) {
unbecome_user();
}
}
delete_dir = lck->delete_on_close;
if (delete_dir) {
int i;
/* See if others still have the dir open. If this is the
* case, then don't delete. If all opens are POSIX delete now. */
for (i=0; i<lck->num_share_modes; i++) {
struct share_mode_entry *e = &lck->share_modes[i];
if (is_valid_share_mode_entry(e)) {
if (fsp->posix_open && (e->flags & SHARE_MODE_FLAG_POSIX_OPEN)) {
continue;
}
delete_dir = False;
break;
}
}
}
if ((close_type == NORMAL_CLOSE || close_type == SHUTDOWN_CLOSE) &&
delete_dir &&
lck->delete_token) {
/* Become the user who requested the delete. */
if (!push_sec_ctx()) {
smb_panic("close_directory: failed to push sec_ctx.\n");
}
set_sec_ctx(lck->delete_token->uid,
lck->delete_token->gid,
lck->delete_token->ngroups,
lck->delete_token->groups,
NULL);
TALLOC_FREE(lck);
status = rmdir_internals(fsp->conn, fsp->fsp_name);
DEBUG(5,("close_directory: %s. Delete on close was set - "
"deleting directory returned %s.\n",
fsp->fsp_name, nt_errstr(status)));
/* unbecome user. */
pop_sec_ctx();
/*
* Ensure we remove any change notify requests that would
* now fail as the directory has been deleted.
*/
if(NT_STATUS_IS_OK(status)) {
remove_pending_change_notify_requests_by_fid(fsp, NT_STATUS_DELETE_PENDING);
}
} else {
TALLOC_FREE(lck);
remove_pending_change_notify_requests_by_fid(
fsp, NT_STATUS_OK);
}
/*
//.........这里部分代码省略.........