This article collects typical usage examples of the Golang types.Container type from github.com/crunchydata/crunchy-postgresql-manager. If you are wondering what the Container type is for, how it is used, or what real calls against it look like, the hand-picked examples below should help.
Eight code examples of the Container type are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the site recommend better Golang code examples.
Example 1: GetAllContainers
// GetAllContainers returns a list of all containers
func GetAllContainers(dbConn *sql.DB) ([]types.Container, error) {
	var rows *sql.Rows
	var err error
	queryStr := fmt.Sprintf("select c.id, c.name, c.clusterid, c.role, c.image, to_char(c.createdt, 'MM-DD-YYYY HH24:MI:SS'), p.id, p.name from project p, container c where c.projectid = p.id order by c.name")
	logit.Info.Println("admindb:GetAllContainers:" + queryStr)
	rows, err = dbConn.Query(queryStr)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var clustername string
	containers := make([]types.Container, 0)
	for rows.Next() {
		container := types.Container{}
		if err = rows.Scan(&container.ID, &container.Name, &container.ClusterID, &container.Role, &container.Image, &container.CreateDate, &container.ProjectID, &container.ProjectName); err != nil {
			return nil, err
		}
		logit.Info.Println("cluster id is [" + container.ClusterID + "]")
		if container.ClusterID != "-1" {
			clustername, err = GetClusterName(dbConn, container.ClusterID)
			if err != nil {
				logit.Info.Println("admindb:GetAllContainers:error " + err.Error())
				return nil, err
			}
			container.ClusterName = clustername
		}
		containers = append(containers, container)
	}
	if err = rows.Err(); err != nil {
		return nil, err
	}
	return containers, nil
}
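A minimal caller for GetAllContainers might look like the sketch below. The lib/pq driver, the connection string, and the admindb import path are assumptions rather than part of the example; only the function signature and the Container fields come from the code above.

package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/lib/pq" // assumed Postgres driver

	"github.com/crunchydata/crunchy-postgresql-manager/admindb" // assumed package path
)

func main() {
	// placeholder connection string; adjust host, database, and credentials
	dbConn, err := sql.Open("postgres", "host=localhost dbname=clusteradmin sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer dbConn.Close()

	containers, err := admindb.GetAllContainers(dbConn)
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range containers {
		fmt.Println(c.Name, c.Role, c.ClusterName, c.CreateDate)
	}
}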
Example 2: GetContainerPgpool
// GetContainerPgpool finds the pgpool container in a cluster
func GetContainerPgpool(dbConn *sql.DB, clusterid string) (types.Container, error) {
	//logit.Info.Println("admindb:GetContainerPgpool:called")
	container := types.Container{}
	queryStr := fmt.Sprintf("select c.id, c.name, c.clusterid, c.role, c.image, to_char(c.createdt, 'MM-DD-YYYY HH24:MI:SS'), p.id, p.name from project p, container c where c.role = 'pgpool' and c.clusterid = %s and c.projectid = p.id", clusterid)
	logit.Info.Println("admindb:GetContainerPgpool:" + queryStr)
	err := dbConn.QueryRow(queryStr).Scan(&container.ID, &container.Name, &container.ClusterID, &container.Role, &container.Image, &container.CreateDate, &container.ProjectID, &container.ProjectName)
	switch {
	case err == sql.ErrNoRows:
		logit.Info.Println("admindb:GetContainerPgpool: no pgpool container with that clusterid " + clusterid)
		return container, err
	case err != nil:
		return container, err
	}
	if container.ClusterID != "-1" {
		var clustername string
		clustername, err = GetClusterName(dbConn, container.ClusterID)
		if err != nil {
			logit.Info.Println("admindb:GetContainerPgPool:error " + err.Error())
			return container, err
		}
		container.ClusterName = clustername
	}
	return container, nil
}
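Because GetContainerPgpool returns sql.ErrNoRows when a cluster has no pgpool container, callers usually treat that case separately. A short sketch, reusing the dbConn from the previous snippet (the admindb package path is again an assumption; the cluster id is a placeholder):

	// "5" is a placeholder cluster id
	pgpool, err := admindb.GetContainerPgpool(dbConn, "5")
	switch {
	case err == sql.ErrNoRows:
		log.Println("cluster has no pgpool container")
	case err != nil:
		log.Fatal(err)
	default:
		log.Println("pgpool container:", pgpool.Name, "image:", pgpool.Image)
	}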
Example 3: GetAllContainersNotInCluster
// GetAllContainersNotInCluster is used to fetch all nodes that are eligible to be added into a cluster
func GetAllContainersNotInCluster(dbConn *sql.DB) ([]types.Container, error) {
	var rows *sql.Rows
	var err error
	queryStr := fmt.Sprintf("select c.id, c.name, c.clusterid, c.role, c.image, to_char(c.createdt, 'MM-DD-YYYY HH24:MI:SS'), p.id, p.name, l.name from project p, container c left join cluster l on c.clusterid = l.id where c.role != 'standalone' and c.clusterid = -1 and c.projectid = p.id order by c.name")
	logit.Info.Println("admindb:GetAllContainersNotInCluster:" + queryStr)
	rows, err = dbConn.Query(queryStr)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	containers := make([]types.Container, 0)
	for rows.Next() {
		container := types.Container{}
		if err = rows.Scan(&container.ID, &container.Name, &container.ClusterID, &container.Role, &container.Image, &container.CreateDate, &container.ProjectID, &container.ProjectName, &container.ClusterName); err != nil {
			return nil, err
		}
		container.ClusterName = container.ClusterID
		containers = append(containers, container)
	}
	if err = rows.Err(); err != nil {
		return nil, err
	}
	return containers, nil
}
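A typical use of GetAllContainersNotInCluster is to build the list of nodes a UI can offer when adding members to a cluster. A sketch, with the same assumptions about dbConn and the admindb package path as above:

	candidates, err := admindb.GetAllContainersNotInCluster(dbConn)
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range candidates {
		fmt.Printf("eligible node: %s (image=%s, role=%s)\n", c.Name, c.Image, c.Role)
	}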
Example 4: AutoCluster
// AutoCluster creates a new cluster
func AutoCluster(w rest.ResponseWriter, r *rest.Request) {
	dbConn, err := util.GetConnection(CLUSTERADMIN_DB)
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), 400)
		return
	}
	defer dbConn.Close()
	logit.Info.Println("AUTO CLUSTER PROFILE starts")
	params := AutoClusterInfo{}
	err = r.DecodeJsonPayload(&params)
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	err = secimpl.Authorize(dbConn, params.Token, "perm-cluster")
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusUnauthorized)
		return
	}
	if params.Name == "" {
		logit.Error.Println("AutoCluster: error in Name")
		rest.Error(w, "cluster name required", http.StatusBadRequest)
		return
	}
	if params.ClusterType == "" {
		logit.Error.Println("AutoCluster: error in ClusterType")
		rest.Error(w, "ClusterType name required", http.StatusBadRequest)
		return
	}
	if params.ProjectID == "" {
		logit.Error.Println("AutoCluster: error in ProjectID")
		rest.Error(w, "ProjectID name required", http.StatusBadRequest)
		return
	}
	if params.ClusterProfile == "" {
		logit.Error.Println("AutoCluster: error in ClusterProfile")
		rest.Error(w, "ClusterProfile name required", http.StatusBadRequest)
		return
	}
	logit.Info.Println("AutoCluster: Name=" + params.Name + " ClusterType=" + params.ClusterType + " Profile=" + params.ClusterProfile + " ProjectID=" + params.ProjectID)
	//create cluster definition
	dbcluster := types.Cluster{}
	dbcluster.ID = ""
	dbcluster.ProjectID = params.ProjectID
	dbcluster.Name = util.CleanName(params.Name)
	dbcluster.ClusterType = params.ClusterType
	dbcluster.Status = "uninitialized"
	dbcluster.Containers = make(map[string]string)
	var ival int
	ival, err = admindb.InsertCluster(dbConn, dbcluster)
	clusterID := strconv.Itoa(ival)
	dbcluster.ID = clusterID
	//logit.Info.Println(clusterID)
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, "Insert Cluster error:"+err.Error(), http.StatusBadRequest)
		return
	}
	//lookup profile
	profile, err2 := getClusterProfileInfo(dbConn, params.ClusterProfile)
	if err2 != nil {
		logit.Error.Println(err2.Error())
		rest.Error(w, "AutoCluster error"+err2.Error(), http.StatusBadRequest)
		return
	}
	//var masterServer types.Server
	//var chosenServers []types.Server
	if profile.Algo == "round-robin" {
		//masterServer, chosenServers, err2 = roundRobin(dbConn, profile)
	} else {
		logit.Error.Println("AutoCluster: error-unsupported algorithm request")
		rest.Error(w, "AutoCluster error: unsupported algorithm", http.StatusBadRequest)
		return
	}
	//create master container
	dockermaster := swarmapi.DockerRunRequest{}
	dockermaster.Image = "cpm-node"
	dockermaster.ContainerName = params.Name + "-master"
	dockermaster.ProjectID = params.ProjectID
	dockermaster.Standalone = "false"
	dockermaster.Profile = profile.MasterProfile
	if err != nil {
		logit.Error.Println("AutoCluster: error-create master node " + err.Error())
		rest.Error(w, "AutoCluster error"+err.Error(), http.StatusBadRequest)
		return
	}
	//......... part of the code omitted here .........
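From the validation at the top of AutoCluster, a request payload must carry at least Name, ClusterType, ProjectID, ClusterProfile, and a Token. A hedged sketch of issuing such a request follows; the field values and the URL path are placeholders of my own, not values taken from the source.

// assumed imports: bytes, encoding/json, net/http
func requestAutoCluster(baseURL, token string) error {
	payload, err := json.Marshal(map[string]string{
		"Name":           "mycluster", // required: cluster name
		"ClusterType":    "async",     // required: placeholder value
		"ProjectID":      "1",         // required: placeholder project id
		"ClusterProfile": "SM",        // required: placeholder profile name
		"Token":          token,       // checked by secimpl.Authorize
	})
	if err != nil {
		return err
	}
	// the route below is an assumption; check the CPM REST router for the real path
	resp, err := http.Post(baseURL+"/autocluster", "application/json", bytes.NewReader(payload))
	if err != nil {
		return err
	}
	return resp.Body.Close()
}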
Example 5: EventJoinCluster
// TODO
// EventJoinCluster adds the containers listed in the IDList path parameter to the
// cluster identified by ClusterID, optionally promoting MasterID to the master role.
func EventJoinCluster(w rest.ResponseWriter, r *rest.Request) {
	dbConn, err := util.GetConnection(CLUSTERADMIN_DB)
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), 400)
		return
	}
	defer dbConn.Close()
	err = secimpl.Authorize(dbConn, r.PathParam("Token"), "perm-cluster")
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusUnauthorized)
		return
	}
	IDList := r.PathParam("IDList")
	if IDList == "" {
		logit.Error.Println("IDList required")
		rest.Error(w, "IDList required", http.StatusBadRequest)
		return
	} else {
		logit.Info.Println("EventJoinCluster: IDList=[" + IDList + "]")
	}
	MasterID := r.PathParam("MasterID")
	if MasterID == "" {
		logit.Error.Println("MasterID required")
		rest.Error(w, "MasterID required", http.StatusBadRequest)
		return
	} else {
		logit.Info.Println("EventJoinCluster: MasterID=[" + MasterID + "]")
	}
	ClusterID := r.PathParam("ClusterID")
	if ClusterID == "" {
		logit.Error.Println("ClusterID required")
		rest.Error(w, "node ClusterID required", http.StatusBadRequest)
		return
	} else {
		logit.Info.Println("EventJoinCluster: ClusterID=[" + ClusterID + "]")
	}
	var idList = strings.Split(IDList, "_")
	i := 0
	pgpoolCount := 0
	origDBNode := types.Container{}
	for i = range idList {
		if idList[i] != "" {
			logit.Info.Println("EventJoinCluster: idList[" + strconv.Itoa(i) + "]=" + idList[i])
			origDBNode, err = admindb.GetContainer(dbConn, idList[i])
			if err != nil {
				logit.Error.Println(err.Error())
				rest.Error(w, err.Error(), http.StatusBadRequest)
				return
			}
			//update the node to be in the cluster
			origDBNode.ClusterID = ClusterID
			if origDBNode.Image == "cpm-node" {
				origDBNode.Role = STANDBY
			} else {
				origDBNode.Role = "pgpool"
				pgpoolCount++
			}
			if pgpoolCount > 1 {
				logit.Error.Println("EventJoinCluster: more than 1 pgpool is in the cluster")
				rest.Error(w, "only 1 pgpool is allowed in a cluster", http.StatusBadRequest)
				return
			}
			err = admindb.UpdateContainer(dbConn, origDBNode)
			if err != nil {
				logit.Error.Println(err.Error())
				rest.Error(w, err.Error(), http.StatusBadRequest)
				return
			}
		}
		i++
	}
	//we use the -1 value to indicate that we are only adding
	//to an existing cluster, the UI doesn't know who the master
	//is at this point
	if MasterID != "-1" {
		//update the master node
		origDBNode, err = admindb.GetContainer(dbConn, MasterID)
		if err != nil {
			logit.Error.Println(err.Error())
			rest.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		origDBNode.ClusterID = ClusterID
		origDBNode.Role = "master"
		err = admindb.UpdateContainer(dbConn, origDBNode)
		if err != nil {
			logit.Error.Println(err.Error())
			//......... part of the code omitted here .........
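EventJoinCluster expects IDList as a single path parameter with container IDs joined by underscores, which it splits apart again with strings.Split(IDList, "_"). A small helper that builds that parameter from a slice of Container values (a sketch; only the underscore convention and the Container.ID field come from the example above):

// assumed imports: strings, plus the CPM types package
func buildIDList(nodes []types.Container) string {
	ids := make([]string, 0, len(nodes))
	for _, n := range nodes {
		ids = append(ids, n.ID)
	}
	// EventJoinCluster splits this string on "_" to recover the IDs
	return strings.Join(ids, "_")
}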
Example 6: AdminFailover
// AdminFailover causes a cluster failover to be performed for a given cluster
func AdminFailover(w rest.ResponseWriter, r *rest.Request) {
	dbConn, err := util.GetConnection(CLUSTERADMIN_DB)
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), 400)
		return
	}
	defer dbConn.Close()
	err = secimpl.Authorize(dbConn, r.PathParam("Token"), "perm-cluster")
	if err != nil {
		logit.Error.Println("authorize error " + err.Error())
		rest.Error(w, err.Error(), http.StatusUnauthorized)
		return
	}
	ID := r.PathParam("ID")
	if ID == "" {
		logit.Error.Println("node ID required error")
		rest.Error(w, "node ID required", http.StatusBadRequest)
		return
	}
	//dbNode is the standby node we are going to fail over and
	//make the new master in the cluster
	var dbNode types.Container
	dbNode, err = admindb.GetContainer(dbConn, ID)
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	cluster, err := admindb.GetCluster(dbConn, dbNode.ClusterID)
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	var failoverResp cpmcontainerapi.FailoverResponse
	failoverResp, err = cpmcontainerapi.FailoverClient(dbNode.Name)
	if err != nil {
		logit.Error.Println("fail-over error " + err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	logit.Info.Println("AdminFailover: fail-over output " + failoverResp.Output)
	//update the old master to standalone role
	oldMaster := types.Container{}
	oldMaster, err = admindb.GetContainerMaster(dbConn, dbNode.ClusterID)
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	oldMaster.Role = "standalone"
	oldMaster.ClusterID = "-1"
	err = admindb.UpdateContainer(dbConn, oldMaster)
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	//update the failover node to master role
	dbNode.Role = "master"
	err = admindb.UpdateContainer(dbConn, dbNode)
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	//stop pg on the old master
	//params.IPAddress1 = oldMaster.IPAddress
	var stopPGResp cpmcontainerapi.StopPGResponse
	stopPGResp, err = cpmcontainerapi.StopPGClient(oldMaster.Name)
	if err != nil {
		logit.Error.Println(err.Error() + stopPGResp.Output)
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	err = configureCluster("SM", dbConn, cluster, false)
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	w.WriteHeader(http.StatusOK)
	status := types.SimpleStatus{}
	status.Status = "OK"
	w.WriteJson(&status)
	return
}
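Stripped of the HTTP and replication plumbing, the container bookkeeping in AdminFailover amounts to two updates: the old master is demoted to a standalone container outside any cluster (ClusterID "-1"), and the chosen standby is promoted to master. A sketch of just that step, assuming admindb.UpdateContainer behaves as used above:

// assumed imports: database/sql, plus the CPM types and admindb packages
func demoteAndPromote(dbConn *sql.DB, oldMaster, newMaster types.Container) error {
	oldMaster.Role = "standalone"
	oldMaster.ClusterID = "-1" // -1 marks a container that is not a member of any cluster
	if err := admindb.UpdateContainer(dbConn, oldMaster); err != nil {
		return err
	}
	newMaster.Role = "master" // newMaster keeps its existing ClusterID
	return admindb.UpdateContainer(dbConn, newMaster)
}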
Example 7: ScaleUpCluster
// ScaleUpCluster increases the count of standby containers in a cluster
func ScaleUpCluster(w rest.ResponseWriter, r *rest.Request) {
	dbConn, err := util.GetConnection(CLUSTERADMIN_DB)
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), 400)
		return
	}
	defer dbConn.Close()
	err = secimpl.Authorize(dbConn, r.PathParam("Token"), "perm-read")
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusUnauthorized)
		return
	}
	ID := r.PathParam("ID")
	cluster, err := admindb.GetCluster(dbConn, ID)
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	var containers []types.Container
	containers, err = admindb.GetAllContainersForCluster(dbConn, ID)
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	//determine number of standby nodes currently
	standbyCnt := 0
	for i := range containers {
		if containers[i].Role == STANDBY {
			standbyCnt++
		}
	}
	//logit.Info.Printf("standbyCnt ends at %d\n", standbyCnt)
	//provision new container
	params := new(swarmapi.DockerRunRequest)
	params.Image = "cpm-node"
	//TODO make the server choice smart
	params.ProjectID = cluster.ProjectID
	params.ContainerName = cluster.Name + "-" + STANDBY + "-" + fmt.Sprintf("%d", standbyCnt)
	params.Standalone = "false"
	var standby = true
	params.Profile = "LG"
	//logit.Info.Printf("here with ProjectID %s\n", cluster.ProjectID)
	_, err = provisionImpl(dbConn, params, standby)
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	err = provisionImplInit(dbConn, params, false)
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	//need to update the new container's ClusterID
	var node types.Container
	node, err = admindb.GetContainerByName(dbConn, params.ContainerName)
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, "error"+err.Error(), http.StatusBadRequest)
		return
	}
	node.ClusterID = cluster.ID
	node.Role = STANDBY
	err = admindb.UpdateContainer(dbConn, node)
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, "error"+err.Error(), http.StatusBadRequest)
		return
	}
	err = configureCluster(params.Profile, dbConn, cluster, false)
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	w.WriteHeader(http.StatusOK)
	status := types.SimpleStatus{}
	status.Status = "OK"
	w.WriteJson(&status)
	//......... part of the code omitted here .........
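The new standby's name is built from the cluster name, the STANDBY role constant, and the current standby count, so names come out as <cluster>-<standby role>-<n>. A sketch of that naming step in isolation, assuming the same STANDBY constant and types as above:

// assumed imports: fmt, plus the CPM types package and the STANDBY constant
func nextStandbyName(cluster types.Cluster, containers []types.Container) string {
	standbyCnt := 0
	for _, c := range containers {
		if c.Role == STANDBY {
			standbyCnt++
		}
	}
	// mirrors ScaleUpCluster's naming; with two existing standbys the ordinal would be 2
	return cluster.Name + "-" + STANDBY + "-" + fmt.Sprintf("%d", standbyCnt)
}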
Example 8: provisionImpl
func provisionImpl(dbConn *sql.DB, params *swarmapi.DockerRunRequest, standby bool) (string, error) {
	logit.Info.Println("PROFILE: provisionImpl starts 1")
	var errorStr string
	//make sure the container name is not already taken
	_, err := admindb.GetContainerByName(dbConn, params.ContainerName)
	if err != nil {
		if err != sql.ErrNoRows {
			return "", err
		}
	} else {
		errorStr = "container name " + params.ContainerName + " already used can't provision"
		logit.Error.Println(errorStr)
		return "", errors.New(errorStr)
	}
	//get the pg data path
	var pgdatapath types.Setting
	pgdatapath, err = admindb.GetSetting(dbConn, "PG-DATA-PATH")
	if err != nil {
		logit.Error.Println(err.Error())
		return "", err
	}
	var infoResponse swarmapi.DockerInfoResponse
	infoResponse, err = swarmapi.DockerInfo()
	servers := make([]types.Server, len(infoResponse.Output))
	i := 0
	for i = range infoResponse.Output {
		servers[i].ID = infoResponse.Output[i]
		servers[i].Name = infoResponse.Output[i]
		servers[i].IPAddress = infoResponse.Output[i]
		i++
	}
	//for database nodes, on the target server, we need to allocate
	//a disk volume on all CPM servers for the /pgdata container volume to work with
	//this causes a volume to be created with the directory
	//named the same as the container name
	params.PGDataPath = pgdatapath.Value + "/" + params.ContainerName
	logit.Info.Println("PROFILE provisionImpl 2 about to provision volume " + params.PGDataPath)
	if params.Image != "cpm-pgpool" {
		preq := &cpmserverapi.DiskProvisionRequest{}
		preq.Path = params.PGDataPath
		var response cpmserverapi.DiskProvisionResponse
		for _, each := range servers {
			logit.Info.Println("Provision: provisionvolume on server " + each.Name)
			response, err = cpmserverapi.DiskProvisionClient(each.Name, preq)
			if err != nil {
				logit.Info.Println("Provision: provisionvolume error" + err.Error())
				logit.Error.Println(err.Error())
				return "", err
			}
			logit.Info.Println("Provision: provisionvolume call response=" + response.Status)
		}
	}
	logit.Info.Println("PROFILE provisionImpl 3 provision volume completed")
	//run docker run to create the container
	params.CPU, params.MEM, err = getDockerResourceSettings(dbConn, params.Profile)
	if err != nil {
		logit.Error.Println(err.Error())
		return "", err
	}
	//inspect and remove any existing container
	logit.Info.Println("PROFILE provisionImpl inspect 4")
	inspectReq := &swarmapi.DockerInspectRequest{}
	inspectReq.ContainerName = params.ContainerName
	var inspectResponse swarmapi.DockerInspectResponse
	inspectResponse, err = swarmapi.DockerInspect(inspectReq)
	if err != nil {
		logit.Error.Println(err.Error())
		return "", err
	}
	if inspectResponse.RunningState != "not-found" {
		logit.Info.Println("PROFILE provisionImpl remove existing container 4a")
		rreq := &swarmapi.DockerRemoveRequest{}
		rreq.ContainerName = params.ContainerName
		_, err = swarmapi.DockerRemove(rreq)
		if err != nil {
			logit.Error.Println(err.Error())
			return "", err
		}
	}
	//pass any restore env vars to the new container
	if params.RestoreJob != "" {
		if params.EnvVars == nil {
			//logit.Info.Println("making envvars map")
			params.EnvVars = make(map[string]string)
		}
		params.EnvVars["RestoreJob"] = params.RestoreJob
		params.EnvVars["RestoreRemotePath"] = params.RestoreRemotePath
		params.EnvVars["RestoreRemoteHost"] = params.RestoreRemoteHost
		params.EnvVars["RestoreRemoteUser"] = params.RestoreRemoteUser
		params.EnvVars["RestoreDbUser"] = params.RestoreDbUser
		//......... part of the code omitted here .........
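When params.RestoreJob is set, provisionImpl copies the Restore* fields into the container's environment. A caller restoring from a backup might therefore fill the request roughly as sketched below; the field names appear in the example above, while every value here is a placeholder.

	params := &swarmapi.DockerRunRequest{
		Image:             "cpm-node",
		ContainerName:     "restored-node-0", // must not already exist, or provisionImpl returns an error
		ProjectID:         "1",
		Standalone:        "true",
		Profile:           "SM", // placeholder resource profile
		RestoreJob:        "nightly-basebackup",
		RestoreRemotePath: "/backups/nightly",
		RestoreRemoteHost: "backup.example.com",
		RestoreRemoteUser: "postgres",
		RestoreDbUser:     "postgres",
	}
	// provisionImpl will create params.EnvVars and copy the Restore* values into it
	// before the container is run, as shown in the example above.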