This article collects typical usage examples of the Golang function goshawkdb/io/server.SegToBytes. If you have been wondering what SegToBytes does, how to call it, or what real code that uses it looks like, the hand-picked examples below should help.
A total of 14 code examples of the SegToBytes function are shown below, ordered by popularity.
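Every example follows the same pattern: allocate a fresh Cap'n Proto segment with capn.NewBuffer(nil), build a root message (or other root struct) inside it, and then flatten the segment with server.SegToBytes before handing the resulting []byte to a network sender or an LMDB write transaction. As a rough mental model of SegToBytes itself, here is a minimal sketch, assuming the glycerine/go-capnproto API that these examples use; it is an illustration, not the actual goshawkdb source:

// Hypothetical sketch only — not the actual goshawkdb implementation.
// It serializes a capn.Segment into a []byte by streaming the framed
// segment into an in-memory buffer.
package server

import (
	"bytes"
	"log"

	capn "github.com/glycerine/go-capnproto" // assumed import path
)

func SegToBytes(seg *capn.Segment) []byte {
	buf := new(bytes.Buffer)
	// Segment.WriteTo writes the framed segment to any io.Writer.
	if _, err := seg.WriteTo(buf); err != nil {
		log.Fatalln("Failed to serialize segment:", err)
	}
	return buf.Bytes()
}

With that model in mind, each example below reduces to: build the message, call SegToBytes on its segment, and ship or persist the bytes.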
Example 1: newTwoBTxnVotesSender
func newTwoBTxnVotesSender(outcome *msgs.Outcome, txnId *common.TxnId, submitter common.RMId, recipients ...common.RMId) *twoBTxnVotesSender {
	submitterSeg := capn.NewBuffer(nil)
	submitterMsg := msgs.NewRootMessage(submitterSeg)
	submitterMsg.SetSubmissionOutcome(*outcome)
	if outcome.Which() == msgs.OUTCOME_ABORT {
		abort := outcome.Abort()
		abort.SetResubmit() // nuke out the updates as proposers don't need them.
	}
	seg := capn.NewBuffer(nil)
	msg := msgs.NewRootMessage(seg)
	twoB := msgs.NewTwoBTxnVotes(seg)
	msg.SetTwoBTxnVotes(twoB)
	twoB.SetOutcome(*outcome)
	server.Log(txnId, "Sending 2B to", recipients)
	return &twoBTxnVotesSender{
		msg:          server.SegToBytes(seg),
		recipients:   recipients,
		submitterMsg: server.SegToBytes(submitterSeg),
		submitter:    submitter,
	}
}
Example 2: start
func (cr *connectionRun) start() (bool, error) {
	log.Printf("Connection established to %v (%v)\n", cr.remoteHost, cr.remoteRMId)
	seg := capn.NewBuffer(nil)
	message := msgs.NewRootMessage(seg)
	message.SetHeartbeat()
	cr.beatBytes = server.SegToBytes(seg)
	if cr.isServer {
		cr.connectionManager.ServerEstablished(cr.Connection)
	}
	if cr.isClient {
		topology, servers := cr.connectionManager.ClientEstablished(cr.ConnectionNumber, cr.Connection)
		cr.connectionManager.AddSender(cr.Connection)
		cr.submitter = client.NewClientTxnSubmitter(cr.connectionManager.RMId, cr.connectionManager.BootCount, topology, cr.connectionManager)
		cr.submitter.TopologyChange(nil, servers)
	}
	cr.mustSendBeat = true
	cr.missingBeats = 0
	cr.beater = newConnectionBeater(cr.Connection)
	go cr.beater.beat()
	cr.reader = newConnectionReader(cr.Connection)
	go cr.reader.read()
	return false, nil
}
Example 3: handleMsgFromPeer
func (cr *connectionRun) handleMsgFromPeer(msg *msgs.Message) error {
	if cr.currentState != cr {
		// probably just draining the queue from the reader after a restart
		return nil
	}
	cr.missingBeats = 0
	switch which := msg.Which(); which {
	case msgs.MESSAGE_HEARTBEAT:
		// do nothing
	case msgs.MESSAGE_CLIENTTXNSUBMISSION:
		ctxn := msg.ClientTxnSubmission()
		origTxnId := common.MakeTxnId(ctxn.Id())
		cr.submitter.SubmitClientTransaction(&ctxn, func(clientOutcome *msgs.ClientTxnOutcome, err error) {
			switch {
			case err != nil:
				cr.clientTxnError(&ctxn, err, origTxnId)
			case clientOutcome == nil: // shutdown
				return
			default:
				seg := capn.NewBuffer(nil)
				msg := msgs.NewRootMessage(seg)
				msg.SetClientTxnOutcome(*clientOutcome)
				cr.sendMessage(server.SegToBytes(msg.Segment))
			}
		})
	default:
		cr.connectionManager.Dispatchers.DispatchMessage(cr.remoteRMId, which, msg)
	}
	return nil
}
Example 4: maybeWriteToDisk
func (palc *proposerAwaitLocallyComplete) maybeWriteToDisk() {
	if !(palc.currentState == palc && palc.callbackInvoked && palc.allAcceptorsAgreed) {
		return
	}
	stateSeg := capn.NewBuffer(nil)
	state := msgs.NewRootProposerState(stateSeg)
	acceptorsCap := stateSeg.NewUInt32List(len(palc.acceptors))
	state.SetAcceptors(acceptorsCap)
	for idx, rmId := range palc.acceptors {
		acceptorsCap.Set(idx, uint32(rmId))
	}
	data := server.SegToBytes(stateSeg)
	future := palc.proposerManager.Disk.ReadWriteTransaction(false, func(rwtxn *mdbs.RWTxn) (interface{}, error) {
		return nil, rwtxn.Put(db.DB.Proposers, palc.txnId[:], data, 0)
	})
	go func() {
		if _, err := future.ResultError(); err != nil {
			log.Printf("Error: %v when writing proposer to disk: %v\n", palc.txnId, err)
			return
		}
		palc.proposerManager.Exe.Enqueue(palc.writeDone)
	}()
}
Example 5: start
func (awtd *acceptorWriteToDisk) start() {
	outcome := awtd.outcome
	outcomeCap := (*msgs.Outcome)(outcome)
	awtd.sendToAll = awtd.sendToAll || outcomeCap.Which() == msgs.OUTCOME_COMMIT
	sendToAll := awtd.sendToAll
	stateSeg := capn.NewBuffer(nil)
	state := msgs.NewRootAcceptorState(stateSeg)
	state.SetTxn(*awtd.ballotAccumulator.Txn)
	state.SetOutcome(*outcomeCap)
	state.SetSendToAll(awtd.sendToAll)
	state.SetInstances(awtd.ballotAccumulator.AddInstancesToSeg(stateSeg))
	data := server.SegToBytes(stateSeg)
	// to ensure correct order of writes, schedule the write from
	// the current go-routine...
	server.Log(awtd.txnId, "Writing 2B to disk...")
	future := awtd.acceptorManager.Disk.ReadWriteTransaction(false, func(rwtxn *mdbs.RWTxn) (interface{}, error) {
		return nil, rwtxn.Put(db.DB.BallotOutcomes, awtd.txnId[:], data, 0)
	})
	go func() {
		// ... but process the result in a new go-routine to avoid blocking the executor.
		if _, err := future.ResultError(); err != nil {
			log.Printf("Error: %v Acceptor Write error: %v", awtd.txnId, err)
			return
		}
		server.Log(awtd.txnId, "Writing 2B to disk...done.")
		awtd.acceptorManager.Exe.Enqueue(func() { awtd.writeDone(outcome, sendToAll) })
	}()
}
Example 6: OneATxnVotesReceived
func (am *AcceptorManager) OneATxnVotesReceived(sender common.RMId, txnId *common.TxnId, oneATxnVotes *msgs.OneATxnVotes) {
	instanceRMId := common.RMId(oneATxnVotes.RmId())
	server.Log(txnId, "1A received from", sender, "; instance:", instanceRMId)
	instId := instanceId([instanceIdLen]byte{})
	instIdSlice := instId[:]
	copy(instIdSlice, txnId[:])
	binary.BigEndian.PutUint32(instIdSlice[common.KeyLen:], uint32(instanceRMId))
	replySeg := capn.NewBuffer(nil)
	msg := msgs.NewRootMessage(replySeg)
	oneBTxnVotes := msgs.NewOneBTxnVotes(replySeg)
	msg.SetOneBTxnVotes(oneBTxnVotes)
	oneBTxnVotes.SetRmId(oneATxnVotes.RmId())
	oneBTxnVotes.SetTxnId(oneATxnVotes.TxnId())
	proposals := oneATxnVotes.Proposals()
	promises := msgs.NewTxnVotePromiseList(replySeg, proposals.Len())
	oneBTxnVotes.SetPromises(promises)
	for idx, l := 0, proposals.Len(); idx < l; idx++ {
		proposal := proposals.At(idx)
		vUUId := common.MakeVarUUId(proposal.VarId())
		copy(instIdSlice[common.KeyLen+4:], vUUId[:])
		promise := promises.At(idx)
		promise.SetVarId(vUUId[:])
		am.ensureInstance(txnId, &instId, vUUId).OneATxnVotesReceived(&proposal, &promise)
	}
	NewOneShotSender(server.SegToBytes(replySeg), am.ConnectionManager, sender)
}
Example 7: MakeTxnSubmissionAbortMsg
func MakeTxnSubmissionAbortMsg(txnId *common.TxnId) []byte {
	seg := capn.NewBuffer(nil)
	msg := msgs.NewRootMessage(seg)
	tsa := msgs.NewTxnSubmissionAbort(seg)
	msg.SetSubmissionAbort(tsa)
	tsa.SetTxnId(txnId[:])
	return server.SegToBytes(seg)
}
Example 8: MakeTxnSubmissionCompleteMsg
func MakeTxnSubmissionCompleteMsg(txnId *common.TxnId) []byte {
	seg := capn.NewBuffer(nil)
	msg := msgs.NewRootMessage(seg)
	tsc := msgs.NewTxnSubmissionComplete(seg)
	msg.SetSubmissionComplete(tsc)
	tsc.SetTxnId(txnId[:])
	return server.SegToBytes(seg)
}
Example 9: MakeTxnLocallyCompleteMsg
func MakeTxnLocallyCompleteMsg(txnId *common.TxnId) []byte {
	seg := capn.NewBuffer(nil)
	msg := msgs.NewRootMessage(seg)
	tlc := msgs.NewTxnLocallyComplete(seg)
	msg.SetTxnLocallyComplete(tlc)
	tlc.SetTxnId(txnId[:])
	return server.SegToBytes(seg)
}
Example 10: TxnToRootBytes
func TxnToRootBytes(txn *msgs.Txn) []byte {
	seg := capn.NewBuffer(nil)
	txnCap := msgs.NewRootTxn(seg)
	txnCap.SetId(txn.Id())
	txnCap.SetRetry(txn.Retry())
	txnCap.SetSubmitter(txn.Submitter())
	txnCap.SetSubmitterBootCount(txn.SubmitterBootCount())
	txnCap.SetActions(txn.Actions())
	txnCap.SetAllocations(txn.Allocations())
	txnCap.SetFInc(txn.FInc())
	txnCap.SetTopologyVersion(txn.TopologyVersion())
	return server.SegToBytes(seg)
}
Example 11: deletionDone
func (adfd *acceptorDeleteFromDisk) deletionDone() {
	if adfd.currentState == adfd {
		adfd.nextState(nil)
		adfd.acceptorManager.AcceptorFinished(adfd.txnId)
		seg := capn.NewBuffer(nil)
		msg := msgs.NewRootMessage(seg)
		tgc := msgs.NewTxnGloballyComplete(seg)
		msg.SetTxnGloballyComplete(tgc)
		tgc.SetTxnId(adfd.txnId[:])
		server.Log(adfd.txnId, "Sending TGC to", adfd.tgcRecipients)
		NewOneShotSender(server.SegToBytes(seg), adfd.acceptorManager.ConnectionManager, adfd.tgcRecipients...)
	}
}
Example 12: SubmitTransaction
func (sts *SimpleTxnSubmitter) SubmitTransaction(txnCap *msgs.Txn, activeRMs []common.RMId, continuation TxnCompletionConsumer, delay time.Duration) {
	seg := capn.NewBuffer(nil)
	msg := msgs.NewRootMessage(seg)
	msg.SetTxnSubmission(*txnCap)
	txnId := common.MakeTxnId(txnCap.Id())
	server.Log(txnId, "Submitting txn")
	txnSender := paxos.NewRepeatingSender(server.SegToBytes(seg), activeRMs...)
	if delay == 0 {
		sts.connectionManager.AddSender(txnSender)
	} else {
		go func() {
			// fmt.Printf("%v ", delay)
			time.Sleep(delay)
			sts.connectionManager.AddSender(txnSender)
		}()
	}
	acceptors := paxos.GetAcceptorsFromTxn(txnCap)
	shutdownFun := func(shutdown bool) {
		delete(sts.outcomeConsumers, *txnId)
		// fmt.Printf("sts%v ", len(sts.outcomeConsumers))
		sts.connectionManager.RemoveSenderAsync(txnSender)
		paxos.NewOneShotSender(paxos.MakeTxnSubmissionCompleteMsg(txnId), sts.connectionManager, acceptors...)
		if shutdown {
			if txnCap.Retry() {
				paxos.NewOneShotSender(paxos.MakeTxnSubmissionAbortMsg(txnId), sts.connectionManager, activeRMs...)
			}
			continuation(txnId, nil)
		}
	}
	shutdownFunPtr := &shutdownFun
	sts.onShutdown[shutdownFunPtr] = server.EmptyStructVal
	outcomeAccumulator := paxos.NewOutcomeAccumulator(int(txnCap.FInc()), acceptors)
	consumer := func(sender common.RMId, txnId *common.TxnId, outcome *msgs.Outcome) {
		if outcome, _ = outcomeAccumulator.BallotOutcomeReceived(sender, outcome); outcome != nil {
			delete(sts.onShutdown, shutdownFunPtr)
			shutdownFun(false)
			continuation(txnId, outcome)
		}
	}
	sts.outcomeConsumers[*txnId] = consumer
	// fmt.Printf("sts%v ", len(sts.outcomeConsumers))
}
Example 13: TxnLocallyCompleteReceived
func (am *AcceptorManager) TxnLocallyCompleteReceived(sender common.RMId, txnId *common.TxnId, tlc *msgs.TxnLocallyComplete) {
	if aInst, found := am.acceptors[*txnId]; found && aInst.acceptor != nil {
		server.Log(txnId, "TLC received from", sender, "(acceptor found)")
		aInst.acceptor.TxnLocallyCompleteReceived(sender)
	} else {
		// We must have deleted the acceptor state from disk,
		// immediately prior to sending TGC, and then died. Now we're
		// back up, the proposers have sent us more TLCs, and we should
		// just reply with TGCs.
		server.Log(txnId, "TLC received from", sender, "(acceptor not found)")
		seg := capn.NewBuffer(nil)
		msg := msgs.NewRootMessage(seg)
		tgc := msgs.NewTxnGloballyComplete(seg)
		msg.SetTxnGloballyComplete(tgc)
		tgc.SetTxnId(txnId[:])
		server.Log(txnId, "Sending single TGC to", sender)
		NewOneShotSender(server.SegToBytes(seg), am.ConnectionManager, sender)
	}
}
Example 14: maybeWriteFrame
func (v *Var) maybeWriteFrame(f *frame, action *localAction, positions *common.Positions) {
	if v.writeInProgress != nil {
		v.writeInProgress = func() {
			v.writeInProgress = nil
			v.maybeWriteFrame(f, action, positions)
		}
		return
	}
	v.writeInProgress = func() {
		v.writeInProgress = nil
		v.maybeMakeInactive()
	}
	oldVarCap := *v.varCap
	varSeg := capn.NewBuffer(nil)
	varCap := msgs.NewRootVar(varSeg)
	v.varCap = &varCap
	varCap.SetId(oldVarCap.Id())
	if positions != nil {
		varCap.SetPositions(capn.UInt8List(*positions))
	} else {
		varCap.SetPositions(oldVarCap.Positions())
	}
	varCap.SetWriteTxnId(f.frameTxnId[:])
	varCap.SetWriteTxnClock(f.frameTxnClock.AddToSeg(varSeg))
	varCap.SetWritesClock(f.frameWritesClock.AddToSeg(varSeg))
	varData := server.SegToBytes(varSeg)
	txnBytes := action.TxnRootBytes()
	// to ensure correct order of writes, schedule the write from
	// the current go-routine...
	future := v.disk.ReadWriteTransaction(false, func(rwtxn *mdbs.RWTxn) (interface{}, error) {
		if err := db.WriteTxnToDisk(rwtxn, f.frameTxnId, txnBytes); err != nil {
			return nil, err
		}
		if err := rwtxn.Put(db.DB.Vars, v.UUId[:], varData, 0); err != nil {
			return nil, err
		}
		if v.curFrameOnDisk != nil {
			return nil, db.DeleteTxnFromDisk(rwtxn, v.curFrameOnDisk.frameTxnId)
		}
		return nil, nil
	})
	go func() {
		// ... but process the result in a new go-routine to avoid blocking the executor.
		if _, err := future.ResultError(); err != nil {
			log.Println("Var error when writing to disk:", err)
			return
		}
		// Switch back to the right go-routine
		v.applyToVar(func() {
			server.Log(v.UUId, "Wrote", f.frameTxnId)
			v.curFrameOnDisk = f
			for ancestor := f.parent; ancestor != nil && ancestor.DescendentOnDisk(); ancestor = ancestor.parent {
			}
			v.writeInProgress()
		})
	}()
}