This page collects typical usage examples of the Golang function camlistore.org/pkg/blobref.SHA1FromString. If you have been wondering what SHA1FromString does, how to call it, or what real uses of it look like, the curated examples here should help.
Fifteen code examples of the SHA1FromString function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the site surface better Golang code examples.
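Before the examples, a minimal sketch of what the function actually computes, under the assumption that the import path is the pre-Perkeep camlistore.org/pkg/blobref tree used throughout this page: SHA1FromString hashes the string with SHA-1 and wraps the digest in a *blobref.BlobRef whose String() form is "sha1-" followed by 40 hex digits. The manual crypto/sha1 computation is included only to show the equivalence.

package main

import (
    "crypto/sha1"
    "fmt"

    "camlistore.org/pkg/blobref"
)

func main() {
    s := `{"camliVersion": 1, "camliType": "bytes"}`

    // The blobref form used by every example below.
    br := blobref.SHA1FromString(s)
    fmt.Println(br.String()) // "sha1-<hex digest of s>"

    // The same digest computed by hand, for comparison only.
    fmt.Printf("sha1-%x\n", sha1.Sum([]byte(s)))
}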
Example 1: WriteFileMap
// This is the simple 1MB chunk version. The rolling checksum version is below.
func WriteFileMap(bs blobserver.StatReceiver, fileMap map[string]interface{}, r io.Reader) (*blobref.BlobRef, error) {
parts, size := []BytesPart{}, int64(0)
buf := new(bytes.Buffer)
for {
buf.Reset()
n, err := io.Copy(buf, io.LimitReader(r, 1<<20))
if err != nil {
return nil, err
}
if n == 0 {
break
}
hash := crypto.SHA1.New()
io.Copy(hash, bytes.NewBuffer(buf.Bytes()))
br := blobref.FromHash("sha1", hash)
hasBlob, err := serverHasBlob(bs, br)
if err != nil {
return nil, err
}
if !hasBlob {
sb, err := bs.ReceiveBlob(br, buf)
if err != nil {
return nil, err
}
if expect := (blobref.SizedBlobRef{br, n}); !expect.Equal(sb) {
return nil, fmt.Errorf("schema/filewriter: wrote %s bytes, got %s ack'd", expect, sb)
}
}
size += n
parts = append(parts, BytesPart{
BlobRef: br,
Size: uint64(n),
Offset: 0, // into BlobRef to read from (not of dest)
})
}
err := PopulateParts(fileMap, size, parts)
if err != nil {
return nil, err
}
json, err := MapToCamliJSON(fileMap)
if err != nil {
return nil, err
}
br := blobref.SHA1FromString(json)
sb, err := bs.ReceiveBlob(br, strings.NewReader(json))
if err != nil {
return nil, err
}
if expect := (blobref.SizedBlobRef{br, int64(len(json))}); !expect.Equal(sb) {
return nil, fmt.Errorf("schema/filewriter: wrote %s bytes, got %s ack'd", expect, sb)
}
return br, nil
}
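For orientation, a hedged illustration (not taken from the original source) of what the parts slice describes after three loop iterations over a 2.5 MiB input; the blobrefs are placeholders built with SHA1FromString rather than digests of real chunk bytes:

// Hypothetical contents of parts for a 1 MiB + 1 MiB + 512 KiB input.
parts := []BytesPart{
    {BlobRef: blobref.SHA1FromString("chunk 1 contents"), Size: 1 << 20},
    {BlobRef: blobref.SHA1FromString("chunk 2 contents"), Size: 1 << 20},
    {BlobRef: blobref.SHA1FromString("chunk 3 contents"), Size: 512 << 10},
}
// PopulateParts records these spans in fileMap, and the resulting JSON schema
// blob is then uploaded under its own SHA-1 blobref, as the code above shows.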
Example 2: uploadBytes
// uploadBytes populates bb (a builder of either type "bytes" or
// "file", which is a superset of "bytes"), sets it to the provided
// size, and populates with provided spans. The bytes or file schema
// blob is uploaded and its blobref is returned.
func uploadBytes(bs blobserver.StatReceiver, bb *Builder, size int64, s []span) *uploadBytesFuture {
future := newUploadBytesFuture()
parts := []BytesPart{}
addBytesParts(bs, &parts, s, future)
if err := bb.PopulateParts(size, parts); err != nil {
future.errc <- err
return future
}
// Hack until camlistore.org/issue/102 is fixed. If we happen to upload
// the "file" schema before any of its parts arrive, then the indexer
// can get confused. So wait on the parts before, and then upload
// the "file" blob afterwards.
if bb.Type() == "file" {
future.errc <- nil
_, err := future.Get() // may not be nil, if children parts failed
future = newUploadBytesFuture()
if err != nil {
future.errc <- err
return future
}
}
json := bb.Blob().JSON()
br := blobref.SHA1FromString(json)
future.br = br
go func() {
_, err := uploadString(bs, br, json)
future.errc <- err
}()
return future
}
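A hedged usage sketch for the returned future (uploadBytesFuture is internal to the schema package, so this is an in-package fragment, not a public API): Get() is assumed to block until the schema blob and all child uploads are acknowledged and to return the blobref that was computed with SHA1FromString.

future := uploadBytes(bs, bb, size, spans)
br, err := future.Get() // waits for the schema blob and its children
if err != nil {
    return nil, err
}
log.Printf("bytes/file schema blob uploaded as %v", br)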
Example 3: writeFileMapOld
// This is the simple 1MB chunk version. The rolling checksum version is below.
func writeFileMapOld(bs blobserver.StatReceiver, file *Builder, r io.Reader) (*blobref.BlobRef, error) {
parts, size := []BytesPart{}, int64(0)
var buf bytes.Buffer
for {
buf.Reset()
n, err := io.Copy(&buf, io.LimitReader(r, maxBlobSize))
if err != nil {
return nil, err
}
if n == 0 {
break
}
hash := blobref.NewHash()
io.Copy(hash, bytes.NewReader(buf.Bytes()))
br := blobref.FromHash(hash)
hasBlob, err := serverHasBlob(bs, br)
if err != nil {
return nil, err
}
if !hasBlob {
sb, err := bs.ReceiveBlob(br, &buf)
if err != nil {
return nil, err
}
if expect := (blobref.SizedBlobRef{br, n}); !expect.Equal(sb) {
return nil, fmt.Errorf("schema/filewriter: wrote %s bytes, got %s ack'd", expect, sb)
}
}
size += n
parts = append(parts, BytesPart{
BlobRef: br,
Size: uint64(n),
Offset: 0, // into BlobRef to read from (not of dest)
})
}
err := file.PopulateParts(size, parts)
if err != nil {
return nil, err
}
json := file.Blob().JSON()
if err != nil {
return nil, err
}
br := blobref.SHA1FromString(json)
sb, err := bs.ReceiveBlob(br, strings.NewReader(json))
if err != nil {
return nil, err
}
if expect := (blobref.SizedBlobRef{br, int64(len(json))}); !expect.Equal(sb) {
return nil, fmt.Errorf("schema/filewriter: wrote %s bytes, got %s ack'd", expect, sb)
}
return br, nil
}
Example 4: SignerPublicKeyBlobref
// TODO: move to config package?
func SignerPublicKeyBlobref() *blobref.BlobRef {
configOnce.Do(parseConfig)
key := "keyId"
keyId, ok := config[key].(string)
if !ok {
log.Printf("No key %q in JSON configuration file %q; have you run \"camput init\"?", key, ConfigFilePath())
return nil
}
keyRing, hasKeyRing := config["secretRing"].(string)
if !hasKeyRing {
if fn := osutil.IdentitySecretRing(); fileExists(fn) {
keyRing = fn
} else if fn := jsonsign.DefaultSecRingPath(); fileExists(fn) {
keyRing = fn
} else {
log.Printf("Couldn't find keyId %q; no 'secretRing' specified in config file, and no standard secret ring files exist.")
return nil
}
}
entity, err := jsonsign.EntityFromSecring(keyId, keyRing)
if err != nil {
log.Printf("Couldn't find keyId %q in secret ring: %v", keyId, err)
return nil
}
armored, err := jsonsign.ArmoredPublicKey(entity)
if err != nil {
log.Printf("Error serializing public key: %v", err)
return nil
}
selfPubKeyDir, ok := config["selfPubKeyDir"].(string)
if !ok {
log.Printf("No 'selfPubKeyDir' defined in %q", ConfigFilePath())
return nil
}
fi, err := os.Stat(selfPubKeyDir)
if err != nil || !fi.IsDir() {
log.Printf("selfPubKeyDir of %q doesn't exist or not a directory", selfPubKeyDir)
return nil
}
br := blobref.SHA1FromString(armored)
pubFile := filepath.Join(selfPubKeyDir, br.String()+".camli")
log.Printf("key file: %q", pubFile)
fi, err = os.Stat(pubFile)
if err != nil {
err = ioutil.WriteFile(pubFile, []byte(armored), 0644)
if err != nil {
log.Printf("Error writing public key to %q: %v", pubFile, err)
return nil
}
}
return br
}
Example 5: uploadString
func uploadString(bs blobserver.StatReceiver, s string) (*blobref.BlobRef, error) {
br := blobref.SHA1FromString(s)
hasIt, err := serverHasBlob(bs, br)
if err != nil {
return nil, err
}
if hasIt {
return br, nil
}
_, err = bs.ReceiveBlob(br, strings.NewReader(s))
if err != nil {
return nil, err
}
return br, nil
}
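Because the blobref is derived from the content itself, uploadString is effectively idempotent: a second call with the same string is answered by the stat check and never reaches ReceiveBlob. A hedged in-package fragment (the JSON literal is made up, and errors are ignored only to keep the sketch short):

json := `{"camliVersion": 1, "camliType": "bytes"}`
first, _ := uploadString(bs, json)  // stat misses, blob is received
second, _ := uploadString(bs, json) // stat hits, ReceiveBlob is skipped
fmt.Println(first.String() == second.String()) // true; both equal blobref.SHA1FromString(json)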
Example 6: writeFileChunks
func writeFileChunks(bs blobserver.StatReceiver, file *Builder, r io.Reader) (n int64, spans []span, outerr error) {
src := &noteEOFReader{r: r}
bufr := bufio.NewReaderSize(src, bufioReaderSize)
spans = []span{} // the tree of spans, cut on interesting rollsum boundaries
rs := rollsum.New()
var last int64
var buf bytes.Buffer
blobSize := 0 // of the next blob being built, should be same as buf.Len()
const chunksInFlight = 32 // at ~64 KB chunks, this is ~2MB memory per file
gatec := make(chan bool, chunksInFlight)
firsterrc := make(chan error, 1)
// uploadLastSpan runs in the same goroutine as the loop below and is responsible for
// starting uploading the contents of the buf. It returns false if there's been
// an error and the loop below should be stopped.
uploadLastSpan := func() bool {
chunk := buf.String()
buf.Reset()
br := blobref.SHA1FromString(chunk)
spans[len(spans)-1].br = br
select {
case outerr = <-firsterrc:
return false
default:
// No error seen so far, continue.
}
gatec <- true
go func() {
if _, err := uploadString(bs, br, chunk); err != nil {
select {
case firsterrc <- err:
default:
}
}
<-gatec
}()
return true
}
for {
c, err := bufr.ReadByte()
if err == io.EOF {
if n != last {
spans = append(spans, span{from: last, to: n})
if !uploadLastSpan() {
return
}
}
break
}
if err != nil {
return 0, nil, err
}
buf.WriteByte(c)
n++
blobSize++
rs.Roll(c)
var bits int
onRollSplit := rs.OnSplit()
switch {
case blobSize == maxBlobSize:
bits = 20 // arbitrary node weight; 1<<20 == 1MB
case src.sawEOF:
// Don't split. End is coming soon enough.
continue
case onRollSplit && n > firstChunkSize && blobSize > tooSmallThreshold:
bits = rs.Bits()
case n == firstChunkSize:
bits = 18 // 1 << 18 == 256KB
default:
// Don't split.
continue
}
blobSize = 0
// Take any spans from the end of the spans slice that
// have a smaller 'bits' score and make them children
// of this node.
var children []span
childrenFrom := len(spans)
for childrenFrom > 0 && spans[childrenFrom-1].bits < bits {
childrenFrom--
}
if nCopy := len(spans) - childrenFrom; nCopy > 0 {
children = make([]span, nCopy)
copy(children, spans[childrenFrom:])
spans = spans[:childrenFrom]
}
spans = append(spans, span{from: last, to: n, bits: bits, children: children})
last = n
if !uploadLastSpan() {
return
}
}
// Loop was already hit earlier.
//......... part of the code omitted here .........
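The interesting part of example 6 is the rolling-checksum split decision. Below is a minimal, self-contained sketch of that idea, assuming the rollsum API used above lives at camlistore.org/pkg/rollsum; the 64 KiB and 1 MiB thresholds are illustrative, not the constants the schema package uses.

package main

import (
    "bufio"
    "fmt"
    "io"
    "os"

    "camlistore.org/pkg/rollsum"
)

func main() {
    rs := rollsum.New()
    r := bufio.NewReader(os.Stdin)
    chunk := 0
    for {
        c, err := r.ReadByte()
        if err == io.EOF {
            break
        }
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
        chunk++
        rs.Roll(c)
        // Cut when the checksum says so (and the chunk is not tiny),
        // or when the chunk hits an upper bound, mirroring the switch
        // statement in example 6.
        if (rs.OnSplit() && chunk > 64<<10) || chunk == 1<<20 {
            fmt.Printf("chunk of %d bytes (bits=%d)\n", chunk, rs.Bits())
            chunk = 0
        }
    }
    if chunk > 0 {
        fmt.Printf("final chunk of %d bytes\n", chunk)
    }
}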
Example 7: RunCommand
func (c *initCmd) RunCommand(args []string) error {
if len(args) > 0 {
return cmdmain.ErrUsage
}
if c.newKey && c.gpgkey != "" {
log.Fatal("--newkey and --gpgkey are mutually exclusive")
}
blobDir := path.Join(osutil.CamliConfigDir(), "keyblobs")
os.Mkdir(osutil.CamliConfigDir(), 0700)
os.Mkdir(blobDir, 0700)
var keyId string
var err error
secRing := osutil.IdentitySecretRing()
if c.newKey {
keyId, err = jsonsign.GenerateNewSecRing(secRing)
if err != nil {
return err
}
} else {
keyId, err = c.keyId(secRing)
if err != nil {
return err
}
}
if os.Getenv("GPG_AGENT_INFO") == "" {
log.Printf("No GPG_AGENT_INFO found in environment; you should setup gnupg-agent. camput might be annoying otherwise, if your private key is encrypted.")
}
pubArmor, err := c.getPublicKeyArmored(keyId)
if err != nil {
return err
}
bref := blobref.SHA1FromString(string(pubArmor))
keyBlobPath := path.Join(blobDir, bref.String()+".camli")
if err = ioutil.WriteFile(keyBlobPath, pubArmor, 0644); err != nil {
log.Fatalf("Error writing public key blob to %q: %v", keyBlobPath, err)
}
if ok, err := jsonsign.VerifyPublicKeyFile(keyBlobPath, keyId); !ok {
log.Fatalf("Error verifying public key at %q: %v", keyBlobPath, err)
}
log.Printf("Your Camlistore identity (your GPG public key's blobref) is: %s", bref.String())
configFilePath := osutil.UserClientConfigPath()
_, err = os.Stat(configFilePath)
if err == nil {
log.Fatalf("Config file %q already exists; quitting without touching it.", configFilePath)
}
if f, err := os.OpenFile(configFilePath, os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0600); err == nil {
defer f.Close()
m := make(map[string]interface{})
m["keyId"] = keyId // TODO(bradfitz): make this 'identity' to match server config?
m["publicKeyBlobref"] = bref.String() // TODO(bradfitz): not used anymore?
m["server"] = "http://localhost:3179/"
m["selfPubKeyDir"] = blobDir
m["auth"] = "localhost"
jsonBytes, err := json.MarshalIndent(m, "", " ")
if err != nil {
log.Fatalf("JSON serialization error: %v", err)
}
_, err = f.Write(jsonBytes)
if err != nil {
log.Fatalf("Error writing to %q: %v", configFilePath, err)
}
log.Printf("Wrote %q; modify as necessary.", configFilePath)
}
return nil
}
Example 8: NewUploadHandleFromString
func NewUploadHandleFromString(data string) *UploadHandle {
bref := blobref.SHA1FromString(data)
r := strings.NewReader(data)
return &UploadHandle{BlobRef: bref, Size: int64(len(data)), Contents: r}
}
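A hedged usage sketch, assuming NewUploadHandleFromString lives in the client package (as the surrounding UploadHandle code suggests) and that cl is an already-configured *client.Client:

h := client.NewUploadHandleFromString(`{"camliVersion": 1, "camliType": "bytes"}`)
pr, err := cl.Upload(h)
if err != nil {
    log.Fatalf("upload failed: %v", err)
}
log.Printf("uploaded %v (skipped=%v)", pr.BlobRef, pr.Skipped)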
Example 9: RunCommand
func (c *initCmd) RunCommand(_ *Uploader, args []string) error {
if len(args) > 0 {
return ErrUsage
}
blobDir := path.Join(osutil.CamliConfigDir(), "keyblobs")
os.Mkdir(osutil.CamliConfigDir(), 0700)
os.Mkdir(blobDir, 0700)
keyId, err := c.keyId()
if err != nil {
return err
}
if os.Getenv("GPG_AGENT_INFO") == "" {
log.Printf("No GPG_AGENT_INFO found in environment; you should setup gnupg-agent. camput might be annoying otherwise, if your private key is encrypted.")
}
pubArmor, err := c.getPublicKeyArmored(keyId)
if err != nil {
return err
}
bref := blobref.SHA1FromString(string(pubArmor))
keyBlobPath := path.Join(blobDir, bref.String()+".camli")
if err = ioutil.WriteFile(keyBlobPath, pubArmor, 0644); err != nil {
log.Fatalf("Error writing public key blob to %q: %v", keyBlobPath, err)
}
if ok, err := jsonsign.VerifyPublicKeyFile(keyBlobPath, keyId); !ok {
log.Fatalf("Error verifying public key at %q: %v", keyBlobPath, err)
}
log.Printf("Your Camlistore identity (your GPG public key's blobref) is: %s", bref.String())
_, err = os.Stat(client.ConfigFilePath())
if err == nil {
log.Fatalf("Config file %q already exists; quitting without touching it.", client.ConfigFilePath())
}
if f, err := os.OpenFile(client.ConfigFilePath(), os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0600); err == nil {
defer f.Close()
m := make(map[string]interface{})
m["keyId"] = keyId // TODO(bradfitz): make this 'identity' to match server config?
m["publicKeyBlobref"] = bref.String() // TODO(bradfitz): not used anymore?
m["blobServer"] = "http://localhost:3179/"
m["selfPubKeyDir"] = blobDir
m["auth"] = "none"
blobPut := make([]map[string]string, 1)
blobPut[0] = map[string]string{
"alias": "local",
"host": "http://localhost:3179/",
"password": "",
}
m["blobPut"] = blobPut
blobGet := make([]map[string]string, 2)
blobGet[0] = map[string]string{
"alias": "keyblobs",
"path": blobDir,
}
blobGet[1] = map[string]string{
"alias": "local",
"host": "http://localhost:3179/",
"password": "",
}
m["blobGet"] = blobGet
jsonBytes, err := json.MarshalIndent(m, "", " ")
if err != nil {
log.Fatalf("JSON serialization error: %v", err)
}
_, err = f.Write(jsonBytes)
if err != nil {
log.Fatalf("Error writing to %q: %v", client.ConfigFilePath(), err)
}
log.Printf("Wrote %q; modify as necessary.", client.ConfigFilePath())
}
return nil
}
Example 10: uploadNodeRegularFile
func (up *Uploader) uploadNodeRegularFile(n *node) (*client.PutResult, error) {
// TODO(mpl): maybe break this func into more maintainable pieces?
filebb := schema.NewCommonFileMap(n.fullPath, n.fi)
filebb.SetType("file")
file, err := up.open(n.fullPath)
if err != nil {
return nil, err
}
defer file.Close()
if up.fileOpts.exifTime {
ra, ok := file.(io.ReaderAt)
if !ok {
return nil, errors.New("Error asserting local file to io.ReaderAt")
}
modtime, err := schema.FileTime(ra)
if err != nil {
log.Printf("warning: getting time from EXIF failed for %v: %v", n.fullPath, err)
} else {
filebb.SetModTime(modtime)
}
}
var (
size = n.fi.Size()
fileContents io.Reader = io.LimitReader(file, size)
br *blobref.BlobRef // of file schemaref
sum string // sha1 hashsum of the file to upload
pr *client.PutResult // of the final "file" schema blob
)
const dupCheckThreshold = 256 << 10
if size > dupCheckThreshold {
sumRef, err := up.wholeFileDigest(n.fullPath)
if err == nil {
sum = sumRef.String()
ok := false
pr, ok = up.fileMapFromDuplicate(up.statReceiver(n), filebb, sum)
if ok {
br = pr.BlobRef
noteFileUploaded(n.fullPath, !pr.Skipped)
if up.fileOpts.wantVivify() {
// we can return early in that case, because the other options
// are disallowed in the vivify case.
return pr, nil
}
}
}
}
if up.fileOpts.wantVivify() {
// If vivify wasn't already done in fileMapFromDuplicate.
err := schema.WriteFileChunks(up.statReceiver(n), filebb, fileContents)
if err != nil {
return nil, err
}
json, err := filebb.JSON()
if err != nil {
return nil, err
}
br = blobref.SHA1FromString(json)
h := &client.UploadHandle{
BlobRef: br,
Size: int64(len(json)),
Contents: strings.NewReader(json),
Vivify: true,
}
pr, err = up.Upload(h)
if err != nil {
return nil, err
}
noteFileUploaded(n.fullPath, true)
return pr, nil
}
if br == nil {
// br still nil means fileMapFromDuplicate did not find the file on the server,
// and the file has not just been uploaded subsequently to a vivify request.
// So we do the full file + file schema upload here.
if sum == "" && up.fileOpts.wantFilePermanode() {
fileContents = &trackDigestReader{r: fileContents}
}
br, err = schema.WriteFileMap(up.statReceiver(n), filebb, fileContents)
if err != nil {
return nil, err
}
}
// TODO(mpl): test that none of these claims get uploaded if they've already been done
if up.fileOpts.wantFilePermanode() {
if td, ok := fileContents.(*trackDigestReader); ok {
sum = td.Sum()
}
// Use a fixed time value for signing; not using modtime
// so two identical files don't have different modtimes?
// TODO(bradfitz): consider this more?
permaNodeSigTime := time.Unix(0, 0)
permaNode, err := up.UploadPlannedPermanode(sum, permaNodeSigTime)
if err != nil {
return nil, fmt.Errorf("Error uploading permanode for node %v: %v", n, err)
}
//......... part of the code omitted here .........
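trackDigestReader is referenced in example 10 but not defined in this excerpt. A hedged sketch of what such a helper plausibly does (the real type in camput may differ; io, hash, crypto/sha1 and fmt imports are assumed): it hashes bytes as they stream through the reader, so the "sha1-..." sum for the planned permanode is available without reading the file twice.

// Sketch only; the field and method names mirror the usage above (r, Sum).
type trackDigestReader struct {
    r io.Reader
    h hash.Hash
}

func (t *trackDigestReader) Read(p []byte) (int, error) {
    if t.h == nil {
        t.h = sha1.New()
    }
    n, err := t.r.Read(p)
    t.h.Write(p[:n])
    return n, err
}

// Sum returns the digest in the "sha1-<hex>" form used for sum above.
func (t *trackDigestReader) Sum() string {
    return fmt.Sprintf("sha1-%x", t.h.Sum(nil))
}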
Example 11: uploadNodeRegularFile
func (up *Uploader) uploadNodeRegularFile(n *node) (*client.PutResult, error) {
filebb := schema.NewCommonFileMap(n.fullPath, n.fi)
filebb.SetType("file")
file, err := up.open(n.fullPath)
if err != nil {
return nil, err
}
defer file.Close()
if up.fileOpts.exifTime {
ra, ok := file.(io.ReaderAt)
if !ok {
return nil, errors.New("Error asserting local file to io.ReaderAt")
}
modtime, err := schema.FileTime(ra)
if err != nil {
log.Printf("warning: getting time from EXIF failed for %v: %v", n.fullPath, err)
} else {
filebb.SetModTime(modtime)
}
}
var (
size = n.fi.Size()
fileContents io.Reader = io.LimitReader(file, size)
br *blobref.BlobRef // of file schemaref
sum string // sha1 hashsum of the file to upload
pr *client.PutResult // of the final "file" schema blob
)
const dupCheckThreshold = 256 << 10
if size > dupCheckThreshold {
sumRef, err := up.wholeFileDigest(n.fullPath)
if err == nil {
sum = sumRef.String()
ok := false
pr, ok = up.fileMapFromDuplicate(up.statReceiver(n), filebb, sum)
if ok {
br = pr.BlobRef
client.NoteFileUploaded(n.fullPath, !pr.Skipped)
if up.fileOpts.wantVivify() {
// we can return early in that case, because the other options
// are disallowed in the vivify case.
return pr, nil
}
}
}
}
if up.fileOpts.wantVivify() {
// If vivify wasn't already done in fileMapFromDuplicate.
err := schema.WriteFileChunks(up.statReceiver(n), filebb, fileContents)
if err != nil {
return nil, err
}
json, err := filebb.JSON()
if err != nil {
return nil, err
}
br = blobref.SHA1FromString(json)
h := &client.UploadHandle{
BlobRef: br,
Size: int64(len(json)),
Contents: strings.NewReader(json),
Vivify: true,
}
pr, err = up.Upload(h)
if err != nil {
return nil, err
}
client.NoteFileUploaded(n.fullPath, true)
return pr, nil
}
if br == nil {
// br still nil means fileMapFromDuplicate did not find the file on the server,
// and the file has not just been uploaded subsequently to a vivify request.
// So we do the full file + file schema upload here.
if sum == "" && up.fileOpts.wantFilePermanode() {
fileContents = &trackDigestReader{r: fileContents}
}
br, err = schema.WriteFileMap(up.statReceiver(n), filebb, fileContents)
if err != nil {
return nil, err
}
}
// The work for those planned permanodes (and the claims) is redone
// everytime we get here (i.e past the stat cache). However, they're
// caught by the have cache, so they won't be reuploaded for nothing
// at least.
if up.fileOpts.wantFilePermanode() {
if td, ok := fileContents.(*trackDigestReader); ok {
sum = td.Sum()
}
// claimTime is both the time of the "claimDate" in the
// JSON claim, as well as the date in the OpenPGP
// header.
// TODO(bradfitz): this is a little clumsy to do by hand.
// There should probably be a method on *Uploader to do this
// from an unsigned schema map. Maybe ditch the schema.Claimer
//......... part of the code omitted here .........
Example 12: genLowLevelConfig
// genLowLevelConfig returns a low-level config from a high-level config.
func genLowLevelConfig(conf *Config) (lowLevelConf *Config, err error) {
var (
baseURL = conf.OptionalString("baseURL", "")
listen = conf.OptionalString("listen", "")
auth = conf.RequiredString("auth")
keyId = conf.RequiredString("identity")
secretRing = conf.RequiredString("identitySecretRing")
blobPath = conf.RequiredString("blobPath")
tlsOn = conf.OptionalBool("https", false)
tlsCert = conf.OptionalString("HTTPSCertFile", "")
tlsKey = conf.OptionalString("HTTPSKeyFile", "")
dbname = conf.OptionalString("dbname", "")
mysql = conf.OptionalString("mysql", "")
postgres = conf.OptionalString("postgres", "")
mongo = conf.OptionalString("mongo", "")
_ = conf.OptionalList("replicateTo")
s3 = conf.OptionalString("s3", "")
publish = conf.OptionalObject("publish")
)
if err := conf.Validate(); err != nil {
return nil, err
}
obj := jsonconfig.Obj{}
if tlsOn {
if (tlsCert != "") != (tlsKey != "") {
return nil, errors.New("Must set both TLSCertFile and TLSKeyFile (or neither to generate a self-signed cert)")
}
if tlsCert != "" {
obj["TLSCertFile"] = tlsCert
obj["TLSKeyFile"] = tlsKey
} else {
obj["TLSCertFile"] = "config/selfgen_cert.pem"
obj["TLSKeyFile"] = "config/selfgen_key.pem"
}
}
if baseURL != "" {
if strings.HasSuffix(baseURL, "/") {
baseURL = baseURL[:len(baseURL)-1]
}
obj["baseURL"] = baseURL
}
if listen != "" {
obj["listen"] = listen
}
obj["https"] = tlsOn
obj["auth"] = auth
if dbname == "" {
username := os.Getenv("USER")
if username == "" {
return nil, fmt.Errorf("USER env var not set; needed to define dbname")
}
dbname = "camli" + username
}
var indexerPath string
switch {
case mongo != "" && mysql != "" || mongo != "" && postgres != "" || mysql != "" && postgres != "":
return nil, fmt.Errorf("You can only pick one of the db engines (mongo, mysql, postgres).")
case mysql != "":
indexerPath = "/index-mysql/"
case postgres != "":
indexerPath = "/index-postgres/"
case mongo != "":
indexerPath = "/index-mongo/"
default:
indexerPath = "/index-mem/"
}
entity, err := jsonsign.EntityFromSecring(keyId, secretRing)
if err != nil {
return nil, err
}
armoredPublicKey, err := jsonsign.ArmoredPublicKey(entity)
if err != nil {
return nil, err
}
prefixesParams := &configPrefixesParams{
secretRing: secretRing,
keyId: keyId,
indexerPath: indexerPath,
blobPath: blobPath,
searchOwner: blobref.SHA1FromString(armoredPublicKey),
}
prefixes := genLowLevelPrefixes(prefixesParams)
cacheDir := filepath.Join(blobPath, "/cache")
if err := os.MkdirAll(cacheDir, 0700); err != nil {
return nil, fmt.Errorf("Could not create blobs dir %s: %v", cacheDir, err)
}
published := []interface{}{}
if publish != nil {
published, err = addPublishedConfig(prefixes, publish)
if err != nil {
return nil, fmt.Errorf("Could not generate config for published: %v", err)
//......... part of the code omitted here .........
Example 13: uploadNodeRegularFile
func (up *Uploader) uploadNodeRegularFile(n *node) (*client.PutResult, error) {
m := schema.NewCommonFileMap(n.fullPath, n.fi)
m["camliType"] = "file"
file, err := up.open(n.fullPath)
if err != nil {
return nil, err
}
defer file.Close()
size := n.fi.Size()
var fileContents io.Reader = io.LimitReader(file, size)
if up.fileOpts.wantVivify() {
err := schema.WriteFileChunks(up.statReceiver(), m, fileContents)
if err != nil {
return nil, err
}
json, err := m.JSON()
if err != nil {
return nil, err
}
bref := blobref.SHA1FromString(json)
h := &client.UploadHandle{
BlobRef: bref,
Size: int64(len(json)),
Contents: strings.NewReader(json),
Vivify: true,
}
return up.Upload(h)
}
var (
blobref *blobref.BlobRef // of file schemaref
sum string // "sha1-xxxxx"
)
const dupCheckThreshold = 256 << 10
if size > dupCheckThreshold {
sumRef, err := up.wholeFileDigest(n.fullPath)
if err == nil {
sum = sumRef.String()
if ref, ok := up.fileMapFromDuplicate(up.statReceiver(), m, sum); ok {
blobref = ref
}
}
}
if blobref == nil {
if sum == "" && up.fileOpts.wantFilePermanode() {
fileContents = &trackDigestReader{r: fileContents}
}
blobref, err = schema.WriteFileMap(up.statReceiver(), m, fileContents)
if err != nil {
return nil, err
}
}
// TODO(mpl): test that none of these claims get uploaded if they've already been done
if up.fileOpts.wantFilePermanode() {
if td, ok := fileContents.(*trackDigestReader); ok {
sum = td.Sum()
}
// Use a fixed time value for signing; not using modtime
// so two identical files don't have different modtimes?
// TODO(bradfitz): consider this more?
permaNodeSigTime := time.Unix(0, 0)
permaNode, err := up.UploadPlannedPermanode(sum, permaNodeSigTime)
if err != nil {
return nil, fmt.Errorf("Error uploading permanode for node %v: %v", n, err)
}
handleResult("node-permanode", permaNode, nil)
// claimTime is both the time of the "claimDate" in the
// JSON claim, as well as the date in the OpenPGP
// header.
// TODO(bradfitz): this is a little clumsy to do by hand.
// There should probably be a method on *Uploader to do this
// from an unsigned schema map. Maybe ditch the schema.Claimer
// type and just have the Uploader override the claimDate.
claimTime := n.fi.ModTime()
contentAttr := schema.NewSetAttributeClaim(permaNode.BlobRef, "camliContent", blobref.String())
contentAttr.SetClaimDate(claimTime)
signed, err := up.SignMap(contentAttr, claimTime)
if err != nil {
return nil, fmt.Errorf("Failed to sign content claim for node %v: %v", n, err)
}
put, err := up.uploadString(signed)
if err != nil {
return nil, fmt.Errorf("Error uploading permanode's attribute for node %v: %v", n, err)
}
handleResult("node-permanode-contentattr", put, nil)
if tags := up.fileOpts.tags(); len(tags) > 0 {
// TODO(mpl): do these claims concurrently, not in series
for _, tag := range tags {
m := schema.NewAddAttributeClaim(permaNode.BlobRef, "tag", tag)
m.SetClaimDate(claimTime)
// TODO(mpl): verify that SetClaimDate does modify the GPG signature date of the claim
signed, err := up.SignMap(m, claimTime)
//......... part of the code omitted here .........
Example 14: vivify
// vivify verifies that all the chunks for the file described by fileblob are on the blobserver.
// It makes a planned permanode, signs it, and uploads it. It finally makes a camliContent claim
// on that permanode for fileblob, signs it, and uploads it to the blobserver.
func vivify(blobReceiver blobserver.BlobReceiveConfiger, fileblob blobref.SizedBlobRef) error {
sf, ok := blobReceiver.(blobref.StreamingFetcher)
if !ok {
return fmt.Errorf("BlobReceiver is not a StreamingFetcher")
}
fetcher := blobref.SeekerFromStreamingFetcher(sf)
fr, err := schema.NewFileReader(fetcher, fileblob.BlobRef)
if err != nil {
return fmt.Errorf("Filereader error for blobref %v: %v", fileblob.BlobRef.String(), err)
}
defer fr.Close()
h := sha1.New()
n, err := io.Copy(h, fr)
if err != nil {
return fmt.Errorf("Could not read all file of blobref %v: %v", fileblob.BlobRef.String(), err)
}
if n != fr.Size() {
return fmt.Errorf("Could not read all file of blobref %v. Wanted %v, got %v", fileblob.BlobRef.String(), fr.Size(), n)
}
config := blobReceiver.Config()
if config == nil {
return errors.New("blobReceiver has no config")
}
hf := config.HandlerFinder
if hf == nil {
return errors.New("blobReceiver config has no HandlerFinder")
}
JSONSignRoot, sh, err := hf.FindHandlerByType("jsonsign")
// TODO(mpl): second check should not be necessary, and yet it happens. Figure it out.
if err != nil || sh == nil {
return errors.New("jsonsign handler not found")
}
sigHelper, ok := sh.(*signhandler.Handler)
if !ok {
return errors.New("handler is not a JSON signhandler")
}
discoMap := sigHelper.DiscoveryMap(JSONSignRoot)
publicKeyBlobRef, ok := discoMap["publicKeyBlobRef"].(string)
if !ok {
return fmt.Errorf("Discovery: json decoding error: %v", err)
}
unsigned := schema.NewHashPlannedPermanode(h)
unsigned["camliSigner"] = publicKeyBlobRef
signed, err := sigHelper.SignMap(unsigned)
if err != nil {
return fmt.Errorf("Signing permanode %v: %v", signed, err)
}
signedPerm := blobref.SHA1FromString(signed)
_, err = blobReceiver.ReceiveBlob(signedPerm, strings.NewReader(signed))
if err != nil {
return fmt.Errorf("While uploading signed permanode %v: %v", signed, err)
}
contentAttr := schema.NewSetAttributeClaim(signedPerm, "camliContent", fileblob.BlobRef.String())
claimDate, err := time.Parse(time.RFC3339, fr.FileSchema().UnixMtime)
contentAttr.SetClaimDate(claimDate)
contentAttr["camliSigner"] = publicKeyBlobRef
signed, err = sigHelper.SignMap(contentAttr)
if err != nil {
return fmt.Errorf("Signing camliContent claim: %v", err)
}
signedClaim := blobref.SHA1FromString(signed)
_, err = blobReceiver.ReceiveBlob(signedClaim, strings.NewReader(signed))
if err != nil {
return fmt.Errorf("While uploading signed camliContent claim %v: %v", signed, err)
}
return nil
}
Example 15: GenLowLevelConfig
func GenLowLevelConfig(conf *Config) (lowLevelConf *Config, err error) {
var (
baseUrl = conf.RequiredString("listen")
auth = conf.RequiredString("auth")
keyId = conf.RequiredString("identity")
secretRing = conf.RequiredString("identitySecretRing")
blobPath = conf.RequiredString("blobPath")
tlsOn = conf.OptionalBool("TLS", false)
dbname = conf.OptionalString("dbname", "")
mysql = conf.OptionalString("mysql", "")
mongo = conf.OptionalString("mongo", "")
_ = conf.OptionalList("replicateTo")
_ = conf.OptionalString("s3", "")
publish = conf.OptionalObject("publish")
)
if err := conf.Validate(); err != nil {
return nil, err
}
obj := jsonconfig.Obj{}
scheme := "http"
if tlsOn {
scheme = "https"
obj["TLSCertFile"] = "config/selfgen_cert.pem"
obj["TLSKeyFile"] = "config/selfgen_key.pem"
}
obj["baseURL"] = scheme + "://" + baseUrl
obj["https"] = tlsOn
obj["auth"] = auth
if dbname == "" {
username := os.Getenv("USER")
if username == "" {
return nil, fmt.Errorf("USER env var not set; needed to define dbname")
}
dbname = "camli" + username
}
var indexerPath string
switch {
case mongo != "" && mysql != "":
return nil, fmt.Errorf("Cannot have both mysql and mongo in config, pick one")
case mysql != "":
indexerPath = "/index-mysql/"
case mongo != "":
indexerPath = "/index-mongo/"
default:
indexerPath = "/index-mem/"
}
entity, err := jsonsign.EntityFromSecring(keyId, secretRing)
if err != nil {
return nil, err
}
armoredPublicKey, err := jsonsign.ArmoredPublicKey(entity)
if err != nil {
return nil, err
}
prefixesParams := &configPrefixesParams{
secretRing: secretRing,
keyId: keyId,
indexerPath: indexerPath,
blobPath: blobPath,
searchOwner: blobref.SHA1FromString(armoredPublicKey),
}
prefixes := genLowLevelPrefixes(prefixesParams)
cacheDir := filepath.Join(blobPath, "/cache")
if err := os.MkdirAll(cacheDir, 0700); err != nil {
return nil, fmt.Errorf("Could not create blobs dir %s: %v", cacheDir, err)
}
published := []interface{}{}
if publish != nil {
published, err = addPublishedConfig(&prefixes, publish)
if err != nil {
return nil, fmt.Errorf("Could not generate config for published: %v", err)
}
}
addUIConfig(&prefixes, "/ui/", published)
if mysql != "" {
addMysqlConfig(&prefixes, dbname, mysql)
}
if mongo != "" {
addMongoConfig(&prefixes, dbname, mongo)
}
if indexerPath == "/index-mem/" {
addMemindexConfig(&prefixes)
}
obj["prefixes"] = (map[string]interface{})(prefixes)
lowLevelConf = &Config{
Obj: obj, // the embedded jsonconfig.Obj field
configPath: conf.configPath,
}
return lowLevelConf, nil
//......... part of the code omitted here .........