This article collects and summarizes typical usage examples of the Golang Batch.CPut method from github.com/cockroachdb/cockroach/client. If you are wondering what exactly Batch.CPut does, how to call it, or want to see it used in practice, the curated method examples below may help. You can also look further into the enclosing type, github.com/cockroachdb/cockroach/client.Batch, for more usage examples.
Fifteen code examples of Batch.CPut are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Golang code examples.
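Before the examples, here is a minimal sketch of the pattern they all share: queue one or more CPut (conditional put) operations on a client.Batch and run the batch through a DB or transaction. The helper name createIfAbsent and the placeholder key/value are hypothetical; only client.Batch, Batch.CPut, and DB.Run are taken from the examples below.

package example

import "github.com/cockroachdb/cockroach/client"

// createIfAbsent writes value at key only if the key does not exist yet.
// CPut's third argument is the expected current value: nil means "expect the
// key to be absent", so if another writer created the key first, running the
// batch fails with a condition-failed error instead of overwriting it.
func createIfAbsent(db *client.DB, key, value interface{}) error {
	b := client.Batch{}
	b.CPut(key, value, nil)
	return db.Run(&b)
}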
Example 1: writeDescriptor
// writeDescriptor takes a Table or Database descriptor and writes it
// if needed, incrementing the descriptor counter.
func (p *planner) writeDescriptor(plainKey descriptorKey, descriptor descriptorProto, ifNotExists bool) error {
key := plainKey.Key()
// Check whether key exists.
gr, err := p.txn.Get(key)
if err != nil {
return err
}
if gr.Exists() {
if ifNotExists {
// Noop.
return nil
}
// Key exists, but we don't want it to: error out.
return fmt.Errorf("%s %q already exists", descriptor.TypeName(), plainKey.Name())
}
// Increment unique descriptor counter.
if ir, err := p.txn.Inc(keys.DescIDGenerator, 1); err == nil {
descriptor.SetID(structured.ID(ir.ValueInt() - 1))
} else {
return err
}
// TODO(pmattis): The error currently returned below is likely going to be
// difficult to interpret.
// TODO(pmattis): Need to handle if-not-exists here as well.
descKey := structured.MakeDescMetadataKey(descriptor.GetID())
b := client.Batch{}
b.CPut(key, descKey, nil)
b.CPut(descKey, descriptor, nil)
return p.txn.Run(&b)
}
Example 2: RenameDatabase
// RenameDatabase renames the database.
// Privileges: security.RootUser user.
// Notes: postgres requires superuser, db owner, or "CREATEDB".
// mysql >= 5.1.23 does not allow database renames.
func (p *planner) RenameDatabase(n *parser.RenameDatabase) (planNode, error) {
if n.Name == "" || n.NewName == "" {
return nil, errEmptyDatabaseName
}
if p.session.User != security.RootUser {
return nil, fmt.Errorf("only %s is allowed to rename databases", security.RootUser)
}
dbDesc, err := p.getDatabaseDesc(string(n.Name))
if err != nil {
return nil, err
}
if dbDesc == nil {
return nil, databaseDoesNotExistError(string(n.Name))
}
if n.Name == n.NewName {
// Noop.
return &emptyNode{}, nil
}
// Now update the nameMetadataKey and the descriptor.
descKey := sqlbase.MakeDescMetadataKey(dbDesc.GetID())
dbDesc.SetName(string(n.NewName))
if err := dbDesc.Validate(); err != nil {
return nil, err
}
newKey := databaseKey{string(n.NewName)}.Key()
oldKey := databaseKey{string(n.Name)}.Key()
descID := dbDesc.GetID()
descDesc := sqlbase.WrapDescriptor(dbDesc)
b := client.Batch{}
b.CPut(newKey, descID, nil)
b.Put(descKey, descDesc)
b.Del(oldKey)
if err := p.txn.Run(&b); err != nil {
if _, ok := err.(*roachpb.ConditionFailedError); ok {
return nil, fmt.Errorf("the new database name %q already exists", string(n.NewName))
}
return nil, err
}
p.setTestingVerifyMetadata(func(systemConfig config.SystemConfig) error {
if err := expectDescriptorID(systemConfig, newKey, descID); err != nil {
return err
}
if err := expectDescriptor(systemConfig, descKey, descDesc); err != nil {
return err
}
return expectDeleted(systemConfig, oldKey)
})
return &emptyNode{}, nil
}
Example 3: makeBackfillBatch
func (p *planner) makeBackfillBatch(tableName *parser.QualifiedName, tableDesc *TableDescriptor, indexDescs ...IndexDescriptor) (client.Batch, error) {
b := client.Batch{}
// Get all the rows affected.
// TODO(vivek): Avoid going through Select.
// TODO(tamird): Support partial indexes?
row, err := p.Select(&parser.Select{
Exprs: parser.SelectExprs{parser.StarSelectExpr()},
From: parser.TableExprs{&parser.AliasedTableExpr{Expr: tableName}},
})
if err != nil {
return b, err
}
// Construct a map from column ID to the index the value appears at within a
// row.
colIDtoRowIndex := map[ColumnID]int{}
for i, name := range row.Columns() {
c, err := tableDesc.FindColumnByName(name)
if err != nil {
return b, err
}
colIDtoRowIndex[c.ID] = i
}
// TODO(tamird): This will fall down in production use. We need to do
// something better (see #2036). In particular, this implementation
// has the following problems:
// - Very large tables will generate an enormous batch here. This
// isn't really a problem in itself except that it will exacerbate
// the other issue:
// - Any non-quiescent table that this runs against will end up with
// an inconsistent index. This is because as inserts/updates continue
// to roll in behind this operation's read front, the written index
// will become incomplete/stale before it's written.
for row.Next() {
rowVals := row.Values()
for _, indexDesc := range indexDescs {
secondaryIndexEntries, err := encodeSecondaryIndexes(
tableDesc.ID, []IndexDescriptor{indexDesc}, colIDtoRowIndex, rowVals)
if err != nil {
return b, err
}
for _, secondaryIndexEntry := range secondaryIndexEntries {
if log.V(2) {
log.Infof("CPut %s -> %v", prettyKey(secondaryIndexEntry.key, 0),
secondaryIndexEntry.value)
}
b.CPut(secondaryIndexEntry.key, secondaryIndexEntry.value, nil)
}
}
}
return b, row.Err()
}
Example 4: createDescriptor
// createDescriptor implements the DescriptorAccessor interface.
func (p *planner) createDescriptor(plainKey sqlbase.DescriptorKey, descriptor sqlbase.DescriptorProto, ifNotExists bool) (bool, error) {
idKey := plainKey.Key()
// Check whether idKey exists.
gr, err := p.txn.Get(idKey)
if err != nil {
return false, err
}
if gr.Exists() {
if ifNotExists {
// Noop.
return false, nil
}
// Key exists, but we don't want it to: error out.
return false, fmt.Errorf("%s %q already exists", descriptor.TypeName(), plainKey.Name())
}
// Increment unique descriptor counter.
if ir, err := p.txn.Inc(keys.DescIDGenerator, 1); err == nil {
descriptor.SetID(sqlbase.ID(ir.ValueInt() - 1))
} else {
return false, err
}
// TODO(pmattis): The error currently returned below is likely going to be
// difficult to interpret.
//
// TODO(pmattis): Need to handle if-not-exists here as well.
//
// TODO(pmattis): This is writing the namespace and descriptor table entries,
// but not going through the normal INSERT logic and not performing a precise
// mimicry. In particular, we're only writing a single key per table, while
// perfect mimicry would involve writing a sentinel key for each row as well.
descKey := sqlbase.MakeDescMetadataKey(descriptor.GetID())
b := client.Batch{}
descID := descriptor.GetID()
descDesc := sqlbase.WrapDescriptor(descriptor)
if log.V(2) {
log.Infof("CPut %s -> %d", idKey, descID)
log.Infof("CPut %s -> %s", descKey, descDesc)
}
b.CPut(idKey, descID, nil)
b.CPut(descKey, descDesc, nil)
p.setTestingVerifyMetadata(func(systemConfig config.SystemConfig) error {
if err := expectDescriptorID(systemConfig, idKey, descID); err != nil {
return err
}
return expectDescriptor(systemConfig, descKey, descDesc)
})
return true, p.txn.Run(&b)
}
Example 5: updateRangeDescriptor
// updateRangeDescriptor adds a ConditionalPut on the range descriptor. The
// conditional put verifies that changes to the range descriptor are made in a
// well-defined order, preventing a scenario where a wayward replica which is
// no longer part of the original Raft group comes back online to form a
// splinter group with a node which was also a former replica, and hijacks the
// range descriptor. This is a last line of defense; other mechanisms should
// prevent rogue replicas from getting this far (see #768).
func updateRangeDescriptor(b *client.Batch, descKey proto.Key, oldDesc, newDesc *proto.RangeDescriptor) error {
var oldValue []byte
if oldDesc != nil {
var err error
if oldValue, err = gogoproto.Marshal(oldDesc); err != nil {
return err
}
}
newValue, err := gogoproto.Marshal(newDesc)
if err != nil {
return err
}
b.CPut(descKey, newValue, oldValue)
return nil
}
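For contrast with the nil expected values used in the other examples, the sketch below shows a hypothetical caller of updateRangeDescriptor (applyDescriptorChange is not part of the source, and it assumes updateRangeDescriptor from Example 5 lives in the same package). Because the queued CPut carries the marshalled oldDesc as its expected value, a concurrent change to the descriptor makes txn.Run fail with a condition-failed error rather than silently overwriting newer state.

package example

import (
	"github.com/cockroachdb/cockroach/client"
	"github.com/cockroachdb/cockroach/proto"
)

// applyDescriptorChange queues the guarded descriptor update and runs it in
// the supplied transaction. If the value stored at descKey no longer matches
// oldDesc, the batch fails and the caller can retry with a fresh descriptor.
func applyDescriptorChange(txn *client.Txn, descKey proto.Key, oldDesc, newDesc *proto.RangeDescriptor) error {
	b := &client.Batch{}
	if err := updateRangeDescriptor(b, descKey, oldDesc, newDesc); err != nil {
		return err
	}
	return txn.Run(b)
}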
Example 6: RenameDatabase
// RenameDatabase renames the database.
// Privileges: "root" user.
// Notes: postgres requires superuser, db owner, or "CREATEDB".
// mysql >= 5.1.23 does not allow database renames.
func (p *planner) RenameDatabase(n *parser.RenameDatabase) (planNode, error) {
if n.Name == "" || n.NewName == "" {
return nil, errEmptyDatabaseName
}
if p.user != security.RootUser {
return nil, fmt.Errorf("only %s is allowed to rename databases", security.RootUser)
}
dbDesc, err := p.getDatabaseDesc(string(n.Name))
if err != nil {
return nil, err
}
if n.Name == n.NewName {
// Noop.
return &valuesNode{}, nil
}
// Now update the nameMetadataKey and the descriptor.
descKey := MakeDescMetadataKey(dbDesc.GetID())
dbDesc.SetName(string(n.NewName))
if err := dbDesc.Validate(); err != nil {
return nil, err
}
b := client.Batch{}
b.CPut(databaseKey{string(n.NewName)}.Key(), dbDesc.GetID(), nil)
b.Put(descKey, dbDesc)
b.Del(databaseKey{string(n.Name)}.Key())
// Mark transaction as operating on the system DB.
p.txn.SetSystemDBTrigger()
if err := p.txn.Run(&b); err != nil {
if _, ok := err.(*proto.ConditionFailedError); ok {
return nil, fmt.Errorf("the new database name %q already exists", string(n.NewName))
}
return nil, err
}
return &valuesNode{}, nil
}
Example 7: createDescriptor
// createDescriptor takes a Table or Database descriptor and creates it
// if needed, incrementing the descriptor counter.
func (p *planner) createDescriptor(plainKey descriptorKey, descriptor descriptorProto, ifNotExists bool) error {
key := plainKey.Key()
// Check whether key exists.
gr, err := p.txn.Get(key)
if err != nil {
return err
}
if gr.Exists() {
if ifNotExists {
// Noop.
return nil
}
// Key exists, but we don't want it to: error out.
return fmt.Errorf("%s %q already exists", descriptor.TypeName(), plainKey.Name())
}
// Increment unique descriptor counter.
if ir, err := p.txn.Inc(keys.DescIDGenerator, 1); err == nil {
descriptor.SetID(ID(ir.ValueInt() - 1))
} else {
return err
}
// TODO(pmattis): The error currently returned below is likely going to be
// difficult to interpret.
//
// TODO(pmattis): Need to handle if-not-exists here as well.
//
// TODO(pmattis): This is writing the namespace and descriptor table entries,
// but not going through the normal INSERT logic and not performing a precise
// mimicry. In particular, we're only writing a single key per table, while
// perfect mimicry would involve writing a sentinel key for each row as well.
descKey := MakeDescMetadataKey(descriptor.GetID())
b := client.Batch{}
b.CPut(key, descriptor.GetID(), nil)
b.CPut(descKey, wrapDescriptor(descriptor), nil)
return p.txn.Run(&b)
}
Example 8: Update
//......... part of the code omitted here .........
// Secondary indexes needing updating.
var indexes []IndexDescriptor
for _, index := range tableDesc.Indexes {
for _, id := range index.ColumnIDs {
if _, ok := colIDSet[id]; ok {
indexes = append(indexes, index)
break
}
}
}
// Update all the rows.
var b client.Batch
for rows.Next() {
rowVals := rows.Values()
primaryIndexKey, _, err := encodeIndexKey(
primaryIndex.ColumnIDs, colIDtoRowIndex, rowVals, primaryIndexKeyPrefix)
if err != nil {
return nil, err
}
// Compute the current secondary index key:value pairs for this row.
secondaryIndexEntries, err := encodeSecondaryIndexes(
tableDesc.ID, indexes, colIDtoRowIndex, rowVals)
if err != nil {
return nil, err
}
// Our updated value expressions occur immediately after the plain
// columns in the output.
newVals := rowVals[len(tableDesc.Columns):]
// Update the row values.
for i, col := range cols {
val := newVals[i]
if !col.Nullable && val == parser.DNull {
return nil, fmt.Errorf("null value in column %q violates not-null constraint", col.Name)
}
rowVals[colIDtoRowIndex[col.ID]] = val
}
// Compute the new secondary index key:value pairs for this row.
newSecondaryIndexEntries, err := encodeSecondaryIndexes(
tableDesc.ID, indexes, colIDtoRowIndex, rowVals)
if err != nil {
return nil, err
}
// Update secondary indexes.
for i, newSecondaryIndexEntry := range newSecondaryIndexEntries {
secondaryIndexEntry := secondaryIndexEntries[i]
if !bytes.Equal(newSecondaryIndexEntry.key, secondaryIndexEntry.key) {
if log.V(2) {
log.Infof("CPut %q -> %v", newSecondaryIndexEntry.key, newSecondaryIndexEntry.value)
}
b.CPut(newSecondaryIndexEntry.key, newSecondaryIndexEntry.value, nil)
if log.V(2) {
log.Infof("Del %q", secondaryIndexEntry.key)
}
b.Del(secondaryIndexEntry.key)
}
}
// Add the new values.
for i, val := range newVals {
col := cols[i]
primitive, err := convertDatum(col, val)
if err != nil {
return nil, err
}
key := MakeColumnKey(col.ID, primaryIndexKey)
if primitive != nil {
// We only output non-NULL values. Non-existent column keys are
// considered NULL during scanning and the row sentinel ensures we know
// the row exists.
if log.V(2) {
log.Infof("Put %q -> %v", key, val)
}
b.Put(key, primitive)
} else {
// The column might have already existed but is being set to NULL, so
// delete it.
if log.V(2) {
log.Infof("Del %q", key)
}
b.Del(key)
}
}
}
if err := rows.Err(); err != nil {
return nil, err
}
if err := p.txn.Run(&b); err != nil {
return nil, convertBatchError(tableDesc, b, err)
}
// TODO(tamird/pmattis): return the number of affected rows.
return &valuesNode{}, nil
}
Example 9: updateRow
// updateRow adds to the batch the kv operations necessary to update a table row
// with the given values.
//
// The row corresponding to oldValues is updated with the ones in updateValues.
// Note that updateValues only contains the ones that are changing.
//
// The return value is only good until the next call to UpdateRow.
func (ru *rowUpdater) updateRow(
b *client.Batch,
oldValues []parser.Datum,
updateValues []parser.Datum,
) ([]parser.Datum, error) {
if len(oldValues) != len(ru.fetchCols) {
return nil, util.Errorf("got %d values but expected %d", len(oldValues), len(ru.fetchCols))
}
if len(updateValues) != len(ru.updateCols) {
return nil, util.Errorf("got %d values but expected %d", len(updateValues), len(ru.updateCols))
}
primaryIndexKey, secondaryIndexEntries, err := ru.helper.encodeIndexes(ru.fetchColIDtoRowIndex, oldValues)
if err != nil {
return nil, err
}
// Check that the new value types match the column types. This needs to
// happen before index encoding because certain datum types (i.e. tuple)
// cannot be used as index values.
for i, val := range updateValues {
if ru.marshalled[i], err = sqlbase.MarshalColumnValue(ru.updateCols[i], val); err != nil {
return nil, err
}
}
// Update the row values.
copy(ru.newValues, oldValues)
for i, updateCol := range ru.updateCols {
ru.newValues[ru.fetchColIDtoRowIndex[updateCol.ID]] = updateValues[i]
}
newPrimaryIndexKey := primaryIndexKey
rowPrimaryKeyChanged := false
var newSecondaryIndexEntries []sqlbase.IndexEntry
if ru.primaryKeyColChange {
newPrimaryIndexKey, newSecondaryIndexEntries, err = ru.helper.encodeIndexes(ru.fetchColIDtoRowIndex, ru.newValues)
if err != nil {
return nil, err
}
rowPrimaryKeyChanged = !bytes.Equal(primaryIndexKey, newPrimaryIndexKey)
} else {
newSecondaryIndexEntries, err = sqlbase.EncodeSecondaryIndexes(
ru.helper.tableDesc.ID, ru.helper.indexes, ru.fetchColIDtoRowIndex, ru.newValues)
if err != nil {
return nil, err
}
}
if rowPrimaryKeyChanged {
err := ru.rd.deleteRow(b, oldValues)
if err != nil {
return nil, err
}
err = ru.ri.insertRow(b, ru.newValues)
return ru.newValues, err
}
// Update secondary indexes.
for i, newSecondaryIndexEntry := range newSecondaryIndexEntries {
secondaryIndexEntry := secondaryIndexEntries[i]
secondaryKeyChanged := !bytes.Equal(newSecondaryIndexEntry.Key, secondaryIndexEntry.Key)
if secondaryKeyChanged {
if log.V(2) {
log.Infof("Del %s", secondaryIndexEntry.Key)
}
b.Del(secondaryIndexEntry.Key)
// Do not update Indexes in the DELETE_ONLY state.
if _, ok := ru.deleteOnlyIndex[i]; !ok {
if log.V(2) {
log.Infof("CPut %s -> %v", newSecondaryIndexEntry.Key, newSecondaryIndexEntry.Value)
}
b.CPut(newSecondaryIndexEntry.Key, newSecondaryIndexEntry.Value, nil)
}
}
}
// Add the new values.
for i, val := range updateValues {
col := ru.updateCols[i]
if ru.helper.columnInPK(col.ID) {
// Skip primary key columns as their values are encoded in the row
// sentinel key which is guaranteed to exist for as long as the row
// exists.
continue
}
ru.key = keys.MakeColumnKey(newPrimaryIndexKey, uint32(col.ID))
if ru.marshalled[i].RawBytes != nil {
// We only output non-NULL values. Non-existent column keys are
// considered NULL during scanning and the row sentinel ensures we know
// the row exists.
//......... part of the code omitted here .........
Example 10: insertRow
// insertRow adds to the batch the kv operations necessary to insert a table row
// with the given values.
func (ri *rowInserter) insertRow(b *client.Batch, values []parser.Datum) error {
if len(values) != len(ri.insertCols) {
return util.Errorf("got %d values but expected %d", len(values), len(ri.insertCols))
}
// Encode the values to the expected column type. This needs to
// happen before index encoding because certain datum types (i.e. tuple)
// cannot be used as index values.
for i, val := range values {
// Make sure the value can be written to the column before proceeding.
var err error
if ri.marshalled[i], err = sqlbase.MarshalColumnValue(ri.insertCols[i], val); err != nil {
return err
}
}
primaryIndexKey, secondaryIndexEntries, err := ri.helper.encodeIndexes(ri.insertColIDtoRowIndex, values)
if err != nil {
return err
}
// Write the row sentinel. We want to write the sentinel first in case
// we are trying to insert a duplicate primary key: if we write the
// secondary indexes first, we may get an error that looks like a
// uniqueness violation on a non-unique index.
ri.key = keys.MakeNonColumnKey(primaryIndexKey)
if log.V(2) {
log.Infof("CPut %s -> NULL", ri.key)
}
// Each sentinel value needs a distinct RawBytes field as the computed
// checksum includes the key the value is associated with.
ri.sentinelValue.SetBytes([]byte{})
b.CPut(&ri.key, &ri.sentinelValue, nil)
ri.key = nil
for _, secondaryIndexEntry := range secondaryIndexEntries {
if log.V(2) {
log.Infof("CPut %s -> %v", secondaryIndexEntry.Key, secondaryIndexEntry.Value)
}
ri.key = secondaryIndexEntry.Key
b.CPut(&ri.key, secondaryIndexEntry.Value, nil)
}
ri.key = nil
// Write the row columns.
for i, val := range values {
col := ri.insertCols[i]
if ri.helper.columnInPK(col.ID) {
// Skip primary key columns as their values are encoded in the row
// sentinel key which is guaranteed to exist for as long as the row
// exists.
continue
}
if ri.marshalled[i].RawBytes != nil {
// We only output non-NULL values. Non-existent column keys are
// considered NULL during scanning and the row sentinel ensures we know
// the row exists.
ri.key = keys.MakeColumnKey(primaryIndexKey, uint32(col.ID))
if log.V(2) {
log.Infof("CPut %s -> %v", ri.key, val)
}
b.CPut(&ri.key, &ri.marshalled[i], nil)
ri.key = nil
}
}
return nil
}
Example 11: backfillBatch
//......... part of the code omitted here .........
// will fail on big tables (see #3274).
// Delete the entire dropped columns.
// This used to use SQL UPDATE in the past to update the dropped
// column to NULL; but a column in the process of being
// dropped is placed in the table descriptor mutations, and
// a SQL UPDATE of a column in mutations will fail.
if len(droppedColumnDescs) > 0 {
// Run a scan across the table using the primary key.
start := roachpb.Key(MakeIndexKeyPrefix(oldTableDesc.ID, oldTableDesc.PrimaryIndex.ID))
// Use a different batch to perform the scan.
batch := &client.Batch{}
batch.Scan(start, start.PrefixEnd(), 0)
if pErr := p.txn.Run(batch); pErr != nil {
return pErr
}
for _, result := range batch.Results {
var sentinelKey roachpb.Key
for _, kv := range result.Rows {
if sentinelKey == nil || !bytes.HasPrefix(kv.Key, sentinelKey) {
// Sentinel keys have a 0 suffix indicating 0 bytes of column
// ID. Strip off that suffix to determine the prefix shared with the
// other keys for the row.
sentinelKey = stripColumnIDLength(kv.Key)
for _, columnDesc := range droppedColumnDescs {
// Delete the dropped column.
colKey := keys.MakeColumnKey(sentinelKey, uint32(columnDesc.ID))
if log.V(2) {
log.Infof("Del %s", colKey)
}
b.Del(colKey)
}
}
}
}
}
for _, indexDescriptor := range droppedIndexDescs {
indexPrefix := MakeIndexKeyPrefix(oldTableDesc.ID, indexDescriptor.ID)
// Delete the index.
indexStartKey := roachpb.Key(indexPrefix)
indexEndKey := indexStartKey.PrefixEnd()
if log.V(2) {
log.Infof("DelRange %s - %s", indexStartKey, indexEndKey)
}
b.DelRange(indexStartKey, indexEndKey)
}
if len(newIndexDescs) > 0 {
// Get all the rows affected.
// TODO(vivek): Avoid going through Select.
// TODO(tamird): Support partial indexes?
// Use a scanNode with SELECT to pass in a TableDescriptor
// to the SELECT without needing to use a parser.QualifiedName,
// because we want to run schema changes from a gossip feed of
// table IDs.
scan := &scanNode{
planner: p,
txn: p.txn,
desc: oldTableDesc,
}
scan.initDescDefaults()
rows, pErr := p.selectIndex(&selectNode{}, scan, nil, false)
if pErr != nil {
return pErr
}
// Construct a map from column ID to the index the value appears at within a
// row.
colIDtoRowIndex, pErr := makeColIDtoRowIndex(rows, oldTableDesc)
if pErr != nil {
return pErr
}
for rows.Next() {
rowVals := rows.Values()
for _, newIndexDesc := range newIndexDescs {
secondaryIndexEntries, pErr := encodeSecondaryIndexes(
oldTableDesc.ID, []IndexDescriptor{newIndexDesc}, colIDtoRowIndex, rowVals)
if pErr != nil {
return pErr
}
for _, secondaryIndexEntry := range secondaryIndexEntries {
if log.V(2) {
log.Infof("CPut %s -> %v", secondaryIndexEntry.key,
secondaryIndexEntry.value)
}
b.CPut(secondaryIndexEntry.key, secondaryIndexEntry.value, nil)
}
}
}
return rows.PErr()
}
return nil
}
Example 12: backfillBatch
//......... part of the code omitted here .........
batch := &client.Batch{}
batch.Scan(start, start.PrefixEnd(), 0)
if err := p.txn.Run(batch); err != nil {
return err
}
for _, result := range batch.Results {
var sentinelKey roachpb.Key
for _, kv := range result.Rows {
if sentinelKey == nil || !bytes.HasPrefix(kv.Key, sentinelKey) {
// Sentinel keys have a 0 suffix indicating 0 bytes of column
// ID. Strip off that suffix to determine the prefix shared with the
// other keys for the row.
sentinelKey = stripColumnIDLength(kv.Key)
for _, columnDesc := range droppedColumnDescs {
// Delete the dropped column.
colKey := keys.MakeColumnKey(sentinelKey, uint32(columnDesc.ID))
if log.V(2) {
log.Infof("Del %s", colKey)
}
b.Del(colKey)
}
}
}
}
}
for _, indexDescriptor := range droppedIndexDescs {
indexPrefix := MakeIndexKeyPrefix(newTableDesc.ID, indexDescriptor.ID)
// Delete the index.
indexStartKey := roachpb.Key(indexPrefix)
indexEndKey := indexStartKey.PrefixEnd()
if log.V(2) {
log.Infof("DelRange %s - %s", indexStartKey, indexEndKey)
}
b.DelRange(indexStartKey, indexEndKey)
}
if len(newIndexDescs) > 0 {
// Get all the rows affected.
// TODO(vivek): Avoid going through Select.
// TODO(tamird): Support partial indexes?
// Use a scanNode with SELECT to pass in a TableDescriptor
// to the SELECT without needing to use a parser.QualifiedName,
// because we want to run schema changes from a gossip feed of
// table IDs.
scan := &scanNode{
planner: p,
txn: p.txn,
desc: oldTableDesc,
}
scan.initDescDefaults()
rows, err := p.selectWithScan(scan, &parser.Select{Exprs: oldTableDesc.allColumnsSelector()})
if err != nil {
return err
}
// Construct a map from column ID to the index the value appears at within a
// row.
colIDtoRowIndex, err := makeColIDtoRowIndex(rows, oldTableDesc)
if err != nil {
return err
}
// TODO(tamird): This will fall down in production use. We need to do
// something better (see #2036). In particular, this implementation
// has the following problems:
// - Very large tables will generate an enormous batch here. This
// isn't really a problem in itself except that it will exacerbate
// the other issue:
// - Any non-quiescent table that this runs against will end up with
// an inconsistent index. This is because as inserts/updates continue
// to roll in behind this operation's read front, the written index
// will become incomplete/stale before it's written.
for rows.Next() {
rowVals := rows.Values()
for _, newIndexDesc := range newIndexDescs {
secondaryIndexEntries, err := encodeSecondaryIndexes(
oldTableDesc.ID, []IndexDescriptor{newIndexDesc}, colIDtoRowIndex, rowVals)
if err != nil {
return err
}
for _, secondaryIndexEntry := range secondaryIndexEntries {
if log.V(2) {
log.Infof("CPut %s -> %v", secondaryIndexEntry.key,
secondaryIndexEntry.value)
}
b.CPut(secondaryIndexEntry.key, secondaryIndexEntry.value, nil)
}
}
}
return rows.Err()
}
return nil
}
Example 13: Insert
// Insert inserts rows into the database.
// Privileges: INSERT on table
// Notes: postgres requires INSERT. No "on duplicate key update" option.
// mysql requires INSERT. Also requires UPDATE on "ON DUPLICATE KEY UPDATE".
func (p *planner) Insert(n *parser.Insert) (planNode, error) {
tableDesc, err := p.getTableDesc(n.Table)
if err != nil {
return nil, err
}
if err := p.checkPrivilege(tableDesc, privilege.INSERT); err != nil {
return nil, err
}
// Determine which columns we're inserting into.
cols, err := p.processColumns(tableDesc, n.Columns)
if err != nil {
return nil, err
}
// Construct a map from column ID to the index the value appears at within a
// row.
colIDtoRowIndex := map[ColumnID]int{}
for i, c := range cols {
colIDtoRowIndex[c.ID] = i
}
// Verify we have at least the columns that are part of the primary key.
primaryKeyCols := map[ColumnID]struct{}{}
for i, id := range tableDesc.PrimaryIndex.ColumnIDs {
if _, ok := colIDtoRowIndex[id]; !ok {
return nil, fmt.Errorf("missing %q primary key column", tableDesc.PrimaryIndex.ColumnNames[i])
}
primaryKeyCols[id] = struct{}{}
}
// Transform the values into a rows object. This expands SELECT statements or
// generates rows from the values contained within the query.
rows, err := p.makePlan(n.Rows)
if err != nil {
return nil, err
}
primaryIndex := tableDesc.PrimaryIndex
primaryIndexKeyPrefix := MakeIndexKeyPrefix(tableDesc.ID, primaryIndex.ID)
b := client.Batch{}
for rows.Next() {
values := rows.Values()
for range cols[len(values):] {
values = append(values, parser.DNull)
}
for _, col := range tableDesc.Columns {
if !col.Nullable {
if i, ok := colIDtoRowIndex[col.ID]; !ok || values[i] == parser.DNull {
return nil, fmt.Errorf("null value in column %q violates not-null constraint", col.Name)
}
}
}
primaryIndexKey, _, err := encodeIndexKey(
primaryIndex.ColumnIDs, colIDtoRowIndex, values, primaryIndexKeyPrefix)
if err != nil {
return nil, err
}
// Write the secondary indexes.
secondaryIndexEntries, err := encodeSecondaryIndexes(
tableDesc.ID, tableDesc.Indexes, colIDtoRowIndex, values)
if err != nil {
return nil, err
}
for _, secondaryIndexEntry := range secondaryIndexEntries {
if log.V(2) {
log.Infof("CPut %q -> %v", secondaryIndexEntry.key, secondaryIndexEntry.value)
}
b.CPut(secondaryIndexEntry.key, secondaryIndexEntry.value, nil)
}
// Write the row sentinel.
if log.V(2) {
log.Infof("CPut %q -> NULL", primaryIndexKey)
}
b.CPut(primaryIndexKey, nil, nil)
// Write the row columns.
for i, val := range values {
col := cols[i]
// Make sure the value can be written to the column before proceeding.
primitive, err := convertDatum(col, val)
if err != nil {
return nil, err
}
if _, ok := primaryKeyCols[col.ID]; ok {
// Skip primary key columns as their values are encoded in the row
//......... part of the code omitted here .........
Example 14: Insert
// Insert inserts rows into the database.
// Privileges: WRITE on table
// Notes: postgres requires INSERT. No "on duplicate key update" option.
// mysql requires INSERT. Also requires UPDATE on "ON DUPLICATE KEY UPDATE".
func (p *planner) Insert(n *parser.Insert) (planNode, error) {
tableDesc, err := p.getTableDesc(n.Table)
if err != nil {
return nil, err
}
if !tableDesc.HasPrivilege(p.user, parser.PrivilegeWrite) {
return nil, fmt.Errorf("user %s does not have %s privilege on table %s",
p.user, parser.PrivilegeWrite, tableDesc.Name)
}
// Determine which columns we're inserting into.
cols, err := p.processColumns(tableDesc, n.Columns)
if err != nil {
return nil, err
}
// Construct a map from column ID to the index the value appears at within a
// row.
colIDtoRowIndex := map[structured.ID]int{}
for i, c := range cols {
colIDtoRowIndex[c.ID] = i
}
// Verify we have at least the columns that are part of the primary key.
for i, id := range tableDesc.PrimaryIndex.ColumnIDs {
if _, ok := colIDtoRowIndex[id]; !ok {
return nil, fmt.Errorf("missing %q primary key column", tableDesc.PrimaryIndex.ColumnNames[i])
}
}
// Transform the values into a rows object. This expands SELECT statements or
// generates rows from the values contained within the query.
rows, err := p.makePlan(n.Rows)
if err != nil {
return nil, err
}
primaryIndex := tableDesc.PrimaryIndex
primaryIndexKeyPrefix := encodeIndexKeyPrefix(tableDesc.ID, primaryIndex.ID)
b := client.Batch{}
for rows.Next() {
values := rows.Values()
if len(values) != len(cols) {
return nil, fmt.Errorf("invalid values for columns: %d != %d", len(values), len(cols))
}
primaryIndexKeySuffix, _, err := encodeIndexKey(primaryIndex.ColumnIDs, colIDtoRowIndex, values, nil)
if err != nil {
return nil, err
}
primaryIndexKey := bytes.Join([][]byte{primaryIndexKeyPrefix, primaryIndexKeySuffix}, nil)
// Write the secondary indexes.
secondaryIndexEntries, err := encodeSecondaryIndexes(tableDesc.ID, tableDesc.Indexes, colIDtoRowIndex, values, primaryIndexKeySuffix)
if err != nil {
return nil, err
}
for _, secondaryIndexEntry := range secondaryIndexEntries {
if log.V(2) {
log.Infof("CPut %q -> %v", secondaryIndexEntry.key, secondaryIndexEntry.value)
}
b.CPut(secondaryIndexEntry.key, secondaryIndexEntry.value, nil)
}
// Write the row.
for i, val := range values {
key := encodeColumnKey(cols[i], primaryIndexKey)
if log.V(2) {
log.Infof("CPut %q -> %v", key, val)
}
v, err := prepareVal(cols[i], val)
if err != nil {
return nil, err
}
b.CPut(key, v, nil)
}
}
if err := rows.Err(); err != nil {
return nil, err
}
if err := p.db.Run(&b); err != nil {
if tErr, ok := err.(*proto.ConditionFailedError); ok {
return nil, fmt.Errorf("duplicate key value %q violates unique constraint %s", tErr.ActualValue.Bytes, "TODO(tamird)")
}
return nil, err
}
// TODO(tamird/pmattis): return the number of affected rows
return &valuesNode{}, nil
}
Example 15: RenameTable
// RenameTable renames the table.
// Privileges: WRITE on database.
// Notes: postgres requires the table owner.
// mysql requires ALTER, DROP on the original table, and CREATE, INSERT
// on the new table (and does not copy privileges over).
func (p *planner) RenameTable(n *parser.RenameTable) (planNode, error) {
if n.NewName == "" {
return nil, errEmptyTableName
}
if err := n.Name.NormalizeTableName(p.session.Database); err != nil {
return nil, err
}
if n.Name.Table() == string(n.NewName) {
// Noop.
return &valuesNode{}, nil
}
dbDesc, err := p.getDatabaseDesc(n.Name.Database())
if err != nil {
return nil, err
}
tbKey := tableKey{dbDesc.ID, string(n.Name.Table())}.Key()
// Check if table exists.
gr, err := p.txn.Get(tbKey)
if err != nil {
return nil, err
}
if !gr.Exists() {
if n.IfExists {
// Noop.
return &valuesNode{}, nil
}
// Key does not exist, but we want it to: error out.
return nil, fmt.Errorf("table %q does not exist", n.Name.Table())
}
if err := p.checkPrivilege(dbDesc, privilege.WRITE); err != nil {
return nil, err
}
tableDesc, err := p.getTableDesc(n.Name)
if err != nil {
return nil, err
}
tableDesc.SetName(string(n.NewName))
newTbKey := tableKey{dbDesc.ID, string(n.NewName)}.Key()
descKey := MakeDescMetadataKey(tableDesc.GetID())
b := client.Batch{}
b.Put(descKey, tableDesc)
b.CPut(newTbKey, descKey, nil)
b.Del(tbKey)
if err := p.txn.Run(&b); err != nil {
if _, ok := err.(*proto.ConditionFailedError); ok {
return nil, fmt.Errorf("table name %q already exists", n.NewName)
}
return nil, err
}
return &valuesNode{}, nil
}