This article collects typical usage examples of the Golang NewTime function from k8s.io/apimachinery/pkg/apis/meta/v1. If you are wondering what metav1.NewTime does and how to use it, the curated examples below should help.
The following presents 15 code examples of NewTime, ordered by popularity.
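Before the examples, a minimal self-contained sketch of what NewTime does: it wraps a time.Time in metav1.Time, the thin type the Kubernetes API uses so timestamps serialize as RFC 3339.

package main

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Wrap a concrete instant; the embedded time.Time remains accessible.
	t := metav1.NewTime(time.Date(2017, time.March, 1, 12, 0, 0, 0, time.UTC))
	fmt.Println(t.Time.Format(time.RFC3339)) // 2017-03-01T12:00:00Z
	// metav1.Now() is shorthand for metav1.NewTime(time.Now()).
	fmt.Println(metav1.Now().After(t.Time)) // true
}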
Example 1: TestNodeConditionsObservedSince
func TestNodeConditionsObservedSince(t *testing.T) {
now := metav1.Now()
observedTime := metav1.NewTime(now.Time.Add(-1 * time.Minute))
testCases := map[string]struct {
observedAt nodeConditionsObservedAt
period time.Duration
now time.Time
result []v1.NodeConditionType
}{
"in-period": {
observedAt: nodeConditionsObservedAt{
v1.NodeMemoryPressure: observedTime.Time,
},
period: 2 * time.Minute,
now: now.Time,
result: []v1.NodeConditionType{v1.NodeMemoryPressure},
},
"out-of-period": {
observedAt: nodeConditionsObservedAt{
v1.NodeMemoryPressure: observedTime.Time,
},
period: 30 * time.Second,
now: now.Time,
result: []v1.NodeConditionType{},
},
}
for testName, testCase := range testCases {
actual := nodeConditionsObservedSince(testCase.observedAt, testCase.period, testCase.now)
if !nodeConditionList(actual).Equal(nodeConditionList(testCase.result)) {
t.Errorf("Test case: %s, expected: %v, actual: %v", testName, testCase.result, actual)
}
}
}
Example 2: TestGracefulStoreCanDeleteIfExistingGracePeriodZero
// TestGracefulStoreCanDeleteIfExistingGracePeriodZero tests recovery from a
// race condition in which a prior graceful delete failed to complete, leaving
// the pod with a deletion timestamp and a grace period of 0. (A sketch of the
// equivalent delete options follows this example.)
func TestGracefulStoreCanDeleteIfExistingGracePeriodZero(t *testing.T) {
deletionTimestamp := metav1.NewTime(time.Now())
deletionGracePeriodSeconds := int64(0)
initialGeneration := int64(1)
pod := &api.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Generation: initialGeneration,
DeletionGracePeriodSeconds: &deletionGracePeriodSeconds,
DeletionTimestamp: &deletionTimestamp,
},
Spec: api.PodSpec{NodeName: "machine"},
}
testContext := genericapirequest.WithNamespace(genericapirequest.NewContext(), "test")
destroyFunc, registry := NewTestGenericStoreRegistry(t)
registry.EnableGarbageCollection = false
defaultDeleteStrategy := testRESTStrategy{api.Scheme, names.SimpleNameGenerator, true, false, true}
registry.DeleteStrategy = testGracefulStrategy{defaultDeleteStrategy}
defer destroyFunc()
graceful, gracefulPending, err := rest.BeforeDelete(registry.DeleteStrategy, testContext, pod, api.NewDeleteOptions(0))
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if graceful {
t.Fatalf("graceful should be false if object has DeletionTimestamp and DeletionGracePeriodSeconds is 0")
}
if gracefulPending {
t.Fatalf("gracefulPending should be false if object has DeletionTimestamp and DeletionGracePeriodSeconds is 0")
}
}
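The api.NewDeleteOptions(0) call above requests immediate deletion. For reference, a hedged sketch of the equivalent construction with the generic apimachinery type, assuming the metav1 import used throughout these examples:

// immediateDelete builds delete options with a zero grace period, the
// metav1 equivalent of the api.NewDeleteOptions(0) used in the test above.
func immediateDelete() *metav1.DeleteOptions {
	zero := int64(0)
	return &metav1.DeleteOptions{GracePeriodSeconds: &zero}
}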
Example 3: containerInfoV2ToStats
func (sb *summaryBuilder) containerInfoV2ToStats(
name string,
info *cadvisorapiv2.ContainerInfo) stats.ContainerStats {
cStats := stats.ContainerStats{
StartTime: metav1.NewTime(info.Spec.CreationTime),
Name: name,
}
cstat, found := sb.latestContainerStats(info)
if !found {
return cStats
}
if info.Spec.HasCpu {
cpuStats := stats.CPUStats{
Time: metav1.NewTime(cstat.Timestamp),
}
if cstat.CpuInst != nil {
cpuStats.UsageNanoCores = &cstat.CpuInst.Usage.Total
}
if cstat.Cpu != nil {
cpuStats.UsageCoreNanoSeconds = &cstat.Cpu.Usage.Total
}
cStats.CPU = &cpuStats
}
if info.Spec.HasMemory {
pageFaults := cstat.Memory.ContainerData.Pgfault
majorPageFaults := cstat.Memory.ContainerData.Pgmajfault
cStats.Memory = &stats.MemoryStats{
Time: metav1.NewTime(cstat.Timestamp),
UsageBytes: &cstat.Memory.Usage,
WorkingSetBytes: &cstat.Memory.WorkingSet,
RSSBytes: &cstat.Memory.RSS,
PageFaults: &pageFaults,
MajorPageFaults: &majorPageFaults,
}
// availableBytes = memory limit (if known) - workingset
if !isMemoryUnlimited(info.Spec.Memory.Limit) {
availableBytes := info.Spec.Memory.Limit - cstat.Memory.WorkingSet
cStats.Memory.AvailableBytes = &availableBytes
}
}
sb.containerInfoV2FsStats(info, &cStats)
cStats.UserDefinedMetrics = sb.containerInfoV2ToUserDefinedMetrics(info)
return cStats
}
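The availableBytes arithmetic above only runs when the limit is meaningful. A sketch of an isMemoryUnlimited-style guard; the 1<<62 sentinel is an assumption reflecting the common cgroup convention that "no limit" is reported as an enormous value:

// isMemoryUnlimited reports whether a memory limit should be treated as
// "no limit"; the sentinel below is an assumption, not the exact constant.
func isMemoryUnlimited(v uint64) bool {
	const maxMemorySize = uint64(1) << 62
	return v > maxMemorySize
}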
Example 4: TestThresholdsMetGracePeriod
func TestThresholdsMetGracePeriod(t *testing.T) {
now := metav1.Now()
hardThreshold := Threshold{
Signal: SignalMemoryAvailable,
Operator: OpLessThan,
Value: ThresholdValue{
Quantity: quantityMustParse("1Gi"),
},
}
softThreshold := Threshold{
Signal: SignalMemoryAvailable,
Operator: OpLessThan,
Value: ThresholdValue{
Quantity: quantityMustParse("2Gi"),
},
GracePeriod: 1 * time.Minute,
}
oldTime := metav1.NewTime(now.Time.Add(-2 * time.Minute))
testCases := map[string]struct {
observedAt thresholdsObservedAt
now time.Time
result []Threshold
}{
"empty": {
observedAt: thresholdsObservedAt{},
now: now.Time,
result: []Threshold{},
},
"hard-threshold-met": {
observedAt: thresholdsObservedAt{
hardThreshold: now.Time,
},
now: now.Time,
result: []Threshold{hardThreshold},
},
"soft-threshold-not-met": {
observedAt: thresholdsObservedAt{
softThreshold: now.Time,
},
now: now.Time,
result: []Threshold{},
},
"soft-threshold-met": {
observedAt: thresholdsObservedAt{
softThreshold: oldTime.Time,
},
now: now.Time,
result: []Threshold{softThreshold},
},
}
for testName, testCase := range testCases {
actual := thresholdsMetGracePeriod(testCase.observedAt, testCase.now)
if !thresholdList(actual).Equal(thresholdList(testCase.result)) {
t.Errorf("Test case: %s, expected: %v, actual: %v", testName, testCase.result, actual)
}
}
}
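The soft-threshold cases above reduce to a single duration comparison per threshold. A minimal illustration (not the tested function's verbatim body), assuming the time import:

// gracePeriodElapsed reports whether a threshold first observed at
// observedAt has been met continuously for at least gracePeriod as of now.
func gracePeriodElapsed(gracePeriod time.Duration, observedAt, now time.Time) bool {
	return now.Sub(observedAt) >= gracePeriod
}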
Example 5: setNodeDiskPressureCondition
// setNodeDiskPressureCondition updates (or creates) the node's DiskPressure condition.
// TODO: this needs to move somewhere centralized...
func (kl *Kubelet) setNodeDiskPressureCondition(node *v1.Node) {
currentTime := metav1.NewTime(kl.clock.Now())
var condition *v1.NodeCondition
// Check if NodeDiskPressure condition already exists and if it does, just pick it up for update.
for i := range node.Status.Conditions {
if node.Status.Conditions[i].Type == v1.NodeDiskPressure {
condition = &node.Status.Conditions[i]
}
}
newCondition := false
// If the NodeDiskPressure condition doesn't exist, create one
if condition == nil {
condition = &v1.NodeCondition{
Type: v1.NodeDiskPressure,
Status: v1.ConditionUnknown,
}
// The new condition cannot be appended to node.Status.Conditions here,
// because append copies the struct into the slice: any updates made below
// through the pointer would not be reflected in the stored element (see the
// standalone demonstration after this example).
newCondition = true
}
// Update the heartbeat time
condition.LastHeartbeatTime = currentTime
// Note: The conditions below take care of the case when a new NodeDiskPressure condition is
// created and as well as the case when the condition already exists. When a new condition
// is created its status is set to v1.ConditionUnknown which matches either
// condition.Status != v1.ConditionTrue or
// condition.Status != v1.ConditionFalse in the conditions below depending on whether
// the kubelet is under disk pressure or not.
if kl.evictionManager.IsUnderDiskPressure() {
if condition.Status != v1.ConditionTrue {
condition.Status = v1.ConditionTrue
condition.Reason = "KubeletHasDiskPressure"
condition.Message = "kubelet has disk pressure"
condition.LastTransitionTime = currentTime
kl.recordNodeStatusEvent(v1.EventTypeNormal, "NodeHasDiskPressure")
}
} else {
if condition.Status != v1.ConditionFalse {
condition.Status = v1.ConditionFalse
condition.Reason = "KubeletHasNoDiskPressure"
condition.Message = "kubelet has no disk pressure"
condition.LastTransitionTime = currentTime
kl.recordNodeStatusEvent(v1.EventTypeNormal, "NodeHasNoDiskPressure")
}
}
if newCondition {
node.Status.Conditions = append(node.Status.Conditions, *condition)
}
}
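The deferred append in the function above is a general Go gotcha worth isolating: append stores a copy of the struct, so a pointer taken before the append keeps mutating the original value, not the slice element. A self-contained demonstration:

package main

import "fmt"

type condition struct{ Status string }

func main() {
	var conditions []condition
	c := &condition{Status: "Unknown"}
	conditions = append(conditions, *c) // a copy of *c goes into the slice
	c.Status = "True"                   // mutates the local value only
	fmt.Println(conditions[0].Status)   // prints "Unknown": the update was lost
}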
Example 6: TestActiveDeadlineHandler
// TestActiveDeadlineHandler verifies the active deadline handler functions as expected.
func TestActiveDeadlineHandler(t *testing.T) {
pods := newTestPods(4)
fakeClock := clock.NewFakeClock(time.Now())
podStatusProvider := &mockPodStatusProvider{pods: pods}
fakeRecorder := &record.FakeRecorder{}
handler, err := newActiveDeadlineHandler(podStatusProvider, fakeRecorder, fakeClock)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
now := metav1.Now()
startTime := metav1.NewTime(now.Time.Add(-1 * time.Minute))
// this pod has exceeded its active deadline
exceededActiveDeadlineSeconds := int64(30)
pods[0].Status.StartTime = &startTime
pods[0].Spec.ActiveDeadlineSeconds = &exceededActiveDeadlineSeconds
// this pod has not exceeded its active deadline
notYetActiveDeadlineSeconds := int64(120)
pods[1].Status.StartTime = &startTime
pods[1].Spec.ActiveDeadlineSeconds = &notYetActiveDeadlineSeconds
// this pod has no deadline
pods[2].Status.StartTime = &startTime
pods[2].Spec.ActiveDeadlineSeconds = nil
testCases := []struct {
pod *v1.Pod
expected bool
}{{pods[0], true}, {pods[1], false}, {pods[2], false}, {pods[3], false}}
for i, testCase := range testCases {
if actual := handler.ShouldSync(testCase.pod); actual != testCase.expected {
t.Errorf("[%d] ShouldSync expected %#v, got %#v", i, testCase.expected, actual)
}
actual := handler.ShouldEvict(testCase.pod)
if actual.Evict != testCase.expected {
t.Errorf("[%d] ShouldEvict.Evict expected %#v, got %#v", i, testCase.expected, actual.Evict)
}
if testCase.expected {
if actual.Reason != reason {
t.Errorf("[%d] ShouldEvict.Reason expected %#v, got %#v", i, message, actual.Reason)
}
if actual.Message != message {
t.Errorf("[%d] ShouldEvict.Message expected %#v, got %#v", i, message, actual.Message)
}
}
}
}
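The expectations in the table above follow from one comparison: elapsed time since Status.StartTime versus Spec.ActiveDeadlineSeconds. An illustrative reduction (not the handler's verbatim code), assuming the v1 and time imports:

// pastActiveDeadline sketches the handler's check: a pod without a start
// time or without a deadline is never considered exceeded.
func pastActiveDeadline(pod *v1.Pod, now time.Time) bool {
	if pod.Spec.ActiveDeadlineSeconds == nil || pod.Status.StartTime == nil {
		return false
	}
	deadline := time.Duration(*pod.Spec.ActiveDeadlineSeconds) * time.Second
	return now.Sub(pod.Status.StartTime.Time) >= deadline
}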
Example 7: TestValidateObjectMetaUpdateIgnoresCreationTimestamp
func TestValidateObjectMetaUpdateIgnoresCreationTimestamp(t *testing.T) {
if errs := ValidateObjectMetaUpdate(
&metav1.ObjectMeta{Name: "test", ResourceVersion: "1"},
&metav1.ObjectMeta{Name: "test", ResourceVersion: "1", CreationTimestamp: metav1.NewTime(time.Unix(10, 0))},
field.NewPath("field"),
); len(errs) != 0 {
t.Fatalf("unexpected errors: %v", errs)
}
if errs := ValidateObjectMetaUpdate(
&metav1.ObjectMeta{Name: "test", ResourceVersion: "1", CreationTimestamp: metav1.NewTime(time.Unix(10, 0))},
&metav1.ObjectMeta{Name: "test", ResourceVersion: "1"},
field.NewPath("field"),
); len(errs) != 0 {
t.Fatalf("unexpected errors: %v", errs)
}
if errs := ValidateObjectMetaUpdate(
&metav1.ObjectMeta{Name: "test", ResourceVersion: "1", CreationTimestamp: metav1.NewTime(time.Unix(10, 0))},
&metav1.ObjectMeta{Name: "test", ResourceVersion: "1", CreationTimestamp: metav1.NewTime(time.Unix(11, 0))},
field.NewPath("field"),
); len(errs) != 0 {
t.Fatalf("unexpected errors: %v", errs)
}
}
Example 8: markAsDeleting
// markAsDeleting sets the obj's DeletionGracePeriodSeconds to 0, and sets the
// DeletionTimestamp to "now". Finalizers are watching for such updates and will
// finalize the object if their IDs are present in the object's Finalizers list.
func markAsDeleting(obj runtime.Object) (err error) {
objectMeta, kerr := metav1.ObjectMetaFor(obj)
if kerr != nil {
return kerr
}
now := metav1.NewTime(time.Now())
// This handles the Generation bump for resources that don't support graceful
// deletion; for those that do, it is handled in pkg/api/rest/delete.go.
if objectMeta.DeletionTimestamp == nil && objectMeta.Generation > 0 {
objectMeta.Generation++
}
objectMeta.DeletionTimestamp = &now
var zero int64 = 0
objectMeta.DeletionGracePeriodSeconds = &zero
return nil
}
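When writing tests around finalization, it helps to construct the state markAsDeleting leaves behind. A hypothetical helper (the name is illustrative), assuming the metav1 and time imports:

// deletingMeta returns the ObjectMeta shape markAsDeleting produces:
// DeletionTimestamp set to "now" and a zero grace period.
func deletingMeta(name string) metav1.ObjectMeta {
	now := metav1.NewTime(time.Now())
	zero := int64(0)
	return metav1.ObjectMeta{
		Name:                       name,
		DeletionTimestamp:          &now,
		DeletionGracePeriodSeconds: &zero,
	}
}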
Example 9: containerInfoV2ToUserDefinedMetrics
func (sb *summaryBuilder) containerInfoV2ToUserDefinedMetrics(info *cadvisorapiv2.ContainerInfo) []stats.UserDefinedMetric {
type specVal struct {
ref stats.UserDefinedMetricDescriptor
valType cadvisorapiv1.DataType
time time.Time
value float64
}
udmMap := map[string]*specVal{}
for _, spec := range info.Spec.CustomMetrics {
udmMap[spec.Name] = &specVal{
ref: stats.UserDefinedMetricDescriptor{
Name: spec.Name,
Type: stats.UserDefinedMetricType(spec.Type),
Units: spec.Units,
},
valType: spec.Format,
}
}
for _, stat := range info.Stats {
for name, values := range stat.CustomMetrics {
specVal, ok := udmMap[name]
if !ok {
glog.Warningf("spec for custom metric %q is missing from cAdvisor output. Spec: %+v, Metrics: %+v", name, info.Spec, stat.CustomMetrics)
continue
}
for _, value := range values {
// Pick the most recent value
if value.Timestamp.Before(specVal.time) {
continue
}
specVal.time = value.Timestamp
specVal.value = value.FloatValue
if specVal.valType == cadvisorapiv1.IntType {
specVal.value = float64(value.IntValue)
}
}
}
}
var udm []stats.UserDefinedMetric
for _, specVal := range udmMap {
udm = append(udm, stats.UserDefinedMetric{
UserDefinedMetricDescriptor: specVal.ref,
Time: metav1.NewTime(specVal.time),
Value: specVal.value,
})
}
return udm
}
Example 10: TestThresholdsFirstObservedAt
func TestThresholdsFirstObservedAt(t *testing.T) {
hardThreshold := Threshold{
Signal: SignalMemoryAvailable,
Operator: OpLessThan,
Value: ThresholdValue{
Quantity: quantityMustParse("1Gi"),
},
}
now := metav1.Now()
oldTime := metav1.NewTime(now.Time.Add(-1 * time.Minute))
testCases := map[string]struct {
thresholds []Threshold
lastObservedAt thresholdsObservedAt
now time.Time
result thresholdsObservedAt
}{
"empty": {
thresholds: []Threshold{},
lastObservedAt: thresholdsObservedAt{},
now: now.Time,
result: thresholdsObservedAt{},
},
"no-previous-observation": {
thresholds: []Threshold{hardThreshold},
lastObservedAt: thresholdsObservedAt{},
now: now.Time,
result: thresholdsObservedAt{
hardThreshold: now.Time,
},
},
"previous-observation": {
thresholds: []Threshold{hardThreshold},
lastObservedAt: thresholdsObservedAt{
hardThreshold: oldTime.Time,
},
now: now.Time,
result: thresholdsObservedAt{
hardThreshold: oldTime.Time,
},
},
}
for testName, testCase := range testCases {
actual := thresholdsFirstObservedAt(testCase.thresholds, testCase.lastObservedAt, testCase.now)
if !reflect.DeepEqual(actual, testCase.result) {
t.Errorf("Test case: %s, expected: %v, actual: %v", testName, testCase.result, actual)
}
}
}
Example 11: SetNodeCondition
// SetNodeCondition updates a specific node condition via a patch operation.
func SetNodeCondition(c clientset.Interface, node types.NodeName, condition v1.NodeCondition) error {
generatePatch := func(condition v1.NodeCondition) ([]byte, error) {
raw, err := json.Marshal(&[]v1.NodeCondition{condition})
if err != nil {
return nil, err
}
return []byte(fmt.Sprintf(`{"status":{"conditions":%s}}`, raw)), nil
}
condition.LastHeartbeatTime = metav1.NewTime(time.Now())
patch, err := generatePatch(condition)
if err != nil {
return err
}
_, err = c.Core().Nodes().PatchStatus(string(node), patch)
return err
}
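A typical call site, sketched as a hypothetical wrapper; note that SetNodeCondition overwrites LastHeartbeatTime itself, so callers need not set it:

// markNodeReady is a hypothetical wrapper showing how SetNodeCondition is
// invoked; the clientset and node name come from the caller.
func markNodeReady(c clientset.Interface, name types.NodeName) error {
	return SetNodeCondition(c, name, v1.NodeCondition{
		Type:   v1.NodeReady,
		Status: v1.ConditionTrue,
		Reason: "KubeletReady",
	})
}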
Example 12: newPod
func newPod(now time.Time, ready bool, beforeSec int) v1.Pod {
conditionStatus := v1.ConditionFalse
if ready {
conditionStatus = v1.ConditionTrue
}
return v1.Pod{
Status: v1.PodStatus{
Conditions: []v1.PodCondition{
{
Type: v1.PodReady,
LastTransitionTime: metav1.NewTime(now.Add(-1 * time.Duration(beforeSec) * time.Second)),
Status: conditionStatus,
},
},
},
}
}
Example 13: buildSummaryPods
// buildSummaryPods aggregates the container stats in sb.infos by the Pod
// managing each container, returning one PodStats per pod.
// Containers not managed by a Pod are omitted.
func (sb *summaryBuilder) buildSummaryPods() []stats.PodStats {
// Map each container to a pod and update the PodStats with container data
podToStats := map[stats.PodReference]*stats.PodStats{}
for key, cinfo := range sb.infos {
// On systemd hosts using devicemapper, each mount into the container has an
// associated cgroup; we ignore them to avoid duplicate entries in the summary.
// For details on .mount units: http://man7.org/linux/man-pages/man5/systemd.mount.5.html
if strings.HasSuffix(key, ".mount") {
continue
}
// Build the Pod key if this container is managed by a Pod
if !sb.isPodManagedContainer(&cinfo) {
continue
}
ref := sb.buildPodRef(&cinfo)
// Lookup the PodStats for the pod using the PodRef. If none exists, initialize a new entry.
podStats, found := podToStats[ref]
if !found {
podStats = &stats.PodStats{PodRef: ref}
podToStats[ref] = podStats
}
// Update the PodStats entry with the stats from the container by adding it to stats.Containers
containerName := types.GetContainerName(cinfo.Spec.Labels)
if containerName == leaky.PodInfraContainerName {
// Special case for infrastructure container which is hidden from the user and has network stats
podStats.Network = sb.containerInfoV2ToNetworkStats("pod:"+ref.Namespace+"_"+ref.Name, &cinfo)
podStats.StartTime = metav1.NewTime(cinfo.Spec.CreationTime)
} else {
podStats.Containers = append(podStats.Containers, sb.containerInfoV2ToStats(containerName, &cinfo))
}
}
// Add each PodStats to the result
result := make([]stats.PodStats, 0, len(podToStats))
for _, podStats := range podToStats {
// Lookup the volume stats for each pod
podUID := kubetypes.UID(podStats.PodRef.UID)
if vstats, found := sb.fsResourceAnalyzer.GetPodVolumeStats(podUID); found {
podStats.VolumeStats = vstats.Volumes
}
result = append(result, *podStats)
}
return result
}
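The grouping above works because stats.PodReference is a comparable struct and can key a map directly. A reduced sketch of the accumulate-on-first-sight pattern, assuming the stats import:

// statsByPod sketches the map-accumulate pattern: one PodStats per
// PodReference, created the first time the reference is seen.
func statsByPod(refs []stats.PodReference) map[stats.PodReference]*stats.PodStats {
	byPod := map[stats.PodReference]*stats.PodStats{}
	for _, ref := range refs {
		if _, found := byPod[ref]; !found {
			byPod[ref] = &stats.PodStats{PodRef: ref}
		}
	}
	return byPod
}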
Example 14: EventAggregate
// EventAggregate identifies similar events and, when required, groups them
// into a single aggregate event.
func (e *EventAggregator) EventAggregate(newEvent *v1.Event) (*v1.Event, error) {
aggregateKey, localKey := e.keyFunc(newEvent)
now := metav1.NewTime(e.clock.Now())
record := aggregateRecord{localKeys: sets.NewString(), lastTimestamp: now}
e.Lock()
defer e.Unlock()
value, found := e.cache.Get(aggregateKey)
if found {
record = value.(aggregateRecord)
}
// if the last event was far enough in the past, it is not aggregated, and we must reset state
maxInterval := time.Duration(e.maxIntervalInSeconds) * time.Second
interval := now.Time.Sub(record.lastTimestamp.Time)
if interval > maxInterval {
record = aggregateRecord{localKeys: sets.NewString()}
}
record.localKeys.Insert(localKey)
record.lastTimestamp = now
e.cache.Add(aggregateKey, record)
if record.localKeys.Len() < e.maxEvents {
return newEvent, nil
}
// do not grow our local key set any larger than max
record.localKeys.PopAny()
// create a new aggregate event
eventCopy := &v1.Event{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%v.%x", newEvent.InvolvedObject.Name, now.UnixNano()),
Namespace: newEvent.Namespace,
},
Count: 1,
FirstTimestamp: now,
InvolvedObject: newEvent.InvolvedObject,
LastTimestamp: now,
Message: e.messageFunc(newEvent),
Type: newEvent.Type,
Reason: newEvent.Reason,
Source: newEvent.Source,
}
return eventCopy, nil
}
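The aggregate event's name combines the involved object's name with the wall clock in hex nanoseconds, which keeps successive aggregates unique. A self-contained illustration of the same format string:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Same format as the aggregator: "<object name>.<hex nanos>",
	// for example "mypod.14d2f3a9b1c00000".
	fmt.Printf("%v.%x\n", "mypod", time.Now().UnixNano())
}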
Example 15: TestNodeConditionsLastObservedAt
func TestNodeConditionsLastObservedAt(t *testing.T) {
now := metav1.Now()
oldTime := metav1.NewTime(now.Time.Add(-1 * time.Minute))
testCases := map[string]struct {
nodeConditions []v1.NodeConditionType
lastObservedAt nodeConditionsObservedAt
now time.Time
result nodeConditionsObservedAt
}{
"no-previous-observation": {
nodeConditions: []v1.NodeConditionType{v1.NodeMemoryPressure},
lastObservedAt: nodeConditionsObservedAt{},
now: now.Time,
result: nodeConditionsObservedAt{
v1.NodeMemoryPressure: now.Time,
},
},
"previous-observation": {
nodeConditions: []v1.NodeConditionType{v1.NodeMemoryPressure},
lastObservedAt: nodeConditionsObservedAt{
v1.NodeMemoryPressure: oldTime.Time,
},
now: now.Time,
result: nodeConditionsObservedAt{
v1.NodeMemoryPressure: now.Time,
},
},
"old-observation": {
nodeConditions: []v1.NodeConditionType{},
lastObservedAt: nodeConditionsObservedAt{
v1.NodeMemoryPressure: oldTime.Time,
},
now: now.Time,
result: nodeConditionsObservedAt{
v1.NodeMemoryPressure: oldTime.Time,
},
},
}
for testName, testCase := range testCases {
actual := nodeConditionsLastObservedAt(testCase.nodeConditions, testCase.lastObservedAt, testCase.now)
if !reflect.DeepEqual(actual, testCase.result) {
t.Errorf("Test case: %s, expected: %v, actual: %v", testName, testCase.result, actual)
}
}
}