This page collects typical usage examples of the C# method Raven.Imports.Newtonsoft.Json.JsonTextWriter.Flush. If you have been wondering what JsonTextWriter.Flush does, how to call it, or what real-world usage looks like, the curated examples below should help. They are also a good starting point for exploring the containing class, Raven.Imports.Newtonsoft.Json.JsonTextWriter.
The following 15 code examples of JsonTextWriter.Flush are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better C# samples.
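Before diving into the collected examples, here is a minimal sketch of the basic pattern they all share. This sketch is an illustration, not one of the collected examples; Raven.Imports.Newtonsoft.Json is RavenDB's bundled fork of Json.NET, so JsonTextWriter here behaves like the standard Newtonsoft.Json writer:

using System;
using System.IO;
using System.Text;
using Raven.Imports.Newtonsoft.Json;

class FlushDemo
{
    static void Main()
    {
        var ms = new MemoryStream();
        var writer = new JsonTextWriter(new StreamWriter(ms));

        writer.WriteStartObject();
        writer.WritePropertyName("Status");
        writer.WriteValue("OK");
        writer.WriteEndObject();

        // The StreamWriter underneath buffers its output, so without this
        // Flush the MemoryStream could still be empty or hold a partial
        // payload. JsonTextWriter.Flush flushes the writer and its
        // underlying TextWriter.
        writer.Flush();

        Console.WriteLine(Encoding.UTF8.GetString(ms.ToArray())); // {"Status":"OK"}
    }
}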
Example 1: StreamToClient
private void StreamToClient(Stream stream, int pageSize, Etag etag, OrderedPartCollection<AbstractFileReadTrigger> readTriggers)
{
    using (var cts = new CancellationTokenSource())
    using (var timeout = cts.TimeoutAfter(FileSystemsLandlord.SystemConfiguration.DatabaseOperationTimeout))
    using (var writer = new JsonTextWriter(new StreamWriter(stream)))
    {
        writer.WriteStartObject();
        writer.WritePropertyName("Results");
        writer.WriteStartArray();

        Storage.Batch(accessor =>
        {
            var files = accessor.GetFilesAfter(etag, pageSize);
            foreach (var file in files)
            {
                if (readTriggers.CanReadFile(file.FullPath, file.Metadata, ReadOperation.Load) == false)
                    continue;

                timeout.Delay();

                var doc = RavenJObject.FromObject(file);
                doc.WriteTo(writer);
                writer.WriteRaw(Environment.NewLine);
            }
        });

        writer.WriteEndArray();
        writer.WriteEndObject();
        writer.Flush();
    }
}
Example 2: StreamToClient
private void StreamToClient(Stream stream, int pageSize, Etag etag)
{
    using (var cts = new CancellationTokenSource())
    using (var timeout = cts.TimeoutAfter(FileSystemsLandlord.SystemConfiguration.DatabaseOperationTimeout))
    using (var writer = new JsonTextWriter(new StreamWriter(stream)))
    {
        writer.WriteStartObject();
        writer.WritePropertyName("Results");
        writer.WriteStartArray();

        Storage.Batch(accessor =>
        {
            var files = accessor.GetFilesAfter(etag, pageSize);
            foreach (var file in files)
            {
                timeout.Delay();

                var doc = RavenJObject.FromObject(file);
                doc.WriteTo(writer);
                writer.WriteRaw(Environment.NewLine);
            }
        });

        writer.WriteEndArray();
        writer.WriteEndObject();
        writer.Flush();
    }
}
Example 3: Respond
public override void Respond(IHttpContext context)
{
    using (context.Response.Streaming())
    {
        context.Response.ContentType = "application/json; charset=utf-8";

        using (var writer = new JsonTextWriter(new StreamWriter(context.Response.OutputStream)))
        {
            writer.WriteStartObject();
            writer.WritePropertyName("Results");
            writer.WriteStartArray();

            Database.TransactionalStorage.Batch(accessor =>
            {
                var startsWith = context.Request.QueryString["startsWith"];
                int pageSize = context.GetPageSize(int.MaxValue);
                if (string.IsNullOrEmpty(context.Request.QueryString["pageSize"]))
                    pageSize = int.MaxValue;

                // we may be sending a LOT of documents to the user, and most
                // of them aren't going to be relevant for other ops, so we are going to skip
                // the cache for that, to avoid filling it up very quickly
                using (DocumentCacher.SkipSettingDocumentsInDocumentCache())
                {
                    if (string.IsNullOrEmpty(startsWith))
                    {
                        Database.GetDocuments(context.GetStart(), pageSize, context.GetEtagFromQueryString(),
                                              doc => doc.WriteTo(writer));
                    }
                    else
                    {
                        Database.GetDocumentsWithIdStartingWith(
                            startsWith,
                            context.Request.QueryString["matches"],
                            context.Request.QueryString["exclude"],
                            context.GetStart(),
                            pageSize,
                            doc => doc.WriteTo(writer));
                    }
                }
            });

            writer.WriteEndArray();
            writer.WriteEndObject();
            writer.Flush();
        }
    }
}
Example 4: ExecuteResult
public override void ExecuteResult(ControllerContext context)
{
    if (context == null)
        throw new ArgumentNullException("context");

    var response = context.HttpContext.Response;
    response.ContentType = !string.IsNullOrEmpty(ContentType) ? ContentType : "application/json";
    if (ContentEncoding != null)
        response.ContentEncoding = ContentEncoding;
    if (Data == null)
        return;

    var writer = new JsonTextWriter(response.Output) { Formatting = Formatting };
    var serializer = JsonSerializer.Create(SerializerSettings);
    serializer.Serialize(writer, Data);
    writer.Flush();
}
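For context, a hedged sketch of how a custom ActionResult like Example 4 is typically consumed from a controller action. The class name JsonNetResult and the controller are assumptions for illustration (the example above only shows the ExecuteResult override), while the Data and Formatting properties appear in the example itself:

public class TodoController : Controller
{
    public ActionResult Get(int id)
    {
        // Returning the custom result routes serialization through
        // JsonTextWriter instead of the framework's default serializer.
        return new JsonNetResult
        {
            Data = new { Id = id, Title = "Sample" },
            Formatting = Formatting.Indented
        };
    }
}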
Example 5: Save
public Guid Save(ToDo todo)
{
    database.BeginTransaction();

    var id = Guid.NewGuid();
    var ms = new MemoryStream();
    var jsonTextWriter = new JsonTextWriter(new StreamWriter(ms));
    new JsonSerializer().Serialize(jsonTextWriter, todo);
    jsonTextWriter.Flush();

    todos.Put(id.ToByteArray(), ms.ToArray());
    database.Commit();
    return id;
}
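Example 5 only shows the write path. As a counterpart, here is a hedged sketch of what the matching read path could look like; todos.Get is an assumed accessor (the example above only demonstrates todos.Put), and the JsonTextReader usage mirrors the writer setup:

public ToDo Load(Guid id)
{
    // Assumed: todos.Get returns the raw bytes stored by Save above.
    var bytes = todos.Get(id.ToByteArray());
    using (var reader = new JsonTextReader(new StreamReader(new MemoryStream(bytes))))
    {
        return new JsonSerializer().Deserialize<ToDo>(reader);
    }
}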
Example 6: ExecuteResult
public override void ExecuteResult(ControllerContext context)
{
    if(context == null) {
        throw new ArgumentNullException("context");
    }

    var response = context.HttpContext.Response;
    response.ContentType = "application/json";
    if(data == null) {
        return;
    }

    var writer = new JsonTextWriter(response.Output);
    var serializer = new JsonSerializer();
    serializer.Configure();
    serializer.Serialize(writer, data);
    writer.Flush();
}
Example 7: StreamToClient
private void StreamToClient(Stream stream, ExportOptions options, Lazy<NameValueCollection> headers, IPrincipal user)
{
    var old = CurrentOperationContext.Headers.Value;
    var oldUser = CurrentOperationContext.User.Value;
    try
    {
        CurrentOperationContext.Headers.Value = headers;
        CurrentOperationContext.User.Value = user;

        Database.TransactionalStorage.Batch(accessor =>
        {
            var bufferStream = new BufferedStream(stream, 1024 * 64);
            using (var cts = new CancellationTokenSource())
            using (var timeout = cts.TimeoutAfter(DatabasesLandlord.SystemConfiguration.DatabaseOperationTimeout))
            using (var streamWriter = new StreamWriter(bufferStream))
            using (var writer = new JsonTextWriter(streamWriter))
            {
                writer.WriteStartObject();
                writer.WritePropertyName("Results");
                writer.WriteStartArray();

                var exporter = new SmugglerExporter(Database, options);
                exporter.Export(item => WriteToStream(writer, item, timeout), cts.Token);

                writer.WriteEndArray();
                writer.WriteEndObject();
                writer.Flush();
                bufferStream.Flush();
            }
        });
    }
    finally
    {
        CurrentOperationContext.Headers.Value = old;
        CurrentOperationContext.User.Value = oldUser;
    }
}
Example 8: Respond
public override void Respond(IHttpContext context)
{
    context.Response.BufferOutput = false;

    using (var writer = new JsonTextWriter(new StreamWriter(context.Response.OutputStream)))
    {
        writer.WriteStartObject();
        writer.WritePropertyName("Results");
        writer.WriteStartArray();

        Database.TransactionalStorage.Batch(accessor =>
        {
            var startsWith = context.Request.QueryString["startsWith"];
            int pageSize = context.GetPageSize(int.MaxValue);
            if (string.IsNullOrEmpty(context.Request.QueryString["pageSize"]))
                pageSize = int.MaxValue;

            if (string.IsNullOrEmpty(startsWith))
            {
                Database.GetDocuments(context.GetStart(), pageSize, context.GetEtagFromQueryString(),
                                      doc => doc.WriteTo(writer));
            }
            else
            {
                Database.GetDocumentsWithIdStartingWith(
                    startsWith,
                    context.Request.QueryString["matches"],
                    context.GetStart(),
                    pageSize,
                    doc => doc.WriteTo(writer));
            }
        });

        writer.WriteEndArray();
        writer.WriteEndObject();
        writer.Flush();
    }
}
Example 9: CreateInfoPackageForDatabase
public static void CreateInfoPackageForDatabase(ZipArchive package, DocumentDatabase database, RequestManager requestManager, string zipEntryPrefix = null)
{
    zipEntryPrefix = zipEntryPrefix ?? string.Empty;

    var databaseName = database.Name;
    if (string.IsNullOrWhiteSpace(databaseName))
        databaseName = Constants.SystemDatabase;

    var jsonSerializer = JsonExtensions.CreateDefaultJsonSerializer();
    jsonSerializer.Formatting = Formatting.Indented;

    if (database.StartupTasks.OfType<ReplicationTask>().Any())
    {
        var replication = package.CreateEntry(zipEntryPrefix + "replication.json", compressionLevel);
        using (var statsStream = replication.Open())
        using (var streamWriter = new StreamWriter(statsStream))
        {
            jsonSerializer.Serialize(streamWriter, ReplicationUtils.GetReplicationInformation(database));
            streamWriter.Flush();
        }
    }

    var sqlReplicationTask = database.StartupTasks.OfType<SqlReplicationTask>().FirstOrDefault();
    if (sqlReplicationTask != null)
    {
        var replication = package.CreateEntry(zipEntryPrefix + "sql_replication.json", compressionLevel);
        using (var statsStream = replication.Open())
        using (var streamWriter = new StreamWriter(statsStream))
        {
            jsonSerializer.Serialize(streamWriter, sqlReplicationTask.Statistics);
            streamWriter.Flush();
        }
    }

    var stats = package.CreateEntry(zipEntryPrefix + "stats.json", compressionLevel);
    using (var statsStream = stats.Open())
    using (var streamWriter = new StreamWriter(statsStream))
    {
        jsonSerializer.Serialize(streamWriter, database.Statistics);
        streamWriter.Flush();
    }

    var metrics = package.CreateEntry(zipEntryPrefix + "metrics.json", compressionLevel);
    using (var metricsStream = metrics.Open())
    using (var streamWriter = new StreamWriter(metricsStream))
    {
        jsonSerializer.Serialize(streamWriter, database.CreateMetrics());
        streamWriter.Flush();
    }

    var logs = package.CreateEntry(zipEntryPrefix + "logs.csv", compressionLevel);
    using (var logsStream = logs.Open())
    using (var streamWriter = new StreamWriter(logsStream))
    {
        var target = LogManager.GetTarget<DatabaseMemoryTarget>();
        if (target == null)
        {
            streamWriter.WriteLine("DatabaseMemoryTarget was not registered in the log manager, logs are not available");
        }
        else
        {
            var boundedMemoryTarget = target[databaseName];
            var log = boundedMemoryTarget.GeneralLog;

            streamWriter.WriteLine("time,logger,level,message,exception");
            foreach (var logEvent in log)
            {
                streamWriter.WriteLine("{0:O},{1},{2},{3},{4}", logEvent.TimeStamp, logEvent.LoggerName, logEvent.Level, logEvent.FormattedMessage, logEvent.Exception);
            }
        }
        streamWriter.Flush();
    }

    var config = package.CreateEntry(zipEntryPrefix + "config.json", compressionLevel);
    using (var configStream = config.Open())
    using (var streamWriter = new StreamWriter(configStream))
    using (var jsonWriter = new JsonTextWriter(streamWriter) { Formatting = Formatting.Indented })
    {
        GetConfigForDebug(database).WriteTo(jsonWriter, new EtagJsonConverter());
        jsonWriter.Flush();
    }

    var indexes = package.CreateEntry(zipEntryPrefix + "indexes.json", compressionLevel);
    using (var indexesStream = indexes.Open())
    using (var streamWriter = new StreamWriter(indexesStream))
    {
        jsonSerializer.Serialize(streamWriter, database.IndexDefinitionStorage.IndexDefinitions.ToDictionary(x => x.Key, x => x.Value));
        streamWriter.Flush();
    }

    var currentlyIndexing = package.CreateEntry(zipEntryPrefix + "currently-indexing.json", compressionLevel);
    using (var currentlyIndexingStream = currentlyIndexing.Open())
//......... the rest of this example is omitted .........
Example 10: SendMessage
protected virtual async Task SendMessage(MemoryStream memoryStream, JsonSerializer serializer, object message, WebSocketSendAsync sendAsync, CancellationToken callCancelled)
{
    memoryStream.Position = 0;

    var jsonTextWriter = new JsonTextWriter(new StreamWriter(memoryStream));
    serializer.Serialize(jsonTextWriter, message);
    jsonTextWriter.Flush();

    var arraySegment = new ArraySegment<byte>(memoryStream.GetBuffer(), 0, (int)memoryStream.Position);
    await sendAsync(arraySegment, 1, true, callCancelled);
}
Example 11: StreamToClient
private void StreamToClient(long id, SubscriptionActions subscriptions, Stream stream)
{
    var sentDocuments = false;

    var bufferStream = new BufferedStream(stream, 1024 * 64);
    var lastBatchSentTime = Stopwatch.StartNew();

    using (var writer = new JsonTextWriter(new StreamWriter(bufferStream)))
    {
        var options = subscriptions.GetBatchOptions(id);

        writer.WriteStartObject();
        writer.WritePropertyName("Results");
        writer.WriteStartArray();

        using (var cts = new CancellationTokenSource())
        using (var timeout = cts.TimeoutAfter(DatabasesLandlord.SystemConfiguration.DatabaseOperationTimeout))
        {
            Etag lastProcessedDocEtag = null;

            var batchSize = 0;
            var batchDocCount = 0;
            var processedDocumentsCount = 0;
            var hasMoreDocs = false;

            var config = subscriptions.GetSubscriptionConfig(id);
            var startEtag = config.AckEtag;
            var criteria = config.Criteria;

            bool isPrefixCriteria = !string.IsNullOrWhiteSpace(criteria.KeyStartsWith);

            Func<JsonDocument, bool> addDocument = doc =>
            {
                timeout.Delay();

                if (doc == null)
                {
                    // we only have this heartbeat when the streaming has gone on for a long time
                    // and we haven't sent anything to the user in a while (because of filtering, skipping, etc).
                    writer.WriteRaw(Environment.NewLine);
                    writer.Flush();

                    if (lastBatchSentTime.ElapsedMilliseconds > 30000)
                        return false;

                    return true;
                }

                processedDocumentsCount++;

                // We cant continue because we have already maxed out the batch bytes size.
                if (options.MaxSize.HasValue && batchSize >= options.MaxSize)
                    return false;

                // We cant continue because we have already maxed out the amount of documents to send.
                if (batchDocCount >= options.MaxDocCount)
                    return false;

                // We can continue because we are ignoring system documents.
                if (doc.Key.StartsWith("Raven/", StringComparison.InvariantCultureIgnoreCase))
                    return true;

                // We can continue because we are ignoring the document as it doesn't fit the criteria.
                if (MatchCriteria(criteria, doc) == false)
                    return true;

                doc.ToJson().WriteTo(writer);
                writer.WriteRaw(Environment.NewLine);

                batchSize += doc.SerializedSizeOnDisk;
                batchDocCount++;

                return true; // We get the next document
            };

            var retries = 0;
            do
            {
                var lastProcessedDocumentsCount = processedDocumentsCount;

                Database.TransactionalStorage.Batch(accessor =>
                {
                    // we may be sending a LOT of documents to the user, and most
                    // of them aren't going to be relevant for other ops, so we are going to skip
                    // the cache for that, to avoid filling it up very quickly
                    using (DocumentCacher.SkipSetAndGetDocumentsInDocumentCache())
                    {
                        if (isPrefixCriteria)
                        {
                            // If we don't get any document from GetDocumentsWithIdStartingWith it could be that we are in presence of a lagoon of uninteresting documents, so we are hitting a timeout.
                            lastProcessedDocEtag = Database.Documents.GetDocumentsWithIdStartingWith(criteria.KeyStartsWith, options.MaxDocCount - batchDocCount, startEtag, cts.Token, addDocument);

                            hasMoreDocs = false;
                        }
                        else
                        {
                            // It doesn't matter if we match the criteria or not, the document has been already processed.
                            lastProcessedDocEtag = Database.Documents.GetDocuments(-1, options.MaxDocCount - batchDocCount, startEtag, cts.Token, addDocument);

                            // If we don't get any document from GetDocuments it may be a signal that something is wrong.
                            if (lastProcessedDocEtag == null)
                            {
                                hasMoreDocs = false;
//......... the rest of this example is omitted .........
Example 12: FloatingPointNonFiniteNumbers
public void FloatingPointNonFiniteNumbers()
{
    StringBuilder sb = new StringBuilder();
    StringWriter sw = new StringWriter(sb);

    using (JsonWriter jsonWriter = new JsonTextWriter(sw))
    {
        jsonWriter.Formatting = Formatting.Indented;

        jsonWriter.WriteStartArray();
        jsonWriter.WriteValue(double.NaN);
        jsonWriter.WriteValue(double.PositiveInfinity);
        jsonWriter.WriteValue(double.NegativeInfinity);
        jsonWriter.WriteValue(float.NaN);
        jsonWriter.WriteValue(float.PositiveInfinity);
        jsonWriter.WriteValue(float.NegativeInfinity);
        jsonWriter.WriteEndArray();

        jsonWriter.Flush();
    }

    string expected = @"[
  NaN,
  Infinity,
  -Infinity,
  NaN,
  Infinity,
  -Infinity
]";
    string result = sb.ToString();

    Assert.AreEqual(expected, result);
}
Example 13: StreamToClient
private void StreamToClient(long id, SubscriptionActions subscriptions, Stream stream)
{
    var sentDocuments = false;

    using (var streamWriter = new StreamWriter(stream))
    using (var writer = new JsonTextWriter(streamWriter))
    {
        var options = subscriptions.GetBatchOptions(id);

        writer.WriteStartObject();
        writer.WritePropertyName("Results");
        writer.WriteStartArray();

        using (var cts = new CancellationTokenSource())
        using (var timeout = cts.TimeoutAfter(DatabasesLandlord.SystemConfiguration.DatabaseOperationTimeout))
        {
            Etag lastProcessedDocEtag = null;

            var batchSize = 0;
            var batchDocCount = 0;
            var hasMoreDocs = false;

            var config = subscriptions.GetSubscriptionConfig(id);
            var startEtag = config.AckEtag;
            var criteria = config.Criteria;

            Action<JsonDocument> addDocument = doc =>
            {
                timeout.Delay();

                if (options.MaxSize.HasValue && batchSize >= options.MaxSize)
                    return;

                if (batchDocCount >= options.MaxDocCount)
                    return;

                lastProcessedDocEtag = doc.Etag;

                if (doc.Key.StartsWith("Raven/", StringComparison.InvariantCultureIgnoreCase))
                    return;

                if (MatchCriteria(criteria, doc) == false)
                    return;

                doc.ToJson().WriteTo(writer);
                writer.WriteRaw(Environment.NewLine);

                batchSize += doc.SerializedSizeOnDisk;
                batchDocCount++;
            };

            int nextStart = 0;

            do
            {
                Database.TransactionalStorage.Batch(accessor =>
                {
                    // we may be sending a LOT of documents to the user, and most
                    // of them aren't going to be relevant for other ops, so we are going to skip
                    // the cache for that, to avoid filling it up very quickly
                    using (DocumentCacher.SkipSettingDocumentsInDocumentCache())
                    {
                        if (!string.IsNullOrWhiteSpace(criteria.KeyStartsWith))
                        {
                            Database.Documents.GetDocumentsWithIdStartingWith(criteria.KeyStartsWith, options.MaxDocCount - batchDocCount, startEtag, cts.Token, addDocument);
                        }
                        else
                        {
                            Database.Documents.GetDocuments(-1, options.MaxDocCount - batchDocCount, startEtag, cts.Token, addDocument);
                        }
                    }

                    if (lastProcessedDocEtag == null)
                        hasMoreDocs = false;
                    else
                    {
                        var lastDocEtag = accessor.Staleness.GetMostRecentDocumentEtag();
                        hasMoreDocs = EtagUtil.IsGreaterThan(lastDocEtag, lastProcessedDocEtag);

                        startEtag = lastProcessedDocEtag;
                    }
                });
            } while (hasMoreDocs && batchDocCount < options.MaxDocCount && (options.MaxSize.HasValue == false || batchSize < options.MaxSize));

            writer.WriteEndArray();

            if (batchDocCount > 0)
            {
                writer.WritePropertyName("LastProcessedEtag");
                writer.WriteValue(lastProcessedDocEtag.ToString());

                sentDocuments = true;
            }

            writer.WriteEndObject();
            writer.Flush();
        }
    }

    if (sentDocuments)
//......... the rest of this example is omitted .........
Example 14: StreamToClient
private void StreamToClient(Stream stream, string startsWith, int start, int pageSize, Etag etag, string matches, int nextPageStart, string skipAfter,
                            Lazy<NameValueCollection> headers, IPrincipal user)
{
    var old = CurrentOperationContext.Headers.Value;
    var oldUser = CurrentOperationContext.User.Value;
    try
    {
        CurrentOperationContext.Headers.Value = headers;
        CurrentOperationContext.User.Value = user;

        var bufferStream = new BufferedStream(stream, 1024 * 64);
        using (var cts = new CancellationTokenSource())
        using (var timeout = cts.TimeoutAfter(DatabasesLandlord.SystemConfiguration.DatabaseOperationTimeout))
        using (var writer = new JsonTextWriter(new StreamWriter(bufferStream)))
        {
            writer.WriteStartObject();
            writer.WritePropertyName("Results");
            writer.WriteStartArray();

            Action<JsonDocument> addDocument = doc =>
            {
                timeout.Delay();
                doc.ToJson().WriteTo(writer);
                writer.WriteRaw(Environment.NewLine);
            };

            Database.TransactionalStorage.Batch(accessor =>
            {
                // we may be sending a LOT of documents to the user, and most
                // of them aren't going to be relevant for other ops, so we are going to skip
                // the cache for that, to avoid filling it up very quickly
                using (DocumentCacher.SkipSetAndGetDocumentsInDocumentCache())
                {
                    if (string.IsNullOrEmpty(startsWith))
                    {
                        Database.Documents.GetDocuments(start, pageSize, etag, cts.Token, addDocument);
                    }
                    else
                    {
                        var nextPageStartInternal = nextPageStart;

                        Database.Documents.GetDocumentsWithIdStartingWith(startsWith, matches, null, start, pageSize, cts.Token, ref nextPageStartInternal, addDocument, skipAfter: skipAfter);

                        nextPageStart = nextPageStartInternal;
                    }
                }
            });

            writer.WriteEndArray();
            writer.WritePropertyName("NextPageStart");
            writer.WriteValue(nextPageStart);
            writer.WriteEndObject();

            writer.Flush();
            bufferStream.Flush();
        }
    }
    finally
    {
        CurrentOperationContext.Headers.Value = old;
        CurrentOperationContext.User.Value = oldUser;
    }
}
Example 15: StreamToClient
private void StreamToClient(Stream stream, string startsWith, int start, int pageSize, Etag etag, string matches, int nextPageStart, string skipAfter)
{
    using (var cts = new CancellationTokenSource())
    using (var timeout = cts.TimeoutAfter(DatabasesLandlord.SystemConfiguration.DatabaseOperationTimeout))
    using (var writer = new JsonTextWriter(new StreamWriter(stream)))
    {
        writer.WriteStartObject();
        writer.WritePropertyName("Results");
        writer.WriteStartArray();

        Database.TransactionalStorage.Batch(accessor =>
        {
            // we may be sending a LOT of documents to the user, and most
            // of them aren't going to be relevant for other ops, so we are going to skip
            // the cache for that, to avoid filling it up very quickly
            using (DocumentCacher.SkipSettingDocumentsInDocumentCache())
            {
                if (string.IsNullOrEmpty(startsWith))
                {
                    Database.Documents.GetDocuments(start, pageSize, etag, cts.Token, doc =>
                    {
                        timeout.Delay();
                        doc.WriteTo(writer);
                        writer.WriteRaw(Environment.NewLine);
                    });
                }
                else
                {
                    var nextPageStartInternal = nextPageStart;

                    Database.Documents.GetDocumentsWithIdStartingWith(startsWith, matches, null, start, pageSize, cts.Token, ref nextPageStartInternal, doc =>
                    {
                        timeout.Delay();
                        doc.WriteTo(writer);
                        writer.WriteRaw(Environment.NewLine);
                    }, skipAfter: skipAfter);

                    nextPageStart = nextPageStartInternal;
                }
            }
        });

        writer.WriteEndArray();
        writer.WritePropertyName("NextPageStart");
        writer.WriteValue(nextPageStart);
        writer.WriteEndObject();
        writer.Flush();
    }
}