This article collects typical usage examples of the Writer.Flush method from Go's compress/gzip package. If you are wondering what Writer.Flush does or how to call it, the hand-picked examples below should help; they also illustrate the broader use of the compress/gzip.Writer type itself.
Seven code examples of Writer.Flush are shown below, ordered by popularity.
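Before the examples, here is a minimal self-contained sketch (written for this page, not taken from any of the projects below) of what Flush actually does: a gzip.Writer buffers compressed output internally, so small writes typically do not reach the underlying io.Writer until Flush or Close is called. Flush pushes out everything written so far without ending the stream; Close is still required at the end to write the gzip footer.

package main

import (
    "bytes"
    "compress/gzip"
    "fmt"
    "log"
)

func main() {
    var buf bytes.Buffer
    zw := gzip.NewWriter(&buf)

    zw.Write([]byte("hello, "))
    fmt.Println("before Flush:", buf.Len(), "bytes") // typically just the gzip header so far

    if err := zw.Flush(); err != nil { // force the buffered, compressed data into buf
        log.Fatal(err)
    }
    fmt.Println("after Flush:", buf.Len(), "bytes") // "hello, " is now decodable by a reader

    zw.Write([]byte("world"))
    if err := zw.Close(); err != nil { // flushes the remainder and writes the gzip footer
        log.Fatal(err)
    }
    fmt.Println("after Close:", buf.Len(), "bytes")
}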
Example 1: webQuitHandler
func webQuitHandler(diskwriter *csv.Writer, gzipwriter *gzip.Writer, csvfile *os.File) http.HandlerFunc {
    return func(w http.ResponseWriter, r *http.Request) {
        fmt.Fprintf(w, "flushing to disk and shutting down")
        diskwriter.Flush()
        if gzipwriter != nil {
            gzipwriter.Flush()
            gzipwriter.Close()
        }
        csvfile.Close()
        os.Exit(0)
    }
}
Example 2: getPubmedRecords
func getPubmedRecords(urlFetcher *gopubmed.Fetcher, first bool, meshWriter *gzip.Writer, xmlWriter *gzip.Writer, transport *http.Transport, pmids []string) {
    preUrlTime := time.Now()
    articles, raw, err := urlFetcher.GetArticlesAndRaw(pmids)
    if err != nil {
        log.Fatal(err)
    }
    s := string(raw[:len(raw)])
    for i := 0; i < len(articles); i++ {
        pubmedArticle := articles[i]
        if pubmedArticle.MedlineCitation != nil && pubmedArticle.MedlineCitation.MeshHeadingList != nil && pubmedArticle.MedlineCitation.MeshHeadingList.MeshHeading != nil {
            fmt.Fprint(meshWriter, articles[i].MedlineCitation.PMID.Text)
            for j := 0; j < len(pubmedArticle.MedlineCitation.MeshHeadingList.MeshHeading); j++ {
                fmt.Fprint(meshWriter, "|")
                fmt.Fprint(meshWriter, pubmedArticle.MedlineCitation.MeshHeadingList.MeshHeading[j].DescriptorName.Attr_UI)
                fmt.Fprint(meshWriter, "::"+pubmedArticle.MedlineCitation.MeshHeadingList.MeshHeading[j].DescriptorName.Text)
                if len(pubmedArticle.MedlineCitation.MeshHeadingList.MeshHeading[j].QualifierName) > 0 {
                    fmt.Fprint(meshWriter, "=")
                    for q := 0; q < len(pubmedArticle.MedlineCitation.MeshHeadingList.MeshHeading[j].QualifierName); q++ {
                        if q != 0 {
                            fmt.Fprint(meshWriter, "&")
                        }
                        fmt.Fprint(meshWriter, pubmedArticle.MedlineCitation.MeshHeadingList.MeshHeading[j].QualifierName[q].Attr_UI)
                        fmt.Fprint(meshWriter, "::"+pubmedArticle.MedlineCitation.MeshHeadingList.MeshHeading[j].QualifierName[q].Text)
                    }
                }
            }
            fmt.Fprintln(meshWriter, "")
        }
    }
    meshWriter.Flush()
    if !first {
        s = strings.Replace(s, startXml, "", -1)
        s = strings.Replace(s, docType, "", -1)
        s = strings.Replace(s, startPubmedArticleSet, "", -1)
    }
    s = strings.Replace(s, endPubmedArticleSet, "<!-- breakset -->", -1)
    xmlWriter.Write([]byte(s))
    postUrlTime := time.Now()
    log.Println("Total request time:", postUrlTime.Sub(preUrlTime))
}
Example 3: main
func main() {
    log.SetFlags(log.LstdFlags | log.Lshortfile)
    tr := &http.Transport{
        ResponseHeaderTimeout: time.Second * 500,
        DisableKeepAlives:     false,
        DisableCompression:    false,
    }
    var wXml *gzip.Writer = nil
    var ww *bufio.Writer = nil
    var xFile *os.File = nil
    meshFile, err2 := os.Create(meshFile)
    if err2 != nil {
        return
    }
    defer meshFile.Close()
    wwMesh := bufio.NewWriter(meshFile)
    wMesh := gzip.NewWriter(wwMesh)
    //w := bufio.NewWriter(file)
    numIdsPerUrl := findNumIdsPerUrl()
    pmids := make([]string, numIdsPerUrl)
    urlFetcher := gopubmed.Fetcher{
        Ssl: false,
        Transport: &http.Transport{
            ResponseHeaderTimeout: time.Second * 500,
            DisableKeepAlives:     false,
            DisableCompression:    false,
            //TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
        },
    }
    allCount := 0
    count := 0
    reader, err := makeReader()
    if err != nil {
        log.Fatal(err)
    }
    first := true
    chunkCount := 0
    for {
        thisNumIdsPerUrl := findNumIdsPerUrl()
        if numIdsPerUrl != thisNumIdsPerUrl {
            numIdsPerUrl = thisNumIdsPerUrl
            pmids = make([]string, numIdsPerUrl)
        }
        line, err := reader.ReadString('\n')
        if err != nil {
            // You may check here if err == io.EOF
            break
        }
        line = strings.TrimSpace(line)
        err = lineChecker(line)
        if err != nil {
            log.Fatal(err)
        }
        //log.Println(line)
        pmids[count] = line
        if wXml == nil {
            wXml, ww, xFile = makeXmlWriter(allCount, pmids[0])
        }
        count = count + 1
        // Collected enough pmids: get their XML from NIH
        if count == numIdsPerUrl {
            getPubmedRecords(&urlFetcher, first, wMesh, wXml, tr, pmids)
            checkTime()
            first = false
            count = 0
            zeroArray(pmids)
        } else {
        }
        allCount += 1
        chunkCount += 1
        // Start new xml file: close old one: open new one
        if chunkCount > recordsPerFile {
            fmt.Fprintln(wXml, endPubmedArticleSet)
            wXml.Flush()
            wXml.Close()
            ww.Flush()
            wXml, ww, xFile = makeXmlWriter(allCount, pmids[0])
            chunkCount = 0
            first = true
        }
        if allCount%500 == 0 {
            log.Println(allCount)
        }
    }
    if count != 0 {
        getPubmedRecords(&urlFetcher, first, wMesh, wXml, tr, pmids)
    }
    fmt.Fprintln(wXml, endPubmedArticleSet)
//......... (the rest of this example is omitted) .........
Example 4: sendContinuousChangesByWebSocket
func (h *handler) sendContinuousChangesByWebSocket(inChannels base.Set, options db.ChangesOptions) (error, bool) {
    forceClose := false
    handler := func(conn *websocket.Conn) {
        h.logStatus(101, "Upgraded to WebSocket protocol")
        defer func() {
            conn.Close()
            base.LogTo("HTTP+", "#%03d: --> WebSocket closed", h.serialNumber)
        }()
        // Read changes-feed options from an initial incoming WebSocket message in JSON format:
        var compress bool
        if msg, err := readWebSocketMessage(conn); err != nil {
            return
        } else {
            var channelNames []string
            var err error
            if _, options, _, channelNames, _, compress, err = h.readChangesOptionsFromJSON(msg); err != nil {
                return
            }
            if channelNames != nil {
                inChannels, _ = channels.SetFromArray(channelNames, channels.ExpandStar)
            }
        }
        // Set up GZip compression
        var writer *bytes.Buffer
        var zipWriter *gzip.Writer
        if compress {
            writer = bytes.NewBuffer(nil)
            zipWriter = GetGZipWriter(writer)
        }
        caughtUp := false
        _, forceClose = h.generateContinuousChanges(inChannels, options, func(changes []*db.ChangeEntry) error {
            var data []byte
            if changes != nil {
                data, _ = json.Marshal(changes)
            } else if !caughtUp {
                caughtUp = true
                data, _ = json.Marshal([]*db.ChangeEntry{})
            } else {
                data = []byte{}
            }
            if compress && len(data) > 8 {
                // Compress JSON, using same GZip context, and send as binary msg:
                zipWriter.Write(data)
                zipWriter.Flush()
                data = writer.Bytes()
                writer.Reset()
                conn.PayloadType = websocket.BinaryFrame
            } else {
                conn.PayloadType = websocket.TextFrame
            }
            _, err := conn.Write(data)
            return err
        })
        if zipWriter != nil {
            ReturnGZipWriter(zipWriter)
        }
    }
    server := websocket.Server{
        Handshake: func(*websocket.Config, *http.Request) error { return nil },
        Handler:   handler,
    }
    server.ServeHTTP(h.response, h.rq)
    return nil, forceClose
}
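Example 4's callback reuses one gzip.Writer over a bytes.Buffer for the lifetime of the WebSocket connection, so later messages benefit from the compression context built up by earlier ones; calling Flush rather than Close is what makes the current message's compressed bytes available without ending the stream. A minimal sketch of that pattern (the messageCompressor type and its names are illustrative, not from the project above):

package main

import (
    "bytes"
    "compress/gzip"
    "fmt"
)

// messageCompressor keeps one gzip.Writer alive across messages so the
// compression context is shared between them.
type messageCompressor struct {
    buf bytes.Buffer
    zw  *gzip.Writer
}

func newMessageCompressor() *messageCompressor {
    c := &messageCompressor{}
    c.zw = gzip.NewWriter(&c.buf)
    return c
}

// compress returns the compressed bytes produced for one message. Flush
// (not Close) is used so the stream stays open for the next message.
func (c *messageCompressor) compress(msg []byte) ([]byte, error) {
    if _, err := c.zw.Write(msg); err != nil {
        return nil, err
    }
    if err := c.zw.Flush(); err != nil {
        return nil, err
    }
    out := append([]byte(nil), c.buf.Bytes()...) // copy before reusing the buffer
    c.buf.Reset()
    return out, nil
}

func main() {
    c := newMessageCompressor()
    for _, msg := range []string{`{"seq":1}`, `{"seq":2}`} {
        data, err := c.compress([]byte(msg))
        if err != nil {
            panic(err)
        }
        fmt.Printf("message %q -> %d compressed bytes\n", msg, len(data))
    }
}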
Example 5: processText
func (p Handler) processText(s *Session, w http.ResponseWriter, resp *http.Response) (err error) {
    var (
        zr      *gzip.Reader
        zw      *gzip.Writer
        body    []byte
        gzipped bool   = resp.Header.Get("Content-Encoding") == "gzip"
        reqHost string = resp.Request.URL.Host
        reqPath string = resp.Request.URL.Path
    )
    if resp.ContentLength != 0 && resp.Request.Method != "HEAD" {
        if gzipped {
            zr, err = gzip.NewReader(resp.Body)
            if err == nil {
                body, err = ioutil.ReadAll(zr)
                if !consumeError(&err) {
                    return dumpError(err)
                }
            }
        } else {
            body, err = ioutil.ReadAll(resp.Body)
            if !consumeError(&err) {
                return dumpError(err)
            }
        }
    }
    w.Header().Del("Content-Length")
    w.Header().Set("Content-Encoding", "gzip")
    w.WriteHeader(resp.StatusCode)
    if len(body) <= 0 {
        return
    }
    var (
        rules           []ReRule
        bodyExtraHeader string
    )
    switch p {
    case HD_html:
        rules = reRules.Html
    case HD_javascript:
        rules = reRules.Js
    case HD_json:
        rules = reRules.Json
    case HD_css:
        rules = reRules.Css
    }
    if log.V(5) {
        log.Infof("Original entity %s\n%s", reqPath, string(body))
    }
    if s.abusing {
        imgSrc := fmt.Sprintf(`<img src="/!%s/sorry`, reqHost)
        body = bytes.Replace(body, []byte(`<img src="/sorry`), []byte(imgSrc), 1)
        rules = nil
    }
    for i, r := range rules {
        if r.PathRe != nil && r.PathRe.FindString(reqPath) == NULL {
            if log.V(4) {
                log.Infof("re.%d=[%s] pathRe=deny", i, r.ContentPattern.Pattern)
            }
            continue
        }
        if log.V(4) {
            log.Infof("re.%d=[%s] applied", i, r.ContentPattern.Pattern)
        }
        if r.Scheme&0xff > 0 {
            body = r.ContentRe.Replace(body, r.Replacement)
        }
        if r.Scheme&0xff00 > 0 {
            bodyExtraHeader += r.InsertHeader
        }
    }
    zw = gzip.NewWriter(w)
    if len(bodyExtraHeader) > 0 {
        zw.Write([]byte(bodyExtraHeader))
    }
    zw.Write(body)
    err = zw.Flush()
    return
}
Example 6: daemon
//......... (the beginning of this example is omitted) .........
        gzipwriter, _ = gzip.NewWriterLevel(csvfile, gzip.BestCompression)
        defer gzipwriter.Close()
        // wrap csv around gzipwriter
        diskwriter = csv.NewWriter(gzipwriter)
    } else {
        // create a diskwriter (appends to csv on disk)
        diskwriter = csv.NewWriter(csvfile)
    }
    // connect via telnet to the device and login
    conn, err := p.DialTimeout("tcp", p.device, time.Duration(time.Second*30))
    if err != nil {
        log.Fatal("can't connect")
    }
    // create http handlers
    http.HandleFunc("/quit", webQuitHandler(diskwriter, gzipwriter, csvfile))
    http.HandleFunc("/history", webHistoryHandler)
    http.HandleFunc("/stream", webStreamHandler)
    http.HandleFunc("/read.csv", webReadCsvHandler(p))
    http.HandleFunc("/read.json", webReadJsonHandler(p))
    // needed for occasionally flushing on a newline
    recordcount := 0
    // start infinite polling loop
    for {
        // measure how long it takes
        start := time.Now()
        // specify correct format for dygraph
        record := []string{start.Format("2006/01/02 15:04:05")}
        // get statistics from device and cleanup
        status := sendln(conn, plugGetInfoStats, '#')
        status = strings.Replace(status, plugGetInfoStats+"\r\n", "", 1)
        status = strings.Replace(status, "#", "", 1)
        // split up the 4 results by newline
        results := strings.SplitN(status, "\r\n", 4)
        re := regexp.MustCompile("01(I|V|W|E)[0-9]+ 0*([0-9]+)")
        // for each GetInfo result, do a regexp match, adjust value and create a CSV record
        for i, result := range results {
            match := re.FindStringSubmatch(result)
            value := "0"
            // check if we got the right size of slice
            if len(match) == 3 {
                value = match[2]
            }
            temp, _ := strconv.ParseFloat(value, 32)
            switch i {
            case 0:
                // mAmp/10 -> Amp
                value = strconv.FormatFloat(temp/10000, 'f', 2, 32)
            // centiWatt -> Watt
            case 1:
                value = strconv.FormatFloat(temp/100, 'f', 2, 32)
            // mWatt/h -> Watt/h | mVolt -> Volt
            case 2, 3:
                value = strconv.FormatFloat(temp/1000, 'f', 2, 32)
            }
            record = append(record, value)
            recordcount += 1
        }
        // latestentry is needed in JSON for the realtime streaming
        p.latestEntry, _ = json.Marshal(record)
        // write the record to disk
        err := diskwriter.Write(record)
        if err != nil {
            fmt.Println("Error:", err)
        }
        // write the record to buffer (in memory)
        err = bufferwriter.Write(record)
        if err != nil {
            fmt.Println("Error:", err)
        }
        // flush disk every 25 records
        if recordcount%100 == 0 {
            diskwriter.Flush()
            if strings.Contains(p.csvfile, ".gz") {
                gzipwriter.Flush()
            }
        }
        // flush memory immediately
        bufferwriter.Flush()
        if debug {
            fmt.Print(record)
            fmt.Println(" took", time.Since(start))
        }
        // sleep the right amount of time
        time.Sleep(time.Second*time.Duration(p.delay) - time.Since(start))
    }
}
Example 7: NewEventHandler
func NewEventHandler(buildsDB BuildsDB, buildID int) http.Handler {
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        flusher := w.(http.Flusher)
        closed := w.(http.CloseNotifier).CloseNotify()
        w.Header().Add("Content-Type", "text/event-stream; charset=utf-8")
        w.Header().Add("Cache-Control", "no-cache, no-store, must-revalidate")
        w.Header().Add("Connection", "keep-alive")
        w.Header().Add(ProtocolVersionHeader, CurrentProtocolVersion)
        var start uint = 0
        if r.Header.Get("Last-Event-ID") != "" {
            _, err := fmt.Sscanf(r.Header.Get("Last-Event-ID"), "%d", &start)
            if err != nil {
                w.WriteHeader(http.StatusBadRequest)
                return
            }
            start++
        }
        var responseWriter io.Writer = w
        var responseFlusher *gzip.Writer
        w.Header().Add("Vary", "Accept-Encoding")
        if strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
            w.Header().Set("Content-Encoding", "gzip")
            gz := gzip.NewWriter(w)
            defer gz.Close()
            responseWriter = gz
            responseFlusher = gz
        }
        events, err := buildsDB.GetBuildEvents(buildID, start)
        if err != nil {
            w.WriteHeader(http.StatusNotFound)
            return
        }
        defer events.Close()
        es := make(chan atc.Event)
        errs := make(chan error, 1)
        go func() {
            for {
                ev, err := events.Next()
                if err != nil {
                    errs <- err
                    return
                } else {
                    select {
                    case es <- ev:
                    case <-closed:
                        return
                    }
                }
            }
        }()
        for {
            select {
            case ev := <-es:
                payload, err := json.Marshal(event.Message{ev})
                if err != nil {
                    return
                }
                err = sse.Event{
                    ID:   fmt.Sprintf("%d", start),
                    Name: "event",
                    Data: payload,
                }.Write(responseWriter)
                if err != nil {
                    return
                }
                start++
                if responseFlusher != nil {
                    err = responseFlusher.Flush()
                    if err != nil {
                        return
                    }
                }
                flusher.Flush()
            case err := <-errs:
                if err == db.ErrEndOfBuildEventStream {
                    err = sse.Event{Name: "end"}.Write(responseWriter)
                    if err != nil {
                        return
                    }
                }
                return
            case <-closed:
                return
//......... (the rest of this example is omitted) .........
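Example 7 relies on two levels of flushing: gzip.Writer.Flush moves the compressed event into the http.ResponseWriter, and http.Flusher.Flush then pushes the response bytes out to the client. A minimal sketch of that double flush, using a hypothetical handler (streamGzip is not part of the example above, and a real handler would, as above, also check the Accept-Encoding header):

package main

import (
    "compress/gzip"
    "fmt"
    "log"
    "net/http"
    "time"
)

// streamGzip illustrates the two-level flush: first the gzip.Writer,
// then the http.Flusher, so each line reaches the client promptly.
func streamGzip(w http.ResponseWriter, r *http.Request) {
    w.Header().Set("Content-Type", "text/plain; charset=utf-8")
    w.Header().Set("Content-Encoding", "gzip")

    zw := gzip.NewWriter(w)
    defer zw.Close() // writes the gzip footer when the stream ends

    flusher, _ := w.(http.Flusher)
    for i := 0; i < 5; i++ {
        fmt.Fprintf(zw, "tick %d\n", i)
        if err := zw.Flush(); err != nil { // push compressed bytes into w
            return
        }
        if flusher != nil {
            flusher.Flush() // push the response bytes to the client now
        }
        time.Sleep(time.Second)
    }
}

func main() {
    http.HandleFunc("/stream", streamGzip)
    log.Fatal(http.ListenAndServe(":8080", nil))
}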