dl.google.com: Powered by Go
26 July 2013
Brad Fitzpatrick
Gopher, Google
Brad Fitzpatrick
Gopher, Google
$ apt-get update
each "payload" (~URL) described by a protobuf:
n, err := io.Copy(dst, src)
payload_server
, not the payload_fetcher
payload_fetcher
still running
payload_fetcher
entirely; fast start-up time.package main import ( "fmt" "log" "net/http" "os" ) func handler(w http.ResponseWriter, r *http.Request) { fmt.Fprintf(os.Stdout, "%s details: %+v\n", r.URL.Path, r) fmt.Fprintf(w, "Hello, world! at %s\n", r.URL.Path) } func main() { log.Printf("Running...") log.Fatal(http.ListenAndServe("127.0.0.1:8080", http.HandlerFunc(handler))) }
package main

import (
	"log"
	"net/http"
	"os"
	"path/filepath"
)

// main serves the $HOME/go/doc directory tree over HTTP on
// localhost:8080 using the stock file server.
func main() {
	log.Printf("Running...")
	docDir := filepath.Join(os.Getenv("HOME"), "go", "doc")
	log.Fatal(http.ListenAndServe(
		"127.0.0.1:8080",
		http.FileServer(http.Dir(docDir))))
}
$ curl -H "Range: bytes=5-" http://localhost:8080
package main

import (
	"log"
	"net/http"
	"strings"
	"time"
)

// main serves a fixed string with http.ServeContent, which handles
// Range and conditional requests on our behalf.
func main() {
	log.Printf("Running...")
	serve := func(w http.ResponseWriter, r *http.Request) {
		http.ServeContent(w, r, "foo.txt", time.Now(),
			strings.NewReader("I am some content.\n"))
	}
	err := http.ListenAndServe("127.0.0.1:8080", http.HandlerFunc(serve))
	log.Fatal(err)
}
Declare who you are and who your peers are.
// Identify this process and register the full peer set.
me := "http://10.0.0.1"
peers := groupcache.NewHTTPPool(me)

// Whenever peers change:
peers.Set("http://10.0.0.1", "http://10.0.0.2", "http://10.0.0.3")
This peer interface is pluggable. (e.g. inside Google it's automatic.)
Declare a group. (group of keys, shared between group of peers)
// thumbNails caches up to 64 MB of generated thumbnails, keyed by
// file name; the getter runs only on a cache miss.
var thumbNails = groupcache.NewGroup("thumbnail", 64<<20, groupcache.GetterFunc(
	func(ctx groupcache.Context, key string, dest groupcache.Sink) error {
		fileName := key
		// Fill dest with the freshly generated bytes for this key.
		dest.SetBytes(generateThumbnail(fileName))
		return nil
	}))
Request keys
// Ask the group for a key; Get fills data via the sink, consulting
// the local cache and peers before invoking the getter.
var data []byte
err := thumbNails.Get(ctx, "big-file.jpg", groupcache.AllocatingByteSliceSink(&data))
// ...
http.ServeContent(w, r, "big-file-thumb.jpg", modTime, bytes.NewReader(data))
// A SizeReaderAt is a ReaderAt with a Size method.
//
// An io.SectionReader implements SizeReaderAt.
type SizeReaderAt interface {
	Size() int64
	io.ReaderAt
}

// NewMultiReaderAt is like io.MultiReader but produces a ReaderAt
// (and Size), instead of just a reader.
func NewMultiReaderAt(parts ...SizeReaderAt) SizeReaderAt {
	m := &multi{
		parts: make([]offsetAndSource, 0, len(parts)),
	}
	// Record each part together with its starting offset in the
	// concatenated stream; offsets accumulate part sizes in order.
	var off int64
	for _, p := range parts {
		m.parts = append(m.parts, offsetAndSource{off, p})
		off += p.Size()
	}
	m.size = off
	return m
}
// NewChunkAlignedReaderAt returns a ReaderAt wrapper that is backed
// by the ReaderAt r, where the wrapper guarantees that all ReadAt
// calls on r are aligned to chunkSize boundaries and of size
// chunkSize (except for the final chunk, which may be shorter).
//
// A chunk-aligned reader is good for caching, letting upper layers have
// any access pattern, but guarantees that the wrapped ReaderAt sees
// only nicely-cacheable access patterns & sizes.
func NewChunkAlignedReaderAt(r SizeReaderAt, chunkSize int) SizeReaderAt {
	// ... (implementation elided on the slide)
}
r
only sees ReadAt calls on 2MB offset boundaries, of size 2MB (unless final chunk)

// +build ignore,OMIT
package main
import (
"io"
"log"
"net/http"
"sort"
"strings"
"time"
)
// modTime is the fixed modification time handed to http.ServeContent
// below, so responses are stable across restarts.
var modTime = time.Unix(1374708739, 0)
func part(s string) SizeReaderAt { return io.NewSectionReader(strings.NewReader(s), 0, int64(len(s))) } func handler(w http.ResponseWriter, r *http.Request) { sra := NewMultiReaderAt( part("Hello, "), part(" world! "), part("You requested "+r.URL.Path+"\n"), ) rs := io.NewSectionReader(sra, 0, sra.Size()) http.ServeContent(w, r, "foo.txt", modTime, rs) }
// main registers handler at the root of the default mux and serves
// it on localhost:8080.
func main() {
	log.Printf("Running...")
	http.Handle("/", http.HandlerFunc(handler))
	log.Fatal(http.ListenAndServe("127.0.0.1:8080", nil))
}
// START_1 OMIT
// A SizeReaderAt is a ReaderAt with a Size method.
//
// An io.SectionReader implements SizeReaderAt.
type SizeReaderAt interface {
	// Size reports the total number of bytes readable via ReadAt.
	Size() int64
	io.ReaderAt
}
// NewMultiReaderAt is like io.MultiReader but produces a ReaderAt
// (and Size), instead of just a reader.
func NewMultiReaderAt(parts ...SizeReaderAt) SizeReaderAt {
	mr := &multi{parts: make([]offsetAndSource, 0, len(parts))}
	// Each part is recorded with its starting offset in the
	// concatenated stream; offsets accumulate in input order.
	var offset int64
	for _, src := range parts {
		mr.parts = append(mr.parts, offsetAndSource{offset, src})
		offset += src.Size()
	}
	mr.size = offset
	return mr
}
// END_1 OMIT
// offsetAndSource pairs a part with the offset at which it begins
// in the concatenated stream.
type offsetAndSource struct {
	off int64
	SizeReaderAt
}
// multi implements SizeReaderAt over an ordered sequence of parts.
type multi struct {
	parts []offsetAndSource // ordered by off, ascending
	size  int64             // sum of all part sizes
}
// Size returns the total size of all concatenated parts.
func (m *multi) Size() int64 { return m.size }
// ReadAt implements io.ReaderAt across the concatenated parts: it
// locates the part containing off, then stitches together reads from
// successive parts until p is full or the parts run out.
func (m *multi) ReadAt(p []byte, off int64) (n int, err error) {
	wantN := len(p)

	// Skip past the requested offset.
	skipParts := sort.Search(len(m.parts), func(i int) bool {
		// This function returns whether parts[i] will
		// contribute any bytes to our output.
		part := m.parts[i]
		return part.off+part.Size() > off
	})
	parts := m.parts[skipParts:]

	// How far to skip in the first part.
	needSkip := off
	if len(parts) > 0 {
		needSkip -= parts[0].off
	}

	for len(parts) > 0 && len(p) > 0 {
		readP := p
		partSize := parts[0].Size()
		// Clamp this read so it does not run past the current part.
		if int64(len(readP)) > partSize-needSkip {
			readP = readP[:partSize-needSkip]
		}
		pn, err0 := parts[0].ReadAt(readP, needSkip)
		if err0 != nil {
			return n, err0
		}
		n += pn
		p = p[pn:]
		// Move on once the current part is fully consumed.
		if int64(pn)+needSkip == partSize {
			parts = parts[1:]
		}
		// Only the first part is read at a non-zero intra-part offset.
		needSkip = 0
	}

	// Per the io.ReaderAt contract, a short read must return an error.
	if n != wantN {
		err = io.ErrUnexpectedEOF
	}
	return
}
payload_server
, no payload_fetcher)

groupcache, now open source (github.com/golang/groupcache)

Brad Fitzpatrick
Gopher, Google