Initialize module and dependencies

This commit is contained in:
dwrz
2026-01-04 20:57:40 +00:00
commit a3b390c008
514 changed files with 310495 additions and 0 deletions

140
vendor/golang.org/x/telemetry/internal/config/config.go generated vendored Normal file
View File

@@ -0,0 +1,140 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// package config provides methods for loading and querying a
// telemetry upload config file.
package config
import (
"encoding/json"
"os"
"strings"
"golang.org/x/telemetry/internal/telemetry"
)
// Config is a wrapper around telemetry.UploadConfig that provides some
// convenience methods for checking the contents of a report.
//
// The embedded UploadConfig holds the parsed JSON; the maps below are
// lookup indexes over it, built by NewConfig, so the Has* and Rate
// methods are simple map lookups.
type Config struct {
	*telemetry.UploadConfig
	program         map[string]bool   // known program names
	goos            map[string]bool   // known GOOS values
	goarch          map[string]bool   // known GOARCH values
	goversion       map[string]bool   // known Go versions
	pgversion       map[pgkey]bool    // (program, version) pairs
	pgcounter       map[pgkey]bool    // (program, counter name) pairs, buckets expanded
	pgcounterprefix map[pgkey]bool    // (program, counter prefix before ':') pairs
	pgstack         map[pgkey]bool    // (program, stack counter name) pairs
	rate            map[pgkey]float64 // rate per (program, counter or stack name)
}

// pgkey is a (program name, key) pair used to index per-program data.
type pgkey struct {
	program, key string
}
// ReadConfig reads and parses the upload config file at the given path,
// returning a Config wrapper with its lookup indexes populated.
func ReadConfig(file string) (*Config, error) {
	raw, err := os.ReadFile(file)
	if err != nil {
		return nil, err
	}
	var parsed telemetry.UploadConfig
	if err := json.Unmarshal(raw, &parsed); err != nil {
		return nil, err
	}
	return NewConfig(&parsed), nil
}
// NewConfig builds the lookup indexes for cfg so that the Has* and Rate
// methods can answer queries with map lookups.
func NewConfig(cfg *telemetry.UploadConfig) *Config {
	ucfg := Config{UploadConfig: cfg}
	ucfg.goos = set(ucfg.GOOS)
	ucfg.goarch = set(ucfg.GOARCH)
	ucfg.goversion = set(ucfg.GoVersion)

	ucfg.program = make(map[string]bool, len(ucfg.Programs))
	ucfg.pgversion = make(map[pgkey]bool, len(ucfg.Programs))
	ucfg.pgcounter = make(map[pgkey]bool, len(ucfg.Programs))
	ucfg.pgcounterprefix = make(map[pgkey]bool, len(ucfg.Programs))
	ucfg.pgstack = make(map[pgkey]bool, len(ucfg.Programs))
	ucfg.rate = make(map[pgkey]float64)
	for _, p := range ucfg.Programs {
		ucfg.program[p.Name] = true
		for _, v := range p.Versions {
			ucfg.pgversion[pgkey{p.Name, v}] = true
		}
		for _, c := range p.Counters {
			// A bucketed counter ("name{a,b}") is indexed once per
			// expanded bucket name, each sharing the counter's rate.
			for _, e := range Expand(c.Name) {
				ucfg.pgcounter[pgkey{p.Name, e}] = true
				ucfg.rate[pgkey{p.Name, e}] = c.Rate
			}
			// Index the prefix before ':' for HasCounterPrefix queries.
			prefix, _, found := strings.Cut(c.Name, ":")
			if found {
				ucfg.pgcounterprefix[pgkey{p.Name, prefix}] = true
			}
		}
		for _, s := range p.Stacks {
			ucfg.pgstack[pgkey{p.Name, s.Name}] = true
			ucfg.rate[pgkey{p.Name, s.Name}] = s.Rate
		}
	}
	return &ucfg
}
// HasProgram reports whether the config lists program s.
func (r *Config) HasProgram(s string) bool {
	return r.program[s]
}

// HasGOOS reports whether the config lists GOOS s.
func (r *Config) HasGOOS(s string) bool {
	return r.goos[s]
}

// HasGOARCH reports whether the config lists GOARCH s.
func (r *Config) HasGOARCH(s string) bool {
	return r.goarch[s]
}

// HasGoVersion reports whether the config lists Go version s.
func (r *Config) HasGoVersion(s string) bool {
	return r.goversion[s]
}

// HasVersion reports whether the config lists the given version for program.
func (r *Config) HasVersion(program, version string) bool {
	return r.pgversion[pgkey{program, version}]
}

// HasCounter reports whether the config lists the given (expanded) counter
// name for program.
func (r *Config) HasCounter(program, counter string) bool {
	return r.pgcounter[pgkey{program, counter}]
}

// HasCounterPrefix reports whether the config lists a counter for program
// whose name before ':' is prefix.
func (r *Config) HasCounterPrefix(program, prefix string) bool {
	return r.pgcounterprefix[pgkey{program, prefix}]
}

// HasStack reports whether the config lists the given stack counter for
// program.
func (r *Config) HasStack(program, stack string) bool {
	return r.pgstack[pgkey{program, stack}]
}

// Rate returns the sampling rate for the given counter or stack name in
// program, or 0 if it is not listed.
func (r *Config) Rate(program, name string) float64 {
	return r.rate[pgkey{program, name}]
}
// set converts a slice of strings into a membership map whose value is
// true for every element of the slice.
func set(slice []string) map[string]bool {
	members := make(map[string]bool, len(slice))
	for _, elem := range slice {
		members[elem] = true
	}
	return members
}
// Expand takes a counter defined with buckets and expands it into distinct
// strings for each bucket. A counter of the form "name{a,b}" expands to
// "namea" and "nameb"; a counter without buckets expands to itself.
func Expand(counter string) []string {
	prefix, rest, hasBuckets := strings.Cut(counter, "{")
	if !hasBuckets {
		return []string{prefix}
	}
	var expanded []string
	for _, bucket := range strings.Split(strings.TrimSuffix(rest, "}"), ",") {
		expanded = append(expanded, prefix+bucket)
	}
	return expanded
}

View File

@@ -0,0 +1,86 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package configstore abstracts interaction with the telemetry config server.
// Telemetry config (golang.org/x/telemetry/config) is distributed as a go
// module containing go.mod and config.json. Programs that upload collected
// counters download the latest config using `go mod download`. This provides
// verification of downloaded configuration and cacheability.
package configstore
import (
"bytes"
"encoding/json"
"fmt"
"os"
"os/exec"
"path/filepath"
"sync/atomic"
"golang.org/x/telemetry/internal/telemetry"
)
const (
	// ModulePath is the Go module from which the upload config is fetched.
	ModulePath = "golang.org/x/telemetry/config"
	// configFileName is the name of the config file inside that module.
	configFileName = "config.json"
)

// needNoConsole is used on windows to set the windows.CREATE_NO_WINDOW
// creation flag. It is a no-op here; the windows build overrides it in init.
var needNoConsole = func(cmd *exec.Cmd) {}

// downloads counts calls to Download; accessed atomically.
var downloads int64

// Downloads reports, for testing purposes, the number of times [Download] has
// been called.
func Downloads() int64 {
	return atomic.LoadInt64(&downloads)
}
// Download fetches the requested telemetry UploadConfig using "go mod
// download". If envOverlay is provided, it is appended to the environment used
// for invoking the go command.
//
// The second result is the canonical version of the requested configuration.
func Download(version string, envOverlay []string) (*telemetry.UploadConfig, string, error) {
	atomic.AddInt64(&downloads, 1)
	if version == "" {
		version = "latest"
	}
	modVer := ModulePath + "@" + version
	var stdout, stderr bytes.Buffer
	cmd := exec.Command("go", "mod", "download", "-json", modVer)
	needNoConsole(cmd)
	// envOverlay entries come last, so they take precedence over os.Environ.
	cmd.Env = append(os.Environ(), envOverlay...)
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr
	if err := cmd.Run(); err != nil {
		// On failure the go command may still have written a JSON object
		// with an Error field to stdout; prefer that message if present.
		var info struct {
			Error string
		}
		if err := json.Unmarshal(stdout.Bytes(), &info); err == nil && info.Error != "" {
			return nil, "", fmt.Errorf("failed to download config module: %v", info.Error)
		}
		return nil, "", fmt.Errorf("failed to download config module: %w\n%s", err, &stderr)
	}

	// Parse the module download report to locate the module cache dir
	// and the resolved (canonical) version.
	var info struct {
		Dir     string
		Version string
		Error   string
	}
	if err := json.Unmarshal(stdout.Bytes(), &info); err != nil || info.Dir == "" {
		return nil, "", fmt.Errorf("failed to download config module (invalid JSON): %w", err)
	}
	data, err := os.ReadFile(filepath.Join(info.Dir, configFileName))
	if err != nil {
		return nil, "", fmt.Errorf("invalid config module: %w", err)
	}
	cfg := new(telemetry.UploadConfig)
	if err := json.Unmarshal(data, cfg); err != nil {
		return nil, "", fmt.Errorf("invalid config: %w", err)
	}
	return cfg, info.Version, nil
}

View File

@@ -0,0 +1,33 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build windows
package configstore
import (
"os/exec"
"syscall"
"golang.org/x/sys/windows"
)
// init replaces the no-op needNoConsole hook with the windows-specific
// implementation.
func init() {
	needNoConsole = needNoConsoleWindows
}

// needNoConsoleWindows configures cmd so that no console window is created
// for the subprocess.
func needNoConsoleWindows(cmd *exec.Cmd) {
	// The uploader main process is likely a daemonized process with no console.
	// (see x/telemetry/start_windows.go) The console creation behavior when
	// a parent is a console process without console is not clearly documented
	// but empirically we observed the new console is created and attached to the
	// subprocess in the default setup.
	//
	// Ensure no new console is attached to the subprocess by setting CREATE_NO_WINDOW.
	// https://learn.microsoft.com/en-us/windows/console/creation-of-a-console
	// https://learn.microsoft.com/en-us/windows/win32/procthread/process-creation-flags
	cmd.SysProcAttr = &syscall.SysProcAttr{
		CreationFlags: windows.CREATE_NO_WINDOW,
	}
}

View File

@@ -0,0 +1,401 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package internal/counter implements the internals of the public counter package.
// In addition to the public API, this package also includes APIs to parse and
// manage the counter files, needed by the upload package.
package counter
import (
"fmt"
"os"
"runtime"
"strings"
"sync/atomic"
)
var (
	// Note: not using internal/godebug, so that internal/godebug can use
	// internal/counter.
	debugCounter = strings.Contains(os.Getenv("GODEBUG"), "countertrace=1")
	CrashOnBugs  = false // for testing; if set, exit on fatal log messages
)

// debugPrintf formats a debug message if GODEBUG=countertrace=1.
// A trailing newline is appended if the format lacks one.
func debugPrintf(format string, args ...any) {
	if debugCounter {
		if len(format) == 0 || format[len(format)-1] != '\n' {
			format += "\n"
		}
		fmt.Fprintf(os.Stderr, "counter: "+format, args...)
	}
}

// debugFatalf logs a fatal error if GODEBUG=countertrace=1, and exits the
// process (it is also enabled by CrashOnBugs, for tests).
func debugFatalf(format string, args ...any) {
	if debugCounter || CrashOnBugs {
		if len(format) == 0 || format[len(format)-1] != '\n' {
			format += "\n"
		}
		fmt.Fprintf(os.Stderr, "counter bug: "+format, args...)
		os.Exit(1)
	}
}
// A Counter is a single named event counter.
// A Counter is safe for use by multiple goroutines simultaneously.
//
// Counters should typically be created using New
// and stored as global variables, like:
//
//	package mypackage
//	var errorCount = counter.New("mypackage/errors")
//
// (The initialization of errorCount in this example is handled
// entirely by the compiler and linker; this line executes no code
// at program startup.)
//
// Then code can call Add to increment the counter
// each time the corresponding event is observed.
//
// Although it is possible to use New to create
// a Counter each time a particular event needs to be recorded,
// that usage fails to amortize the construction cost over
// multiple calls to Add, so it is more expensive and not recommended.
type Counter struct {
	name string
	file *file                   // the counter file this counter writes to
	next atomic.Pointer[Counter] // link in the file's lock-free counter list
	state counterState           // packed readers/lock/havePtr/extra word
	ptr   counterPtr             // mapped count cell; valid only when state.havePtr()
}

// Name returns the counter's name.
func (c *Counter) Name() string {
	return c.name
}

// counterPtr is a pointer to a counter's count cell inside a mapped file,
// paired with the mapping that keeps the memory alive.
type counterPtr struct {
	m     *mappedFile
	count *atomic.Uint64
}

// counterState holds a Counter's packed state word; see counterStateBits
// for the bit layout.
type counterState struct {
	bits atomic.Uint64
}

// load atomically reads the state word.
func (s *counterState) load() counterStateBits {
	return counterStateBits(s.bits.Load())
}

// update attempts to CAS the state from *old to new. On success it stores
// new into *old and reports true; on failure the caller must reload.
func (s *counterState) update(old *counterStateBits, new counterStateBits) bool {
	if s.bits.CompareAndSwap(uint64(*old), uint64(new)) {
		*old = new
		return true
	}
	return false
}
// counterStateBits is the packed state word of a Counter.
//
// Layout (low to high bits):
//
//	bits 0-29:  reader count; the all-ones value means exclusively locked
//	bit 30:     havePtr, set when c.ptr holds a valid count pointer
//	bits 31-63: extra, counts buffered while no count pointer is available
type counterStateBits uint64

const (
	stateReaders    counterStateBits = 1<<30 - 1
	stateLocked     counterStateBits = stateReaders
	stateHavePtr    counterStateBits = 1 << 30
	stateExtraShift                  = 31
	stateExtra      counterStateBits = 1<<64 - 1<<stateExtraShift
)

func (b counterStateBits) readers() int  { return int(b & stateReaders) }
func (b counterStateBits) locked() bool  { return b&stateReaders == stateLocked }
func (b counterStateBits) havePtr() bool { return b&stateHavePtr != 0 }
func (b counterStateBits) extra() uint64 { return uint64(b&stateExtra) >> stateExtraShift }

func (b counterStateBits) incReader() counterStateBits    { return b + 1 }
func (b counterStateBits) decReader() counterStateBits    { return b - 1 }
func (b counterStateBits) setLocked() counterStateBits    { return b | stateLocked }
func (b counterStateBits) clearLocked() counterStateBits  { return b &^ stateLocked }
func (b counterStateBits) setHavePtr() counterStateBits   { return b | stateHavePtr }
func (b counterStateBits) clearHavePtr() counterStateBits { return b &^ stateHavePtr }
func (b counterStateBits) clearExtra() counterStateBits   { return b &^ stateExtra }

// addExtra adds n to the extra field, saturating at the field's maximum
// value instead of overflowing into adjacent bits.
func (b counterStateBits) addExtra(n uint64) counterStateBits {
	const maxExtra = uint64(stateExtra) >> stateExtraShift // 0x1ffffffff
	x := b.extra()
	if x+n < x || x+n > maxExtra {
		x = maxExtra
	} else {
		x += n
	}
	return b.clearExtra() | counterStateBits(x)<<stateExtraShift
}
// New returns a counter with the given name.
// New can be called in global initializers and will be compiled down to
// linker-initialized data. That is, calling New to initialize a global
// has no cost at program startup.
func New(name string) *Counter {
	// Note: not calling defaultFile.New in order to keep this
	// function something the compiler can inline and convert
	// into static data initializations, with no init-time footprint.
	return &Counter{name: name, file: &defaultFile}
}

// Inc adds 1 to the counter.
func (c *Counter) Inc() {
	c.Add(1)
}
// Add adds n to the counter. n cannot be negative, as counts cannot decrease.
//
// Add registers the counter with its file on first use, then applies the
// update through the lock-free state machine in c.state: when a valid
// count pointer is installed, n is added directly to the mapped count;
// otherwise n is buffered in the state word's "extra" bits until a
// pointer can be installed (by releaseLock).
func (c *Counter) Add(n int64) {
	debugPrintf("Add %q += %d", c.name, n)

	if n < 0 {
		panic("Counter.Add negative")
	}
	if n == 0 {
		return
	}
	c.file.register(c)

	state := c.state.load()
	for ; ; state = c.state.load() {
		switch {
		case !state.locked() && state.havePtr():
			if !c.state.update(&state, state.incReader()) {
				continue
			}
			// Counter unlocked or counter shared; has an initialized count pointer; acquired shared lock.
			if c.ptr.count == nil {
				for !c.state.update(&state, state.addExtra(uint64(n))) {
					// keep trying - we already took the reader lock
					state = c.state.load()
				}
				debugPrintf("Add %q += %d: nil extra=%d\n", c.name, n, state.extra())
			} else {
				sum := c.add(uint64(n))
				debugPrintf("Add %q += %d: count=%d\n", c.name, n, sum)
			}
			c.releaseReader(state)
			return

		case state.locked():
			// Exclusively locked by someone else: buffer n in extra;
			// the lock holder will flush it in releaseLock.
			if !c.state.update(&state, state.addExtra(uint64(n))) {
				continue
			}
			debugPrintf("Add %q += %d: locked extra=%d\n", c.name, n, state.extra())
			return

		case !state.havePtr():
			// No count pointer installed yet: buffer n in extra and take
			// the lock, so releaseLock can install a pointer and flush.
			if !c.state.update(&state, state.addExtra(uint64(n)).setLocked()) {
				continue
			}
			debugPrintf("Add %q += %d: noptr extra=%d\n", c.name, n, state.extra())
			c.releaseLock(state)
			return
		}
	}
}
// releaseReader drops the shared (reader) lock acquired in Add. If this
// is the last reader and the count pointer has been invalidated in the
// meantime, it upgrades to the exclusive lock so releaseLock can refresh
// c.ptr.
func (c *Counter) releaseReader(state counterStateBits) {
	for ; ; state = c.state.load() {
		// If we are the last reader and havePtr was cleared
		// while this batch of readers was using c.ptr,
		// it's our job to update c.ptr by upgrading to a full lock
		// and letting releaseLock do the work.
		// Note: no new reader will attempt to add itself now that havePtr is clear,
		// so we are only racing against possible additions to extra.
		if state.readers() == 1 && !state.havePtr() {
			if !c.state.update(&state, state.setLocked()) {
				continue
			}
			debugPrintf("releaseReader %s: last reader, need ptr\n", c.name)
			c.releaseLock(state)
			return
		}
		// Release reader.
		if !c.state.update(&state, state.decReader()) {
			continue
		}
		debugPrintf("releaseReader %s: released (%d readers now)\n", c.name, state.readers())
		return
	}
}
// releaseLock drops the exclusive lock, first restoring havePtr (looking
// up a fresh count pointer if needed) and flushing any buffered extra
// counts into the mapped count. It loops until it can unlock without
// another goroutine having cleared havePtr or added to extra in between.
func (c *Counter) releaseLock(state counterStateBits) {
	for ; ; state = c.state.load() {
		if !state.havePtr() {
			// Set havePtr before updating ptr,
			// to avoid race with the next clear of havePtr.
			if !c.state.update(&state, state.setHavePtr()) {
				continue
			}
			debugPrintf("releaseLock %s: reset havePtr (extra=%d)\n", c.name, state.extra())
			// Optimization: only bother loading a new pointer
			// if we have a value to add to it.
			c.ptr = counterPtr{nil, nil}
			if state.extra() != 0 {
				c.ptr = c.file.lookup(c.name)
				debugPrintf("releaseLock %s: ptr=%v\n", c.name, c.ptr)
			}
		}
		if extra := state.extra(); extra != 0 && c.ptr.count != nil {
			if !c.state.update(&state, state.clearExtra()) {
				continue
			}
			sum := c.add(extra)
			debugPrintf("releaseLock %s: flush extra=%d -> count=%d\n", c.name, extra, sum)
		}
		// Took care of refreshing ptr and flushing extra.
		// Now we can release the lock, unless of course
		// another goroutine cleared havePtr or added to extra,
		// in which case we go around again.
		if !c.state.update(&state, state.clearLocked()) {
			continue
		}
		debugPrintf("releaseLock %s: unlocked\n", c.name)
		return
	}
}
// add wraps the atomic.Uint64.Add operation to handle integer overflow:
// the count saturates at the maximum uint64 value instead of wrapping.
func (c *Counter) add(n uint64) uint64 {
	count := c.ptr.count
	for {
		old := count.Load()
		sum := old + n
		if sum < old {
			sum = ^uint64(0) // saturate on overflow
		}
		if count.CompareAndSwap(old, sum) {
			// Keep the file mapping alive while touching memory inside it.
			runtime.KeepAlive(c.ptr.m)
			return sum
		}
	}
}

// invalidate clears the havePtr bit, marking c.ptr as stale so that it
// is refreshed before the next use (called when the mapped file changes).
func (c *Counter) invalidate() {
	for {
		state := c.state.load()
		if !state.havePtr() {
			debugPrintf("invalidate %s: no ptr\n", c.name)
			return
		}
		if c.state.update(&state, state.clearHavePtr()) {
			debugPrintf("invalidate %s: cleared havePtr\n", c.name)
			return
		}
	}
}

// refresh takes the exclusive lock and lets releaseLock install a fresh
// count pointer and flush buffered extra counts. It is a no-op if a
// pointer is already installed, readers are active, or nothing is buffered.
func (c *Counter) refresh() {
	for {
		state := c.state.load()
		if state.havePtr() || state.readers() > 0 || state.extra() == 0 {
			debugPrintf("refresh %s: havePtr=%v readers=%d extra=%d\n", c.name, state.havePtr(), state.readers(), state.extra())
			return
		}
		if c.state.update(&state, state.setLocked()) {
			debugPrintf("refresh %s: locked havePtr=%v readers=%d extra=%d\n", c.name, state.havePtr(), state.readers(), state.extra())
			c.releaseLock(state)
			return
		}
	}
}
// Read reads the given counter.
// This is the implementation of x/telemetry/counter/countertest.ReadCounter.
//
// If the counter's file has no current mapping, the buffered in-memory
// "extra" value is returned instead of reading from disk.
func Read(c *Counter) (uint64, error) {
	if c.file.current.Load() == nil {
		return c.state.load().extra(), nil
	}
	pf, err := readFile(c.file)
	if err != nil {
		return 0, err
	}
	v, ok := pf.Count[DecodeStack(c.Name())]
	if !ok {
		return v, fmt.Errorf("not found:%q", DecodeStack(c.Name()))
	}
	return v, nil
}

// readFile rotates f's mapped file if necessary and parses its current
// on-disk contents into a File.
func readFile(f *file) (*File, error) {
	if f == nil {
		debugPrintf("No file")
		return nil, fmt.Errorf("counter is not initialized - was Open called?")
	}

	// Note: don't call f.rotate here as this will enqueue a follow-up rotation.
	f.rotate1()

	if f.err != nil {
		return nil, fmt.Errorf("failed to rotate mapped file - %v", f.err)
	}
	current := f.current.Load()
	if current == nil {
		return nil, fmt.Errorf("counter has no mapped file")
	}
	name := current.f.Name()
	data, err := ReadMapped(name)
	if err != nil {
		return nil, fmt.Errorf("failed to read from file: %v", err)
	}
	pf, err := Parse(name, data)
	if err != nil {
		return nil, fmt.Errorf("failed to parse: %v", err)
	}
	return pf, nil
}
// ReadFile reads the counters and stack counters from the given file.
// This is the implementation of x/telemetry/counter/countertest.ReadFile.
//
// Counters are split into plain counters and stack counters (whose names
// are decoded via DecodeStack).
func ReadFile(name string) (counters, stackCounters map[string]uint64, _ error) {
	// TODO: Document the format of the stackCounters names.

	data, err := ReadMapped(name)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to read from file: %v", err)
	}
	pf, err := Parse(name, data)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to parse: %v", err)
	}
	counters = make(map[string]uint64)
	stackCounters = make(map[string]uint64)
	for k, v := range pf.Count {
		if IsStackCounter(k) {
			stackCounters[DecodeStack(k)] = v
		} else {
			counters[k] = v
		}
	}
	return counters, stackCounters, nil
}
// ReadMapped reads the contents of the given file by memory mapping.
//
// This avoids file synchronization issues.
//
// The mapped data is copied into a fresh slice and the mapping is released
// before returning, so the result does not alias the file.
func ReadMapped(name string) ([]byte, error) {
	f, err := os.OpenFile(name, os.O_RDWR, 0666)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	fi, err := f.Stat()
	if err != nil {
		return nil, err
	}
	mapping, err := memmap(f)
	if err != nil {
		return nil, err
	}
	data := make([]byte, fi.Size())
	copy(data, mapping.Data)
	munmap(mapping)
	return data, nil
}

814
vendor/golang.org/x/telemetry/internal/counter/file.go generated vendored Normal file
View File

@@ -0,0 +1,814 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package counter
import (
"bytes"
"errors"
"fmt"
"math/rand"
"os"
"path"
"path/filepath"
"runtime"
"runtime/debug"
"sync"
"sync/atomic"
"time"
"unsafe"
"golang.org/x/telemetry/internal/mmap"
"golang.org/x/telemetry/internal/telemetry"
)
// A file is a counter file.
type file struct {
	// Linked list of all known counters.
	// (Linked list insertion is easy to make lock-free,
	// and we don't want the initial counters incremented
	// by a program to cause significant contention.)
	counters atomic.Pointer[Counter] // head of list
	end      Counter                 // list ends at &end instead of nil

	mu                 sync.Mutex // guards the fields below
	buildInfo          *debug.BuildInfo
	timeBegin, timeEnd time.Time // span covered by the current counter file
	err                error     // sticky failure; once set, the file is unusable

	// current holds the current file mapping, which may change when the file is
	// rotated or extended.
	//
	// current may be read without holding mu, but may be nil.
	//
	// The cleanup logic for file mappings is complicated, because invalidating
	// counter pointers is reentrant: [file.invalidateCounters] may call
	// [file.lookup], which acquires mu. Therefore, writing current must be done
	// as follows:
	//  1. record the previous value of current
	//  2. Store a new value in current
	//  3. unlock mu
	//  4. call invalidateCounters
	//  5. close the previous mapped value from (1)
	// TODO(rfindley): simplify
	current atomic.Pointer[mappedFile]
}

// defaultFile is the file used by counters created with New.
var defaultFile file
// register ensures that the counter c is registered with the file.
//
// Registration pushes c onto the file's lock-free linked list; a counter
// with a non-nil next pointer is already registered. c.next is written
// with CAS(nil, next) the first time so concurrent registrations of the
// same counter cannot both succeed.
func (f *file) register(c *Counter) {
	debugPrintf("register %s %p\n", c.Name(), c)

	// If counter is not registered with file, register it.
	// Doing this lazily avoids init-time work
	// as well as any execution cost at all for counters
	// that are not used in a given program.
	wroteNext := false
	for wroteNext || c.next.Load() == nil {
		head := f.counters.Load()
		next := head
		if next == nil {
			next = &f.end
		}
		debugPrintf("register %s next %p\n", c.Name(), next)
		if !wroteNext {
			if !c.next.CompareAndSwap(nil, next) {
				debugPrintf("register %s cas failed %p\n", c.Name(), c.next.Load())
				continue
			}
			wroteNext = true
		} else {
			c.next.Store(next)
		}
		if f.counters.CompareAndSwap(head, c) {
			debugPrintf("registered %s %p\n", c.Name(), f.counters.Load())
			return
		}
		debugPrintf("register %s cas2 failed %p %p\n", c.Name(), f.counters.Load(), head)
	}
}
// invalidateCounters marks as invalid all the pointers
// held by f's counters and then refreshes them.
//
// invalidateCounters cannot be called while holding f.mu,
// because a counter refresh may call f.lookup.
func (f *file) invalidateCounters() {
	// Mark every counter as needing to refresh its count pointer.
	if head := f.counters.Load(); head != nil {
		// Two passes: first invalidate every counter, then refresh them,
		// so no refresh observes a stale sibling pointer.
		for c := head; c != &f.end; c = c.next.Load() {
			c.invalidate()
		}
		for c := head; c != &f.end; c = c.next.Load() {
			c.refresh()
		}
	}
}

// lookup looks up the counter with the given name in the file,
// allocating it if needed, and returns a pointer to the atomic.Uint64
// containing the counter data.
// If the file has not been opened yet, lookup returns nil.
func (f *file) lookup(name string) counterPtr {
	current := f.current.Load()
	if current == nil {
		debugPrintf("lookup %s - no mapped file\n", name)
		return counterPtr{}
	}
	ptr := f.newCounter(name)
	if ptr == nil {
		return counterPtr{}
	}
	return counterPtr{current, ptr}
}
// ErrDisabled is the error returned when telemetry is disabled.
var ErrDisabled = errors.New("counter: disabled as Go telemetry is off")

var (
	errNoBuildInfo = errors.New("counter: missing build info")
	errCorrupt     = errors.New("counter: corrupt counter file")
)

// weekEnd returns the day of the week on which uploads occur (and therefore
// counters expire).
//
// Reads the weekends file, creating one if none exists.
func weekEnd() (time.Weekday, error) {
	// If there is no 'weekends' file create it and initialize it
	// to a random day of the week. There is a short interval for
	// a race.
	weekends := filepath.Join(telemetry.Default.LocalDir(), "weekends")
	day := fmt.Sprintf("%d\n", rand.Intn(7))
	if _, err := os.ReadFile(weekends); err != nil {
		if err := os.MkdirAll(telemetry.Default.LocalDir(), 0777); err != nil {
			debugPrintf("%v: could not create telemetry.LocalDir %s", err, telemetry.Default.LocalDir())
			return 0, err
		}
		if err = os.WriteFile(weekends, []byte(day), 0666); err != nil {
			return 0, err
		}
	}

	// race is over, read the file
	buf, err := os.ReadFile(weekends)
	// There is no reasonable way of recovering from errors
	// so we just fail
	if err != nil {
		return 0, err
	}
	buf = bytes.TrimSpace(buf)
	if len(buf) == 0 {
		return 0, fmt.Errorf("empty weekends file")
	}
	weekend := time.Weekday(buf[0] - '0') // 0 is Sunday
	// paranoia to make sure the value is legal
	weekend %= 7
	if weekend < 0 {
		weekend += 7
	}
	return weekend, nil
}
// rotate checks to see whether the file f needs to be rotated,
// meaning to start a new counter file with a different date in the name.
// rotate is also used to open the file initially, meaning f.current can be nil.
// In general rotate should be called just once for each file.
// rotate will arrange a timer to call itself again when necessary.
func (f *file) rotate() {
	expiry := f.rotate1()
	if !expiry.IsZero() {
		delay := time.Until(expiry)
		// Some tests set CounterTime to a time in the past, causing delay to be
		// negative. Avoid infinite loops by delaying at least a short interval.
		//
		// TODO(rfindley): instead, just also mock AfterFunc.
		const minDelay = 1 * time.Minute
		if delay < minDelay {
			delay = minDelay
		}
		// TODO(rsc): Does this do the right thing for laptops closing?
		time.AfterFunc(delay, f.rotate)
	}
}

// nop is a reusable no-op cleanup function.
func nop() {}

// CounterTime returns the current UTC time.
// Mutable for testing.
var CounterTime = func() time.Time {
	return time.Now().UTC()
}
// counterSpan returns the current time span for a counter file, as determined
// by [CounterTime] and the [weekEnd]. begin is midnight UTC today; end is
// midnight UTC on the next weekEnd day strictly after begin.
func counterSpan() (begin, end time.Time, _ error) {
	year, month, day := CounterTime().Date()
	begin = time.Date(year, month, day, 0, 0, 0, 0, time.UTC)
	// files always begin today, but expire on the next day of the week
	// from the 'weekends' file.
	weekend, err := weekEnd()
	if err != nil {
		return time.Time{}, time.Time{}, err
	}
	incr := int(weekend - begin.Weekday())
	if incr <= 0 {
		incr += 7 // ensure that end is later than begin
	}
	end = time.Date(year, month, day+incr, 0, 0, 0, 0, time.UTC)
	return begin, end, nil
}
// rotate1 rotates the current counter file, returning its expiry, or the zero
// time if rotation failed.
//
// On success the new mapping is stored in f.current; the deferred cleanup
// then (outside the lock) invalidates all counters and closes the previous
// mapping, per the protocol documented on the file.current field.
func (f *file) rotate1() time.Time {
	// Cleanup must be performed while unlocked, since invalidateCounters may
	// involve calls to f.lookup.
	var previous *mappedFile // read below while holding the f.mu.
	defer func() {
		// Counters must be invalidated whenever the mapped file changes.
		if next := f.current.Load(); next != previous {
			f.invalidateCounters()
			// Ensure that the previous counter mapped file is closed.
			if previous != nil {
				previous.close() // safe to call multiple times
			}
		}
	}()

	f.mu.Lock()
	defer f.mu.Unlock()
	previous = f.current.Load()

	if f.err != nil {
		return time.Time{} // already in failed state; nothing to do
	}

	// fail records err as the file's sticky error and clears the mapping.
	fail := func(err error) {
		debugPrintf("rotate: %v", err)
		f.err = err
		f.current.Store(nil)
	}

	if mode, _ := telemetry.Default.Mode(); mode == "off" {
		// TODO(rfindley): do we ever want to make ErrDisabled recoverable?
		// Specifically, if f.err is ErrDisabled, should we check again during when
		// rotating?
		fail(ErrDisabled)
		return time.Time{}
	}

	if f.buildInfo == nil {
		bi, ok := debug.ReadBuildInfo()
		if !ok {
			fail(errNoBuildInfo)
			return time.Time{}
		}
		f.buildInfo = bi
	}

	begin, end, err := counterSpan()
	if err != nil {
		fail(err)
		return time.Time{}
	}
	if f.timeBegin.Equal(begin) && f.timeEnd.Equal(end) {
		return f.timeEnd // nothing to do
	}
	f.timeBegin, f.timeEnd = begin, end

	goVers, progPath, progVers := telemetry.ProgramInfo(f.buildInfo)
	meta := fmt.Sprintf("TimeBegin: %s\nTimeEnd: %s\nProgram: %s\nVersion: %s\nGoVersion: %s\nGOOS: %s\nGOARCH: %s\n\n",
		f.timeBegin.Format(time.RFC3339), f.timeEnd.Format(time.RFC3339),
		progPath, progVers, goVers, runtime.GOOS, runtime.GOARCH)
	if len(meta) > maxMetaLen { // should be impossible for our use
		fail(fmt.Errorf("metadata too long"))
		return time.Time{}
	}

	if progVers != "" {
		progVers = "@" + progVers
	}
	baseName := fmt.Sprintf("%s%s-%s-%s-%s-%s.%s.count",
		path.Base(progPath),
		progVers,
		goVers,
		runtime.GOOS,
		runtime.GOARCH,
		f.timeBegin.Format(telemetry.DateOnly),
		FileVersion,
	)
	dir := telemetry.Default.LocalDir()
	if err := os.MkdirAll(dir, 0777); err != nil {
		fail(fmt.Errorf("making local dir: %v", err))
		return time.Time{}
	}
	name := filepath.Join(dir, baseName)

	m, err := openMapped(name, meta)
	if err != nil {
		// Mapping failed:
		// If there used to be a mapped file, after cleanup
		// incrementing counters will only change their internal state.
		// (before cleanup the existing mapped file would be updated)
		fail(fmt.Errorf("openMapped: %v", err))
		return time.Time{}
	}

	debugPrintf("using %v", m.f.Name())
	f.current.Store(m)
	return f.timeEnd
}
// newCounter returns a pointer to the count cell for name in the current
// mapped file, running any deferred cleanup from newCounter1 immediately.
func (f *file) newCounter(name string) *atomic.Uint64 {
	v, cleanup := f.newCounter1(name)
	cleanup()
	return v
}

// newCounter1 looks up or creates the count cell for name while holding
// f.mu. The returned cleanup must be run after the lock is released: it
// is a no-op unless creating the counter remapped the file, in which case
// it invalidates counters and closes the old mapping.
func (f *file) newCounter1(name string) (v *atomic.Uint64, cleanup func()) {
	f.mu.Lock()
	defer f.mu.Unlock()

	current := f.current.Load()
	if current == nil {
		return nil, nop
	}
	debugPrintf("newCounter %s in %s\n", name, current.f.Name())
	if v, _, _, _ := current.lookup(name); v != nil {
		return v, nop
	}
	v, newM, err := current.newCounter(name)
	if err != nil {
		debugPrintf("newCounter %s: %v\n", name, err)
		return nil, nop
	}

	cleanup = nop
	if newM != nil {
		f.current.Store(newM)
		cleanup = func() {
			f.invalidateCounters()
			current.close()
		}
	}
	return v, cleanup
}
var (
	openOnce sync.Once
	// rotating reports whether the call to Open had rotate = true.
	//
	// In golang/go#68497, we observed that file rotation can break runtime
	// deadlock detection. To minimize the fix for 1.23, we are splitting the
	// Open API into one version that rotates the counter file, and another that
	// does not. The rotating variable guards against use of both APIs from the
	// same process.
	rotating bool
)

// Open associates counting with the defaultFile.
// The returned function is for testing only, and should
// be called after all Inc()s are finished, but before
// any reports are generated.
// (Otherwise expired count files will not be deleted on Windows.)
//
// Open panics if it is called twice in the same process with different
// values of rotate.
func Open(rotate bool) func() {
	if telemetry.DisabledOnPlatform {
		return func() {}
	}
	close := func() {}
	openOnce.Do(func() {
		rotating = rotate
		if mode, _ := telemetry.Default.Mode(); mode == "off" {
			// Don't open the file when telemetry is off.
			defaultFile.err = ErrDisabled
			// No need to clean up.
			return
		}
		debugPrintf("Open(%v)", rotate)
		if rotate {
			defaultFile.rotate() // calls rotate1 and schedules a rotation
		} else {
			defaultFile.rotate1()
		}
		close = func() {
			// Once this has been called, the defaultFile is no longer usable.
			mf := defaultFile.current.Load()
			if mf == nil {
				// telemetry might have been off
				return
			}
			mf.close()
		}
	})
	if rotating != rotate {
		panic("BUG: Open called with inconsistent values for 'rotate'")
	}
	return close
}
const (
	FileVersion = "v1"
	hdrPrefix   = "# telemetry/counter file " + FileVersion + "\n"
	recordUnit  = 32              // records are rounded up to multiples of this
	maxMetaLen  = 512             // maximum length of the metadata section
	numHash     = 512             // 2kB for hash table
	maxNameLen  = 4 * 1024        // maximum counter name length
	limitOff    = 0               // offset (after header) of the allocation limit
	hashOff     = 4               // offset (after header) of the hash table
	pageSize    = 16 * 1024       // allocation unit for extending the file
	minFileLen  = 16 * 1024       // minimum size of a counter file
)

// A mappedFile is a counter file mmapped into memory.
//
// The file layout for a mappedFile m is as follows:
//
//	offset, byte size:     description
//	------------------     -----------
//	0, hdrLen:             header, containing metadata; see [mappedHeader]
//	hdrLen+limitOff, 4:    uint32 allocation limit (byte offset of the end of counter records)
//	hdrLen+hashOff, 4*numHash: hash table, stores uint32 heads of a linked list of records, keyed by name hash
//	hdrLen+hashOff+4*numHash to limit: counter records: see record syntax below
//
// The record layout is as follows:
//
//	offset, byte size: description
//	------------------ -----------
//	0, 8:              uint64 counter value
//	8, 4:              uint32 name length
//	12, 4:             uint32 offset of next record in linked list
//	16, name length:   counter name
type mappedFile struct {
	meta      string   // metadata section contents
	hdrLen    uint32   // length of the on-disk header
	zero      [4]byte  // scratch zero bytes used to extend the file
	closeOnce sync.Once
	f         *os.File
	mapping   *mmap.Data
}
// openMapped opens and memory maps a file.
//
// name is the path to the file.
//
// meta is the file metadata, which must match the metadata of the file on disk
// exactly.
//
// If the file is shorter than minFileLen, the header is written and the
// file is extended with zeros before mapping.
func openMapped(name, meta string) (_ *mappedFile, err error) {
	hdr, err := mappedHeader(meta)
	if err != nil {
		return nil, err
	}

	f, err := os.OpenFile(name, os.O_RDWR|os.O_CREATE, 0666)
	if err != nil {
		return nil, err
	}
	// Note: using local variable m here, not return value,
	// so that return nil, err does not set m = nil and break the code in the defer.
	m := &mappedFile{
		f:    f,
		meta: meta,
	}

	defer func() {
		if err != nil {
			m.close()
		}
	}()

	info, err := f.Stat()
	if err != nil {
		return nil, err
	}

	// Establish file header and initial data area if not already present.
	if info.Size() < minFileLen {
		if _, err := f.WriteAt(hdr, 0); err != nil {
			return nil, err
		}
		// Write zeros at the end of the file to extend it to minFileLen.
		if _, err := f.WriteAt(m.zero[:], int64(minFileLen-len(m.zero))); err != nil {
			return nil, err
		}
		info, err = f.Stat()
		if err != nil {
			return nil, err
		}
		if info.Size() < minFileLen {
			return nil, fmt.Errorf("counter: writing file did not extend it")
		}
	}

	// Map into memory.
	mapping, err := memmap(f)
	if err != nil {
		return nil, err
	}
	m.mapping = mapping
	if !bytes.HasPrefix(m.mapping.Data, hdr) {
		// TODO(rfindley): we can and should do better here, reading the mapped
		// header length and comparing headers exactly.
		return nil, fmt.Errorf("counter: header mismatch")
	}
	m.hdrLen = uint32(len(hdr))

	return m, nil
}
// mappedHeader constructs the on-disk file header for the given metadata
// string: the magic hdrPrefix, a uint32 header length (written via unsafe
// in native byte order), and the metadata itself, with the whole header
// rounded up to a 32-byte boundary.
func mappedHeader(meta string) ([]byte, error) {
	if len(meta) > maxMetaLen {
		return nil, fmt.Errorf("counter: metadata too large")
	}
	np := round(len(hdrPrefix), 4) // 4-byte alignment for the length word
	n := round(np+4+len(meta), 32)
	hdr := make([]byte, n)
	copy(hdr, hdrPrefix)
	*(*uint32)(unsafe.Pointer(&hdr[np])) = uint32(n) // total header length
	copy(hdr[np+4:], meta)
	return hdr, nil
}
// place returns the byte range [start, end) at which a new record for
// name should be written, given the current allocation limit (the byte
// offset of the end of the existing records; 0 means no records yet).
// A record never reaches a page end: the final bytes of each page are
// reserved for use by extend.
func (m *mappedFile) place(limit uint32, name string) (start, end uint32) {
	if limit == 0 {
		// first record in file
		limit = m.hdrLen + hashOff + 4*numHash
	}
	n := round(uint32(16+len(name)), recordUnit) // 16 fixed bytes plus the name
	start = round(limit, recordUnit)             // should already be rounded but just in case
	// Note: Checking for crossing a page boundary would be
	// start/pageSize != (start+n-1)/pageSize,
	// but we are checking for reaching the page end, so no -1.
	// The page end is reserved for use by extend.
	// See the comment in m.extend.
	if start/pageSize != (start+n)/pageSize {
		// bump start to next page
		start = round(limit, pageSize)
	}
	return start, start + n
}
// Indirection over the mmap package's map/unmap primitives.
// NOTE(review): declared as variables rather than called directly,
// presumably so they can be substituted in tests — confirm against the
// package's tests.
var memmap = mmap.Mmap
var munmap = mmap.Munmap
// close unmaps the file (if mapped) and closes the underlying file
// descriptor. It is safe to call multiple times; only the first call
// has any effect.
func (m *mappedFile) close() {
	m.closeOnce.Do(func() {
		if m.mapping != nil {
			munmap(m.mapping)
			m.mapping = nil
		}
		if m.f != nil {
			m.f.Close() // best effort
			m.f = nil
		}
	})
}
// hash returns the hash code for name.
// The implementation is FNV-1a.
// This hash function is a fixed detail of the file format.
// It cannot be changed without also changing the file format version.
func hash(name string) uint32 {
	const (
		offset32 = 2166136261
		prime32  = 16777619
	)
	h := uint32(offset32)
	for _, b := range []byte(name) {
		h = (h ^ uint32(b)) * prime32
	}
	// Fold the high bits in, then reduce into the hash table size.
	return (h ^ (h >> 16)) % numHash
}
// load32 atomically loads the uint32 at byte offset off in the mapping.
// An out-of-range offset (possible when another process has extended the
// file beyond our mapping) yields 0 rather than a panic.
func (m *mappedFile) load32(off uint32) uint32 {
	if int64(off) >= int64(len(m.mapping.Data)) {
		return 0
	}
	return (*atomic.Uint32)(unsafe.Pointer(&m.mapping.Data[off])).Load()
}
// cas32 atomically compare-and-swaps the uint32 at byte offset off in
// the mapping. Unlike load32, an out-of-range offset panics: callers
// only cas32 offsets they have already validated, so going out of range
// is a bug here.
func (m *mappedFile) cas32(off, old, new uint32) bool {
	if int64(off) >= int64(len(m.mapping.Data)) {
		panic("bad cas32") // return false would probably loop
	}
	return (*atomic.Uint32)(unsafe.Pointer(&m.mapping.Data[off])).CompareAndSwap(old, new)
}
// entryAt reads a counter record at the given byte offset.
//
// See the documentation for [mappedFile] for a description of the counter record layout.
//
// ok is false when off lies outside the record region of the mapping,
// or when the record's name would extend past the mapped data.
func (m *mappedFile) entryAt(off uint32) (name []byte, next uint32, v *atomic.Uint64, ok bool) {
	if off < m.hdrLen+hashOff || int64(off)+16 > int64(len(m.mapping.Data)) {
		return nil, 0, nil, false
	}
	// The low 24 bits of the word at off+8 hold the name length; the top
	// byte is a written-marker set by writeEntryAt.
	nameLen := m.load32(off+8) & 0x00ffffff
	if nameLen == 0 || int64(off)+16+int64(nameLen) > int64(len(m.mapping.Data)) {
		return nil, 0, nil, false
	}
	name = m.mapping.Data[off+16 : off+16+nameLen]
	next = m.load32(off + 12)
	v = (*atomic.Uint64)(unsafe.Pointer(&m.mapping.Data[off]))
	return name, next, v, true
}
// writeEntryAt writes a new counter record at the given offset.
//
// See the documentation for [mappedFile] for a description of the counter record layout.
//
// writeEntryAt only returns false in the presence of some form of corruption:
// an offset outside the bounds of the record region in the mapped file.
func (m *mappedFile) writeEntryAt(off uint32, name string) (next *atomic.Uint32, v *atomic.Uint64, ok bool) {
	// TODO(rfindley): shouldn't this first condition be off < m.hdrLen+hashOff+4*numHash?
	if off < m.hdrLen+hashOff || int64(off)+16+int64(len(name)) > int64(len(m.mapping.Data)) {
		return nil, nil, false
	}
	// Write the name bytes first, then publish the record with an atomic
	// store of the length word, whose top byte (0xff) marks it written.
	copy(m.mapping.Data[off+16:], name)
	atomic.StoreUint32((*uint32)(unsafe.Pointer(&m.mapping.Data[off+8])), uint32(len(name))|0xff000000)
	next = (*atomic.Uint32)(unsafe.Pointer(&m.mapping.Data[off+12]))
	v = (*atomic.Uint64)(unsafe.Pointer(&m.mapping.Data[off]))
	return next, v, true
}
// lookup searches the mapped file for a counter record with the given name, returning:
//   - v: the mapped counter value (nil if name was not found)
//   - headOff: the offset of the head pointer (see [mappedFile])
//   - head: the value of the head pointer
//   - ok: whether lookup succeeded (false indicates an invalid entry
//     pointer, e.g. because the file has grown beyond our mapping)
func (m *mappedFile) lookup(name string) (v *atomic.Uint64, headOff, head uint32, ok bool) {
	h := hash(name)
	headOff = m.hdrLen + hashOff + h*4
	head = m.load32(headOff)
	off := head
	// Walk the linked list of records in this hash bucket.
	for off != 0 {
		ename, next, v, ok := m.entryAt(off)
		if !ok {
			return nil, 0, 0, false
		}
		if string(ename) == name {
			return v, headOff, head, true
		}
		off = next
	}
	// Not found; headOff/head let the caller link in a new record.
	return nil, headOff, head, true
}
// newCounter allocates and writes a new counter record with the given name.
//
// If name is already recorded in the file, newCounter returns the existing counter.
//
// If the file had to be re-mapped (because another process extended it,
// or because this call extended it), the new mapping is returned in m1;
// the caller is then responsible for adopting m1 and closing the old
// mapping. On all other successful returns, m1 is nil.
func (m *mappedFile) newCounter(name string) (v *atomic.Uint64, m1 *mappedFile, err error) {
	if len(name) > maxNameLen {
		return nil, nil, fmt.Errorf("counter name too long")
	}
	orig := m
	// Report any replacement mapping to the caller via m1 on success, or
	// clean it up on failure.
	defer func() {
		if m != orig {
			if err != nil {
				m.close()
			} else {
				m1 = m
			}
		}
	}()

	v, headOff, head, ok := m.lookup(name)
	for tries := 0; !ok; tries++ {
		if tries >= 10 {
			debugFatalf("corrupt: failed to remap after 10 tries")
			return nil, nil, errCorrupt
		}
		// Lookup found an invalid pointer,
		// perhaps because the file has grown larger than the mapping.
		limit := m.load32(m.hdrLen + limitOff)
		if limit, datalen := int64(limit), int64(len(m.mapping.Data)); limit <= datalen {
			// Mapping doesn't need to grow, so lookup found actual corruption,
			// in the form of an entry pointer that exceeds the recorded allocation
			// limit. This should never happen, unless the actual file contents are
			// corrupt.
			debugFatalf("corrupt: limit %d is within mapping length %d", limit, datalen)
			return nil, nil, errCorrupt
		}
		// That the recorded limit is greater than the mapped data indicates that
		// an external process has extended the file. Re-map to pick up this extension.
		newM, err := openMapped(m.f.Name(), m.meta)
		if err != nil {
			return nil, nil, err
		}
		if limit, datalen := int64(limit), int64(len(newM.mapping.Data)); limit > datalen {
			// We've re-mapped, yet limit still exceeds the data length. This
			// indicates that the underlying file was somehow truncated, or the
			// recorded limit is corrupt.
			debugFatalf("corrupt: limit %d exceeds file size %d", limit, datalen)
			return nil, nil, errCorrupt
		}
		// If m != orig, this is at least the second time around the loop
		// trying to open the mapping. Close the previous attempt.
		if m != orig {
			m.close()
		}
		m = newM
		v, headOff, head, ok = m.lookup(name)
	}
	if v != nil {
		return v, nil, nil // found an existing record for name
	}

	// Reserve space for new record.
	// We are competing against other programs using the same file,
	// so we use a compare-and-swap on the allocation limit in the header.
	var start, end uint32
	for {
		// Determine where record should end, and grow file if needed.
		limit := m.load32(m.hdrLen + limitOff)
		start, end = m.place(limit, name)
		debugPrintf("place %s at %#x-%#x\n", name, start, end)
		if int64(end) > int64(len(m.mapping.Data)) {
			newM, err := m.extend(end)
			if err != nil {
				return nil, nil, err
			}
			if m != orig {
				m.close()
			}
			m = newM
			continue
		}
		// Attempt to reserve that space for our record.
		if m.cas32(m.hdrLen+limitOff, limit, end) {
			break
		}
	}

	// Write record.
	next, v, ok := m.writeEntryAt(start, name)
	if !ok {
		debugFatalf("corrupt: failed to write entry: %#x+%d vs %#x\n", start, len(name), len(m.mapping.Data))
		return nil, nil, errCorrupt // more likely our math is wrong
	}

	// Link record into hash chain, making sure not to introduce a duplicate.
	// We know name does not appear in the chain starting at head.
	for {
		next.Store(head)
		if m.cas32(headOff, head, start) {
			return v, nil, nil
		}
		// Check new elements in chain for duplicates.
		old := head
		head = m.load32(headOff)
		for off := head; off != old; {
			ename, enext, v, ok := m.entryAt(off)
			if !ok {
				return nil, nil, errCorrupt
			}
			if string(ename) == name {
				next.Store(^uint32(0)) // mark ours as dead
				return v, nil, nil
			}
			off = enext
		}
	}
}
// extend grows the underlying file so that it is at least end bytes long
// (rounded up to a page boundary) and returns a fresh mapping of it.
// The caller remains responsible for closing the old mapping.
func (m *mappedFile) extend(end uint32) (*mappedFile, error) {
	end = round(end, pageSize)
	info, err := m.f.Stat()
	if err != nil {
		return nil, err
	}
	if info.Size() < int64(end) {
		// Note: multiple processes could be calling extend at the same time,
		// but this write only writes the last 4 bytes of the page.
		// The last 4 bytes of the page are reserved for this purpose and hold no data.
		// (In m.place, if a new record would extend to the very end of the page,
		// it is placed in the next page instead.)
		// So it is fine if multiple processes extend at the same time.
		if _, err := m.f.WriteAt(m.zero[:], int64(end)-int64(len(m.zero))); err != nil {
			return nil, err
		}
	}
	newM, err := openMapped(m.f.Name(), m.meta)
	if err != nil {
		return nil, err
	}
	if int64(len(newM.mapping.Data)) < int64(end) {
		// File system or logic bug: new file is somehow not extended.
		// See go.dev/issue/68311, where this appears to have been happening.
		newM.close()
		return nil, errCorrupt
	}
	return newM, err
}
// round returns x rounded up to the next multiple of unit,
// which must be a power of two.
func round[T int | uint32](x T, unit T) T {
	mask := unit - 1
	return (x + mask) &^ mask
}

View File

@@ -0,0 +1,82 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package counter
import (
"bytes"
"fmt"
"strings"
"unsafe"
"golang.org/x/telemetry/internal/mmap"
)
// File is the parsed form of a counter file: the header metadata as
// key/value pairs, and the counter values keyed by (decoded) name.
type File struct {
	Meta  map[string]string // "key: value" lines from the file header metadata
	Count map[string]uint64 // counter value by decoded counter name
}
// Parse parses the contents of a counter file into a File.
//
// filename is used only in error messages; data is the raw file
// contents (see [mappedFile] for the layout).
func Parse(filename string, data []byte) (*File, error) {
	// Early-return guards, rather than the former compound condition, so
	// each failure reports its own reason.
	if len(data) < pageSize {
		return nil, fmt.Errorf("%s: file too short (%d<%d)", filename, len(data), pageSize)
	}
	if !bytes.HasPrefix(data, []byte(hdrPrefix)) {
		return nil, fmt.Errorf("%s: wrong hdr (not %q)", filename, hdrPrefix)
	}
	corrupt := func() (*File, error) {
		// TODO(rfindley): return a useful error message.
		return nil, fmt.Errorf("%s: corrupt counter file", filename)
	}

	f := &File{
		Meta:  make(map[string]string),
		Count: make(map[string]uint64),
	}

	// Read the header: magic prefix, uint32 header length, metadata.
	np := round(len(hdrPrefix), 4)
	hdrLen := *(*uint32)(unsafe.Pointer(&data[np]))
	// Reject impossible header lengths: the header must at least contain
	// the prefix and length word, and must fit within the first page.
	// (Without the lower bound, a corrupt hdrLen would panic the slice
	// expression below.)
	if hdrLen > pageSize || int64(hdrLen) < int64(np+4) {
		return corrupt()
	}
	meta := data[np+4 : hdrLen]
	if i := bytes.IndexByte(meta, 0); i >= 0 {
		meta = meta[:i] // trim zero padding
	}
	m := &mappedFile{
		meta:    string(meta),
		hdrLen:  hdrLen,
		mapping: &mmap.Data{Data: data},
	}

	// Metadata is "key: value" lines separated by newlines.
	lines := strings.Split(m.meta, "\n")
	for _, line := range lines {
		if line == "" {
			continue
		}
		k, v, ok := strings.Cut(line, ": ")
		if !ok {
			return corrupt()
		}
		f.Meta[k] = v
	}

	// Walk each hash chain, collecting counter records.
	for i := uint32(0); i < numHash; i++ {
		headOff := hdrLen + hashOff + i*4
		head := m.load32(headOff)
		off := head
		for off != 0 {
			ename, next, v, ok := m.entryAt(off)
			if !ok {
				return corrupt()
			}
			if _, ok := f.Count[string(ename)]; ok {
				// A duplicate name in a chain indicates corruption.
				return corrupt()
			}
			ctrName := DecodeStack(string(ename))
			f.Count[ctrName] = v.Load()
			off = next
		}
	}
	return f, nil
}

View File

@@ -0,0 +1,212 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package counter
import (
"fmt"
"runtime"
"strings"
"sync"
)
// On the disk, and upstream, stack counters look like sets of
// regular counters with names that include newlines.

// A StackCounter is the in-memory knowledge about a stack counter.
// StackCounters are more expensive to use than regular Counters,
// requiring, at a minimum, a call to runtime.Callers.
type StackCounter struct {
	name  string // prefix for all counter names derived from this StackCounter
	depth int    // maximum number of PCs captured per stack
	file  *file  // the counter file that backs the derived counters

	mu sync.Mutex // guards stacks
	// as this is a detail of the implementation, it could be replaced
	// by a more efficient mechanism
	stacks []stack // one entry per distinct stack observed by Inc
}

// stack pairs a captured call stack with the counter created for it.
type stack struct {
	pcs     []uintptr
	counter *Counter
}
// NewStack returns a new stack counter with the given name prefix and
// maximum stack depth, backed by the default counter file.
func NewStack(name string, depth int) *StackCounter {
	return &StackCounter{name: name, depth: depth, file: &defaultFile}
}
// Inc increments a stack counter. It computes the caller's stack and
// looks up the corresponding counter. It then increments that counter,
// creating it if necessary.
func (c *StackCounter) Inc() {
	pcs := make([]uintptr, c.depth)
	n := runtime.Callers(2, pcs) // caller of Inc
	pcs = pcs[:n]

	c.mu.Lock()
	defer c.mu.Unlock()

	// Existing counter?
	var ctr *Counter
	for _, s := range c.stacks {
		if eq(s.pcs, pcs) {
			if s.counter != nil {
				ctr = s.counter
				break
			}
		}
	}

	if ctr == nil {
		// Create new counter.
		ctr = &Counter{
			name: EncodeStack(pcs, c.name),
			file: c.file,
		}
		c.stacks = append(c.stacks, stack{pcs: pcs, counter: ctr})
	}
	ctr.Inc()
}
// EncodeStack returns the name of the counter to
// use for the given stack of program counters.
// The name encodes the stack: prefix, then one line per frame,
// with a repeated import path compressed to a `"` ditto mark
// (see DecodeStack for the inverse).
func EncodeStack(pcs []uintptr, prefix string) string {
	var locs []string
	lastImport := ""
	frs := runtime.CallersFrames(pcs)
	for {
		fr, more := frs.Next()
		// TODO(adonovan): this CutLast(".") operation isn't
		// appropriate for generic function symbols.
		path, fname := cutLastDot(fr.Function)
		if path == lastImport {
			path = `"` // (a ditto mark)
		} else {
			lastImport = path
		}
		var loc string
		if fr.Func != nil {
			// Use function-relative line numbering.
			// f:+2 means two lines into function f.
			// f:-1 should never happen, but be conservative.
			//
			// An inlined call is replaced by a NOP instruction
			// with the correct pclntab information.
			_, entryLine := fr.Func.FileLine(fr.Entry)
			loc = fmt.Sprintf("%s.%s:%+d,+0x%x", path, fname, fr.Line-entryLine, fr.PC-fr.Entry)
		} else {
			// The function is non-Go code or is fully inlined:
			// use absolute line number within enclosing file.
			//
			// For inlined calls, the PC and Entry values
			// both refer to the enclosing combined function.
			// For example, both these PCs are relative to "caller":
			//
			// callee:=1,+0x12 ('=' means inlined)
			// caller:+2,+0x34
			loc = fmt.Sprintf("%s.%s:=%d,+0x%x", path, fname, fr.Line, fr.PC-fr.Entry)
		}
		locs = append(locs, loc)
		if !more {
			break
		}
	}

	name := prefix + "\n" + strings.Join(locs, "\n")
	// Counter names must fit in a record; truncate with a marker if not.
	if len(name) > maxNameLen {
		const bad = "\ntruncated\n"
		name = name[:maxNameLen-len(bad)] + bad
	}
	return name
}
// DecodeStack expands the (compressed) stack encoded in the counter name.
//
// The encoding (see EncodeStack) abbreviates a repeated import path with
// a `"` ditto mark; decoding substitutes the most recent full path.
func DecodeStack(ename string) string {
	if !strings.Contains(ename, "\n") {
		// Regular counters are not encoded.
		return ename
	}
	lines := strings.Split(ename, "\n")
	var lastPath string // empty or ends with "."
	for i, line := range lines {
		path, rest := cutLastDot(line)
		switch {
		case path == "":
			// No dot in this line: leave it unchanged.
		case path == `"`:
			// Ditto mark: restore the previous import path.
			lines[i] = lastPath + rest
		default:
			lastPath = path + "."
			// line unchanged
		}
	}
	return strings.Join(lines, "\n") // trailing \n?
}
// cutLastDot splits x at its last dot:
// input is <import path>.<function name>,
// output is (import path, function name).
// If x contains no dot, before is empty and after is all of x.
func cutLastDot(x string) (before, after string) {
	if i := strings.LastIndexByte(x, '.'); i >= 0 {
		return x[:i], x[i+1:]
	}
	return "", x
}
// Names reports all the counter names associated with a StackCounter:
// one encoded name per distinct stack observed by Inc so far.
func (c *StackCounter) Names() []string {
	c.mu.Lock()
	defer c.mu.Unlock()
	names := make([]string, len(c.stacks))
	for i, s := range c.stacks {
		names[i] = s.counter.Name()
	}
	return names
}
// Counters returns the known Counters for a StackCounter
// (one per distinct stack observed by Inc in this process).
// There may be more in the count file.
func (c *StackCounter) Counters() []*Counter {
	c.mu.Lock()
	defer c.mu.Unlock()
	counters := make([]*Counter, len(c.stacks))
	for i, s := range c.stacks {
		counters[i] = s.counter
	}
	return counters
}
// eq reports whether a and b contain the same program counters
// in the same order.
func eq(a, b []uintptr) bool {
	if len(a) != len(b) {
		return false
	}
	for i, v := range a {
		if v != b[i] {
			return false
		}
	}
	return true
}
// ReadStack reads the given stack counter, returning the value of each
// known sub-counter keyed by its decoded (human-readable) stack name.
// This is the implementation of
// golang.org/x/telemetry/counter/countertest.ReadStackCounter.
func ReadStack(c *StackCounter) (map[string]uint64, error) {
	ret := map[string]uint64{}
	for _, ctr := range c.Counters() {
		v, err := Read(ctr)
		if err != nil {
			return nil, err
		}
		ret[DecodeStack(ctr.Name())] = v
	}
	return ret, nil
}
// IsStackCounter reports whether the counter name is for a stack counter.
// Stack counter names are the only counter names that contain newlines.
func IsStackCounter(name string) bool {
	return strings.IndexByte(name, '\n') >= 0
}

View File

@@ -0,0 +1,328 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package crashmonitor
// This file defines a monitor that reports arbitrary Go runtime
// crashes to telemetry.
import (
"bytes"
"fmt"
"io"
"log"
"os"
"reflect"
"runtime/debug"
"strconv"
"strings"
"golang.org/x/telemetry/internal/counter"
)
// Parent sets up the parent side of the crashmonitor. It requires
// exclusive use of a writable pipe connected to the child process's stdin.
// The sentinel value written first allows the child to translate the
// parent's PCs into its own address space (see sentinel).
func Parent(pipe *os.File) {
	writeSentinel(pipe)
	// Ensure that we get pc=0x%x values in the traceback.
	debug.SetTraceback("system")
	debug.SetCrashOutput(pipe, debug.CrashOptions{}) // ignore error
}
// Child runs the part of the crashmonitor that runs in the child process.
// It expects its stdin to be connected via a pipe to the parent which has
// run Parent.
//
// Child never returns: it always terminates the process, via os.Exit or
// log.Fatalf.
func Child() {
	// Wait for parent process's dying gasp.
	// If the parent dies for any reason this read will return.
	data, err := io.ReadAll(os.Stdin)
	if err != nil {
		log.Fatalf("failed to read from input pipe: %v", err)
	}

	// If the only line is the sentinel, it wasn't a crash.
	if bytes.Count(data, []byte("\n")) < 2 {
		childExitHook()
		os.Exit(0) // parent exited without crash report
	}

	log.Printf("parent reported crash:\n%s", data)

	// Parse the stack out of the crash report
	// and record a telemetry count for it.
	name, err := telemetryCounterName(data)
	if err != nil {
		// Keep count of how often this happens
		// so that we can investigate if necessary.
		incrementCounter("crash/malformed")

		// Something went wrong.
		// Save the crash securely in the file system.
		f, err := os.CreateTemp(os.TempDir(), "*.crash")
		if err != nil {
			log.Fatal(err)
		}
		if _, err := f.Write(data); err != nil {
			log.Fatal(err)
		}
		if err := f.Close(); err != nil {
			log.Fatal(err)
		}
		log.Printf("failed to report crash to telemetry: %v", err)
		log.Fatalf("crash report saved at %s", f.Name())
	}

	incrementCounter(name)

	childExitHook()
	log.Fatalf("telemetry crash recorded")
}
// Hooks for the child's side effects, declared as variables so they can
// be replaced. (stubbed by test)
var (
	incrementCounter = func(name string) { counter.New(name).Inc() }
	childExitHook    = func() {}
)
// The sentinel function returns its address. The difference between
// this value as observed by calls in two different processes of the
// same executable tells us the relative offset of their text segments.
//
// It would be nice if SetCrashOutput took care of this as it's fiddly
// and likely to confuse every user at first.
func sentinel() uint64 {
	return uint64(reflect.ValueOf(sentinel).Pointer())
}
// writeSentinel writes the sentinel address to out in the
// "sentinel %x" form that parseStackPCs expects to find
// at the start of a crash report.
func writeSentinel(out io.Writer) {
	fmt.Fprintf(out, "sentinel %x\n", sentinel())
}
// telemetryCounterName parses a crash report produced by the Go
// runtime, extracts the stack of the first runnable goroutine,
// converts each line into telemetry form ("symbol:relative-line"),
// and returns this as the name of a counter.
func telemetryCounterName(crash []byte) (string, error) {
	pcs, err := parseStackPCs(string(crash))
	if err != nil {
		return "", err
	}

	// Limit the number of frames we request.
	pcs = pcs[:min(len(pcs), 16)]

	if len(pcs) == 0 {
		// This can occur if all goroutines are idle, as when
		// caught in a deadlock, or killed by an async signal
		// while blocked.
		//
		// TODO(adonovan): consider how to report such
		// situations. Reporting a goroutine in [sleep] or
		// [select] state could be quite confusing without
		// further information about the nature of the crash,
		// as the problem is not local to the code location.
		//
		// For now, we keep count of this situation so that we
		// can access whether it needs a more involved solution.
		return "crash/no-running-goroutine", nil
	}

	// This string appears at the start of all
	// crashmonitor-generated counter names.
	//
	// It is tempting to expose this as a parameter of Start, but
	// it is not without risk. What value should most programs
	// provide? There's no point giving the name of the executable
	// as this is already recorded by telemetry. What if the
	// application runs in multiple modes? Then it might be useful
	// to record the mode. The problem is that an application with
	// multiple modes probably doesn't know its mode by line 1 of
	// main.main: it might require flag or argument parsing, or
	// even validation of an environment variable, and we really
	// want to steer users aware from any logic before Start. The
	// flags and arguments will be wrong in the child process, and
	// every extra conditional branch creates a risk that the
	// recursively executed child program will behave not like the
	// monitor but like the application. If the child process
	// exits before calling Start, then the parent application
	// will not have a monitor, and its crash reports will be
	// discarded (written in to a pipe that is never read).
	//
	// So for now, we use this constant string.
	const prefix = "crash/crash"
	return counter.EncodeStack(pcs, prefix), nil
}
// parseStackPCs parses the parent process's program counters for the
// first running goroutine out of a GOTRACEBACK=system traceback,
// adjusting them so that they are valid for the child process's text
// segment.
//
// This function returns only program counter values, ensuring that
// there is no possibility of strings from the crash report (which may
// contain PII) leaking into the telemetry system.
func parseStackPCs(crash string) ([]uintptr, error) {
	// getSymbol parses the symbol name out of a line of the form:
	// SYMBOL(ARGS)
	//
	// Note: SYMBOL may contain parens "pkg.(*T).method". However, type
	// parameters are always replaced with ..., so they cannot introduce
	// more parens. e.g., "pkg.(*T[...]).method".
	//
	// ARGS can contain parens. We want the first paren that is not
	// immediately preceded by a ".".
	//
	// TODO(prattmic): This is mildly complicated and is only used to find
	// runtime.sigpanic, so perhaps simplify this by checking explicitly
	// for sigpanic.
	getSymbol := func(line string) (string, error) {
		var prev rune
		for i, c := range line {
			if line[i] != '(' {
				prev = c
				continue
			}
			if prev == '.' {
				// ".(": part of the symbol, e.g. "pkg.(*T)".
				prev = c
				continue
			}
			return line[:i], nil
		}
		return "", fmt.Errorf("no symbol for stack frame: %s", line)
	}

	// getPC parses the PC out of a line of the form:
	// \tFILE:LINE +0xRELPC sp=... fp=... pc=...
	getPC := func(line string) (uint64, error) {
		_, pcstr, ok := strings.Cut(line, " pc=") // e.g. pc=0x%x
		if !ok {
			return 0, fmt.Errorf("no pc= for stack frame: %s", line)
		}
		return strconv.ParseUint(pcstr, 0, 64) // 0 => allow 0x prefix
	}

	var (
		pcs            []uintptr
		parentSentinel uint64
		childSentinel  = sentinel()
		on             = false // are we in the first running goroutine?
		lines          = strings.Split(crash, "\n")
		symLine        = true // within a goroutine, every other line is a symbol or file/line/pc location, starting with symbol.
		currSymbol     string
		prevSymbol     string // symbol of the most recent previous frame with a PC.
	)
	for i := 0; i < len(lines); i++ {
		line := lines[i]

		// Read sentinel value.
		if parentSentinel == 0 && strings.HasPrefix(line, "sentinel ") {
			_, err := fmt.Sscanf(line, "sentinel %x", &parentSentinel)
			if err != nil {
				return nil, fmt.Errorf("can't read sentinel line")
			}
			continue
		}

		// Search for "goroutine GID [STATUS]"
		if !on {
			if strings.HasPrefix(line, "goroutine ") &&
				strings.Contains(line, " [running]:") {
				on = true

				if parentSentinel == 0 {
					return nil, fmt.Errorf("no sentinel value in crash report")
				}
			}
			continue
		}

		// A blank line marks end of a goroutine stack.
		if line == "" {
			break
		}

		// Skip the final "created by SYMBOL in goroutine GID" part.
		if strings.HasPrefix(line, "created by ") {
			break
		}

		// Expect a pair of lines:
		// SYMBOL(ARGS)
		// \tFILE:LINE +0xRELPC sp=0x%x fp=0x%x pc=0x%x
		// Note: SYMBOL may contain parens "pkg.(*T).method"
		// The RELPC is sometimes missing.

		if symLine {
			var err error
			currSymbol, err = getSymbol(line)
			if err != nil {
				return nil, fmt.Errorf("error extracting symbol: %v", err)
			}

			symLine = false // Next line is FILE:LINE.
		} else {
			// Parse the PC, and correct for the parent and child's
			// different mappings of the text section.
			pc, err := getPC(line)
			if err != nil {
				// Inlined frame, perhaps; skip it.

				// Done with this frame. Next line is a new frame.
				//
				// Don't update prevSymbol; we only want to
				// track frames with a PC.
				currSymbol = ""
				symLine = true
				continue
			}

			// Translate the parent's PC into the child's address space.
			pc = pc - parentSentinel + childSentinel

			// If the previous frame was sigpanic, then this frame
			// was a trap (e.g., SIGSEGV).
			//
			// Typically all middle frames are calls, and report
			// the "return PC". That is, the instruction following
			// the CALL where the callee will eventually return to.
			//
			// runtime.CallersFrames is aware of this property and
			// will decrement each PC by 1 to "back up" to the
			// location of the CALL, which is the actual line
			// number the user expects.
			//
			// This does not work for traps, as a trap is not a
			// call, so the reported PC is not the return PC, but
			// the actual PC of the trap.
			//
			// runtime.Callers is aware of this and will
			// intentionally increment trap PCs in order to correct
			// for the decrement performed by
			// runtime.CallersFrames. See runtime.tracebackPCs and
			// runtume.(*unwinder).symPC.
			//
			// We must emulate the same behavior, otherwise we will
			// report the location of the instruction immediately
			// prior to the trap, which may be on a different line,
			// or even a different inlined functions.
			//
			// TODO(prattmic): The runtime applies the same trap
			// behavior for other "injected calls", see injectCall
			// in runtime.(*unwinder).next. Do we want to handle
			// those as well? I don't believe we'd ever see
			// runtime.asyncPreempt or runtime.debugCallV2 in a
			// typical crash.
			if prevSymbol == "runtime.sigpanic" {
				pc++
			}

			pcs = append(pcs, uintptr(pc))

			// Done with this frame. Next line is a new frame.
			prevSymbol = currSymbol
			currSymbol = ""
			symLine = true
		}
	}
	return pcs, nil
}

36
vendor/golang.org/x/telemetry/internal/mmap/mmap.go generated vendored Normal file
View File

@@ -0,0 +1,36 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This package is a lightly modified version of the mmap code
// in github.com/google/codesearch/index.
// The mmap package provides an abstraction for memory mapping files
// on different platforms.
package mmap
import (
"os"
)
// Data holds the result of mapping a file into memory.
//
// The backing file is never closed, so Data
// remains valid for the lifetime of the process.
type Data struct {
	// TODO(pjw): might be better to define versions of Data
	// for the 3 specializations
	f    *os.File // the mapped file, retained so the mapping remains valid
	Data []byte   // the mapped contents (or, on platforms without mmap, a full copy)
	// Windows holds the windows.Handle of the file-mapping object on
	// Windows; nil on other platforms.
	Windows interface{}
}
// Mmap maps the given file into memory.
// (To remap a file after it has grown, simply call Mmap again and
// release the old mapping with Munmap.)
func Mmap(f *os.File) (*Data, error) {
	return mmapFile(f)
}
// Munmap unmaps the given file from memory.
// After Munmap returns, d.Data must no longer be accessed.
func Munmap(d *Data) error {
	return munmapFile(d)
}

View File

@@ -0,0 +1,25 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build (js && wasm) || wasip1 || plan9
package mmap
import (
"io"
"os"
)
// mmapFile on other systems doesn't mmap the file. It just reads everything.
// As a consequence, writes through Data are not reflected in the file.
func mmapFile(f *os.File) (*Data, error) {
	b, err := io.ReadAll(f)
	if err != nil {
		return nil, err
	}
	return &Data{f, b, nil}, nil
}
// munmapFile is a no-op on platforms where mmapFile merely reads the
// file into memory: there is no mapping to release.
func munmapFile(_ *Data) error {
	return nil
}

View File

@@ -0,0 +1,47 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build unix
package mmap
import (
"fmt"
"io/fs"
"os"
"syscall"
)
// mmapFile maps f into memory as a shared, read-write mapping,
// returning a Data whose Data slice has exactly the file's length.
// A zero-size file yields an empty (nil) mapping.
func mmapFile(f *os.File) (*Data, error) {
	st, err := f.Stat()
	if err != nil {
		return nil, err
	}
	size := st.Size()
	pagesize := int64(os.Getpagesize())
	// Guard against int overflow of the rounded-up length on 32-bit systems.
	if int64(int(size+(pagesize-1))) != size+(pagesize-1) {
		return nil, fmt.Errorf("%s: too large for mmap", f.Name())
	}
	n := int(size)
	if n == 0 {
		return &Data{f, nil, nil}, nil
	}
	mmapLength := int(((size + pagesize - 1) / pagesize) * pagesize) // round up to page size
	data, err := syscall.Mmap(int(f.Fd()), 0, mmapLength, syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_SHARED)
	if err != nil {
		return nil, &fs.PathError{Op: "mmap", Path: f.Name(), Err: err}
	}
	// Expose only the file's bytes, not the page-rounding tail.
	return &Data{f, data[:n], nil}, nil
}
// munmapFile releases the mapping held by d.
// An empty mapping (as produced by mmapFile for a zero-size file) is a
// no-op, since nothing was mapped.
func munmapFile(d *Data) error {
	if len(d.Data) == 0 {
		return nil
	}
	err := syscall.Munmap(d.Data)
	if err != nil {
		return &fs.PathError{Op: "munmap", Path: d.f.Name(), Err: err}
	}
	return nil
}

View File

@@ -0,0 +1,52 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package mmap
import (
"fmt"
"os"
"syscall"
"unsafe"
"golang.org/x/sys/windows"
)
// mmapFile maps f into memory via a Windows file-mapping object,
// read-write and shared. The mapping handle is stored in Data.Windows
// so munmapFile can close it. A zero-size file yields an empty mapping
// with a nil handle.
func mmapFile(f *os.File) (*Data, error) {
	st, err := f.Stat()
	if err != nil {
		return nil, err
	}
	size := st.Size()
	if size == 0 {
		return &Data{f, nil, nil}, nil
	}
	// set the min and max sizes to zero to map the whole file, as described in
	// https://learn.microsoft.com/en-us/windows/win32/memory/creating-a-file-mapping-object#file-mapping-size
	h, err := windows.CreateFileMapping(windows.Handle(f.Fd()), nil, syscall.PAGE_READWRITE, 0, 0, nil)
	if err != nil {
		return nil, fmt.Errorf("CreateFileMapping %s: %w", f.Name(), err)
	}
	// the mapping extends from zero to the end of the file mapping
	// https://learn.microsoft.com/en-us/windows/win32/api/memoryapi/nf-memoryapi-mapviewoffile
	addr, err := windows.MapViewOfFile(h, syscall.FILE_MAP_READ|syscall.FILE_MAP_WRITE, 0, 0, 0)
	if err != nil {
		return nil, fmt.Errorf("MapViewOfFile %s: %w", f.Name(), err)
	}
	// Note: previously, we called windows.VirtualQuery here to get the exact
	// size of the memory mapped region, but VirtualQuery reported sizes smaller
	// than the actual file size (hypothesis: VirtualQuery only reports pages in
	// a certain state, and newly written pages may not be counted).
	return &Data{f, unsafe.Slice((*byte)(unsafe.Pointer(addr)), size), h}, nil
}
// munmapFile unmaps d's view, closes the file-mapping handle (if any),
// and closes the underlying file.
//
// An empty mapping (as produced by mmapFile for a zero-size file, where
// d.Data is nil and d.Windows is nil) has no view to unmap; guarding on
// len(d.Data), as the unix implementation does, avoids an index panic
// on &d.Data[0] in that case.
func munmapFile(d *Data) error {
	var err error
	if len(d.Data) > 0 {
		err = windows.UnmapViewOfFile(uintptr(unsafe.Pointer(&d.Data[0])))
	}
	x, ok := d.Windows.(windows.Handle)
	if ok {
		windows.CloseHandle(x) // best effort
	}
	d.f.Close() // best effort
	return err
}

View File

@@ -0,0 +1,9 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package telemetry
// DateOnly is the date-only time layout, equivalent to time.DateOnly
// (which was added in go 1.20).
//
// TODO(rfindley): replace uses of DateOnly with time.DateOnly once we no
// longer support building gopls with go 1.19.
const DateOnly = "2006-01-02"

163
vendor/golang.org/x/telemetry/internal/telemetry/dir.go generated vendored Normal file
View File

@@ -0,0 +1,163 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package telemetry manages the telemetry mode file.
package telemetry
import (
"fmt"
"os"
"path/filepath"
"runtime"
"strings"
"time"
)
// Default is the default directory containing Go telemetry configuration and
// data.
//
// If Default is uninitialized, Default.Mode will be "off". As a consequence,
// no data should be written to the directory, and so the path values of
// LocalDir, UploadDir, etc. must not matter.
//
// Default is a global for convenience and testing, but should not be mutated
// outside of tests.
//
// TODO(rfindley): it would be nice to completely eliminate this global state,
// or at least push it into the golang.org/x/telemetry package.
var Default Dir
// A Dir holds paths to telemetry data inside a directory.
type Dir struct {
	dir, local, upload, debug, modefile string
}

// NewDir creates a new Dir encapsulating paths in the given dir.
//
// NewDir does not create any new directories or files--it merely encapsulates
// the telemetry directory layout.
func NewDir(dir string) Dir {
	sub := func(name string) string { return filepath.Join(dir, name) }
	return Dir{
		dir:      dir,
		local:    sub("local"),
		upload:   sub("upload"),
		debug:    sub("debug"),
		modefile: sub("mode"),
	}
}
// init points Default at <UserConfigDir>/go/telemetry. If the user
// config directory cannot be determined, Default is left as the zero
// Dir, which behaves as mode "off" (see the Default doc comment).
func init() {
	cfgDir, err := os.UserConfigDir()
	if err != nil {
		return
	}
	Default = NewDir(filepath.Join(cfgDir, "go", "telemetry"))
}
// Dir returns the root telemetry directory.
func (d Dir) Dir() string {
	return d.dir
}
// LocalDir returns the directory holding count files and local reports.
func (d Dir) LocalDir() string {
	return d.local
}
// UploadDir returns the directory holding uploaded reports.
func (d Dir) UploadDir() string {
	return d.upload
}
// DebugDir returns the directory holding debug log files.
func (d Dir) DebugDir() string {
	return d.debug
}
// ModeFile returns the path of the file recording the telemetry mode.
func (d Dir) ModeFile() string {
	return d.modefile
}
// SetMode updates the telemetry mode with the given mode.
// Acceptable values for mode are "on", "off", or "local".
//
// SetMode always writes the mode file, and explicitly records the date at
// which the modefile was updated. This means that calling SetMode with "on"
// effectively resets the timeout before the next telemetry report is uploaded.
func (d Dir) SetMode(mode string) error {
	return d.SetModeAsOf(mode, time.Now())
}
// SetModeAsOf is like SetMode, but accepts an explicit time to use to
// back-date the mode state. This exists only for testing purposes.
//
// The mode file content is "<mode> <YYYY-MM-DD>" (see Dir.Mode for the
// corresponding parsing).
func (d Dir) SetModeAsOf(mode string, asofTime time.Time) error {
	mode = strings.TrimSpace(mode)
	switch mode {
	case "on", "off", "local":
	default:
		return fmt.Errorf("invalid telemetry mode: %q", mode)
	}
	if d.modefile == "" {
		// A zero-valued Dir has no mode file (see Default).
		return fmt.Errorf("cannot determine telemetry mode file name")
	}
	// TODO(rfindley): why is this not 777, consistent with the use of 666 below?
	if err := os.MkdirAll(filepath.Dir(d.modefile), 0755); err != nil {
		return fmt.Errorf("cannot create a telemetry mode file: %w", err)
	}
	asof := asofTime.UTC().Format(DateOnly)
	// Defensively guarantee that we can parse the asof time.
	if _, err := time.Parse(DateOnly, asof); err != nil {
		return fmt.Errorf("internal error: invalid mode date %q: %v", asof, err)
	}
	data := []byte(mode + " " + asof)
	return os.WriteFile(d.modefile, data, 0666)
}
// Mode returns the current telemetry mode, as well as the time that the mode
// was effective.
//
// If there is no effective time, the second result is the zero time.
//
// If Mode is "off", no data should be written to the telemetry directory, and
// the other paths values referenced by Dir should be considered undefined.
// This accounts for the case where initializing [Default] fails, and therefore
// local telemetry paths are unknown.
//
// An unreadable or missing mode file defaults to mode "local".
func (d Dir) Mode() (string, time.Time) {
	if d.modefile == "" {
		return "off", time.Time{} // it's likely LocalDir/UploadDir are empty too. Turn off telemetry.
	}
	data, err := os.ReadFile(d.modefile)
	if err != nil {
		return "local", time.Time{} // default
	}
	mode := string(data)
	mode = strings.TrimSpace(mode)
	// Forward compatibility for https://go.dev/issue/63142#issuecomment-1734025130
	//
	// If the modefile contains a date, return it.
	// The file format written by SetModeAsOf is "<mode> <YYYY-MM-DD>".
	if idx := strings.Index(mode, " "); idx >= 0 {
		d, err := time.Parse(DateOnly, mode[idx+1:])
		if err != nil {
			// Unparseable date: report the mode with a zero time.
			d = time.Time{}
		}
		return mode[:idx], d
	}
	return mode, time.Time{}
}
// DisabledOnPlatform indicates whether telemetry is disabled
// due to bugs in the current platform.
//
// The expression below is a compile-time constant over runtime.GOOS and
// runtime.GOARCH, so the dead branch is eliminated on supported platforms.
//
// TODO(rfindley): move to a more appropriate file.
const DisabledOnPlatform = false ||
	// The following platforms could potentially be supported in the future:
	runtime.GOOS == "openbsd" || // #60614
	runtime.GOOS == "solaris" || // #60968 #60970
	runtime.GOOS == "android" || // #60967
	runtime.GOOS == "illumos" || // #65544
	// These platforms fundamentally can't be supported:
	runtime.GOOS == "js" || // #60971
	runtime.GOOS == "wasip1" || // #60971
	runtime.GOOS == "plan9" || // https://github.com/golang/go/issues/57540#issuecomment-1470766639
	runtime.GOARCH == "mips" || runtime.GOARCH == "mipsle" // mips lacks cross-process 64-bit atomics

View File

@@ -0,0 +1,57 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package telemetry
import (
"go/version"
"os"
"path/filepath"
"runtime/debug"
"strings"
)
// IsToolchainProgram reports whether a program with the given path is a Go
// toolchain program, i.e. one whose package path lies under "cmd/".
func IsToolchainProgram(progPath string) bool {
	first, _, found := strings.Cut(progPath, "/")
	return found && first == "cmd"
}
// ProgramInfo extracts the go version, program package path, and program
// version to use for counter files.
//
// For programs in the Go toolchain, the program version will be the same as
// the Go version, and will typically be of the form "go1.2.3", not a semantic
// version of the form "v1.2.3". Go versions may also include spaces and
// special characters.
func ProgramInfo(info *debug.BuildInfo) (goVers, progPath, progVers string) {
goVers = info.GoVersion
if strings.Contains(goVers, "devel") || strings.Contains(goVers, "-") || !version.IsValid(goVers) {
goVers = "devel"
}
progPath = info.Path
if progPath == "" {
progPath = strings.TrimSuffix(filepath.Base(os.Args[0]), ".exe")
}
// Main module version information is not populated for the cmd module, but
// we can re-use the Go version here.
if IsToolchainProgram(progPath) {
progVers = goVers
} else {
progVers = info.Main.Version
if strings.Contains(progVers, "devel") || strings.Count(progVers, "-") > 1 {
// Heuristically mark all pseudo-version-like version strings as "devel"
// to avoid creating too many counter files.
// We should not use regexp that pulls in large dependencies.
// Pseudo-versions have at least three parts (https://go.dev/ref/mod#pseudo-versions).
// This heuristic still allows use to track prerelease
// versions (e.g. gopls@v0.16.0-pre.1, vscgo@v0.42.0-rc.1).
progVers = "devel"
}
}
return goVers, progPath, progVers
}

View File

@@ -0,0 +1,51 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package telemetry
// Common types and directories used by multiple packages.
// An UploadConfig controls what data is uploaded.
type UploadConfig struct {
	GOOS []string // permitted values of GOOS
	GOARCH []string // permitted values of GOARCH
	GoVersion []string // permitted Go versions
	SampleRate float64 // reports with X above this rate are not uploaded
	Programs []*ProgramConfig // per-program counter/stack configuration
}
// A ProgramConfig controls what data is uploaded for one program.
type ProgramConfig struct {
	// the counter names may have to be
	// repeated for each program. (e.g., if the counters are in a package
	// that is used in more than one program.)
	Name string
	Versions []string // versions present in a counterconfig
	Counters []CounterConfig `json:",omitempty"`
	Stacks []CounterConfig `json:",omitempty"`
}
// A CounterConfig describes a single uploadable counter (or stack counter).
type CounterConfig struct {
	Name string // The "collapsed" counter: <chart>:{<bucket1>,<bucket2>,...}
	Rate float64 // If X <= Rate, report this counter
	Depth int `json:",omitempty"` // for stack counters
}
// A Report is the weekly aggregate of counters.
type Report struct {
	Week string // End day this report covers (YYYY-MM-DD)
	LastWeek string // Week field from latest previous report uploaded
	X float64 // A random probability used to determine which counters are uploaded
	Programs []*ProgramReport
	Config string // version of UploadConfig used
}
// A ProgramReport holds the aggregated counters for one program build.
type ProgramReport struct {
	Program string // Package path of the program.
	Version string // Program version. Go version if the program is part of the go distribution. Module version, otherwise.
	GoVersion string // Go version used to build the program.
	GOOS string
	GOARCH string
	Counters map[string]int64
	Stacks map[string]int64
}

45
vendor/golang.org/x/telemetry/internal/upload/Doc.txt generated vendored Normal file
View File

@@ -0,0 +1,45 @@
The upload process converts count files into reports, and
uploads reports. There will be only one report, named YYYY-MM-DD.json,
for a given day.
First phase. Look at the localdir (os.UserConfigDir()/go/telemetry/local)
and find all .count and .json files. Find the count files that are no
longer active by looking at their metadata.
Second phase. Group the inactive count files by their expiry date, and
for each date generate the local report and the upload report. (The upload
report only contains the counters in the upload configuration.) The upload
report is saved in the local directory with a name like YYYY-MM-DD.json, if
there is no file already existing with that name.
If the local report is different, it is saved in the local directory
with a name like local.YYYY-MM-DD.json. The new upload report is
added to the list of .json files from the first phase. At this point
the count files are no longer needed and can be deleted.
Third phase. Look at the .json files in the list from the first phase.
If the name starts with local, skip it. If there is a file with the
identical name in the upload directory, remove the one in the local directory.
Otherwise try to upload the one in the local directory.
If the upload succeeds, move the file to the upload directory.
There are various error conditions.
1. Several processes could look at localdir and see work to do.
1A. They could see different sets of expired count files for some day.
This could happen if another process is removing count files. In this
case there is already a YYYY-MM-DD.json file either in localdir
or uploaddir, so the process seeing fewer count files will not generate
a report.
1B. They could see the same count files, and no report in either directory.
They will both generate (in memory) reports and check to see if there
is a YYYY-MM-DD.json file in either directory. They could both then
write two files with the same name, but different X values, but
otherwise the same contents. The X values are very close to the front
of the file. Assuming reasonable file system semantics one version of
the file will be written. To minimize this, just before writing reports
the code checks again to see if they exist.
1C. Once there is an existing well-formed file YYYY-MM-DD.json in localdir
eventually the upload will succeed, and the file will be moved to uploaddir.
It is possible that other processes will not see the file in uploaddir and
upload it again and also move it to uploaddir. This is harmless as all
the uploaded files are identical.

85
vendor/golang.org/x/telemetry/internal/upload/date.go generated vendored Normal file
View File

@@ -0,0 +1,85 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package upload
import (
"fmt"
"os"
"sync"
"time"
"golang.org/x/telemetry/internal/counter"
"golang.org/x/telemetry/internal/telemetry"
)
// time and date handling
// distantPast is the maximum age of a report that is still uploaded:
// reports older than 21 days are dropped.
var distantPast = 21 * 24 * time.Hour
// tooOld reports whether the report for date (YYYY-MM-DD) is more than
// distantPast older than uploadStartTime. reports that are too old (21 days)
// are not uploaded. An unparseable date is logged and treated as not too old.
func (u *uploader) tooOld(date string, uploadStartTime time.Time) bool {
	t, err := time.Parse(telemetry.DateOnly, date)
	if err != nil {
		u.logger.Printf("tooOld: %v", err)
		return false
	}
	age := uploadStartTime.Sub(t)
	return age > distantPast
}
// counterDateSpan parses the counter file named fname and returns the (begin,
// end) span recorded in its metadata, or an error if this data could not be
// extracted.
//
// The span is read from the RFC3339 "TimeBegin" and "TimeEnd" entries of the
// count file's metadata; both must be present and well-formed.
func (u *uploader) counterDateSpan(fname string) (begin, end time.Time, _ error) {
	parsed, err := u.parseCountFile(fname)
	if err != nil {
		return time.Time{}, time.Time{}, err
	}
	timeBegin, ok := parsed.Meta["TimeBegin"]
	if !ok {
		return time.Time{}, time.Time{}, fmt.Errorf("missing counter metadata for TimeBegin")
	}
	begin, err = time.Parse(time.RFC3339, timeBegin)
	if err != nil {
		return time.Time{}, time.Time{}, fmt.Errorf("failed to parse TimeBegin: %v", err)
	}
	timeEnd, ok := parsed.Meta["TimeEnd"]
	if !ok {
		return time.Time{}, time.Time{}, fmt.Errorf("missing counter metadata for TimeEnd")
	}
	end, err = time.Parse(time.RFC3339, timeEnd)
	if err != nil {
		return time.Time{}, time.Time{}, fmt.Errorf("failed to parse TimeEnd: %v", err)
	}
	return begin, end, nil
}
// avoid parsing count files multiple times
// A parsedCache is a mutex-guarded memoization of counter.Parse results,
// keyed by file name.
type parsedCache struct {
	mu sync.Mutex
	m map[string]*counter.File
}
// parseCountFile reads and parses the counter file fname, caching the result
// in u.cache so repeated lookups of the same file do not re-read it.
func (u *uploader) parseCountFile(fname string) (*counter.File, error) {
	u.cache.mu.Lock()
	defer u.cache.mu.Unlock()
	if u.cache.m == nil {
		// Lazily allocate the cache map on first use.
		u.cache.m = make(map[string]*counter.File)
	}
	if f, ok := u.cache.m[fname]; ok {
		return f, nil
	}
	buf, err := os.ReadFile(fname)
	if err != nil {
		return nil, fmt.Errorf("parse ReadFile: %v for %s", err, fname)
	}
	f, err := counter.Parse(fname, buf)
	if err != nil {
		return nil, fmt.Errorf("parse Parse: %v for %s", err, fname)
	}
	u.cache.m[fname] = f
	return f, nil
}

View File

@@ -0,0 +1,102 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package upload
import (
"os"
"path/filepath"
"strings"
)
// files to handle
// A work value is the result of scanning the telemetry directories: the
// count files to convert into reports, the reports awaiting upload, and the
// set of report names already uploaded.
type work struct {
	// absolute file names
	countfiles []string // count files to process
	readyfiles []string // old reports to upload
	// relative names
	uploaded map[string]bool // reports that have been uploaded
}
// find all the files that look like counter files or reports
// that need to be uploaded. (There may be unexpected leftover files
// and uploading is supposed to be idempotent.)
//
// findWork scans the local dir for inactive count files and uploadable
// reports, and the upload dir for already-uploaded report names.
func (u *uploader) findWork() work {
	localdir, uploaddir := u.dir.LocalDir(), u.dir.UploadDir()
	var ans work
	fis, err := os.ReadDir(localdir)
	if err != nil {
		u.logger.Printf("Could not find work: failed to read local dir %s: %v", localdir, err)
		return ans
	}
	mode, asof := u.dir.Mode()
	u.logger.Printf("Finding work: mode %s asof %s", mode, asof)
	// count files end in .v1.count
	// reports end in .json. If they are not to be uploaded they
	// start with local.
	for _, fi := range fis {
		if strings.HasSuffix(fi.Name(), ".v1.count") {
			fname := filepath.Join(localdir, fi.Name())
			// A count file is collected only once it has expired (its end
			// time is not after the upload start time).
			_, expiry, err := u.counterDateSpan(fname)
			switch {
			case err != nil:
				u.logger.Printf("Error reading expiry for count file %s: %v", fi.Name(), err)
			case expiry.After(u.startTime):
				u.logger.Printf("Skipping count file %s: still active", fi.Name())
			default:
				u.logger.Printf("Collecting count file %s", fi.Name())
				ans.countfiles = append(ans.countfiles, fname)
			}
		} else if strings.HasPrefix(fi.Name(), "local.") {
			// skip: local.*.json reports are never uploaded
		} else if strings.HasSuffix(fi.Name(), ".json") && mode == "on" {
			// Collect reports that are ready for upload.
			reportDate := u.uploadReportDate(fi.Name())
			if !asof.IsZero() && !reportDate.IsZero() {
				// If both the mode asof date and the report date are present, do the
				// right thing...
				//
				// (see https://github.com/golang/go/issues/63142#issuecomment-1734025130)
				if asof.Before(reportDate) {
					// Note: since this report was created after telemetry was enabled,
					// we can only assume that the process that created it checked that
					// the counter data contained therein was all from after the asof
					// date.
					//
					// TODO(rfindley): store the begin date in reports, so that we can
					// verify this assumption.
					u.logger.Printf("Uploadable: %s", fi.Name())
					ans.readyfiles = append(ans.readyfiles, filepath.Join(localdir, fi.Name()))
				}
			} else {
				// ...otherwise fall back on the old behavior of uploading all
				// unuploaded files.
				//
				// TODO(rfindley): invert this logic following more testing. We
				// should only upload if we know both the asof date and the report
				// date, and they are acceptable.
				u.logger.Printf("Uploadable (missing date): %s", fi.Name())
				ans.readyfiles = append(ans.readyfiles, filepath.Join(localdir, fi.Name()))
			}
		}
	}
	fis, err = os.ReadDir(uploaddir)
	if err != nil {
		// NOTE(review): the MkdirAll error is deliberately ignored here; if
		// creation fails, a later run will retry.
		os.MkdirAll(uploaddir, 0777)
		return ans
	}
	// There should be only one of these per day; maybe sometime
	// we'll want to clean the directory.
	ans.uploaded = make(map[string]bool)
	for _, fi := range fis {
		if strings.HasSuffix(fi.Name(), ".json") {
			u.logger.Printf("Already uploaded: %s", fi.Name())
			ans.uploaded[fi.Name()] = true
		}
	}
	return ans
}

View File

@@ -0,0 +1,344 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package upload
import (
"crypto/rand"
"encoding/binary"
"encoding/json"
"fmt"
"math"
"os"
"path/filepath"
"strings"
"time"
"golang.org/x/telemetry/internal/config"
"golang.org/x/telemetry/internal/counter"
"golang.org/x/telemetry/internal/telemetry"
)
// reports generates reports from inactive count files
//
// It groups todo.countfiles by expiry date, creates a report for each group
// that does not already have one, appends newly created report paths to
// todo.readyfiles, and returns the full ready list.
func (u *uploader) reports(todo *work) ([]string, error) {
	if mode, _ := u.dir.Mode(); mode == "off" {
		return nil, nil // no reports
	}
	thisInstant := u.startTime
	today := thisInstant.Format(telemetry.DateOnly)
	lastWeek := latestReport(todo.uploaded)
	if lastWeek >= today { //should never happen
		lastWeek = ""
	}
	u.logger.Printf("Last week: %s, today: %s", lastWeek, today)
	countFiles := make(map[string][]string) // expiry date string->filenames
	earliest := make(map[string]time.Time) // earliest begin time for any counter
	for _, f := range todo.countfiles {
		begin, end, err := u.counterDateSpan(f)
		if err != nil {
			// This shouldn't happen: we should have already skipped count files that
			// don't contain valid start or end times.
			u.logger.Printf("BUG: failed to parse expiry for collected count file: %v", err)
			continue
		}
		if end.Before(thisInstant) {
			expiry := end.Format(dateFormat)
			countFiles[expiry] = append(countFiles[expiry], f)
			if earliest[expiry].IsZero() || earliest[expiry].After(begin) {
				earliest[expiry] = begin
			}
		}
	}
	for expiry, files := range countFiles {
		if notNeeded(expiry, *todo) {
			u.logger.Printf("Files for %s not needed, deleting %v", expiry, files)
			// The report already exists.
			// There's another check in createReport.
			u.deleteFiles(files)
			continue
		}
		fname, err := u.createReport(earliest[expiry], expiry, files, lastWeek)
		if err != nil {
			u.logger.Printf("Failed to create report for %s: %v", expiry, err)
			continue
		}
		if fname != "" {
			u.logger.Printf("Ready to upload: %s", filepath.Base(fname))
			todo.readyfiles = append(todo.readyfiles, fname)
		}
	}
	return todo.readyfiles, nil
}
// latestReport returns the YYYY-MM-DD of the last report uploaded
// or the empty string if there are no reports.
//
// Because report names are date-prefixed, lexicographic order on the file
// names coincides with chronological order.
func latestReport(uploaded map[string]bool) string {
	best := ""
	for name := range uploaded {
		if !strings.HasSuffix(name, ".json") {
			continue
		}
		if name > best {
			best = name
		}
	}
	// TrimSuffix is a no-op when best is empty.
	return strings.TrimSuffix(best, ".json")
}
// notNeeded reports whether the report for date has already been created,
// either uploaded in a previous run or already present among the ready files.
func notNeeded(date string, todo work) bool {
	// Reading a nil map yields the zero value, so no nil check is needed.
	if todo.uploaded[date+".json"] {
		return true
	}
	// maybe the report is already in todo.readyfiles
	for _, f := range todo.readyfiles {
		if strings.Contains(f, date) {
			return true
		}
	}
	return false
}
// deleteFiles removes each of the named files, logging (but otherwise
// ignoring) any removal failure.
func (u *uploader) deleteFiles(files []string) {
	for _, f := range files {
		if err := os.Remove(f); err != nil {
			// this could be a race condition.
			// conversely, on Windows, err may be nil and
			// the file not deleted if anyone has it open.
			u.logger.Printf("%v failed to remove %s", err, f)
		}
	}
}
// createReport creates local and upload report files by
// combining all the count files for the expiryDate, and
// returns the upload report file's path.
// It may delete the count files once local and upload report
// files are successfully created.
//
// The returned path is empty (with nil error) when the report was created
// but is not eligible for upload (mode off, too old, pre-asof data, or
// sampled out).
func (u *uploader) createReport(start time.Time, expiryDate string, countFiles []string, lastWeek string) (string, error) {
	// Decide up front whether this report may be uploaded; any of the
	// checks below can veto upload while still producing a local report.
	uploadOK := true
	mode, asof := u.dir.Mode()
	if mode != "on" {
		u.logger.Printf("No upload config or mode %q is not 'on'", mode)
		uploadOK = false // no config, nothing to upload
	}
	if u.tooOld(expiryDate, u.startTime) {
		u.logger.Printf("Expiry date %s is too old", expiryDate)
		uploadOK = false
	}
	// If the mode is recorded with an asof date, don't upload if the report
	// includes any data on or before the asof date.
	if !asof.IsZero() && !asof.Before(start) {
		u.logger.Printf("As-of date %s is not before start %s", asof, start)
		uploadOK = false
	}
	// TODO(rfindley): check that all the x.Meta are consistent for GOOS, GOARCH, etc.
	report := &telemetry.Report{
		Config: u.configVersion,
		X: computeRandom(), // json encodes all the bits
		Week: expiryDate,
		LastWeek: lastWeek,
	}
	// Sampling: a SampleRate of 0 means "always upload".
	if report.X > u.config.SampleRate && u.config.SampleRate > 0 {
		u.logger.Printf("X: %f > SampleRate:%f, not uploadable", report.X, u.config.SampleRate)
		uploadOK = false
	}
	// Merge every parseable count file into the report, aggregating by
	// program and counter name.
	var succeeded bool
	for _, f := range countFiles {
		fok := false
		x, err := u.parseCountFile(f)
		if err != nil {
			u.logger.Printf("Unparseable count file %s: %v", filepath.Base(f), err)
			continue
		}
		prog := findProgReport(x.Meta, report)
		for k, v := range x.Count {
			if counter.IsStackCounter(k) {
				// stack
				prog.Stacks[k] += int64(v)
			} else {
				// counter
				prog.Counters[k] += int64(v)
			}
			succeeded = true
			fok = true
		}
		if !fok {
			u.logger.Printf("no counters found in %s", f)
		}
	}
	if !succeeded {
		return "", fmt.Errorf("none of the %d count files for %s contained counters", len(countFiles), expiryDate)
	}
	// 1. generate the local report
	localContents, err := json.MarshalIndent(report, "", " ")
	if err != nil {
		return "", fmt.Errorf("failed to marshal report for %s: %v", expiryDate, err)
	}
	// check that the report can be read back
	// TODO(pjw): remove for production?
	var report2 telemetry.Report
	if err := json.Unmarshal(localContents, &report2); err != nil {
		return "", fmt.Errorf("failed to unmarshal local report for %s: %v", expiryDate, err)
	}
	var uploadContents []byte
	if uploadOK {
		// 2. create the uploadable version
		cfg := config.NewConfig(u.config)
		upload := &telemetry.Report{
			Week: report.Week,
			LastWeek: report.LastWeek,
			X: report.X,
			Config: report.Config,
		}
		for _, p := range report.Programs {
			// does the uploadConfig want this program?
			// if so, copy over the Stacks and Counters
			// that the uploadConfig mentions.
			if !cfg.HasGoVersion(p.GoVersion) || !cfg.HasProgram(p.Program) || !cfg.HasVersion(p.Program, p.Version) {
				continue
			}
			x := &telemetry.ProgramReport{
				Program: p.Program,
				Version: p.Version,
				GOOS: p.GOOS,
				GOARCH: p.GOARCH,
				GoVersion: p.GoVersion,
				Counters: make(map[string]int64),
				Stacks: make(map[string]int64),
			}
			upload.Programs = append(upload.Programs, x)
			for k, v := range p.Counters {
				if cfg.HasCounter(p.Program, k) && report.X <= cfg.Rate(p.Program, k) {
					x.Counters[k] = v
				}
			}
			// and the same for Stacks
			// this can be made more efficient, when it matters
			for k, v := range p.Stacks {
				// Stack counters are keyed by "<name>\n<stack>"; the config
				// is consulted with just the name component.
				before, _, _ := strings.Cut(k, "\n")
				if cfg.HasStack(p.Program, before) && report.X <= cfg.Rate(p.Program, before) {
					x.Stacks[k] = v
				}
			}
		}
		uploadContents, err = json.MarshalIndent(upload, "", " ")
		if err != nil {
			return "", fmt.Errorf("failed to marshal upload report for %s: %v", expiryDate, err)
		}
	}
	localFileName := filepath.Join(u.dir.LocalDir(), "local."+expiryDate+".json")
	uploadFileName := filepath.Join(u.dir.LocalDir(), expiryDate+".json")
	/* Prepare to write files */
	// if either file exists, someone has been here ahead of us
	// (there is still a race, but this check shortens the open window)
	if _, err := os.Stat(localFileName); err == nil {
		u.deleteFiles(countFiles)
		return "", fmt.Errorf("local report %s already exists", localFileName)
	}
	if _, err := os.Stat(uploadFileName); err == nil {
		u.deleteFiles(countFiles)
		return "", fmt.Errorf("report %s already exists", uploadFileName)
	}
	// write the uploadable file
	var errUpload, errLocal error
	if uploadOK {
		_, errUpload = exclusiveWrite(uploadFileName, uploadContents)
	}
	// write the local file
	_, errLocal = exclusiveWrite(localFileName, localContents)
	/* Wrote the files */
	// even though these errors won't occur, what should happen
	// if errUpload == nil and it is ok to upload, and errLocal != nil?
	if errLocal != nil {
		return "", fmt.Errorf("failed to write local file %s (%v)", localFileName, errLocal)
	}
	if errUpload != nil {
		return "", fmt.Errorf("failed to write upload file %s (%v)", uploadFileName, errUpload)
	}
	u.logger.Printf("Created %s, deleting %d count files", filepath.Base(uploadFileName), len(countFiles))
	u.deleteFiles(countFiles)
	if uploadOK {
		return uploadFileName, nil
	}
	return "", nil
}
// exclusiveWrite attempts to create filename exclusively, and if successful,
// writes content to the resulting file handle.
//
// It returns a boolean indicating whether the exclusive handle was acquired,
// and an error indicating whether the operation succeeded.
// If the file already exists, exclusiveWrite returns (false, nil).
func exclusiveWrite(filename string, content []byte) (_ bool, rerr error) {
	f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0644)
	switch {
	case os.IsExist(err):
		// Lost the race: another process created the file first.
		return false, nil
	case err != nil:
		return false, err
	}
	// Surface a Close error only if no earlier error is being returned.
	defer func() {
		if cerr := f.Close(); cerr != nil && rerr == nil {
			rerr = cerr
		}
	}()
	if _, werr := f.Write(content); werr != nil {
		return false, werr
	}
	return true, nil
}
// findProgReport returns the existing ProgramReport in report whose identity
// (Program, Version, GoVersion, GOOS, GOARCH) matches the count-file
// metadata, or appends and returns a new, empty one.
func findProgReport(meta map[string]string, report *telemetry.Report) *telemetry.ProgramReport {
	for _, prog := range report.Programs {
		if prog.Program == meta["Program"] && prog.Version == meta["Version"] &&
			prog.GoVersion == meta["GoVersion"] && prog.GOOS == meta["GOOS"] &&
			prog.GOARCH == meta["GOARCH"] {
			return prog
		}
	}
	prog := telemetry.ProgramReport{
		Program: meta["Program"],
		Version: meta["Version"],
		GoVersion: meta["GoVersion"],
		GOOS: meta["GOOS"],
		GOARCH: meta["GOARCH"],
		Counters: make(map[string]int64),
		Stacks: make(map[string]int64),
	}
	report.Programs = append(report.Programs, &prog)
	return &prog
}
// computeRandom returns a cryptographic random float64 in the range [0, 1),
// with 52 bits of precision.
//
// Frexp yields a fraction in [0.5, 1), so frac*2-1 lies in [0, 1); the loop
// rejects NaN/Inf and tiny magnitudes before extracting the fraction.
func computeRandom() float64 {
	for {
		b := make([]byte, 8)
		_, err := rand.Read(b)
		if err != nil {
			panic(fmt.Sprintf("rand.Read failed: %v", err))
		}
		// and turn it into a float64
		x := math.Float64frombits(binary.LittleEndian.Uint64(b))
		if math.IsNaN(x) || math.IsInf(x, 0) {
			continue
		}
		x = math.Abs(x)
		if x < 0x1p-1000 { // avoid underflow patterns
			continue
		}
		frac, _ := math.Frexp(x) // 52 bits of randomness
		return frac*2 - 1
	}
}

226
vendor/golang.org/x/telemetry/internal/upload/run.go generated vendored Normal file
View File

@@ -0,0 +1,226 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package upload
import (
"fmt"
"io"
"log"
"os"
"path"
"path/filepath"
"runtime/debug"
"strings"
"time"
"golang.org/x/telemetry/internal/configstore"
"golang.org/x/telemetry/internal/telemetry"
)
// RunConfig configures non-default behavior of a call to Run.
//
// All fields are optional, for testing or observability.
type RunConfig struct {
	TelemetryDir string // if set, overrides the telemetry data directory
	UploadURL string // if set, overrides the telemetry upload endpoint
	LogWriter io.Writer // if set, used for detailed logging of the upload process
	Env []string // if set, appended to the config download environment
	StartTime time.Time // if set, overrides the upload start time
}
// Run generates and uploads reports, as allowed by the mode file.
//
// Panics during the upload are recovered and logged, so that telemetry can
// never crash the host program.
func Run(config RunConfig) error {
	defer func() {
		if err := recover(); err != nil {
			log.Printf("upload recover: %v", err)
		}
	}()
	uploader, err := newUploader(config)
	if err != nil {
		return err
	}
	defer uploader.Close()
	return uploader.Run()
}
// uploader encapsulates a single upload operation, carrying parameters and
// shared state.
type uploader struct {
	// config is used to select counters to upload.
	config *telemetry.UploadConfig //
	configVersion string // version of the config
	dir telemetry.Dir // the telemetry dir to process
	uploadServerURL string // endpoint to POST reports to
	startTime time.Time // instant the upload run began
	cache parsedCache // memoized count-file parses
	logFile *os.File // debug log file, if any (closed by Close)
	logger *log.Logger // destination for detailed upload logging
}
// newUploader creates a new uploader to use for running the upload for the
// given config.
//
// Uploaders should only be used for one call to [uploader.Run].
func newUploader(rcfg RunConfig) (*uploader, error) {
	// Determine the upload directory.
	var dir telemetry.Dir
	if rcfg.TelemetryDir != "" {
		dir = telemetry.NewDir(rcfg.TelemetryDir)
	} else {
		dir = telemetry.Default
	}
	// Determine the upload URL.
	uploadURL := rcfg.UploadURL
	if uploadURL == "" {
		uploadURL = "https://telemetry.go.dev/upload"
	}
	// Determine the upload logger.
	//
	// This depends on the provided rcfg.LogWriter and the presence of
	// dir.DebugDir, as follows:
	// 1. If LogWriter is present, log to it.
	// 2. If DebugDir is present, log to a file within it.
	// 3. If both LogWriter and DebugDir are present, log to a multi writer.
	// 4. If neither LogWriter nor DebugDir are present, log to a noop logger.
	var logWriters []io.Writer
	logFile, err := debugLogFile(dir.DebugDir())
	if err != nil {
		// Logging is best-effort: discard the error and continue without a file.
		logFile = nil
	}
	if logFile != nil {
		logWriters = append(logWriters, logFile)
	}
	if rcfg.LogWriter != nil {
		logWriters = append(logWriters, rcfg.LogWriter)
	}
	var logWriter io.Writer
	switch len(logWriters) {
	case 0:
		logWriter = io.Discard
	case 1:
		logWriter = logWriters[0]
	default:
		logWriter = io.MultiWriter(logWriters...)
	}
	logger := log.New(logWriter, "", log.Ltime|log.Lmicroseconds|log.Lshortfile)
	// Fetch the upload config, if it is not provided.
	var (
		config *telemetry.UploadConfig
		configVersion string
	)
	if mode, _ := dir.Mode(); mode == "on" {
		// golang/go#68946: only download the upload config if it will be used.
		//
		// TODO(rfindley): This is a narrow change aimed at minimally fixing the
		// associated bug. In the future, we should read the mode only once during
		// the upload process.
		config, configVersion, err = configstore.Download("latest", rcfg.Env)
		if err != nil {
			return nil, err
		}
	} else {
		// Mode is not "on": use an empty config placeholder; nothing will be
		// uploaded.
		config = &telemetry.UploadConfig{}
		configVersion = "v0.0.0-0"
	}
	// Set the start time, if it is not provided.
	startTime := time.Now().UTC()
	if !rcfg.StartTime.IsZero() {
		startTime = rcfg.StartTime
	}
	return &uploader{
		config: config,
		configVersion: configVersion,
		dir: dir,
		uploadServerURL: uploadURL,
		startTime: startTime,
		logFile: logFile,
		logger: logger,
	}, nil
}
// Close cleans up any resources associated with the uploader.
// It is a no-op when no debug log file was opened.
func (u *uploader) Close() error {
	if u.logFile == nil {
		return nil
	}
	return u.logFile.Close()
}
// Run generates and uploads reports
//
// It is a no-op on platforms where telemetry is disabled. Individual report
// upload failures are logged by uploadReport and do not abort the run.
func (u *uploader) Run() error {
	if telemetry.DisabledOnPlatform {
		return nil
	}
	todo := u.findWork()
	ready, err := u.reports(&todo)
	if err != nil {
		u.logger.Printf("Error building reports: %v", err)
		return fmt.Errorf("reports failed: %v", err)
	}
	u.logger.Printf("Uploading %d reports", len(ready))
	for _, f := range ready {
		u.uploadReport(f)
	}
	return nil
}
// debugLogFile arranges to write a log file in the given debug directory, if
// it exists.
//
// The file name encodes program, version, Go version, date, and pid, so each
// process creates at most one log file per day. It returns (nil, nil) when
// the debug directory does not exist or this process already created its log
// file.
func debugLogFile(debugDir string) (*os.File, error) {
	fd, err := os.Stat(debugDir)
	if os.IsNotExist(err) {
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	if !fd.IsDir() {
		return nil, fmt.Errorf("debug path %q is not a directory", debugDir)
	}
	info, ok := debug.ReadBuildInfo()
	if !ok {
		return nil, fmt.Errorf("no build info")
	}
	year, month, day := time.Now().UTC().Date()
	goVers := info.GoVersion
	// E.g., goVers:"go1.22-20240109-RC01 cl/597041403 +dcbe772469 X:loopvar"
	words := strings.Fields(goVers)
	goVers = words[0]
	progPkgPath := info.Path
	if progPkgPath == "" {
		// Fall back on the executable name, minus any Windows suffix.
		progPkgPath = strings.TrimSuffix(filepath.Base(os.Args[0]), ".exe")
	}
	prog := path.Base(progPkgPath)
	progVers := info.Main.Version
	if progVers == "(devel)" { // avoid special characters in created file names
		progVers = "devel"
	}
	logBase := strings.ReplaceAll(
		fmt.Sprintf("%s-%s-%s-%4d%02d%02d-%d.log", prog, progVers, goVers, year, month, day, os.Getpid()),
		" ", "")
	fname := filepath.Join(debugDir, logBase)
	if _, err := os.Stat(fname); err == nil {
		// This process previously called upload.Run
		return nil, nil
	}
	// O_EXCL guards against a race between the Stat above and this create.
	f, err := os.OpenFile(fname, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0666)
	if err != nil {
		if os.IsExist(err) {
			return nil, nil // this process previously called upload.Run
		}
		return nil, err
	}
	return f, nil
}

117
vendor/golang.org/x/telemetry/internal/upload/upload.go generated vendored Normal file
View File

@@ -0,0 +1,117 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package upload
import (
"bytes"
"net/http"
"os"
"path/filepath"
"regexp"
"strings"
"time"
"golang.org/x/telemetry/internal/telemetry"
)
var (
	// dateRE captures the YYYY-MM-DD date component at the end of a report
	// file name (e.g. "2006-01-02.json").
	dateRE = regexp.MustCompile(`(\d\d\d\d-\d\d-\d\d)[.]json$`)
	// dateFormat is the layout for report dates; identical to telemetry.DateOnly.
	dateFormat = telemetry.DateOnly
	// TODO(rfindley): use dateFormat throughout.
)
// uploadReportDate returns the date component of the upload file name, or the
// zero time if the name does not contain a well-formed YYYY-MM-DD date.
// Malformed names are logged.
func (u *uploader) uploadReportDate(fname string) time.Time {
	match := dateRE.FindStringSubmatch(fname)
	if match == nil || len(match) < 2 {
		u.logger.Printf("malformed report name: missing date: %q", filepath.Base(fname))
		return time.Time{}
	}
	d, err := time.Parse(dateFormat, match[1])
	if err != nil {
		u.logger.Printf("malformed report name: bad date: %q", filepath.Base(fname))
		return time.Time{}
	}
	return d
}
// uploadReport reads the report in fname and attempts to upload it.
// Reports dated in the future are skipped; a report whose name lacks a
// date is logged but still uploaded.
func (u *uploader) uploadReport(fname string) {
	// TODO(rfindley): use uploadReportDate here, once we've done a gopls release.

	// First make sure the report is not from the future.
	base := filepath.Base(fname)
	today := u.startTime.Format(telemetry.DateOnly)
	if m := dateRE.FindStringSubmatch(fname); m == nil || len(m) < 2 {
		u.logger.Printf("Report name %q missing date", base)
	} else if m[1] > today {
		u.logger.Printf("Report date for %q is later than today (%s)", base, today)
		return // report is in the future, which shouldn't happen
	}

	buf, err := os.ReadFile(fname)
	if err != nil {
		u.logger.Printf("%v reading %s", err, fname)
		return
	}
	// uploadReportContents reports success, but there is nothing further to
	// do here in either case.
	u.uploadReportContents(fname, buf)
}
// uploadReportContents uploads the report contents in buf to the upload
// server, and on success saves a copy of the report as newname in the local
// upload directory (removing the original). It reports whether the upload
// succeeded.
//
// Concurrent duplicate uploads are prevented with a best-effort .lock file
// next to the destination name.
func (u *uploader) uploadReportContents(fname string, buf []byte) bool {
	fdate := strings.TrimSuffix(filepath.Base(fname), ".json")
	fdate = fdate[len(fdate)-len(telemetry.DateOnly):]
	newname := filepath.Join(u.dir.UploadDir(), fdate+".json")

	// Lock the upload, to prevent duplicate uploads.
	{
		lockname := newname + ".lock"
		lockfile, err := os.OpenFile(lockname, os.O_CREATE|os.O_EXCL, 0666)
		if err != nil {
			u.logger.Printf("Failed to acquire lock %s: %v", lockname, err)
			return false
		}
		_ = lockfile.Close()
		defer os.Remove(lockname)
	}

	if _, err := os.Stat(newname); err == nil {
		// Another process uploaded but failed to clean up (or hasn't yet cleaned
		// up). Ensure that cleanup occurs.
		u.logger.Printf("After acquire: report already uploaded")
		_ = os.Remove(fname)
		return false
	}

	endpoint := u.uploadServerURL + "/" + fdate
	b := bytes.NewReader(buf)
	resp, err := http.Post(endpoint, "application/json", b)
	if err != nil {
		u.logger.Printf("Error upload %s to %s: %v", filepath.Base(fname), endpoint, err)
		return false
	}
	// Close the body so the underlying connection can be reused.
	defer resp.Body.Close()

	// hope for a 200, remove file on a 4xx, otherwise it will be retried by another process
	if resp.StatusCode != 200 {
		u.logger.Printf("Failed to upload %s to %s: %s", filepath.Base(fname), endpoint, resp.Status)
		if resp.StatusCode >= 400 && resp.StatusCode < 500 {
			// A 4xx will never succeed on retry; delete the local report.
			err := os.Remove(fname)
			if err == nil {
				u.logger.Printf("Removed local/%s", filepath.Base(fname))
			} else {
				u.logger.Printf("Error removing local/%s: %v", filepath.Base(fname), err)
			}
		}
		return false
	}

	// Store a copy of the uploaded report in the uploaded directory.
	if err := os.WriteFile(newname, buf, 0644); err == nil {
		os.Remove(fname) // if it exists
	} else {
		// The server accepted the report, so the upload still counts as a
		// success; just log the failure to save the local copy.
		u.logger.Printf("Error saving uploaded copy %s: %v", newname, err)
	}
	u.logger.Printf("Uploaded %s to %q", fdate+".json", endpoint)
	return true
}