Initialize module and dependencies

This commit is contained in:
dwrz
2026-01-04 20:57:40 +00:00
commit a3b390c008
514 changed files with 310495 additions and 0 deletions

45
vendor/golang.org/x/telemetry/internal/upload/Doc.txt generated vendored Normal file
View File

@@ -0,0 +1,45 @@
The upload process converts count files into reports, and
uploads reports. There will be only one report, named YYYY-MM-DD.json,
for a given day.
First phase. Look at the localdir (os.UserConfigDir()/go/telemetry/local)
and find all .count and .json files. Find the count files that are no
longer active by looking at their metadata.
Second phase. Group the inactive count files by their expiry date, and
for each date generate the local report and the upload report. (The upload
report only contains the counters in the upload configuration.) The upload
report is saved in the local directory with a name like YYYY-MM-DD.json, if
there is no file already existing with that name.
If the local report is different, it is saved in the local directory
with a name like local.YYYY-MM-DD.json. The new upload report is
added to the list of .json files from the first phase. At this point
the count files are no longer needed and can be deleted.
Third phase. Look at the .json files in the list from the first phase.
If the name starts with local, skip it. If there is a file with the
identical name in the upload directory, remove the one in the local directory.
Otherwise try to upload the one in the local directory.
If the upload succeeds, move the file to the uploaded directory.
There are various error conditions.
1. Several processes could look at localdir and see work to do.
1A. They could see different sets of expired count files for some day.
This could happen if another process is removing count files. In this
case there is already a YYYY-MM-DD.json file either in localdir
or uploaddir, so the process seeing fewer count files will not generate
a report.
1B. They could see the same count files, and no report in either directory.
They will both generate (in memory) reports and check to see if there
is a YYYY-MM-DD.json file in either directory. They could both then
write two files with the same name, but different X values, but
otherwise the same contents. The X values are very close to the front
of the file. Assuming reasonable file system semantics one version of
the file will be written. To minimize this, just before writing reports
the code checks again to see if they exist.
1C. Once there is an existing well-formed file YYYY-MM-DD.json in localdir
eventually the upload will succeed, and the file will be moved to uploaddir.
It is possible that other processes will not see the file in uploaddir and
upload it again and also move it to uploaddir. This is harmless as all
the uploaded files are identical.

85
vendor/golang.org/x/telemetry/internal/upload/date.go generated vendored Normal file
View File

@@ -0,0 +1,85 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package upload
import (
"fmt"
"os"
"sync"
"time"
"golang.org/x/telemetry/internal/counter"
"golang.org/x/telemetry/internal/telemetry"
)
// time and date handling
var distantPast = 21 * 24 * time.Hour
// reports that are too old (21 days) are not uploaded
func (u *uploader) tooOld(date string, uploadStartTime time.Time) bool {
t, err := time.Parse(telemetry.DateOnly, date)
if err != nil {
u.logger.Printf("tooOld: %v", err)
return false
}
age := uploadStartTime.Sub(t)
return age > distantPast
}
// counterDateSpan parses the counter file named fname and returns the (begin,
// end) span recorded in its metadata, or an error if this data could not be
// extracted.
func (u *uploader) counterDateSpan(fname string) (begin, end time.Time, _ error) {
	parsed, err := u.parseCountFile(fname)
	if err != nil {
		return time.Time{}, time.Time{}, err
	}
	// Both endpoints are stored as RFC 3339 strings in the file metadata.
	timeBegin, ok := parsed.Meta["TimeBegin"]
	if !ok {
		return time.Time{}, time.Time{}, fmt.Errorf("missing counter metadata for TimeBegin")
	}
	if begin, err = time.Parse(time.RFC3339, timeBegin); err != nil {
		return time.Time{}, time.Time{}, fmt.Errorf("failed to parse TimeBegin: %v", err)
	}
	timeEnd, ok := parsed.Meta["TimeEnd"]
	if !ok {
		return time.Time{}, time.Time{}, fmt.Errorf("missing counter metadata for TimeEnd")
	}
	if end, err = time.Parse(time.RFC3339, timeEnd); err != nil {
		return time.Time{}, time.Time{}, fmt.Errorf("failed to parse TimeEnd: %v", err)
	}
	return begin, end, nil
}
// parsedCache memoizes parsed count files so each file is read and
// parsed at most once per upload run.
type parsedCache struct {
	mu sync.Mutex // guards m
	m  map[string]*counter.File // file name -> parsed contents; lazily initialized
}
// parseCountFile reads and parses the counter file fname, memoizing the
// result in u.cache so each file is parsed at most once.
func (u *uploader) parseCountFile(fname string) (*counter.File, error) {
	u.cache.mu.Lock()
	defer u.cache.mu.Unlock()
	if cached, ok := u.cache.m[fname]; ok {
		return cached, nil
	}
	data, err := os.ReadFile(fname)
	if err != nil {
		return nil, fmt.Errorf("parse ReadFile: %v for %s", err, fname)
	}
	parsed, err := counter.Parse(fname, data)
	if err != nil {
		return nil, fmt.Errorf("parse Parse: %v for %s", err, fname)
	}
	// Lazily initialize the cache map on first successful parse.
	if u.cache.m == nil {
		u.cache.m = make(map[string]*counter.File)
	}
	u.cache.m[fname] = parsed
	return parsed, nil
}

View File

@@ -0,0 +1,102 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package upload
import (
"os"
"path/filepath"
"strings"
)
// work describes the set of files one upload pass must handle.
type work struct {
	// absolute file names
	countfiles []string // count files to process
	readyfiles []string // old reports to upload
	// relative names
	uploaded map[string]bool // reports that have been uploaded
}
// findWork returns all the files in the local directory that look like
// counter files or reports that need to be uploaded. (There may be
// unexpected leftover files and uploading is supposed to be idempotent.)
func (u *uploader) findWork() work {
	localdir, uploaddir := u.dir.LocalDir(), u.dir.UploadDir()
	var ans work
	fis, err := os.ReadDir(localdir)
	if err != nil {
		// Without a readable local dir there is nothing to do.
		u.logger.Printf("Could not find work: failed to read local dir %s: %v", localdir, err)
		return ans
	}
	mode, asof := u.dir.Mode()
	u.logger.Printf("Finding work: mode %s asof %s", mode, asof)

	// count files end in .v1.count
	// reports end in .json. If they are not to be uploaded they
	// start with local.
	for _, fi := range fis {
		if strings.HasSuffix(fi.Name(), ".v1.count") {
			fname := filepath.Join(localdir, fi.Name())
			// A count file is collectable only after its expiry time has
			// passed (i.e. it is no longer being written to).
			_, expiry, err := u.counterDateSpan(fname)
			switch {
			case err != nil:
				u.logger.Printf("Error reading expiry for count file %s: %v", fi.Name(), err)
			case expiry.After(u.startTime):
				u.logger.Printf("Skipping count file %s: still active", fi.Name())
			default:
				u.logger.Printf("Collecting count file %s", fi.Name())
				ans.countfiles = append(ans.countfiles, fname)
			}
		} else if strings.HasPrefix(fi.Name(), "local.") {
			// skip: local reports are never uploaded
		} else if strings.HasSuffix(fi.Name(), ".json") && mode == "on" {
			// Collect reports that are ready for upload.
			reportDate := u.uploadReportDate(fi.Name())
			if !asof.IsZero() && !reportDate.IsZero() {
				// If both the mode asof date and the report date are present, do the
				// right thing...
				//
				// (see https://github.com/golang/go/issues/63142#issuecomment-1734025130)
				if asof.Before(reportDate) {
					// Note: since this report was created after telemetry was enabled,
					// we can only assume that the process that created it checked that
					// the counter data contained therein was all from after the asof
					// date.
					//
					// TODO(rfindley): store the begin date in reports, so that we can
					// verify this assumption.
					u.logger.Printf("Uploadable: %s", fi.Name())
					ans.readyfiles = append(ans.readyfiles, filepath.Join(localdir, fi.Name()))
				}
			} else {
				// ...otherwise fall back on the old behavior of uploading all
				// unuploaded files.
				//
				// TODO(rfindley): invert this logic following more testing. We
				// should only upload if we know both the asof date and the report
				// date, and they are acceptable.
				u.logger.Printf("Uploadable (missing date): %s", fi.Name())
				ans.readyfiles = append(ans.readyfiles, filepath.Join(localdir, fi.Name()))
			}
		}
	}

	fis, err = os.ReadDir(uploaddir)
	if err != nil {
		// The upload dir does not exist yet; create it for future runs.
		os.MkdirAll(uploaddir, 0777)
		return ans
	}
	// There should be only one of these per day; maybe sometime
	// we'll want to clean the directory.
	ans.uploaded = make(map[string]bool)
	for _, fi := range fis {
		if strings.HasSuffix(fi.Name(), ".json") {
			u.logger.Printf("Already uploaded: %s", fi.Name())
			ans.uploaded[fi.Name()] = true
		}
	}
	return ans
}

View File

@@ -0,0 +1,344 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package upload
import (
"crypto/rand"
"encoding/binary"
"encoding/json"
"fmt"
"math"
"os"
"path/filepath"
"strings"
"time"
"golang.org/x/telemetry/internal/config"
"golang.org/x/telemetry/internal/counter"
"golang.org/x/telemetry/internal/telemetry"
)
// reports generates reports from inactive count files, appends them to
// todo.readyfiles, and returns the full list of report files that are
// ready to upload.
func (u *uploader) reports(todo *work) ([]string, error) {
	if mode, _ := u.dir.Mode(); mode == "off" {
		return nil, nil // no reports
	}
	thisInstant := u.startTime
	today := thisInstant.Format(telemetry.DateOnly)
	lastWeek := latestReport(todo.uploaded)
	if lastWeek >= today { //should never happen
		lastWeek = ""
	}
	u.logger.Printf("Last week: %s, today: %s", lastWeek, today)
	// Group the expired count files by expiry date, recording the earliest
	// counter begin time seen for each date (needed for the asof check in
	// createReport).
	countFiles := make(map[string][]string) // expiry date string->filenames
	earliest := make(map[string]time.Time)  // earliest begin time for any counter
	for _, f := range todo.countfiles {
		begin, end, err := u.counterDateSpan(f)
		if err != nil {
			// This shouldn't happen: we should have already skipped count files that
			// don't contain valid start or end times.
			u.logger.Printf("BUG: failed to parse expiry for collected count file: %v", err)
			continue
		}
		if end.Before(thisInstant) {
			expiry := end.Format(dateFormat)
			countFiles[expiry] = append(countFiles[expiry], f)
			if earliest[expiry].IsZero() || earliest[expiry].After(begin) {
				earliest[expiry] = begin
			}
		}
	}
	for expiry, files := range countFiles {
		if notNeeded(expiry, *todo) {
			u.logger.Printf("Files for %s not needed, deleting %v", expiry, files)
			// The report already exists.
			// There's another check in createReport.
			u.deleteFiles(files)
			continue
		}
		fname, err := u.createReport(earliest[expiry], expiry, files, lastWeek)
		if err != nil {
			u.logger.Printf("Failed to create report for %s: %v", expiry, err)
			continue
		}
		if fname != "" {
			u.logger.Printf("Ready to upload: %s", filepath.Base(fname))
			todo.readyfiles = append(todo.readyfiles, fname)
		}
	}
	return todo.readyfiles, nil
}
// latestReport returns the YYYY-MM-DD of the last (lexically greatest)
// report uploaded, or the empty string if there are no reports.
func latestReport(uploaded map[string]bool) string {
	latest := ""
	for name := range uploaded {
		if !strings.HasSuffix(name, ".json") {
			continue
		}
		if name > latest {
			latest = name
		}
	}
	// TrimSuffix leaves the empty string untouched when nothing matched.
	return strings.TrimSuffix(latest, ".json")
}
// notNeeded reports whether the report for date has already been created,
// either as a previously uploaded report or as a file already queued in
// todo.readyfiles.
func notNeeded(date string, todo work) bool {
	// Indexing a nil map safely yields the zero value, so no nil check
	// is required here.
	if todo.uploaded[date+".json"] {
		return true
	}
	for _, name := range todo.readyfiles {
		if strings.Contains(name, date) {
			return true
		}
	}
	return false
}
// deleteFiles removes each of the named files, logging (but otherwise
// ignoring) any failure.
func (u *uploader) deleteFiles(files []string) {
	for _, name := range files {
		err := os.Remove(name)
		if err == nil {
			continue
		}
		// Removal can fail in a race with another uploader process;
		// conversely, on Windows err may be nil while the file survives
		// because someone still has it open.
		u.logger.Printf("%v failed to remove %s", err, name)
	}
}
// createReport creates local and upload report files by
// combining all the count files for the expiryDate, and
// returns the upload report file's path ("" if nothing is uploadable).
// It may delete the count files once local and upload report
// files are successfully created.
func (u *uploader) createReport(start time.Time, expiryDate string, countFiles []string, lastWeek string) (string, error) {
	// uploadOK accumulates the reasons a report may only be kept locally;
	// the local report is always written regardless.
	uploadOK := true
	mode, asof := u.dir.Mode()
	if mode != "on" {
		u.logger.Printf("No upload config or mode %q is not 'on'", mode)
		uploadOK = false // no config, nothing to upload
	}
	if u.tooOld(expiryDate, u.startTime) {
		u.logger.Printf("Expiry date %s is too old", expiryDate)
		uploadOK = false
	}
	// If the mode is recorded with an asof date, don't upload if the report
	// includes any data on or before the asof date.
	if !asof.IsZero() && !asof.Before(start) {
		u.logger.Printf("As-of date %s is not before start %s", asof, start)
		uploadOK = false
	}
	// TODO(rfindley): check that all the x.Meta are consistent for GOOS, GOARCH, etc.
	report := &telemetry.Report{
		Config:   u.configVersion,
		X:        computeRandom(), // json encodes all the bits
		Week:     expiryDate,
		LastWeek: lastWeek,
	}
	// Sampling: reports whose random X exceeds the configured sample rate
	// are kept locally but not uploaded.
	if report.X > u.config.SampleRate && u.config.SampleRate > 0 {
		u.logger.Printf("X: %f > SampleRate:%f, not uploadable", report.X, u.config.SampleRate)
		uploadOK = false
	}
	// Merge every parseable count file into the report, grouped by program.
	var succeeded bool
	for _, f := range countFiles {
		fok := false
		x, err := u.parseCountFile(f)
		if err != nil {
			u.logger.Printf("Unparseable count file %s: %v", filepath.Base(f), err)
			continue
		}
		prog := findProgReport(x.Meta, report)
		for k, v := range x.Count {
			if counter.IsStackCounter(k) {
				// stack
				prog.Stacks[k] += int64(v)
			} else {
				// counter
				prog.Counters[k] += int64(v)
			}
			succeeded = true
			fok = true
		}
		if !fok {
			u.logger.Printf("no counters found in %s", f)
		}
	}
	if !succeeded {
		return "", fmt.Errorf("none of the %d count files for %s contained counters", len(countFiles), expiryDate)
	}
	// 1. generate the local report
	localContents, err := json.MarshalIndent(report, "", " ")
	if err != nil {
		return "", fmt.Errorf("failed to marshal report for %s: %v", expiryDate, err)
	}
	// check that the report can be read back
	// TODO(pjw): remove for production?
	var report2 telemetry.Report
	if err := json.Unmarshal(localContents, &report2); err != nil {
		return "", fmt.Errorf("failed to unmarshal local report for %s: %v", expiryDate, err)
	}

	var uploadContents []byte
	if uploadOK {
		// 2. create the uploadable version: a filtered copy containing only
		// the programs/counters/stacks mentioned in the upload config.
		cfg := config.NewConfig(u.config)
		upload := &telemetry.Report{
			Week:     report.Week,
			LastWeek: report.LastWeek,
			X:        report.X,
			Config:   report.Config,
		}
		for _, p := range report.Programs {
			// does the uploadConfig want this program?
			// if so, copy over the Stacks and Counters
			// that the uploadConfig mentions.
			if !cfg.HasGoVersion(p.GoVersion) || !cfg.HasProgram(p.Program) || !cfg.HasVersion(p.Program, p.Version) {
				continue
			}
			x := &telemetry.ProgramReport{
				Program:   p.Program,
				Version:   p.Version,
				GOOS:      p.GOOS,
				GOARCH:    p.GOARCH,
				GoVersion: p.GoVersion,
				Counters:  make(map[string]int64),
				Stacks:    make(map[string]int64),
			}
			upload.Programs = append(upload.Programs, x)
			for k, v := range p.Counters {
				if cfg.HasCounter(p.Program, k) && report.X <= cfg.Rate(p.Program, k) {
					x.Counters[k] = v
				}
			}
			// and the same for Stacks
			// this can be made more efficient, when it matters
			for k, v := range p.Stacks {
				before, _, _ := strings.Cut(k, "\n")
				if cfg.HasStack(p.Program, before) && report.X <= cfg.Rate(p.Program, before) {
					x.Stacks[k] = v
				}
			}
		}
		uploadContents, err = json.MarshalIndent(upload, "", " ")
		if err != nil {
			return "", fmt.Errorf("failed to marshal upload report for %s: %v", expiryDate, err)
		}
	}
	localFileName := filepath.Join(u.dir.LocalDir(), "local."+expiryDate+".json")
	uploadFileName := filepath.Join(u.dir.LocalDir(), expiryDate+".json")

	/* Prepare to write files */
	// if either file exists, someone has been here ahead of us
	// (there is still a race, but this check shortens the open window)
	if _, err := os.Stat(localFileName); err == nil {
		u.deleteFiles(countFiles)
		return "", fmt.Errorf("local report %s already exists", localFileName)
	}
	if _, err := os.Stat(uploadFileName); err == nil {
		u.deleteFiles(countFiles)
		return "", fmt.Errorf("report %s already exists", uploadFileName)
	}

	// write the uploadable file
	var errUpload, errLocal error
	if uploadOK {
		_, errUpload = exclusiveWrite(uploadFileName, uploadContents)
	}
	// write the local file
	_, errLocal = exclusiveWrite(localFileName, localContents)
	/* Wrote the files */

	// even though these errors won't occur, what should happen
	// if errUpload == nil and it is ok to upload, and errLocal != nil?
	if errLocal != nil {
		return "", fmt.Errorf("failed to write local file %s (%v)", localFileName, errLocal)
	}
	if errUpload != nil {
		return "", fmt.Errorf("failed to write upload file %s (%v)", uploadFileName, errUpload)
	}
	u.logger.Printf("Created %s, deleting %d count files", filepath.Base(uploadFileName), len(countFiles))
	u.deleteFiles(countFiles)
	if uploadOK {
		return uploadFileName, nil
	}
	return "", nil
}
// exclusiveWrite attempts to create filename exclusively, and if successful,
// writes content to the resulting file handle.
//
// It returns a boolean indicating whether the exclusive handle was acquired,
// and an error indicating whether the operation succeeded.
// If the file already exists, exclusiveWrite returns (false, nil).
func exclusiveWrite(filename string, content []byte) (_ bool, rerr error) {
	f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0644)
	if err != nil {
		if os.IsExist(err) {
			return false, nil // someone else created it first: not an error
		}
		return false, err
	}
	defer func() {
		// Surface the Close error only when the write itself succeeded.
		if cerr := f.Close(); cerr != nil && rerr == nil {
			rerr = cerr
		}
	}()
	if _, err = f.Write(content); err != nil {
		return false, err
	}
	return true, nil
}
// findProgReport returns the ProgramReport in report whose
// Program/Version/GoVersion/GOOS/GOARCH match the values recorded in meta,
// creating and registering a new one if none exists yet.
func findProgReport(meta map[string]string, report *telemetry.Report) *telemetry.ProgramReport {
	for _, existing := range report.Programs {
		if existing.Program == meta["Program"] &&
			existing.Version == meta["Version"] &&
			existing.GoVersion == meta["GoVersion"] &&
			existing.GOOS == meta["GOOS"] &&
			existing.GOARCH == meta["GOARCH"] {
			return existing
		}
	}
	created := &telemetry.ProgramReport{
		Program:   meta["Program"],
		Version:   meta["Version"],
		GoVersion: meta["GoVersion"],
		GOOS:      meta["GOOS"],
		GOARCH:    meta["GOARCH"],
		Counters:  make(map[string]int64),
		Stacks:    make(map[string]int64),
	}
	report.Programs = append(report.Programs, created)
	return created
}
// computeRandom returns a cryptographic random float64 in the range [0, 1),
// with 52 bits of precision.
func computeRandom() float64 {
	buf := make([]byte, 8)
	for {
		if _, err := rand.Read(buf); err != nil {
			panic(fmt.Sprintf("rand.Read failed: %v", err))
		}
		// Reinterpret the random bits as a float64 magnitude; reject values
		// that cannot be normalized safely.
		x := math.Abs(math.Float64frombits(binary.LittleEndian.Uint64(buf)))
		if math.IsNaN(x) || math.IsInf(x, 0) {
			continue
		}
		if x < 0x1p-1000 { // avoid underflow patterns
			continue
		}
		frac, _ := math.Frexp(x) // frac is in [0.5, 1): 52 bits of randomness
		return frac*2 - 1
	}
}

226
vendor/golang.org/x/telemetry/internal/upload/run.go generated vendored Normal file
View File

@@ -0,0 +1,226 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package upload
import (
"fmt"
"io"
"log"
"os"
"path"
"path/filepath"
"runtime/debug"
"strings"
"time"
"golang.org/x/telemetry/internal/configstore"
"golang.org/x/telemetry/internal/telemetry"
)
// RunConfig configures non-default behavior of a call to Run.
//
// All fields are optional, for testing or observability.
type RunConfig struct {
	TelemetryDir string    // if set, overrides the telemetry data directory
	UploadURL    string    // if set, overrides the telemetry upload endpoint
	LogWriter    io.Writer // if set, used for detailed logging of the upload process
	Env          []string  // if set, appended to the config download environment
	StartTime    time.Time // if set, overrides the upload start time
}
// Run generates and uploads reports, as allowed by the mode file.
func Run(config RunConfig) error {
	// A panic inside the upload machinery must not take down the host
	// process: log it and return normally.
	defer func() {
		if r := recover(); r != nil {
			log.Printf("upload recover: %v", r)
		}
	}()
	up, err := newUploader(config)
	if err != nil {
		return err
	}
	defer up.Close()
	return up.Run()
}
// uploader encapsulates a single upload operation, carrying parameters and
// shared state.
type uploader struct {
	// config is used to select counters to upload.
	config        *telemetry.UploadConfig //
	configVersion string                  // version of the config
	dir           telemetry.Dir           // the telemetry dir to process

	uploadServerURL string    // endpoint reports are POSTed to
	startTime       time.Time // reference time for expiry/age checks

	cache parsedCache // memoized parsed count files

	logFile *os.File    // debug log file, if any (closed by Close)
	logger  *log.Logger // detailed upload-process logging
}
// newUploader creates a new uploader to use for running the upload for the
// given config.
//
// Uploaders should only be used for one call to [uploader.Run].
func newUploader(rcfg RunConfig) (*uploader, error) {
	// Determine the upload directory.
	var dir telemetry.Dir
	if rcfg.TelemetryDir != "" {
		dir = telemetry.NewDir(rcfg.TelemetryDir)
	} else {
		dir = telemetry.Default
	}

	// Determine the upload URL.
	uploadURL := rcfg.UploadURL
	if uploadURL == "" {
		uploadURL = "https://telemetry.go.dev/upload"
	}

	// Determine the upload logger.
	//
	// This depends on the provided rcfg.LogWriter and the presence of
	// dir.DebugDir, as follows:
	//  1. If LogWriter is present, log to it.
	//  2. If DebugDir is present, log to a file within it.
	//  3. If both LogWriter and DebugDir are present, log to a multi writer.
	//  4. If neither LogWriter nor DebugDir are present, log to a noop logger.
	var logWriters []io.Writer
	logFile, err := debugLogFile(dir.DebugDir())
	if err != nil {
		// Logging is best-effort: failure to open the debug log file must
		// not prevent the upload itself.
		logFile = nil
	}
	if logFile != nil {
		logWriters = append(logWriters, logFile)
	}
	if rcfg.LogWriter != nil {
		logWriters = append(logWriters, rcfg.LogWriter)
	}
	var logWriter io.Writer
	switch len(logWriters) {
	case 0:
		logWriter = io.Discard
	case 1:
		logWriter = logWriters[0]
	default:
		logWriter = io.MultiWriter(logWriters...)
	}
	logger := log.New(logWriter, "", log.Ltime|log.Lmicroseconds|log.Lshortfile)

	// Fetch the upload config, if it is not provided.
	var (
		config        *telemetry.UploadConfig
		configVersion string
	)
	if mode, _ := dir.Mode(); mode == "on" {
		// golang/go#68946: only download the upload config if it will be used.
		//
		// TODO(rfindley): This is a narrow change aimed at minimally fixing the
		// associated bug. In the future, we should read the mode only once during
		// the upload process.
		config, configVersion, err = configstore.Download("latest", rcfg.Env)
		if err != nil {
			return nil, err
		}
	} else {
		// Telemetry is not "on": use an empty config so nothing is
		// considered uploadable.
		config = &telemetry.UploadConfig{}
		configVersion = "v0.0.0-0"
	}

	// Set the start time, if it is not provided.
	startTime := time.Now().UTC()
	if !rcfg.StartTime.IsZero() {
		startTime = rcfg.StartTime
	}

	return &uploader{
		config:          config,
		configVersion:   configVersion,
		dir:             dir,
		uploadServerURL: uploadURL,
		startTime:       startTime,
		logFile:         logFile,
		logger:          logger,
	}, nil
}
// Close cleans up any resources associated with the uploader, namely the
// debug log file (if one was opened).
func (u *uploader) Close() error {
	if f := u.logFile; f != nil {
		return f.Close()
	}
	return nil
}
// Run finds eligible counter files, converts them into reports, and
// uploads whatever reports are ready.
func (u *uploader) Run() error {
	if telemetry.DisabledOnPlatform {
		return nil
	}
	todo := u.findWork()
	ready, err := u.reports(&todo)
	if err != nil {
		u.logger.Printf("Error building reports: %v", err)
		return fmt.Errorf("reports failed: %v", err)
	}
	u.logger.Printf("Uploading %d reports", len(ready))
	for _, report := range ready {
		u.uploadReport(report)
	}
	return nil
}
// debugLogFile arranges to write a log file in the given debug directory, if
// it exists.
//
// It returns (nil, nil) when no log should be written: the debug directory
// is absent, or this process already created its log file during an earlier
// call to upload.Run.
func debugLogFile(debugDir string) (*os.File, error) {
	dirInfo, err := os.Stat(debugDir)
	if os.IsNotExist(err) {
		return nil, nil // no debug dir: no logging
	}
	if err != nil {
		return nil, err
	}
	if !dirInfo.IsDir() {
		return nil, fmt.Errorf("debug path %q is not a directory", debugDir)
	}
	build, ok := debug.ReadBuildInfo()
	if !ok {
		return nil, fmt.Errorf("no build info")
	}
	// Keep only the first field of the Go version, e.g.
	// "go1.22-20240109-RC01 cl/597041403 +dcbe772469 X:loopvar" -> "go1.22-20240109-RC01".
	goVers := strings.Fields(build.GoVersion)[0]
	progPkgPath := build.Path
	if progPkgPath == "" {
		progPkgPath = strings.TrimSuffix(filepath.Base(os.Args[0]), ".exe")
	}
	progVers := build.Main.Version
	if progVers == "(devel)" { // avoid special characters in created file names
		progVers = "devel"
	}
	// File name: <prog>-<progVers>-<goVers>-YYYYMMDD-<pid>.log, with any
	// remaining spaces removed.
	year, month, day := time.Now().UTC().Date()
	logBase := fmt.Sprintf("%s-%s-%s-%4d%02d%02d-%d.log",
		path.Base(progPkgPath), progVers, goVers, year, month, day, os.Getpid())
	logBase = strings.ReplaceAll(logBase, " ", "")
	fname := filepath.Join(debugDir, logBase)
	if _, err := os.Stat(fname); err == nil {
		// This process previously called upload.Run
		return nil, nil
	}
	f, err := os.OpenFile(fname, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0666)
	if err != nil {
		if os.IsExist(err) {
			return nil, nil // this process previously called upload.Run
		}
		return nil, err
	}
	return f, nil
}

117
vendor/golang.org/x/telemetry/internal/upload/upload.go generated vendored Normal file
View File

@@ -0,0 +1,117 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package upload
import (
"bytes"
"net/http"
"os"
"path/filepath"
"regexp"
"strings"
"time"
"golang.org/x/telemetry/internal/telemetry"
)
var (
	// dateRE matches the YYYY-MM-DD date component at the end of a report
	// file name (e.g. "2023-01-04.json"), capturing the date.
	dateRE = regexp.MustCompile(`(\d\d\d\d-\d\d-\d\d)[.]json$`)
	// dateFormat is the layout used for dates embedded in report names.
	dateFormat = telemetry.DateOnly
	// TODO(rfindley): use dateFormat throughout.
)
// uploadReportDate returns the date component of the upload file name, or the
// zero time if the date was missing or malformed (both cases are logged).
func (u *uploader) uploadReportDate(fname string) time.Time {
	m := dateRE.FindStringSubmatch(fname)
	if len(m) < 2 { // also covers a nil (no-match) result
		u.logger.Printf("malformed report name: missing date: %q", filepath.Base(fname))
		return time.Time{}
	}
	parsed, err := time.Parse(dateFormat, m[1])
	if err != nil {
		u.logger.Printf("malformed report name: bad date: %q", filepath.Base(fname))
		return time.Time{}
	}
	return parsed
}
// uploadReport reads the report file fname and attempts to upload its
// contents, skipping reports dated in the future.
func (u *uploader) uploadReport(fname string) {
	// TODO(rfindley): use uploadReportDate here, once we've done a gopls release.

	// first make sure it is not in the future
	today := u.startTime.Format(telemetry.DateOnly)
	match := dateRE.FindStringSubmatch(fname)
	switch {
	case len(match) < 2: // also covers a nil (no-match) result
		u.logger.Printf("Report name %q missing date", filepath.Base(fname))
	case match[1] > today:
		u.logger.Printf("Report date for %q is later than today (%s)", filepath.Base(fname), today)
		return // report is in the future, which shouldn't happen
	}
	buf, err := os.ReadFile(fname)
	if err != nil {
		u.logger.Printf("%v reading %s", err, fname)
		return
	}
	if u.uploadReportContents(fname, buf) {
		// anything left to do?
	}
}
// uploadReportContents uploads the report contents buf, corresponding to the
// local report file fname, to the telemetry server. It reports whether the
// upload succeeded.
//
// A .lock file in the upload directory guards against concurrent uploads of
// the same report by other processes. On success (or when another process has
// already uploaded the same report), the local file is removed.
func (u *uploader) uploadReportContents(fname string, buf []byte) bool {
	fdate := strings.TrimSuffix(filepath.Base(fname), ".json")
	fdate = fdate[len(fdate)-len(telemetry.DateOnly):]
	newname := filepath.Join(u.dir.UploadDir(), fdate+".json")

	// Lock the upload, to prevent duplicate uploads.
	{
		lockname := newname + ".lock"
		lockfile, err := os.OpenFile(lockname, os.O_CREATE|os.O_EXCL, 0666)
		if err != nil {
			u.logger.Printf("Failed to acquire lock %s: %v", lockname, err)
			return false
		}
		_ = lockfile.Close()
		defer os.Remove(lockname)
	}

	if _, err := os.Stat(newname); err == nil {
		// Another process uploaded but failed to clean up (or hasn't yet cleaned
		// up). Ensure that cleanup occurs.
		u.logger.Printf("After acquire: report already uploaded")
		_ = os.Remove(fname)
		return false
	}

	endpoint := u.uploadServerURL + "/" + fdate
	b := bytes.NewReader(buf)
	resp, err := http.Post(endpoint, "application/json", b)
	if err != nil {
		u.logger.Printf("Error upload %s to %s: %v", filepath.Base(fname), endpoint, err)
		return false
	}
	// Close the response body so the underlying connection can be reused
	// (previously it was leaked).
	defer resp.Body.Close()
	// hope for a 200, remove file on a 4xx, otherwise it will be retried by another process
	if resp.StatusCode != 200 {
		u.logger.Printf("Failed to upload %s to %s: %s", filepath.Base(fname), endpoint, resp.Status)
		if resp.StatusCode >= 400 && resp.StatusCode < 500 {
			// A 4xx means the server will never accept this report: discard it.
			err := os.Remove(fname)
			if err == nil {
				u.logger.Printf("Removed local/%s", filepath.Base(fname))
			} else {
				u.logger.Printf("Error removing local/%s: %v", filepath.Base(fname), err)
			}
		}
		return false
	}
	// Store a copy of the uploaded report in the uploaded directory.
	if err := os.WriteFile(newname, buf, 0644); err == nil {
		os.Remove(fname) // if it exists
	}
	u.logger.Printf("Uploaded %s to %q", fdate+".json", endpoint)
	return true
}