vendor: use versions when possible in glide.yaml

Now using tags instead of SHAs
Anthony Romano
2016-12-08 14:18:33 -08:00
parent cfd10b4bbf
commit f095334788
193 changed files with 34647 additions and 6462 deletions

View File

@ -27,14 +27,7 @@ func NewFS(mountPoint string) (FS, error) {
return FS(mountPoint), nil
}
func (fs FS) stat(p string) (os.FileInfo, error) {
return os.Stat(path.Join(string(fs), p))
}
func (fs FS) open(p string) (*os.File, error) {
return os.Open(path.Join(string(fs), p))
}
func (fs FS) readlink(p string) (string, error) {
return os.Readlink(path.Join(string(fs), p))
// Path returns the path of the given subsystem relative to the procfs root.
func (fs FS) Path(p ...string) string {
return path.Join(append([]string{string(fs)}, p...)...)
}
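
Not part of the diff, but as a usage sketch: the new Path helper replaces the unexported stat/open/readlink wrappers by returning a joined path that callers hand to the os package directly. The package example wrapper and function name below are illustrative; NewFS, DefaultMountPoint, and Path are taken from the signatures shown in this commit.

package example

import (
	"fmt"

	"github.com/prometheus/procfs"
)

// printIPVSPath shows how Path joins components onto the procfs root,
// e.g. producing "/proc/net/ip_vs_stats" for the default mount point.
func printIPVSPath() error {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		return err
	}
	fmt.Println(fs.Path("net", "ip_vs_stats"))
	return nil
}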

View File

@ -8,6 +8,7 @@ import (
"io"
"io/ioutil"
"net"
"os"
"strconv"
"strings"
)
@ -58,7 +59,7 @@ func NewIPVSStats() (IPVSStats, error) {
// NewIPVSStats reads the IPVS statistics from the specified `proc` filesystem.
func (fs FS) NewIPVSStats() (IPVSStats, error) {
file, err := fs.open("net/ip_vs_stats")
file, err := os.Open(fs.Path("net/ip_vs_stats"))
if err != nil {
return IPVSStats{}, err
}
@ -127,7 +128,7 @@ func NewIPVSBackendStatus() ([]IPVSBackendStatus, error) {
// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem.
func (fs FS) NewIPVSBackendStatus() ([]IPVSBackendStatus, error) {
file, err := fs.open("net/ip_vs")
file, err := os.Open(fs.Path("net/ip_vs"))
if err != nil {
return nil, err
}
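
A hedged usage sketch for the two IPVS readers updated above (not part of the commit). The field layouts of IPVSStats and IPVSBackendStatus are not visible in this diff, so the example only dumps and counts the results; the wrapper function is illustrative.

package example

import (
	"fmt"

	"github.com/prometheus/procfs"
)

// dumpIPVS reads both IPVS files through the FS methods shown above.
func dumpIPVS(fs procfs.FS) error {
	stats, err := fs.NewIPVSStats()
	if err != nil {
		return err
	}
	fmt.Printf("net/ip_vs_stats: %+v\n", stats)

	backends, err := fs.NewIPVSBackendStatus()
	if err != nil {
		return err
	}
	fmt.Printf("net/ip_vs: %d (virtual,real) server pairs\n", len(backends))
	return nil
}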

View File

@ -3,7 +3,6 @@ package procfs
import (
"fmt"
"io/ioutil"
"path"
"regexp"
"strconv"
"strings"
@ -32,36 +31,22 @@ type MDStat struct {
// ParseMDStat parses an mdstat file and returns a slice of structs with the relevant info.
func (fs FS) ParseMDStat() (mdstates []MDStat, err error) {
mdStatusFilePath := path.Join(string(fs), "mdstat")
mdStatusFilePath := fs.Path("mdstat")
content, err := ioutil.ReadFile(mdStatusFilePath)
if err != nil {
return []MDStat{}, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
}
mdStatusFile := string(content)
lines := strings.Split(mdStatusFile, "\n")
var currentMD string
// Each md has at least the deviceline, statusline and one empty line afterwards
// so we will have probably something of the order len(lines)/3 devices
// so we use that for preallocation.
estimateMDs := len(lines) / 3
mdStates := make([]MDStat, 0, estimateMDs)
mdStates := []MDStat{}
lines := strings.Split(string(content), "\n")
for i, l := range lines {
if l == "" {
// Skip entirely empty lines.
continue
}
if l[0] == ' ' {
// Those lines are not the beginning of a md-section.
continue
}
if strings.HasPrefix(l, "Personalities") || strings.HasPrefix(l, "unused") {
// We aren't interested in lines with general info.
continue
}
@ -69,32 +54,30 @@ func (fs FS) ParseMDStat() (mdstates []MDStat, err error) {
if len(mainLine) < 3 {
return mdStates, fmt.Errorf("error parsing mdline: %s", l)
}
currentMD = mainLine[0] // name of md-device
activityState := mainLine[2] // activity status of said md-device
mdName := mainLine[0]
activityState := mainLine[2]
if len(lines) <= i+3 {
return mdStates, fmt.Errorf("error parsing %s: entry for %s has fewer lines than expected", mdStatusFilePath, currentMD)
return mdStates, fmt.Errorf(
"error parsing %s: too few lines for md device %s",
mdStatusFilePath,
mdName,
)
}
active, total, size, err := evalStatusline(lines[i+1]) // parse statusline, always present
active, total, size, err := evalStatusline(lines[i+1])
if err != nil {
return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
}
//
// Now get the number of synced blocks.
//
// Get the line number of the syncing-line.
var j int
if strings.Contains(lines[i+2], "bitmap") { // then skip the bitmap line
// j is the line number of the syncing-line.
j := i + 2
if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line
j = i + 3
} else {
j = i + 2
}
// If device is syncing at the moment, get the number of currently synced bytes,
// otherwise that number equals the size of the device.
// If device is syncing at the moment, get the number of currently
// synced bytes, otherwise that number equals the size of the device.
syncedBlocks := size
if strings.Contains(lines[j], "recovery") || strings.Contains(lines[j], "resync") {
syncedBlocks, err = evalBuildline(lines[j])
@ -103,8 +86,14 @@ func (fs FS) ParseMDStat() (mdstates []MDStat, err error) {
}
}
mdStates = append(mdStates, MDStat{currentMD, activityState, active, total, size, syncedBlocks})
mdStates = append(mdStates, MDStat{
Name: mdName,
ActivityState: activityState,
DisksActive: active,
DisksTotal: total,
BlocksTotal: size,
BlocksSynced: syncedBlocks,
})
}
return mdStates, nil
@ -112,47 +101,38 @@ func (fs FS) ParseMDStat() (mdstates []MDStat, err error) {
func evalStatusline(statusline string) (active, total, size int64, err error) {
matches := statuslineRE.FindStringSubmatch(statusline)
// +1 to make it more obvious that the whole string containing the info is also returned as matches[0].
if len(matches) != 3+1 {
return 0, 0, 0, fmt.Errorf("unexpected number matches found in statusline: %s", statusline)
if len(matches) != 4 {
return 0, 0, 0, fmt.Errorf("unexpected statusline: %s", statusline)
}
size, err = strconv.ParseInt(matches[1], 10, 64)
if err != nil {
return 0, 0, 0, fmt.Errorf("%s in statusline: %s", err, statusline)
return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err)
}
total, err = strconv.ParseInt(matches[2], 10, 64)
if err != nil {
return 0, 0, 0, fmt.Errorf("%s in statusline: %s", err, statusline)
return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err)
}
active, err = strconv.ParseInt(matches[3], 10, 64)
if err != nil {
return 0, 0, 0, fmt.Errorf("%s in statusline: %s", err, statusline)
return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err)
}
return active, total, size, nil
}
// Gets the size that has already been synced out of the sync-line.
func evalBuildline(buildline string) (int64, error) {
func evalBuildline(buildline string) (syncedBlocks int64, err error) {
matches := buildlineRE.FindStringSubmatch(buildline)
// +1 to make it more obvious that the whole string containing the info is also returned as matches[0].
if len(matches) < 1+1 {
return 0, fmt.Errorf("too few matches found in buildline: %s", buildline)
if len(matches) != 2 {
return 0, fmt.Errorf("unexpected buildline: %s", buildline)
}
if len(matches) > 1+1 {
return 0, fmt.Errorf("too many matches found in buildline: %s", buildline)
}
syncedSize, err := strconv.ParseInt(matches[1], 10, 64)
syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64)
if err != nil {
return 0, fmt.Errorf("%s in buildline: %s", err, buildline)
}
return syncedSize, nil
return syncedBlocks, nil
}
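
For reference, a minimal sketch of calling ParseMDStat with the named MDStat fields introduced above (not part of the commit; the wrapper function is illustrative).

package example

import (
	"fmt"

	"github.com/prometheus/procfs"
)

// printMDStat lists every md device reported by the mdstat file.
func printMDStat(fs procfs.FS) error {
	mds, err := fs.ParseMDStat()
	if err != nil {
		return err
	}
	for _, md := range mds {
		fmt.Printf("%s: %s, %d/%d disks active, %d/%d blocks synced\n",
			md.Name, md.ActivityState, md.DisksActive, md.DisksTotal,
			md.BlocksSynced, md.BlocksTotal)
	}
	return nil
}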

cmd/vendor/github.com/prometheus/procfs/mountstats.go (new file, 552 additions, generated, vendored)
View File

@ -0,0 +1,552 @@
package procfs
// While implementing parsing of /proc/[pid]/mountstats, this blog was used
// heavily as a reference:
// https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsIndex
//
// Special thanks to Chris Siebenmann for all of his posts explaining the
// various statistics available for NFS.
import (
"bufio"
"fmt"
"io"
"strconv"
"strings"
"time"
)
// Constants shared between multiple functions.
const (
deviceEntryLen = 8
fieldBytesLen = 8
fieldEventsLen = 27
statVersion10 = "1.0"
statVersion11 = "1.1"
fieldTransport10Len = 10
fieldTransport11Len = 13
)
// A Mount is a device mount parsed from /proc/[pid]/mountstats.
type Mount struct {
// Name of the device.
Device string
// The mount point of the device.
Mount string
// The filesystem type used by the device.
Type string
// If available, additional statistics related to this Mount.
// Use a type assertion to determine if additional statistics are available.
Stats MountStats
}
// A MountStats is a type which contains detailed statistics for a specific
// type of Mount.
type MountStats interface {
mountStats()
}
// A MountStatsNFS is a MountStats implementation for NFSv3 and v4 mounts.
type MountStatsNFS struct {
// The version of statistics provided.
StatVersion string
// The age of the NFS mount.
Age time.Duration
// Statistics related to byte counters for various operations.
Bytes NFSBytesStats
// Statistics related to various NFS event occurrences.
Events NFSEventsStats
// Statistics broken down by filesystem operation.
Operations []NFSOperationStats
// Statistics about the NFS RPC transport.
Transport NFSTransportStats
}
// mountStats implements MountStats.
func (m MountStatsNFS) mountStats() {}
// A NFSBytesStats contains statistics about the number of bytes read and written
// by an NFS client to and from an NFS server.
type NFSBytesStats struct {
// Number of bytes read using the read() syscall.
Read int
// Number of bytes written using the write() syscall.
Write int
// Number of bytes read using the read() syscall in O_DIRECT mode.
DirectRead int
// Number of bytes written using the write() syscall in O_DIRECT mode.
DirectWrite int
// Number of bytes read from the NFS server, in total.
ReadTotal int
// Number of bytes written to the NFS server, in total.
WriteTotal int
// Number of pages read directly via mmap()'d files.
ReadPages int
// Number of pages written directly via mmap()'d files.
WritePages int
}
// A NFSEventsStats contains statistics about NFS event occurrences.
type NFSEventsStats struct {
// Number of times cached inode attributes are re-validated from the server.
InodeRevalidate int
// Number of times cached dentry nodes are re-validated from the server.
DnodeRevalidate int
// Number of times an inode cache is cleared.
DataInvalidate int
// Number of times cached inode attributes are invalidated.
AttributeInvalidate int
// Number of times files or directories have been open()'d.
VFSOpen int
// Number of times a directory lookup has occurred.
VFSLookup int
// Number of times permissions have been checked.
VFSAccess int
// Number of updates (and potential writes) to pages.
VFSUpdatePage int
// Number of pages read directly via mmap()'d files.
VFSReadPage int
// Number of times a group of pages have been read.
VFSReadPages int
// Number of pages written directly via mmap()'d files.
VFSWritePage int
// Number of times a group of pages have been written.
VFSWritePages int
// Number of times directory entries have been read with getdents().
VFSGetdents int
// Number of times attributes have been set on inodes.
VFSSetattr int
// Number of pending writes that have been forcefully flushed to the server.
VFSFlush int
// Number of times fsync() has been called on directories and files.
VFSFsync int
// Number of times locking has been attempted on a file.
VFSLock int
// Number of times files have been closed and released.
VFSFileRelease int
// Unknown. Possibly unused.
CongestionWait int
// Number of times files have been truncated.
Truncation int
// Number of times a file has been grown due to writes beyond its existing end.
WriteExtension int
// Number of times a file was removed while still open by another process.
SillyRename int
// Number of times the NFS server gave less data than expected while reading.
ShortRead int
// Number of times the NFS server wrote less data than expected while writing.
ShortWrite int
// Number of times the NFS server indicated EJUKEBOX; retrieving data from
// offline storage.
JukeboxDelay int
// Number of NFS v4.1+ pNFS reads.
PNFSRead int
// Number of NFS v4.1+ pNFS writes.
PNFSWrite int
}
// A NFSOperationStats contains statistics for a single operation.
type NFSOperationStats struct {
// The name of the operation.
Operation string
// Number of requests performed for this operation.
Requests int
// Number of times an actual RPC request has been transmitted for this operation.
Transmissions int
// Number of times a request has had a major timeout.
MajorTimeouts int
// Number of bytes sent for this operation, including RPC headers and payload.
BytesSent int
// Number of bytes received for this operation, including RPC headers and payload.
BytesReceived int
// Duration all requests spent queued for transmission before they were sent.
CumulativeQueueTime time.Duration
// Duration it took to get a reply back after the request was transmitted.
CumulativeTotalResponseTime time.Duration
// Duration from when a request was enqueued to when it was completely handled.
CumulativeTotalRequestTime time.Duration
}
// A NFSTransportStats contains statistics for the NFS mount RPC requests and
// responses.
type NFSTransportStats struct {
// The local port used for the NFS mount.
Port int
// Number of times the client has had to establish a connection from scratch
// to the NFS server.
Bind int
// Number of times the client has made a TCP connection to the NFS server.
Connect int
// Duration (in jiffies, a kernel internal unit of time) the NFS mount has
// spent waiting for connections to the server to be established.
ConnectIdleTime int
// Duration since the NFS mount last saw any RPC traffic.
IdleTime time.Duration
// Number of RPC requests for this mount sent to the NFS server.
Sends int
// Number of RPC responses for this mount received from the NFS server.
Receives int
// Number of times the NFS server sent a response with a transaction ID
// unknown to this client.
BadTransactionIDs int
// A running counter, incremented on each request as the current difference
// between sends and receives.
CumulativeActiveRequests int
// A running counter, incremented on each request by the current backlog
// queue size.
CumulativeBacklog int
// Stats below only available with stat version 1.1.
// Maximum number of simultaneously active RPC requests ever used.
MaximumRPCSlotsUsed int
// A running counter, incremented on each request as the current size of the
// sending queue.
CumulativeSendingQueue int
// A running counter, incremented on each request as the current size of the
// pending queue.
CumulativePendingQueue int
}
// parseMountStats parses a /proc/[pid]/mountstats file and returns a slice
// of Mount structures containing detailed information about each mount.
// If available, statistics for each mount are parsed as well.
func parseMountStats(r io.Reader) ([]*Mount, error) {
const (
device = "device"
statVersionPrefix = "statvers="
nfs3Type = "nfs"
nfs4Type = "nfs4"
)
var mounts []*Mount
s := bufio.NewScanner(r)
for s.Scan() {
// Only look for device entries in this function
ss := strings.Fields(string(s.Bytes()))
if len(ss) == 0 || ss[0] != device {
continue
}
m, err := parseMount(ss)
if err != nil {
return nil, err
}
// Does this mount also possess statistics information?
if len(ss) > deviceEntryLen {
// Only NFSv3 and v4 are supported for parsing statistics
if m.Type != nfs3Type && m.Type != nfs4Type {
return nil, fmt.Errorf("cannot parse MountStats for fstype %q", m.Type)
}
statVersion := strings.TrimPrefix(ss[8], statVersionPrefix)
stats, err := parseMountStatsNFS(s, statVersion)
if err != nil {
return nil, err
}
m.Stats = stats
}
mounts = append(mounts, m)
}
return mounts, s.Err()
}
// parseMount parses an entry in /proc/[pid]/mountstats in the format:
// device [device] mounted on [mount] with fstype [type]
func parseMount(ss []string) (*Mount, error) {
if len(ss) < deviceEntryLen {
return nil, fmt.Errorf("invalid device entry: %v", ss)
}
// Check for specific words appearing at specific indices to ensure
// the format is consistent with what we expect
format := []struct {
i int
s string
}{
{i: 0, s: "device"},
{i: 2, s: "mounted"},
{i: 3, s: "on"},
{i: 5, s: "with"},
{i: 6, s: "fstype"},
}
for _, f := range format {
if ss[f.i] != f.s {
return nil, fmt.Errorf("invalid device entry: %v", ss)
}
}
return &Mount{
Device: ss[1],
Mount: ss[4],
Type: ss[7],
}, nil
}
// parseMountStatsNFS parses a MountStatsNFS by scanning additional information
// related to NFS statistics.
func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) {
// Field indicators for parsing specific types of data
const (
fieldAge = "age:"
fieldBytes = "bytes:"
fieldEvents = "events:"
fieldPerOpStats = "per-op"
fieldTransport = "xprt:"
)
stats := &MountStatsNFS{
StatVersion: statVersion,
}
for s.Scan() {
ss := strings.Fields(string(s.Bytes()))
if len(ss) == 0 {
break
}
if len(ss) < 2 {
return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
}
switch ss[0] {
case fieldAge:
// Age integer is in seconds
d, err := time.ParseDuration(ss[1] + "s")
if err != nil {
return nil, err
}
stats.Age = d
case fieldBytes:
bstats, err := parseNFSBytesStats(ss[1:])
if err != nil {
return nil, err
}
stats.Bytes = *bstats
case fieldEvents:
estats, err := parseNFSEventsStats(ss[1:])
if err != nil {
return nil, err
}
stats.Events = *estats
case fieldTransport:
if len(ss) < 3 {
return nil, fmt.Errorf("not enough information for NFS transport stats: %v", ss)
}
tstats, err := parseNFSTransportStats(ss[2:], statVersion)
if err != nil {
return nil, err
}
stats.Transport = *tstats
}
// When encountering "per-operation statistics", we must break this
// loop and parse them separately to ensure we can terminate parsing
// before reaching another device entry; hence why this 'if' statement
// is not just another switch case
if ss[0] == fieldPerOpStats {
break
}
}
if err := s.Err(); err != nil {
return nil, err
}
// NFS per-operation stats appear last before the next device entry
perOpStats, err := parseNFSOperationStats(s)
if err != nil {
return nil, err
}
stats.Operations = perOpStats
return stats, nil
}
// parseNFSBytesStats parses a NFSBytesStats line using an input set of
// integer fields.
func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) {
if len(ss) != fieldBytesLen {
return nil, fmt.Errorf("invalid NFS bytes stats: %v", ss)
}
ns := make([]int, 0, fieldBytesLen)
for _, s := range ss {
n, err := strconv.Atoi(s)
if err != nil {
return nil, err
}
ns = append(ns, n)
}
return &NFSBytesStats{
Read: ns[0],
Write: ns[1],
DirectRead: ns[2],
DirectWrite: ns[3],
ReadTotal: ns[4],
WriteTotal: ns[5],
ReadPages: ns[6],
WritePages: ns[7],
}, nil
}
// parseNFSEventsStats parses a NFSEventsStats line using an input set of
// integer fields.
func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) {
if len(ss) != fieldEventsLen {
return nil, fmt.Errorf("invalid NFS events stats: %v", ss)
}
ns := make([]int, 0, fieldEventsLen)
for _, s := range ss {
n, err := strconv.Atoi(s)
if err != nil {
return nil, err
}
ns = append(ns, n)
}
return &NFSEventsStats{
InodeRevalidate: ns[0],
DnodeRevalidate: ns[1],
DataInvalidate: ns[2],
AttributeInvalidate: ns[3],
VFSOpen: ns[4],
VFSLookup: ns[5],
VFSAccess: ns[6],
VFSUpdatePage: ns[7],
VFSReadPage: ns[8],
VFSReadPages: ns[9],
VFSWritePage: ns[10],
VFSWritePages: ns[11],
VFSGetdents: ns[12],
VFSSetattr: ns[13],
VFSFlush: ns[14],
VFSFsync: ns[15],
VFSLock: ns[16],
VFSFileRelease: ns[17],
CongestionWait: ns[18],
Truncation: ns[19],
WriteExtension: ns[20],
SillyRename: ns[21],
ShortRead: ns[22],
ShortWrite: ns[23],
JukeboxDelay: ns[24],
PNFSRead: ns[25],
PNFSWrite: ns[26],
}, nil
}
// parseNFSOperationStats parses a slice of NFSOperationStats by scanning
// additional information about per-operation statistics until an empty
// line is reached.
func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
const (
// Number of expected fields in each per-operation statistics set
numFields = 9
)
var ops []NFSOperationStats
for s.Scan() {
ss := strings.Fields(string(s.Bytes()))
if len(ss) == 0 {
// Must break when reading a blank line after per-operation stats to
// enable top-level function to parse the next device entry
break
}
if len(ss) != numFields {
return nil, fmt.Errorf("invalid NFS per-operations stats: %v", ss)
}
// Skip the string operation name; the remaining fields are integers.
ns := make([]int, 0, numFields-1)
for _, st := range ss[1:] {
n, err := strconv.Atoi(st)
if err != nil {
return nil, err
}
ns = append(ns, n)
}
ops = append(ops, NFSOperationStats{
Operation: strings.TrimSuffix(ss[0], ":"),
Requests: ns[0],
Transmissions: ns[1],
MajorTimeouts: ns[2],
BytesSent: ns[3],
BytesReceived: ns[4],
CumulativeQueueTime: time.Duration(ns[5]) * time.Millisecond,
CumulativeTotalResponseTime: time.Duration(ns[6]) * time.Millisecond,
CumulativeTotalRequestTime: time.Duration(ns[7]) * time.Millisecond,
})
}
return ops, s.Err()
}
// parseNFSTransportStats parses a NFSTransportStats line using an input set of
// integer fields matched to a specific stats version.
func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats, error) {
switch statVersion {
case statVersion10:
if len(ss) != fieldTransport10Len {
return nil, fmt.Errorf("invalid NFS transport stats 1.0 statement: %v", ss)
}
case statVersion11:
if len(ss) != fieldTransport11Len {
return nil, fmt.Errorf("invalid NFS transport stats 1.1 statement: %v", ss)
}
default:
return nil, fmt.Errorf("unrecognized NFS transport stats version: %q", statVersion)
}
// Allocate enough for v1.1 stats, since the zero values of the v1.1-only
// fields are acceptable when parsing a v1.0 response
ns := make([]int, 0, fieldTransport11Len)
for _, s := range ss {
n, err := strconv.Atoi(s)
if err != nil {
return nil, err
}
ns = append(ns, n)
}
return &NFSTransportStats{
Port: ns[0],
Bind: ns[1],
Connect: ns[2],
ConnectIdleTime: ns[3],
IdleTime: time.Duration(ns[4]) * time.Second,
Sends: ns[5],
Receives: ns[6],
BadTransactionIDs: ns[7],
CumulativeActiveRequests: ns[8],
CumulativeBacklog: ns[9],
MaximumRPCSlotsUsed: ns[10],
CumulativeSendingQueue: ns[11],
CumulativePendingQueue: ns[12],
}, nil
}
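
The Mount docs above say to use a type assertion to check for extended statistics; a sketch of that pattern follows (not part of the vendored file). It relies on the Proc.MountStats accessor added in proc.go later in this commit; the wrapper function is illustrative.

package example

import (
	"fmt"

	"github.com/prometheus/procfs"
)

// printNFSMounts prints byte counters for every NFS mount of a process;
// non-NFS mounts carry no extended stats and are skipped.
func printNFSMounts(p procfs.Proc) error {
	mounts, err := p.MountStats()
	if err != nil {
		return err
	}
	for _, m := range mounts {
		nfs, ok := m.Stats.(*procfs.MountStatsNFS)
		if !ok {
			continue
		}
		fmt.Printf("%s on %s (%s): age %v, %d bytes read, %d bytes written\n",
			m.Device, m.Mount, m.Type, nfs.Age, nfs.Bytes.ReadTotal, nfs.Bytes.WriteTotal)
	}
	return nil
}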

View File

@ -4,7 +4,6 @@ import (
"fmt"
"io/ioutil"
"os"
"path"
"strconv"
"strings"
)
@ -42,7 +41,7 @@ func NewProc(pid int) (Proc, error) {
return fs.NewProc(pid)
}
// AllProcs returns a list of all currently avaible processes under /proc.
// AllProcs returns a list of all currently available processes under /proc.
func AllProcs() (Procs, error) {
fs, err := NewFS(DefaultMountPoint)
if err != nil {
@ -53,7 +52,7 @@ func AllProcs() (Procs, error) {
// Self returns a process for the current process.
func (fs FS) Self() (Proc, error) {
p, err := fs.readlink("self")
p, err := os.Readlink(fs.Path("self"))
if err != nil {
return Proc{}, err
}
@ -66,15 +65,15 @@ func (fs FS) Self() (Proc, error) {
// NewProc returns a process for the given pid.
func (fs FS) NewProc(pid int) (Proc, error) {
if _, err := fs.stat(strconv.Itoa(pid)); err != nil {
if _, err := os.Stat(fs.Path(strconv.Itoa(pid))); err != nil {
return Proc{}, err
}
return Proc{PID: pid, fs: fs}, nil
}
// AllProcs returns a list of all currently avaible processes.
// AllProcs returns a list of all currently available processes.
func (fs FS) AllProcs() (Procs, error) {
d, err := fs.open("")
d, err := os.Open(fs.Path())
if err != nil {
return Procs{}, err
}
@ -99,7 +98,7 @@ func (fs FS) AllProcs() (Procs, error) {
// CmdLine returns the command line of a process.
func (p Proc) CmdLine() ([]string, error) {
f, err := p.open("cmdline")
f, err := os.Open(p.path("cmdline"))
if err != nil {
return nil, err
}
@ -117,10 +116,25 @@ func (p Proc) CmdLine() ([]string, error) {
return strings.Split(string(data[:len(data)-1]), string(byte(0))), nil
}
// Comm returns the command name of a process.
func (p Proc) Comm() (string, error) {
f, err := os.Open(p.path("comm"))
if err != nil {
return "", err
}
defer f.Close()
data, err := ioutil.ReadAll(f)
if err != nil {
return "", err
}
return strings.TrimSpace(string(data)), nil
}
// Executable returns the absolute path of the executable command of a process.
func (p Proc) Executable() (string, error) {
exe, err := p.readlink("exe")
exe, err := os.Readlink(p.path("exe"))
if os.IsNotExist(err) {
return "", nil
}
@ -158,7 +172,7 @@ func (p Proc) FileDescriptorTargets() ([]string, error) {
targets := make([]string, len(names))
for i, name := range names {
target, err := p.readlink("fd/" + name)
target, err := os.Readlink(p.path("fd", name))
if err == nil {
targets[i] = target
}
@ -178,8 +192,20 @@ func (p Proc) FileDescriptorsLen() (int, error) {
return len(fds), nil
}
// MountStats retrieves statistics and configuration for mount points in a
// process's namespace.
func (p Proc) MountStats() ([]*Mount, error) {
f, err := os.Open(p.path("mountstats"))
if err != nil {
return nil, err
}
defer f.Close()
return parseMountStats(f)
}
func (p Proc) fileDescriptors() ([]string, error) {
d, err := p.open("fd")
d, err := os.Open(p.path("fd"))
if err != nil {
return nil, err
}
@ -193,10 +219,6 @@ func (p Proc) fileDescriptors() ([]string, error) {
return names, nil
}
func (p Proc) open(pa string) (*os.File, error) {
return p.fs.open(path.Join(strconv.Itoa(p.PID), pa))
}
func (p Proc) readlink(pa string) (string, error) {
return p.fs.readlink(path.Join(strconv.Itoa(p.PID), pa))
func (p Proc) path(pa ...string) string {
return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...)
}
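
A short usage sketch for the Proc helpers touched in this file, including the new Comm accessor (not part of the commit; the wrapper function is illustrative).

package example

import (
	"fmt"

	"github.com/prometheus/procfs"
)

// describeSelf resolves the calling process through the default /proc mount
// and prints its command name and executable path.
func describeSelf() error {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		return err
	}
	p, err := fs.Self()
	if err != nil {
		return err
	}
	comm, err := p.Comm()
	if err != nil {
		return err
	}
	exe, err := p.Executable()
	if err != nil {
		return err
	}
	fmt.Printf("pid %d: comm=%q exe=%q\n", p.PID, comm, exe)
	return nil
}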

View File

@ -3,6 +3,7 @@ package procfs
import (
"fmt"
"io/ioutil"
"os"
)
// ProcIO models the content of /proc/<pid>/io.
@ -29,7 +30,7 @@ type ProcIO struct {
func (p Proc) NewIO() (ProcIO, error) {
pio := ProcIO{}
f, err := p.open("io")
f, err := os.Open(p.path("io"))
if err != nil {
return pio, err
}
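
Illustrative only: ProcIO's individual field names are not shown in this hunk, so the sketch below simply dumps the struct returned by NewIO; the wrapper function is made up.

package example

import (
	"fmt"

	"github.com/prometheus/procfs"
)

// dumpIO prints the raw I/O counters of a process.
func dumpIO(p procfs.Proc) error {
	io, err := p.NewIO()
	if err != nil {
		return err
	}
	fmt.Printf("%+v\n", io)
	return nil
}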

View File

@ -3,29 +3,56 @@ package procfs
import (
"bufio"
"fmt"
"os"
"regexp"
"strconv"
)
// ProcLimits represents the soft limits for each of the process's resource
// limits.
// limits. For more information see getrlimit(2):
// http://man7.org/linux/man-pages/man2/getrlimit.2.html.
type ProcLimits struct {
CPUTime int
FileSize int
DataSize int
StackSize int
CoreFileSize int
ResidentSet int
Processes int
OpenFiles int
LockedMemory int
AddressSpace int
FileLocks int
PendingSignals int
MsqqueueSize int
NicePriority int
// CPU time limit in seconds.
CPUTime int
// Maximum size of files that the process may create.
FileSize int
// Maximum size of the process's data segment (initialized data,
// uninitialized data, and heap).
DataSize int
// Maximum size of the process stack in bytes.
StackSize int
// Maximum size of a core file.
CoreFileSize int
// Limit of the process's resident set in pages.
ResidentSet int
// Maximum number of processes that can be created for the real user ID of
// the calling process.
Processes int
// Value one greater than the maximum file descriptor number that can be
// opened by this process.
OpenFiles int
// Maximum number of bytes of memory that may be locked into RAM.
LockedMemory int
// Maximum size of the process's virtual memory address space in bytes.
AddressSpace int
// Limit on the combined number of flock(2) locks and fcntl(2) leases that
// this process may establish.
FileLocks int
// Limit of signals that may be queued for the real user ID of the calling
// process.
PendingSignals int
// Limit on the number of bytes that can be allocated for POSIX message
// queues for the real user ID of the calling process.
MsqqueueSize int
// Limit of the nice priority set using setpriority(2) or nice(2).
NicePriority int
// Limit of the real-time priority set using sched_setscheduler(2) or
// sched_setparam(2).
RealtimePriority int
RealtimeTimeout int
// Limit (in microseconds) on the amount of CPU time that a process
// scheduled under a real-time scheduling policy may consume without making
// a blocking system call.
RealtimeTimeout int
}
const (
@ -39,7 +66,7 @@ var (
// NewLimits returns the current soft limits of the process.
func (p Proc) NewLimits() (ProcLimits, error) {
f, err := p.open("limits")
f, err := os.Open(p.path("limits"))
if err != nil {
return ProcLimits{}, err
}
@ -60,7 +87,7 @@ func (p Proc) NewLimits() (ProcLimits, error) {
case "Max cpu time":
l.CPUTime, err = parseInt(fields[1])
case "Max file size":
l.FileLocks, err = parseInt(fields[1])
l.FileSize, err = parseInt(fields[1])
case "Max data size":
l.DataSize, err = parseInt(fields[1])
case "Max stack size":
@ -90,7 +117,6 @@ func (p Proc) NewLimits() (ProcLimits, error) {
case "Max realtime timeout":
l.RealtimeTimeout, err = parseInt(fields[1])
}
if err != nil {
return ProcLimits{}, err
}
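
A sketch using a few of the newly documented ProcLimits fields (not part of the commit; the wrapper function is illustrative).

package example

import (
	"fmt"

	"github.com/prometheus/procfs"
)

// printLimits reports selected soft limits of a process.
func printLimits(p procfs.Proc) error {
	l, err := p.NewLimits()
	if err != nil {
		return err
	}
	fmt.Printf("open files: %d, address space: %d bytes, pending signals: %d\n",
		l.OpenFiles, l.AddressSpace, l.PendingSignals)
	return nil
}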

View File

@ -7,15 +7,15 @@ import (
"os"
)
// Originally, this USER_HZ value was dynamically retrieved via a sysconf call which
// required cgo. However, that caused a lot of problems regarding
// Originally, this USER_HZ value was dynamically retrieved via a sysconf call
// which required cgo. However, that caused a lot of problems regarding
// cross-compilation. Alternatives such as running a binary to determine the
// value, or trying to derive it in some other way were all problematic.
// After much research it was determined that USER_HZ is actually hardcoded to
// 100 on all Go-supported platforms as of the time of this writing. This is
// why we decided to hardcode it here as well. It is not impossible that there
// could be systems with exceptions, but they should be very exotic edge cases,
// and in that case, the worst outcome will be two misreported metrics.
// value, or trying to derive it in some other way were all problematic. After
// much research it was determined that USER_HZ is actually hardcoded to 100 on
// all Go-supported platforms as of the time of this writing. This is why we
// decided to hardcode it here as well. It is not impossible that there could
// be systems with exceptions, but they should be very exotic edge cases, and
// in that case, the worst outcome will be two misreported metrics.
//
// See also the following discussions:
//
@ -91,7 +91,7 @@ type ProcStat struct {
// NewStat returns the current status information of the process.
func (p Proc) NewStat() (ProcStat, error) {
f, err := p.open("stat")
f, err := os.Open(p.path("stat"))
if err != nil {
return ProcStat{}, err
}
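
The USER_HZ comment above reduces to a single division; the helper below is illustrative arithmetic only (ticks is a made-up input, not a ProcStat field shown in this diff).

package example

// cpuSeconds converts a clock-tick count read from /proc/[pid]/stat into
// seconds using the hardcoded USER_HZ of 100 discussed above,
// e.g. 4250 ticks -> 42.5 seconds.
func cpuSeconds(ticks uint64) float64 {
	const userHZ = 100
	return float64(ticks) / userHZ
}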

View File

@ -3,6 +3,7 @@ package procfs
import (
"bufio"
"fmt"
"os"
"strconv"
"strings"
)
@ -25,7 +26,7 @@ func NewStat() (Stat, error) {
// NewStat returns information about current kernel/system statistics.
func (fs FS) NewStat() (Stat, error) {
f, err := fs.open("stat")
f, err := os.Open(fs.Path("stat"))
if err != nil {
return Stat{}, err
}