package collectors

v0.0.0-...-a19e79c
Published: Oct 6, 2015 License: MIT Imports: 49 Imported by: 0

Documentation

Constants

const (
	VRRPInstanceTable = ".1.3.6.1.4.1.9586.100.5.2.3.1"
	VRRPAddressTable  = ".1.3.6.1.4.1.9586.100.5.2.6.1"
)

const (
	DescRmqBackingQueueStatusAvgAckEgressRate  = "Rate at which unacknowledged message records leave RAM, e.g. because acks arrive or unacked messages are paged out"
	DescRmqBackingQueueStatusAvgAckIngressRate = "Rate at which unacknowledged message records enter RAM, e.g. because messages are delivered requiring acknowledgement"
	DescRmqBackingQueueStatusAvgEgressRate     = "Average egress (outbound) rate, not including messages that go straight through to auto-acking consumers."
	DescRmqBackingQueueStatusAvgIngressRate    = "Average ingress (inbound) rate, not including messages that go straight through to auto-acking consumers."
	DescRmqBackingQueueStatusLen               = "Total backing queue length."
	DescRmqConsumers                           = "Number of consumers."
	DescRmqConsumerUtilisation                 = "" /* 203-byte string literal not displayed */
	DescRmqDiskFreeAlarm                       = "Whether the disk alarm has gone off."
	DescRmqDiskFree                            = "Disk free space in bytes."
	DescRmqDiskFreeLimit                       = "Point at which the disk alarm will go off."
	DescRmqDownSlaveNodes                      = "Count of down nodes having a copy of the queue."
	DescRmqFDTotal                             = "File descriptors available."
	DescRmqFDUsed                              = "Used file descriptors."
	DescRmqIOReadAvgTime                       = "Average wall time (milliseconds) for each disk read operation in the last statistics interval."
	DescRmqIOReadBytes                         = "Total number of bytes read from disk by the persister."
	DescRmqIOReadCount                         = "Total number of read operations by the persister."
	DescRmqIOReopenCount                       = "" /* 248-byte string literal not displayed */
	DescRmqIOSeekAvgTime                       = "Average wall time (milliseconds) for each seek operation in the last statistics interval."
	DescRmqIOSeekCount                         = "Total number of seek operations by the persister."
	DescRmqIOSyncAvgTime                       = "Average wall time (milliseconds) for each sync operation in the last statistics interval."
	DescRmqIOSyncCount                         = "Total number of fsync() operations by the persister."
	DescRmqIOWriteAvgTime                      = "Average wall time (milliseconds) for each write operation in the last statistics interval."
	DescRmqIOWriteBytes                        = "Total number of bytes written to disk by the persister."
	DescRmqIOWriteCount                        = "Total number of write operations by the persister."
	DescRmqMemAlarm                            = ""
	DescRmqMemLimit                            = "Point at which the memory alarm will go off."
	DescRmqMemory                              = "Bytes of memory consumed by the Erlang process associated with the queue, including stack, heap and internal structures."
	DescRmqMemUsed                             = "Memory used in bytes."
	DescRmqMessageBytesPersistent              = "Like messageBytes but counting only those messages which are persistent."
	DescRmqMessageBytesRAM                     = "Like messageBytes but counting only those messages which are in RAM."
	DescRmqMessageBytesReady                   = "Like messageBytes but counting only those messages ready to be delivered to clients."
	DescRmqMessageBytes                        = "" /* 133-byte string literal not displayed */
	DescRmqMessageBytesUnacknowledged          = "Like messageBytes but counting only those messages delivered to clients but not yet acknowledged."
	DescRmqMessagesPersistent                  = "Total number of persistent messages in the queue (will always be 0 for transient queues)."
	DescRmqMessagesRAM                         = "Total number of messages which are resident in ram."
	DescRmqMessagesReady                       = "Number of messages ready to be delivered to clients."
	DescRmqMessagesReadyRAM                    = "Number of messages from messagesReady which are resident in ram."
	DescRmqMessages                            = "Sum of ready and unacknowledged messages (queue depth)."
	DescRmqMessageStatsAck                     = "Count of acknowledged messages."
	DescRmqMessageStatsConfirm                 = "Count of messages confirmed."
	DescRmqMessageStatsDeliver                 = "Count of messages delivered in acknowledgement mode to consumers."
	DescRmqMessageStatsDeliverGet              = "Sum of deliver, deliverNoack, get, getNoack."
	DescRmqMessageStatsDeliverNoAck            = "Count of messages delivered in no-acknowledgement mode to consumers."
	DescRmqMessageStatsGet                     = "Count of messages delivered in acknowledgement mode in response to basic.get."
	DescRmqMessageStatsGetNoack                = "Count of messages delivered in no-acknowledgement mode in response to basic.get."
	DescRmqMessageStatsPublish                 = "Count of messages published."
	DescRmqMessageStatsPublishIn               = "Count of messages published \"in\" to an exchange, i.e. not taking account of routing."
	DescRmqMessageStatsPublishOut              = "Count of messages published \"out\" of an exchange, i.e. taking account of routing."
	DescRmqMessageStatsRedeliver               = "Count of subset of messages in deliverGet which had the redelivered flag set."
	DescRmqMessageStatsReturn                  = "Count of messages returned to publisher as unroutable."
	DescRmqMessagesUnacknowledged              = "Number of messages delivered to clients but not yet acknowledged."
	DescRmqMessagesUnacknowledgedRAM           = "Number of messages from messagesUnacknowledged which are resident in ram."
	DescRmqMnesiaDiskTxCount                   = "" /* 180-byte string literal not displayed */
	DescRmqMnesiaRAMTxCount                    = "" /* 189-byte string literal not displayed */
	DescRmqMsgStoreReadCount                   = "Number of messages which have been read from the message store."
	DescRmqMsgStoreWriteCount                  = "Number of messages which have been written to the message store."
	DescRmqObjecttotalsChannels                = "Overall number of channels."
	DescRmqObjectTotalsConnections             = "Overall number of connections."
	DescRmqObjectTotalsConsumers               = "Overall number of consumers."
	DescRmqObjectTotalsExchanges               = "Overall number of exchanges."
	DescRmqObjectTotalsQueues                  = "Overall number of queues."
	DescRmqPartitions                          = "Count of network partitions this node is seeing."
	DescRmqProcessors                          = "Number of cores detected and usable by Erlang."
	DescRmqProcTotal                           = "Maximum number of Erlang processes."
	DescRmqProcUsed                            = "Number of Erlang processes in use."
	DescRmqQueueIndexJournalWriteCount         = "" /* 179-byte string literal not displayed */
	DescRmqQueueIndexReadCount                 = "Number of records read from the queue index."
	DescRmqQueueIndexWriteCount                = "Number of records written to the queue index."
	DescRmqQueueTotalsMessages                 = "Overall sum of ready and unacknowledged messages (queue depth)."
	DescRmqQueueTotalsMessagesReady            = "Overall number of messages ready to be delivered to clients."
	DescRmqQueueTotalsMessagesUnacknowledged   = "Overall number of messages delivered to clients but not yet acknowledged."
	DescRmqRunning                             = "Boolean for whether this node is up. Obviously if this is false, most other stats will be missing."
	DescRmqRunQueue                            = "Average number of Erlang processes waiting to run."
	DescRmqSlaveNodes                          = "Count of nodes having a copy of the queue."
	DescRmqSocketsTotal                        = "File descriptors available for use as sockets."
	DescRmqSocketsUsed                         = "File descriptors used as sockets."
	DescRmqState                               = "The state of the queue. Unknown=> -1, Running=> 0, Syncing=> 1, Flow=> 2, Down=> 3"
	DescRmqSynchronisedSlaveNodes              = "Count of nodes having synchronised copy of the queue."
	DescRmqSyncMessages                        = "Count of already synchronised messages on a slave node."
	DescRmqUptime                              = "Node uptime in seconds."
)

Variables

var (
	// DefaultFreq is the duration between collection intervals if none is
	// specified.
	DefaultFreq = time.Second * 15

	AddTags opentsdb.TagSet

	AddProcessDotNetConfig = func(params conf.ProcessDotNet) error {
		return fmt.Errorf("process_dotnet watching not implemented on this platform")
	}
	WatchProcessesDotNet = func() {}

	KeepalivedCommunity = ""
)

var CPU_FIELDS = []string{
	"user",
	"nice",
	"system",
	"idle",
	"iowait",
	"irq",
	"softirq",
	"steal",
	"guest",
	"guest_nice",
}

Functions

func AWS

func AWS(accessKey, secretKey, region string) error

func Add

func Add(md *opentsdb.MultiDataPoint, name string, value interface{}, t opentsdb.TagSet, rate metadata.RateType, unit metadata.Unit, desc string)

Add appends a new data point with the given metric name, value, and tags. Tags may be nil. If tags is nil or does not contain a host key, the host tag is added automatically. If the value of the host key is the empty string, the host tag is removed (use this to prevent the normal auto-adding of the host tag).
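
For illustration, a minimal collector function built on Add might look like the following sketch; the metric names and the metadata.Gauge/metadata.Count constants are assumptions for the example, not part of this package.

func c_example() (opentsdb.MultiDataPoint, error) {
	var md opentsdb.MultiDataPoint
	// nil tags: the host tag is filled in automatically.
	Add(&md, "example.widgets", 42, nil, metadata.Gauge, metadata.Count, "Number of widgets observed.")
	// An empty host value suppresses the automatic host tag, e.g. for
	// metrics that describe a remote device rather than this host.
	Add(&md, "example.remote.widgets", 7, opentsdb.TagSet{"host": ""}, metadata.Gauge, metadata.Count, "Widgets on a remote device.")
	return md, nil
}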

func AddElasticIndexFilter

func AddElasticIndexFilter(s string) error

func AddProcessConfig

func AddProcessConfig(params conf.ProcessParams) error

func AddTS

func AddTS(md *opentsdb.MultiDataPoint, name string, ts int64, value interface{}, t opentsdb.TagSet, rate metadata.RateType, unit metadata.Unit, desc string)

AddTS is the same as Add but lets you specify the timestamp.
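
A brief sketch of recording a value observed at an explicit time; the metric name, the offset, and the metadata.Gauge/metadata.None constants are assumptions for the example.

func c_delayed() (opentsdb.MultiDataPoint, error) {
	var md opentsdb.MultiDataPoint
	// Record a reading taken one minute ago rather than "now".
	ts := time.Now().Add(-time.Minute).Unix()
	AddTS(&md, "example.delayed.reading", ts, 3.14, nil, metadata.Gauge, metadata.None, "A reading collected a minute ago.")
	return md, nil
}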

func GenericSnmp

func GenericSnmp(cfg conf.SNMP, mib conf.MIB) (opentsdb.MultiDataPoint, error)

func HTTPUnitHiera

func HTTPUnitHiera(filename string) error

func HTTPUnitPlans

func HTTPUnitPlans(name string, plans *httpunit.Plans)

func HTTPUnitTOML

func HTTPUnitTOML(filename string) error

func ICMP

func ICMP(host string) error

ICMP registers an ICMP collector for a given host.

func Init

func Init(c *conf.Conf)

func InitFake

func InitFake(fake int)

func InitPrograms

func InitPrograms(cpath string)

func IsAlNum

func IsAlNum(s string) bool

IsAlNum returns true if s is alphanumeric.

func IsDigit

func IsDigit(s string) bool

IsDigit returns true if s consists of decimal digits.

func RabbitMQ

func RabbitMQ(url string) error

RabbitMQ registers a RabbitMQ collector.
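
For example, registering the collector against a management API endpoint (the URL is a placeholder, and the log call assumes the standard library log package):

if err := RabbitMQ("http://guest:guest@localhost:15672"); err != nil {
	log.Printf("skipping rabbitmq collector: %v", err)
}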

func Riak

func Riak(s string) error

func Run

func Run(cs []Collector) (chan *opentsdb.DataPoint, chan struct{})

Run runs specified collectors. Use nil for all collectors.
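
A sketch of the overall pipeline, assuming Init has already been called with a valid *conf.Conf; the function name and collection duration are illustrative.

func collectFor(d time.Duration) {
	dpChan, quit := Run(nil) // nil runs every registered collector
	go func() {
		for dp := range dpChan {
			// Forward each *opentsdb.DataPoint to a relay, queue, etc.
			fmt.Println(dp.Metric, dp.Value, dp.Tags)
		}
	}()
	time.Sleep(d)
	close(quit) // signal collectors to stop
}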

func SNMP

func SNMP(cfg conf.SNMP, mibs map[string]conf.MIB) error

func SNMPBridge

func SNMPBridge(cfg conf.SNMP)

SNMPBridge registers an SNMP Bridge collector for the given community and host.

func SNMPCisco

func SNMPCisco(cfg conf.SNMP)

SNMPCisco registers an SNMP Cisco collector for the given community and host.

func SNMPIfaces

func SNMPIfaces(cfg conf.SNMP)

SNMPIfaces registers an SNMP Interfaces collector for the given community and host.

func TSys100NStoEpoch

func TSys100NStoEpoch(nsec uint64) int64

func Vsphere

func Vsphere(user, pwd, host string) error

Vsphere registers a vSphere collector.

func WatchProcesses

func WatchProcesses()

Types

type Collector

type Collector interface {
	Run(chan<- *opentsdb.DataPoint, <-chan struct{})
	Name() string
	Init()
}
func Search(s []string) []Collector

Search returns all collectors matching the pattern s.
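
For reference, a hand-rolled type satisfying the interface could look like the sketch below; in practice IntervalCollector (described under Types) covers the common case, and the metric, value, and host here are placeholders.

type onceCollector struct{}

func (onceCollector) Init()        {}
func (onceCollector) Name() string { return "example_once" }
func (onceCollector) Run(dp chan<- *opentsdb.DataPoint, quit <-chan struct{}) {
	select {
	case dp <- &opentsdb.DataPoint{
		Metric:    "example.heartbeat",
		Timestamp: time.Now().Unix(),
		Value:     1,
		Tags:      opentsdb.TagSet{"host": "example-host"},
	}:
	case <-quit:
	}
}

Such a value can be passed to Run directly, or alongside collectors returned by Search.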

type ElasticIndex

type ElasticIndex struct {
	Primaries ElasticIndexDetails `json:"primaries"`
	Total     ElasticIndexDetails `json:"total"`
}

type ElasticIndexDetails

type ElasticIndexDetails struct {
	Completion struct {
		SizeInBytes float64 `json:"size_in_bytes"`
	} `json:"completion"`
	Docs struct {
		Count   float64 `json:"count"`
		Deleted float64 `json:"deleted"`
	} `json:"docs"`
	Fielddata struct {
		Evictions         float64 `json:"evictions"`
		MemorySizeInBytes float64 `json:"memory_size_in_bytes"`
	} `json:"fielddata"`
	FilterCache struct {
		Evictions         float64 `json:"evictions"`
		MemorySizeInBytes float64 `json:"memory_size_in_bytes"`
	} `json:"filter_cache"`
	Flush struct {
		Total             float64 `json:"total"`
		TotalTimeInMillis float64 `json:"total_time_in_millis"`
	} `json:"flush"`
	Get struct {
		Current             float64 `json:"current"`
		ExistsTimeInMillis  float64 `json:"exists_time_in_millis"`
		ExistsTotal         float64 `json:"exists_total"`
		MissingTimeInMillis float64 `json:"missing_time_in_millis"`
		MissingTotal        float64 `json:"missing_total"`
		TimeInMillis        float64 `json:"time_in_millis"`
		Total               float64 `json:"total"`
	} `json:"get"`
	IDCache struct {
		MemorySizeInBytes float64 `json:"memory_size_in_bytes"`
	} `json:"id_cache"`
	Indexing struct {
		DeleteCurrent      float64 `json:"delete_current"`
		DeleteTimeInMillis float64 `json:"delete_time_in_millis"`
		DeleteTotal        float64 `json:"delete_total"`
		IndexCurrent       float64 `json:"index_current"`
		IndexTimeInMillis  float64 `json:"index_time_in_millis"`
		IndexTotal         float64 `json:"index_total"`
	} `json:"indexing"`
	Merges struct {
		Current            float64 `json:"current"`
		CurrentDocs        float64 `json:"current_docs"`
		CurrentSizeInBytes float64 `json:"current_size_in_bytes"`
		Total              float64 `json:"total"`
		TotalDocs          float64 `json:"total_docs"`
		TotalSizeInBytes   float64 `json:"total_size_in_bytes"`
		TotalTimeInMillis  float64 `json:"total_time_in_millis"`
	} `json:"merges"`
	Percolate struct {
		Current           float64 `json:"current"`
		MemorySize        string  `json:"memory_size"`
		MemorySizeInBytes float64 `json:"memory_size_in_bytes"`
		Queries           float64 `json:"queries"`
		TimeInMillis      float64 `json:"time_in_millis"`
		Total             float64 `json:"total"`
	} `json:"percolate"`
	Refresh struct {
		Total             float64 `json:"total"`
		TotalTimeInMillis float64 `json:"total_time_in_millis"`
	} `json:"refresh"`
	Search struct {
		FetchCurrent      float64 `json:"fetch_current"`
		FetchTimeInMillis float64 `json:"fetch_time_in_millis"`
		FetchTotal        float64 `json:"fetch_total"`
		OpenContexts      float64 `json:"open_contexts"`
		QueryCurrent      float64 `json:"query_current"`
		QueryTimeInMillis float64 `json:"query_time_in_millis"`
		QueryTotal        float64 `json:"query_total"`
	} `json:"search"`
	Segments struct {
		Count         float64 `json:"count"`
		MemoryInBytes float64 `json:"memory_in_bytes"`
	} `json:"segments"`
	Store struct {
		SizeInBytes          float64 `json:"size_in_bytes"`
		ThrottleTimeInMillis float64 `json:"throttle_time_in_millis"`
	} `json:"store"`
	Suggest struct {
		Current      float64 `json:"current"`
		TimeInMillis float64 `json:"time_in_millis"`
		Total        float64 `json:"total"`
	} `json:"suggest"`
	Translog struct {
		Operations  float64 `json:"operations"`
		SizeInBytes float64 `json:"size_in_bytes"`
	} `json:"translog"`
	Warmer struct {
		Current           float64 `json:"current"`
		Total             float64 `json:"total"`
		TotalTimeInMillis float64 `json:"total_time_in_millis"`
	} `json:"warmer"`
}

type ElasticIndexHealth

type ElasticIndexHealth struct {
	ActivePrimaryShards float64 `json:"active_primary_shards"`
	ActiveShards        float64 `json:"active_shards"`
	InitializingShards  float64 `json:"initializing_shards"`
	NumberOfReplicas    float64 `json:"number_of_replicas"`
	NumberOfShards      float64 `json:"number_of_shards"`
	RelocatingShards    float64 `json:"relocating_shards"`
	Status              string  `json:"status"`
	UnassignedShards    float64 `json:"unassigned_shards"`
}

type ElasticIndexStats

type ElasticIndexStats struct {
	All    ElasticIndex `json:"_all"`
	Shards struct {
		Failed     float64 `json:"failed"`
		Successful float64 `json:"successful"`
		Total      float64 `json:"total"`
	} `json:"_shards"`
	Indices map[string]ElasticIndex `json:"indices"`
}

type ElasticIndicesHealth

type ElasticIndicesHealth struct {
	ActivePrimaryShards float64                       `json:"active_primary_shards"`
	ActiveShards        float64                       `json:"active_shards"`
	ClusterName         string                        `json:"cluster_name"`
	Indices             map[string]ElasticIndexHealth `json:"indices"`
	InitializingShards  float64                       `json:"initializing_shards"`
	NumberOfDataNodes   float64                       `json:"number_of_data_nodes"`
	NumberOfNodes       float64                       `json:"number_of_nodes"`
	RelocatingShards    float64                       `json:"relocating_shards"`
	Status              string                        `json:"status"`
	TimedOut            bool                          `json:"timed_out"`
	UnassignedShards    float64                       `json:"unassigned_shards"`
}

type HostSystemIdentificationInfo

type HostSystemIdentificationInfo struct {
	IdentiferValue string `xml:"identifierValue"`
	IdentiferType  struct {
		Label   string `xml:"label"`
		Summary string `xml:"summary"`
		Key     string `xml:"key"`
	} `xml:"identifierType"`
}

type IntervalCollector

type IntervalCollector struct {
	F        func() (opentsdb.MultiDataPoint, error)
	Interval time.Duration // defaults to DefaultFreq if unspecified
	Enable   func() bool

	// internal use
	sync.Mutex
	// contains filtered or unexported fields
}

func (*IntervalCollector) Enabled

func (c *IntervalCollector) Enabled() bool

func (*IntervalCollector) Init

func (c *IntervalCollector) Init()

func (*IntervalCollector) Name

func (c *IntervalCollector) Name() string

func (*IntervalCollector) Run

func (c *IntervalCollector) Run(dpchan chan<- *opentsdb.DataPoint, quit <-chan struct{})
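
A minimal sketch of wiring a custom function into the interval loop; the metric name and interval are assumptions, and leaving Interval unset falls back to DefaultFreq.

func newExampleCollector() Collector {
	return &IntervalCollector{
		F: func() (opentsdb.MultiDataPoint, error) {
			var md opentsdb.MultiDataPoint
			Add(&md, "example.queue.depth", 4, nil, metadata.Gauge, metadata.Count, "An example gauge.")
			return md, nil
		},
		Interval: time.Minute,
		Enable:   func() bool { return true }, // optional gate consulted via Enabled()
	}
}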

type MetricMeta

type MetricMeta struct {
	Metric   string
	TagSet   opentsdb.TagSet
	RateType metadata.RateType
	Unit     metadata.Unit
	Desc     string
}

type MetricMetaHAProxy

type MetricMetaHAProxy struct {
	Name   string
	Ignore bool
	MetricMeta
}

MetricMetaHAProxy is a super-structure that adds a friendly Name, as well as an indicator of whether a metric is to be ignored.

type PRSummary

type PRSummary struct {
	Changes struct {
		Total float64 `yaml:"total"`
	} `yaml:"changes"`
	Events struct {
		Failure float64 `yaml:"failure"`
		Success float64 `yaml:"success"`
		Total   float64 `yaml:"total"`
	} `yaml:"events"`
	Resources struct {
		Changed         float64 `yaml:"changed"`
		Failed          float64 `yaml:"failed"`
		FailedToRestart float64 `yaml:"failed_to_restart"`
		OutOfSync       float64 `yaml:"out_of_sync"`
		Restarted       float64 `yaml:"restarted"`
		Scheduled       float64 `yaml:"scheduled"`
		Skipped         float64 `yaml:"skipped"`
		Total           float64 `yaml:"total"`
	} `yaml:"resources"`
	Time    map[string]string `yaml:"time"`
	Version struct {
		Config string `yaml:"config"`
		Puppet string `yaml:"puppet"`
	} `yaml:"version"`
}

type Process

type Process struct {
	Pid       string
	Command   string
	Arguments string
}

type ProgramCollector

type ProgramCollector struct {
	Path     string
	Interval time.Duration
}

func (*ProgramCollector) Init

func (c *ProgramCollector) Init()

func (*ProgramCollector) Name

func (c *ProgramCollector) Name() string

func (*ProgramCollector) Run

func (c *ProgramCollector) Run(dpchan chan<- *opentsdb.DataPoint, quit <-chan struct{})
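
A sketch of running an external script on a schedule; the path is hypothetical, and the script is assumed to emit data points in the format expected by the external-collector convention (see InitPrograms).

func runScript() {
	prog := &ProgramCollector{
		Path:     "/opt/scollector/collectors/60/example.sh", // hypothetical script path
		Interval: time.Minute,
	}
	dpChan, quit := Run([]Collector{prog})
	go func() {
		for dp := range dpChan {
			fmt.Println(dp.Metric, dp.Value)
		}
	}()
	time.Sleep(5 * time.Minute)
	close(quit)
}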

type TeamPort

type TeamPort struct {
	Ifinfo struct {
		DevAddr    string  `json:"dev_addr"`
		DevAddrLen float64 `json:"dev_addr_len"`
		Ifindex    float64 `json:"ifindex"`
		Ifname     string  `json:"ifname"`
	}
	Link struct {
		Duplex string  `json:"duplex"`
		Speed  float64 `json:"speed"`
		Up     bool    `json:"up"`
	} `json:"link"`
	LinkWatches struct {
		List struct {
			LinkWatch0 struct {
				DelayDown float64 `json:"delay_down"`
				DelayUp   float64 `json:"delay_up"`
				Name      string  `json:"name"`
				Up        bool    `json:"up"`
			} `json:"link_watch_0"`
		} `json:"list"`
		Up bool `json:"up"`
	} `json:"link_watches"`
	Runner struct {
		ActorLacpduInfo struct {
			Key            float64 `json:"key"`
			Port           float64 `json:"port"`
			PortPriority   float64 `json:"port_priority"`
			State          float64 `json:"state"`
			System         string  `json:"system"`
			SystemPriority float64 `json:"system_priority"`
		} `json:"actor_lacpdu_info"`
		Aggregator struct {
			ID       float64 `json:"id"`
			Selected bool    `json:"selected"`
		} `json:"aggregator"`
		Key               float64 `json:"key"`
		PartnerLacpduInfo struct {
			Key            float64 `json:"key"`
			Port           float64 `json:"port"`
			PortPriority   float64 `json:"port_priority"`
			State          float64 `json:"state"`
			System         string  `json:"system"`
			SystemPriority float64 `json:"system_priority"`
		} `json:"partner_lacpdu_info"`
		Prio     float64 `json:"prio"`
		Selected bool    `json:"selected"`
		State    string  `json:"state"`
	} `json:"runner"`
}

type TeamState

type TeamState struct {
	TeamPorts map[string]TeamPort `json:"ports"`
	Runner    struct {
		Active       bool    `json:"active"`
		FastRate     bool    `json:"fast_rate"`
		SelectPolicy string  `json:"select_policy"`
		SysPrio      float64 `json:"sys_prio"`
	} `json:"runner"`
	Setup struct {
		Daemonized         bool    `json:"daemonized"`
		DbusEnabled        bool    `json:"dbus_enabled"`
		DebugLevel         float64 `json:"debug_level"`
		KernelTeamModeName string  `json:"kernel_team_mode_name"`
		Pid                float64 `json:"pid"`
		PidFile            string  `json:"pid_file"`
		RunnerName         string  `json:"runner_name"`
		ZmqEnabled         bool    `json:"zmq_enabled"`
	} `json:"setup"`
	TeamDevice struct {
		Ifinfo struct {
			DevAddr    string  `json:"dev_addr"`
			DevAddrLen float64 `json:"dev_addr_len"`
			Ifindex    float64 `json:"ifindex"`
			Ifname     string  `json:"ifname"`
		} `json:"ifinfo"`
	} `json:"team_device"`
}

type VRRPAddressEntry

type VRRPAddressEntry struct {
	VRRPAddressIndex       int64
	VRRPAddressType        int64
	VRRPAddressValue       string `snmp:"octet"`
	VRRPAddressBroadcast   string `snmp:"octet"`
	VRRPAddressMask        int64
	VRRPAddressScope       int64
	VRRPAddressIfIndex     int64
	VRRPAddressIfName      string
	VRRPAddressIfAlias     string
	VRRPAddressStatus      int64
	VRRPAddressAdvertising int64
}

type VRRPInstanceEntry

type VRRPInstanceEntry struct {
	VInstanceIndex             int64
	VInstanceName              string
	VInstanceVirtualRouterId   int64
	VInstanceState             int64
	VInstanceInitialState      int64
	VInstanceWantedState       int64
	VInstanceBasePriority      int64
	VInstanceEffectivePriority int64
	VInstanceVipsStatus        int64
	VInstancePrimaryInterface  string
	VInstanceTrackPrimaryIf    int64
	VInstanceAdvertisementsInt int64
	VInstancePreempt           int64
	VInstancePreemptDelay      int64
	VInstanceAuthType          int64
	VInstanceLvsSyncDaemon     int64
	VInstanceLvsSyncInterface  string
	VInstanceSyncGroup         string
	VInstanceGarpDelay         int64
	VInstanceSmtpAlert         int64
	VInstanceNotifyExec        int64
	VInstanceScriptMaster      string
	VInstanceScriptBackup      string
	VInstanceScriptFault       string
	VInstanceScriptStop        string
	VInstanceScript            string
}

type WatchedProc

type WatchedProc struct {
	Command      string
	Name         string
	IncludeCount bool
	Processes    map[string]int
	ArgMatch     *regexp.Regexp
	// contains filtered or unexported fields
}

func NewWatchedProc

func NewWatchedProc(params conf.ProcessParams) (*WatchedProc, error)

NewWatchedProc builds a WatchedProc from a [Process] configuration block in conf.

func (*WatchedProc) Check

func (w *WatchedProc) Check(procs []*Process)

Check finds all matching processes and assigns them a new unique id.

func (*WatchedProc) Remove

func (w *WatchedProc) Remove(pid string)
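
Putting the pieces together, a sketch of watching a configured process; the params and process list are assumptions, and matching is presumed to use the Command and ArgMatch fields.

func watch(params conf.ProcessParams, procs []*Process) error {
	w, err := NewWatchedProc(params)
	if err != nil {
		return err
	}
	// Check assigns each newly matched process a unique id in w.Processes.
	w.Check(procs)
	for pid := range w.Processes {
		fmt.Println("watching pid", pid)
	}
	return nil
}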
