Repository: Munin (2.0)
Last change: 2020-05-27
Graph Categories: postfix
Family: auto
Capabilities: autoconf
Language: Perl
License: GPL-2.0-only
postfix_mailvolume
Name
postfix_mailvolume - Plugin to monitor the volume of mail delivered by Postfix.
Applicable Systems
Any Postfix installation.
Configuration
The following shows the default configuration.
[postfix*]
env.logdir /var/log
env.logfile syslog
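If the mail log lives somewhere else, these environment variables can be overridden in a Munin plugin configuration file (typically a file under /etc/munin/plugin-conf.d/). A minimal sketch, assuming the MTA logs to /var/log/mail.log instead of the plain syslog file:
[postfix*]
env.logdir /var/log
env.logfile mail.log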
Interpretation
The plugin shows the number of bytes of mail that has passed through the Postfix installation. The reported field ("volume", labelled "delivered volume") counts only mail that was actually delivered, i.e. log lines with status=sent.
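Because the field is of type DERIVE, Munin turns the ever-growing byte counter into a rate per ${graph_period}. For illustration only (the value below is made up), a manual run on the node prints the raw counter:
munin-run postfix_mailvolume
volume.value 1626535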
Magic Markers
#%# family=auto
#%# capabilities=autoconf
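These markers are evaluated by munin-node-configure: family "auto" makes the plugin a candidate for automatic installation, and the autoconf capability lets it report whether it would work on the current host. As a rough check (output depends on the installation):
munin-node-configure --suggest | grep postfix_mailvolume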
Bugs
None known
Version
v1.1 2018-03-24
* calculate extra field for mail volume that is actually delivered ("volume_delivered")
Author
Copyright (C) 2002-2008.
No author is documented.
License
GPLv2
#!@@PERL@@ -w
# -*- perl -*-
=head1 NAME
postfix_mailvolume - Plugin to monitor the volume of mail delivered
by Postfix.
=head1 APPLICABLE SYSTEMS
Any Postfix installation.
=head1 CONFIGURATION
The following shows the default configuration.
[postfix*]
env.logdir /var/log
env.logfile syslog
=head1 INTERPRETATION
The plugin shows the number of bytes of mail that has passed through
the Postfix installation. The reported field ("volume", labelled
"delivered volume") counts only mail that was actually delivered,
i.e. log lines with status=sent.
=head1 MAGIC MARKERS
#%# family=auto
#%# capabilities=autoconf
=head1 BUGS
None known
=head1 VERSION
v1.1 2018-03-24
* calculate extra field for mail volume that is actually delivered ("volume_delivered")
=head1 AUTHOR
Copyright (C) 2002-2008.
No author is documented.
=head1 LICENSE
GPLv2
=cut
use strict;
use warnings;
use Munin::Plugin;
my $pos = undef;
# the volume that was actually delivered
my $volume_delivered = 0;
my %volumes_per_queue_id = ();
my $serialized_volumes_queue;
my %expired_queue_ids = ();
# Discard old queue IDs after a while (otherwise the state storage grows infinitely). We need to
# store the IDs long enough for the gap between two delivery attempts. Thus multiple hours are
# recommended.
use constant queue_id_expiry => 6 * 3600;
my $LOGDIR = $ENV{'logdir'} || '/var/log';
my $LOGFILE = $ENV{'logfile'} || 'syslog';
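# Read only the log lines appended since the previous run (Munin::Plugin's tail_open/
# tail_close keep track of the file offset and detect rotation). For every qmgr line
# "<queue id>: from=..., size=<bytes>" the size is remembered per queue ID; each later
# "<queue id>: to=..., status=sent" line adds that size to the delivered volume.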
sub parseLogfile {
    my ($fname, $start) = @_;
    my ($LOGFILE, $rotated) = tail_open($fname, $start || 0);

    while (my $line = <$LOGFILE>) {
        chomp ($line);

        if ($line =~ /qmgr.*: ([0-9A-Za-z]+): from=.*, size=([0-9]+)/) {
            # The line with queue ID and size may pass along multiple times (every time the mail
            # is moved into the active queue for another delivery attempt). The size should always
            # be the same.
            if (not exists($volumes_per_queue_id{$1})) {
                $volumes_per_queue_id{$1} = {timestamp => time};
            }
            # probably it is the same value as before
            $volumes_per_queue_id{$1}->{size} = $2;
        } elsif ($line =~ / ([0-9A-Za-z]+): to=.*, status=sent /) {
            # The "sent" line is repeated for every successful delivery for each recipient.
            if (exists($volumes_per_queue_id{$1})) {
                $volume_delivered += $volumes_per_queue_id{$1}->{size};
                $volumes_per_queue_id{$1}->{timestamp} = time;
            }
        }
    }

    # remove all expired queue IDs
    my @expired_queue_ids;
    for my $key (keys %volumes_per_queue_id) {
        if (time > $volumes_per_queue_id{$key}->{timestamp} + queue_id_expiry) {
            push @expired_queue_ids, $key;
        }
    }
    delete(@volumes_per_queue_id{@expired_queue_ids});

    return tail_close($LOGFILE);
}
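# "autoconf": report whether the plugin can work here, i.e. whether the postconf
# binary is available and the configured log file exists and is readable.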
if ( $ARGV[0] and $ARGV[0] eq "autoconf" ) {
my $logfile;
`sh -c 'command -v postconf' >/dev/null`;
if (!$?) {
$logfile = "$LOGDIR/$LOGFILE";
if (-f $logfile) {
if (-r "$logfile") {
print "yes\n";
exit 0;
} else {
print "no (logfile '$logfile' not readable)\n";
}
} else {
print "no (logfile '$logfile' not found)\n";
}
} else {
print "no (postfix not found)\n";
}
exit 0;
}
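# "config": describe the graph and its single data series to Munin.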
if ( $ARGV[0] and $ARGV[0] eq "config" ) {
print "graph_title Postfix bytes throughput\n";
print "graph_args --base 1000 -l 0\n";
print "graph_vlabel bytes / \${graph_period}\n";
print "graph_scale yes\n";
print "graph_category postfix\n";
print "volume.label delivered volume\n";
print "volume.type DERIVE\n";
print "volume.min 0\n";
exit 0;
}
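# Regular fetch run: parse newly appended log lines and print the accumulated
# delivered volume as a DERIVE counter.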
my $logfile = "$LOGDIR/$LOGFILE";
if (! -f $logfile) {
    print "volume.value U\n";
    exit 0;
}
# load the stored data
($pos, $volume_delivered, $serialized_volumes_queue) = restore_state();
if (!defined($volume_delivered)) {
    # No state file present. Avoid startup spike: Do not read log
    # file up to now, but remember how large it is now, and next
    # time read from there.
    $pos = (stat $logfile)[7]; # File size
    $volume_delivered = 0;
    %volumes_per_queue_id = ();
} else {
    # decode the serialized hash
    # source format: "$id1=$size1:$timestamp1 $id2=$size2:$timestamp2 ..."
    # The "serialized" value may be undefined, in case we just upgraded from the version before
    # 2018, since that old version stored only two fields in the state file. Tolerate this.
    for my $queue_item_descriptor (split(/ /, $serialized_volumes_queue || "")) {
        (my $queue_item_id, my $queue_item_content) = split(/=/, $queue_item_descriptor);
        (my $size, my $timestamp) = split(/:/, $queue_item_content);
        $volumes_per_queue_id{$queue_item_id} = { size => int($size), timestamp => int($timestamp) };
    }
    $pos = parseLogfile ($logfile, $pos);
}
print "volume.value $volume_delivered\n";
# serialize the hash to a string (see "source format" above)
$serialized_volumes_queue = join(" ", map { sprintf("%s=%s", $_, sprintf("%d:%d", $volumes_per_queue_id{$_}->{size}, $volumes_per_queue_id{$_}->{timestamp})) } keys %volumes_per_queue_id);
save_state($pos, $volume_delivered, $serialized_volumes_queue);
# vim:syntax=perl