gstlal  0.8.1
 All Classes Namespaces Files Functions Variables Pages
lal_channelgram.py
1 # Copyright (C) 2009 Kipp Cannon
2 #
3 # This program is free software; you can redistribute it and/or modify it
4 # under the terms of the GNU General Public License as published by the
5 # Free Software Foundation; either version 2 of the License, or (at your
6 # option) any later version.
7 #
8 # This program is distributed in the hope that it will be useful, but
9 # WITHOUT ANY WARRANTY; without even the implied warranty of
10 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
11 # Public License for more details.
12 #
13 # You should have received a copy of the GNU General Public License along
14 # with this program; if not, write to the Free Software Foundation, Inc.,
15 # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
16 
17 
18 #
19 # =============================================================================
20 #
21 # Preamble
22 #
23 # =============================================================================
24 #
25 
26 
import matplotlib
# set matplotlib defaults before any figure objects are created;  these
# settings affect every plot this module renders.  text.usetex requires
# a working LaTeX installation at run time.
matplotlib.rcParams.update({
    "font.size": 8.0,
    "axes.titlesize": 10.0,
    "axes.labelsize": 10.0,
    "xtick.labelsize": 8.0,
    "ytick.labelsize": 8.0,
    "legend.fontsize": 8.0,
    "figure.dpi": 100,
    "savefig.dpi": 100,
    "text.usetex": True,
    "path.simplify": True
})
from matplotlib import figure
from matplotlib import cm as colourmap
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
import numpy


# legacy GStreamer 0.10 / PyGTK 2 binding stack
import pygtk
pygtk.require("2.0")
import gobject
import pygst
pygst.require('0.10')
import gst
from gst.extend.pygobject import gproperty


# gstlal project modules
from gstlal import pipeio
from gstlal.elements import matplotlibcaps


__author__ = "Kipp Cannon <kipp.cannon@ligo.org>"
__version__ = "FIXME"
__date__ = "FIXME"
62 
63 
64 #
65 # =============================================================================
66 #
67 # Utilities
68 #
69 # =============================================================================
70 #
71 
72 
class ArrayQueue(list):
    """
    FIFO of audio buffers presenting a contiguous, sample-count-based
    interface.  Internally a list of Element objects, each wrapping one
    buffer's sample array together with its timestamp/offset metadata.
    """

    class Element(object):
        """
        One queued buffer:  a 2-D (samples x channels) array plus the
        buffer's timestamp, duration and offset metadata.
        """

        def __init__(self, buf):
            self.timestamp = buf.timestamp
            self.duration = buf.duration
            self.offset = buf.offset
            self.offset_end = buf.offset_end
            if buf.flags & gst.BUFFER_FLAG_GAP:
                # gap buffer:  synthesize zeros of the correct shape
                shape = (self.offset_end - self.offset, buf.caps[0]["channels"])
                self.data = numpy.zeros(shape, dtype = pipeio.numpy_dtype_from_caps(buf.caps))
            else:
                self.data = pipeio.array_from_audio_buffer(buf)

        def __len__(self):
            # number of samples still held
            return self.data.shape[0]

        def flush(self, n):
            """
            Discard up to n samples from the head;  advance the
            metadata proportionally.  Returns the count discarded.
            """
            n = min(n, self.data.shape[0])
            # fraction of the remaining span being discarded
            delta_t = self.duration * float(n) / (self.offset_end - self.offset)
            self.data = self.data[n:, :]
            self.timestamp += delta_t
            self.duration -= delta_t
            self.offset += n
            return n

        def get(self, n):
            """
            Return (up to n head samples, timestamp) without
            consuming anything.
            """
            return self.data[:n, :], self.timestamp

        def extract(self, n):
            """
            Return (up to n head samples, timestamp), consuming them.
            """
            result = self.get(n)
            self.flush(n)
            return result

    def append(self, buf):
        # wrap the buffer in an Element and enqueue it
        list.append(self, ArrayQueue.Element(buf))

    def __len__(self):
        # offset span covered by the queued buffers, in samples
        if not list.__len__(self):
            return 0
        return self[-1].offset_end - self[0].offset

    def flush(self, n):
        """
        Discard up to n samples from the head of the queue, dropping
        exhausted elements.  Returns the count actually discarded.
        """
        remaining = n
        while remaining > 0 and self:
            remaining -= self[0].flush(remaining)
            if not len(self[0]):
                del self[0]
        return n - remaining

    def get(self, n):
        """
        Return (array of up to n head samples, timestamp of the first
        sample) without consuming anything.  The timestamp is None if
        the queue is empty (in which case numpy.concatenate() raises
        ValueError on the empty chunk list).
        """
        timestamp = self[0].timestamp if self else None
        chunks = []
        for element in self:
            chunk = element.get(n)[0]
            chunks.append(chunk)
            n -= chunk.shape[0]
            if not n:
                break
        return numpy.concatenate(chunks), timestamp

    def extract(self, n):
        """
        Same as get(), but the returned samples are also consumed.
        """
        data, timestamp = self.get(n)
        self.flush(data.shape[0])
        return data, timestamp
139 
140 
def yticks(min, max, n):
    """
    Return a sorted list of at most n + 1 unique integer tick
    positions spanning [min, max] in approximately equal steps.
    """
    step = float(max - min) / n
    positions = set()
    for i in range(n + 1):
        positions.add(min + int(round(step * i)))
    return sorted(positions)
144 
145 
146 #
147 # =============================================================================
148 #
149 # Element
150 #
151 # =============================================================================
152 #
153 
154 
class lal_channelgram(gst.BaseTransform):
    """
    GStreamer element converting multi-channel audio into video:  each
    output frame is a matplotlib pseudocolor plot with time on the x
    axis, channel number on the y axis, and shading given by sample
    amplitude (absolute value for complex input).
    """

    # element metadata shown by gst-inspect
    __gstdetails__ = (
        "Scrolling channel amplitude plot",
        "Plots",
        "Generates video showing a scrolling plot of channel amplitudes",
        __author__
    )

    # duration of data shown in each frame;  0 selects one frame
    # interval (1/framerate) of data
    gproperty(
        gobject.TYPE_DOUBLE,
        "plot-width",
        "Width of the plot in seconds, 0 = 1/framerate",
        0.0, # min
        gobject.G_MAXDOUBLE, # max
        0.0, # default
        readable = True, writable = True
    )

    # sink accepts complex, float, and 32/64-bit integer audio;  src
    # produces video in the format declared by matplotlibcaps
    __gsttemplates__ = (
        gst.PadTemplate("sink",
            gst.PAD_SINK,
            gst.PAD_ALWAYS,
            gst.caps_from_string(
                "audio/x-raw-complex, " +
                "rate = (int) [1, MAX], " +
                "channels = (int) [1, MAX], " +
                "endianness = (int) BYTE_ORDER, " +
                "width = (int) {64, 128};" +
                "audio/x-raw-float, " +
                "rate = (int) [1, MAX], " +
                "channels = (int) [1, MAX], " +
                "endianness = (int) BYTE_ORDER, " +
                "width = (int) {32, 64};" +
                "audio/x-raw-int, " +
                "rate = (int) [1, MAX], " +
                "channels = (int) [1, MAX], " +
                "endianness = (int) BYTE_ORDER, " +
                "width = (int) 32," +
                "depth = (int) 32," +
                "signed = (bool) {true, false}; " +
                "audio/x-raw-int, " +
                "rate = (int) [1, MAX], " +
                "channels = (int) [1, MAX], " +
                "endianness = (int) BYTE_ORDER, " +
                "width = (int) 64," +
                "depth = (int) 64," +
                "signed = (bool) {true, false}"
            )
        ),
        gst.PadTemplate("src",
            gst.PAD_SRC,
            gst.PAD_ALWAYS,
            gst.caps_from_string(
                matplotlibcaps + ", " +
                "width = (int) [1, MAX], " +
                "height = (int) [1, MAX], " +
                "framerate = (fraction) [0, MAX]"
            )
        )
    )


    def __init__(self):
        gst.BaseTransform.__init__(self)
        # negotiated stream parameters;  filled in by do_set_caps()
        self.channels = None
        self.in_rate = None
        self.out_rate = None
        self.out_width = 320 # default, pixels
        self.out_height = 200 # default, pixels
        self.set_property("plot-width", 0.0) # seconds, 0 = 1/framerate
        # stream tags;  filled in by do_event() from tag events
        self.instrument = None
        self.channel_name = None
        self.sample_units = None


    def do_set_caps(self, incaps, outcaps):
        """
        Record the negotiated input/output formats.  The sample queue
        is re-created when the channel count changes because already
        queued arrays would have the wrong shape.
        """
        self.in_rate = incaps[0]["rate"]
        channels = incaps[0]["channels"]
        if channels != self.channels:
            self.queue = ArrayQueue()
            self.channels = channels
        self.out_rate = outcaps[0]["framerate"]
        self.out_width = outcaps[0]["width"]
        self.out_height = outcaps[0]["height"]
        return True


    def do_start(self):
        """
        Reset segment bookkeeping;  the values are initialized from
        the first input buffer seen by do_transform().
        """
        self.t0 = None # timestamp of first input buffer
        self.offset0 = None # output offset corresponding to t0
        self.next_out_offset = None # offset of next video frame
        return True


    def do_get_unit_size(self, caps):
        # delegate unit-size computation to the pipeio helper
        return pipeio.get_unit_size(caps)


    def do_event(self, event):
        """
        Harvest instrument / channel-name / sample-units metadata from
        tag events.  Returning True lets all events propagate.
        """
        if event.type == gst.EVENT_TAG:
            tags = pipeio.parse_framesrc_tags(event.parse_tag())
            self.instrument = tags["instrument"]
            self.channel_name = tags["channel-name"]
            self.sample_units = tags["sample-units"]
        return True


    def make_frame(self, samples, samples_timestamp, outbuf):
        """
        Render one video frame from samples (a samples x channels
        array whose first sample is at samples_timestamp) into outbuf,
        setting the buffer's metadata.  Returns outbuf.
        """
        #
        # set metadata and advance output offset counter
        #

        outbuf.offset = self.next_out_offset
        self.next_out_offset += 1
        outbuf.offset_end = self.next_out_offset
        # timestamps are recomputed from the integer frame count and
        # the frame rate each time so rounding error does not
        # accumulate from frame to frame
        outbuf.timestamp = self.t0 + int(round(float(int(outbuf.offset - self.offset0) / self.out_rate) * gst.SECOND))
        outbuf.duration = self.t0 + int(round(float(int(outbuf.offset_end - self.offset0) / self.out_rate) * gst.SECOND)) - outbuf.timestamp

        #
        # generate pseudocolor plot
        #

        fig = figure.Figure()
        FigureCanvas(fig) # attach an Agg canvas to the figure
        fig.set_size_inches(self.out_width / float(fig.get_dpi()), self.out_height / float(fig.get_dpi()))
        axes = fig.gca(rasterized = True)
        # pcolormesh() wants bin boundaries, hence n + 1 co-ordinates
        x, y = map(lambda n: numpy.arange(n + 1, dtype = "double"), samples.shape)
        x = x / self.in_rate + float(samples_timestamp) / gst.SECOND # seconds
        y -= 0.5 # centre channel rows on integer y values
        xlim = x[0], x[-1]
        ylim = y[0], y[-1]
        x, y = numpy.meshgrid(x, y)
        if samples.dtype.kind == "c":
            #
            # complex data:  plot the magnitude
            #

            axes.pcolormesh(x, y, numpy.abs(samples.transpose()), cmap = colourmap.gray)
        else:
            #
            # real data
            #

            axes.pcolormesh(x, y, samples.transpose(), cmap = colourmap.gray)
        axes.set_xlim(xlim)
        axes.set_ylim(ylim)
        axes.set_yticks(yticks(0, samples.shape[1] - 1, 20))
        # escape "_" so usetex does not interpret it as a subscript
        axes.set_title(r"Amplitude of %s, %s" % (self.instrument or "Unknown Instrument", (self.channel_name or "Unknown Channel").replace("_", r"\_")))
        axes.set_xlabel(r"Time (s)")
        axes.set_ylabel(r"Channel Number")

        #
        # extract pixel data
        #

        fig.canvas.draw()
        rgba_buffer = fig.canvas.buffer_rgba(0, 0)
        rgba_buffer_size = len(rgba_buffer)

        #
        # copy pixel data to output buffer
        #

        outbuf[0:rgba_buffer_size] = rgba_buffer
        outbuf.datasize = rgba_buffer_size

        #
        # done
        #

        return outbuf


    def do_transform(self, inbuf, outbuf):
        """
        Queue the input buffer and emit as many video frames as the
        queued data permits.  Frames beyond the one rendered into
        outbuf are allocated and pushed directly on the source pad.
        """
        #
        # make sure we have valid metadata
        #

        if self.t0 is None:
            self.t0 = inbuf.timestamp
            self.offset0 = 0
            self.next_out_offset = 0

        #
        # append input to queue
        #

        self.queue.append(inbuf)

        #
        # number of samples required for output frame, and the
        # number of samples flushed per output frame
        #
        # FIXME: if the number flushed isn't really an integer the
        # output timestamps drift wrt the input timestamps
        #

        samples_flushed_per_frame = int(round(self.in_rate / float(self.out_rate)))
        plot_width = self.get_property("plot-width")
        if plot_width != 0.0:
            samples_per_frame = int(round(plot_width * self.in_rate))
        else:
            samples_per_frame = samples_flushed_per_frame

        #
        # build output frame(s)
        #

        if len(self.queue) < samples_per_frame:
            # not enough data for output
            # FIXME: should return
            # GST_BASE_TRANSFORM_FLOW_DROPPED, don't know what
            # that constant is, but I know it's #define'ed to
            # GST_FLOW_CUSTOM_SUCCESS. figure out what the
            # constant should be
            return gst.FLOW_CUSTOM_SUCCESS

        # while at least two frames' worth of data remain, allocate
        # extra buffers and push them;  the final frame goes in outbuf
        while len(self.queue) >= 2 * samples_per_frame:
            flow_return, newoutbuf = self.get_pad("src").alloc_buffer(self.next_out_offset, self.out_width * self.out_height * 4, outbuf.caps)
            samples, timestamp = self.queue.get(samples_per_frame)
            self.queue.flush(samples_flushed_per_frame)
            self.get_pad("src").push(self.make_frame(samples, timestamp, newoutbuf))
        samples, timestamp = self.queue.get(samples_per_frame)
        self.queue.flush(samples_flushed_per_frame)
        self.make_frame(samples, timestamp, outbuf)

        #
        # done
        #

        return gst.FLOW_OK


    def do_transform_caps(self, direction, caps):
        """
        Map caps on one pad to the caps allowed on the other;  both
        directions simply answer with the other pad's fixed caps.
        """
        if direction == gst.PAD_SRC:
            #
            # convert src pad's caps to sink pad's
            #

            return self.get_pad("sink").get_fixed_caps_func()

        elif direction == gst.PAD_SINK:
            #
            # convert sink pad's caps to src pad's
            #

            return self.get_pad("src").get_fixed_caps_func()

        raise ValueError(direction)


    def do_transform_size(self, direction, caps, size, othercaps):
        """
        Convert a buffer size on one pad into the corresponding size
        on the other pad, accounting for samples already queued.
        """
        samples_per_frame = int(round(self.in_rate / float(self.out_rate)))

        if direction == gst.PAD_SRC:
            #
            # convert byte count on src pad to sample count on
            # sink pad (minus samples we already have)
            #

            bytes_per_frame = caps[0]["width"] * caps[0]["height"] * caps[0]["bpp"] / 8
            samples = int(size / bytes_per_frame) * samples_per_frame - len(self.queue)

            #
            # convert to byte count on sink pad.
            #

            if samples <= 0:
                return 0
            return samples * (othercaps[0]["width"] // 8) * othercaps[0]["channels"]

        elif direction == gst.PAD_SINK:
            #
            # convert byte count on sink pad plus samples we
            # already have to frame count on src pad.
            #

            frames = (int(size * 8 / caps[0]["width"]) // caps[0]["channels"] + len(self.queue)) / samples_per_frame

            #
            # if there's enough for at least one frame, claim
            # output size will be 1 frame. additional buffers
            # will be created as needed
            #

            if frames < 1:
                return 0
            # FIXME: why is othercaps not the *other* caps?
            return self.out_width * self.out_height * 4
            # NOTE(review): the following return is unreachable dead
            # code -- the preceding return always fires first
            return othercaps[0]["width"] * othercaps[0]["height"] * othercaps[0]["bpp"] / 8

        raise ValueError(direction)
447 
448 
# register the element class with the GObject type system
gobject.type_register(lal_channelgram)

# entry consumed by gst-python's plugin loader
__gstelementfactory__ = (lal_channelgram.__name__, gst.RANK_NONE, lal_channelgram)