root/trunk/libffado/src/libstreaming/digidesign/DigidesignTransmitStreamProcessor.cpp

Revision 2802, 29.9 kB (checked in by jwoithe, 3 years ago)

Cosmetic: "Firewire" becomes "FireWire".

Officially both the "F" and "W" were capitalised in the FireWire name, so
reflect this throughout FFADO's source tree. This mostly affects comments.

This patch originated from pander on the ffado-devel mailing list. To
maintain consistency, the committed version has been expanded to cover
files not included in the original patch.

1 /*
2  * Copyright (C) 2005-2008, 2011 by Jonathan Woithe
3  * Copyright (C) 2005-2008 by Pieter Palmers
4  *
5  * This file is part of FFADO
6  * FFADO = Free FireWire (pro-)audio drivers for linux
7  *
8  * FFADO is based upon FreeBoB.
9  *
10  * This program is free software: you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License as published by
12  * the Free Software Foundation, either version 2 of the License, or
13  * (at your option) version 3 of the License.
14  *
15  * This program is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18  * GNU General Public License for more details.
19  *
20  * You should have received a copy of the GNU General Public License
21  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
22  *
23  */
24
25 #include "config.h"
26
27 #include "libutil/float_cast.h"
28
29 #include "DigidesignTransmitStreamProcessor.h"
30 #include "DigidesignPort.h"
31 #include "../StreamProcessorManager.h"
32 #include "devicemanager.h"
33
34 #include "libieee1394/ieee1394service.h"
35 #include "libieee1394/IsoHandlerManager.h"
36 #include "libieee1394/cycletimer.h"
37
38 #include "libutil/ByteSwap.h"
39
40 #include <cstring>
41 #include <assert.h>
42
43 /* Provide more intuitive access to GCC's branch prediction built-ins */
44 #define likely(x)   __builtin_expect((x),1)
45 #define unlikely(x) __builtin_expect((x),0)
46
47 namespace Streaming
48 {
49
50 /* transmit */
51 DigidesignTransmitStreamProcessor::DigidesignTransmitStreamProcessor(FFADODevice &parent, unsigned int event_size )
52         : StreamProcessor(parent, ePT_Transmit )
53         , m_event_size( event_size )
54 {
55     // Provide any other initialisation code needed.
56 }
57
58 unsigned int
59 DigidesignTransmitStreamProcessor::getMaxPacketSize() {
60
61     // Return the maximum packet size expected given the current device
62     // configuration, in bytes.  Often this will depend on the current
63     // sample rate which can be retrieved using something like this:
64     //
65     //   int framerate = m_Parent.getDeviceManager().getStreamProcessorManager().getNominalRate();
66
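    // Using that rate, the calculation might eventually look like the
    // following purely illustrative sketch (the frames-per-packet figures
    // and the 8-byte CIP allowance are assumptions, not documented
    // Digidesign values):
    //
    //   unsigned int n_frames = (framerate > 96000) ? 32 : ((framerate > 48000) ? 16 : 8);
    //   return n_frames * m_event_size + 8;
    //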
67     return 0;
68 }
69
70 unsigned int
71 DigidesignTransmitStreamProcessor::getNominalFramesPerPacket() {
72
73     // Return the number of frames per packet.  Often this will depend on
74     // the device's current sample rate which can be obtained as per the
75     // comment in getMaxPacketSize().
76    
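    // For example, mirroring the illustrative rate tiers assumed in the
    // getMaxPacketSize() sketch (not documented Digidesign values):
    //
    //   int framerate = m_Parent.getDeviceManager().getStreamProcessorManager().getNominalRate();
    //   return (framerate > 96000) ? 32 : ((framerate > 48000) ? 16 : 8);
    //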
77     return 0;
78 }
79
80 enum StreamProcessor::eChildReturnValue
81 DigidesignTransmitStreamProcessor::generatePacketHeader (
82     unsigned char *data, unsigned int *length,
83     unsigned char *tag, unsigned char *sy,
84     uint32_t pkt_ctr )
85 {
86
87     // Construct the header portion of a packet to send to the Digidesign
88     // hardware.  This normally requires the "length", "tag" and "sy" fields
89     // to be set as needed.  While "length" should be set here, "data"
90     // probably ought to be left alone (there's another method for dealing
91     // with the packet data).  "pkt_ctr" gives the iso cycle timer
92     // corresponding to the time that the packet will be transmitted.
93     //
94     // It is this method which determines whether a packet should be sent in
95     // the iso cycle indicated by "pkt_ctr".  The code which follows is
96     // mostly boiler-plate and lifted straight from the MOTU driver (which
97     // in turn came from the AMDTP code from memory).  The basic theory as
98     // to when a packet should be sent is very similar for all devices
99     // because the timing requirements are effectively abstracted by the
100     // concept of the timestamp.
101     //
102     // The vast majority of the logic in this method came from Pieter
103     // Palmers, who wrote the foundational streaming infrastructure.
104
105     unsigned int cycle = CYCLE_TIMER_GET_CYCLES(pkt_ctr);
106
107     signed n_events = getNominalFramesPerPacket();
108
109     // Do housekeeping expected for all packets.  Note that if it is later
110     // identified that an empty packet should be sent then "length" will be
111     // overridden in generateEmptyPacketHeader().
112     //
113     // As per the FireWire standards, only set "tag" if the Digidesign
114     // expects a CIP header in the first two quadlets of "data".  Similarly,
115     // remove the "+8" from the length calculation if no CIP header is to be
116     // included.
117     *sy = 0x00;
118     *tag = 1;      // Set to 0 if the Digidesign devices don't use CIP headers
119     *length = n_events*m_event_size + 8;
120
121     signed int fc;
122     uint64_t presentation_time;
123     unsigned int presentation_cycle;
124     int cycles_until_presentation;
125
126     uint64_t transmit_at_time;
127     unsigned int transmit_at_cycle;
128     int cycles_until_transmit;
129
130     debugOutput ( DEBUG_LEVEL_ULTRA_VERBOSE, "Try for cycle %d\n", cycle );
131     // check whether the packet buffer has packets for us to send.
132     // the base timestamp is the one of the next sample in the buffer
133     ffado_timestamp_t ts_head_tmp;
134     m_data_buffer->getBufferHeadTimestamp ( &ts_head_tmp, &fc ); // thread safe
135
136     // the timestamp gives us the time at which we want the sample block
137     // to be output by the device
138     presentation_time = ( uint64_t ) ts_head_tmp;
139
140     // now we calculate the time when we have to transmit the sample block
141     transmit_at_time = substractTicks ( presentation_time, DIGIDESIGN_TRANSMIT_TRANSFER_DELAY );
142
143     // calculate the cycle this block should be presented in
144     // (this is just a virtual calculation since at that time it should
145     //  already be in the device's buffer)
146     presentation_cycle = ( unsigned int ) ( TICKS_TO_CYCLES ( presentation_time ) );
147
148     // calculate the cycle this block should be transmitted in
149     transmit_at_cycle = ( unsigned int ) ( TICKS_TO_CYCLES ( transmit_at_time ) );
150
151     // we can check whether this cycle is within the 'window' we have
152     // to send this packet.
153     // first calculate the number of cycles left before presentation time
154     cycles_until_presentation = diffCycles ( presentation_cycle, cycle );
155
156     // similarly, check where this cycle sits relative to the time at
157     // which the packet should be transmitted.
158     // calculate the number of cycles left before the transmit time
159     cycles_until_transmit = diffCycles ( transmit_at_cycle, cycle );
160
161     // two different options:
162     // 1) there are not enough frames for one packet
163     //      => determine whether this is a problem, since we might still
164     //         have some time to send it
165     // 2) there are enough frames
166     //      => determine whether we have to send them in this packet
167     if ( fc < ( signed int ) getNominalFramesPerPacket() )
168     {
169         // not enough frames in the buffer,
170
171         // we can still postpone the queueing of the packets
172         // if we are far enough ahead of the presentation time
173         if ( cycles_until_presentation <= DIGIDESIGN_MIN_CYCLES_BEFORE_PRESENTATION )
174         {
175             debugOutput ( DEBUG_LEVEL_VERBOSE,
176                         "Insufficient frames (P): N=%02d, CY=%04u, TC=%04u, CUT=%04d\n",
177                         fc, cycle, transmit_at_cycle, cycles_until_transmit );
178             // we are too late
179             return eCRV_XRun;
180         }
181         else
182         {
183             debugOutput ( DEBUG_LEVEL_VERY_VERBOSE,
184                         "Insufficient frames (NP): N=%02d, CY=%04u, TC=%04u, CUT=%04d\n",
185                         fc, cycle, transmit_at_cycle, cycles_until_transmit );
186             // there is still time left to send the packet
187             // we want the system to give this packet another go at a later time instant
188             return eCRV_Again;
189         }
190     }
191     else
192     {
193         // there are enough frames, so check the time they are intended for
194         // all frames have a certain 'time window' in which they can be sent
195         // this corresponds to the range of the timestamp mechanism:
196         // we can send a packet 15 cycles in advance of the 'presentation time'
197         // in theory we can send the packet up until one cycle before the presentation time,
198         // however that leaves no margin for error.
199
200         // There are 3 options:
201         // 1) the frame block is too early
202         //      => send an empty packet
203         // 2) the frame block is within the window
204         //      => send it
205         // 3) the frame block is too late
206         //      => discard (and raise xrun?)
207         //         get next block of frames and repeat
208
209         if(cycles_until_transmit < 0)
210         {
211             // we are too late
212             debugOutput(DEBUG_LEVEL_VERBOSE,
213                         "Too late: CY=%04u, TC=%04u, CUT=%04d, TSP=%011" PRIu64 " (%04u)\n",
214                         cycle,
215                         transmit_at_cycle, cycles_until_transmit,
216                         presentation_time, (unsigned int)TICKS_TO_CYCLES(presentation_time) );
217
218             // however, if we can send this sufficiently before the presentation
219             // time, it could be harmless.
220             // NOTE: dangerous since the device has no way of reporting that it didn't get
221             //       this packet on time.
222             if(cycles_until_presentation >= DIGIDESIGN_MIN_CYCLES_BEFORE_PRESENTATION)
223             {
224                 // we are not that late and can still try to transmit the packet
225                 fillDataPacketHeader((quadlet_t *)data, length, presentation_time);
226                 m_last_timestamp = presentation_time;
227                 return eCRV_Packet;
228             }
229             else   // definitely too late
230             {
231                 return eCRV_XRun;
232             }
233         }
234         else if(cycles_until_transmit <= DIGIDESIGN_MAX_CYCLES_TO_TRANSMIT_EARLY)
235         {
236             // it's time to send the packet
237             fillDataPacketHeader((quadlet_t *)data, length, presentation_time);
238             m_last_timestamp = presentation_time;
239             return eCRV_Packet;
240         }
241         else
242         {
243             debugOutput ( DEBUG_LEVEL_VERY_VERBOSE,
244                         "Too early: CY=%04u, TC=%04u, CUT=%04d, TST=%011" PRIu64 " (%04u), TSP=%011" PRIu64 " (%04u)\n",
245                         cycle,
246                         transmit_at_cycle, cycles_until_transmit,
247                         transmit_at_time, ( unsigned int ) TICKS_TO_CYCLES ( transmit_at_time ),
248                         presentation_time, ( unsigned int ) TICKS_TO_CYCLES ( presentation_time ) );
249 #ifdef DEBUG
250             if ( cycles_until_transmit > DIGIDESIGN_MAX_CYCLES_TO_TRANSMIT_EARLY + 1 )
251             {
252                 debugOutput ( DEBUG_LEVEL_VERY_VERBOSE,
253                             "Way too early: CY=%04u, TC=%04u, CUT=%04d, TST=%011" PRIu64 " (%04u), TSP=%011" PRIu64 " (%04u)\n",
254                             cycle,
255                             transmit_at_cycle, cycles_until_transmit,
256                             transmit_at_time, ( unsigned int ) TICKS_TO_CYCLES ( transmit_at_time ),
257                             presentation_time, ( unsigned int ) TICKS_TO_CYCLES ( presentation_time ) );
258             }
259 #endif
260             // we are too early, send only an empty packet
261             return eCRV_EmptyPacket;
262         }
263     }
264     return eCRV_Invalid;
265 }
266
267 enum StreamProcessor::eChildReturnValue
268 DigidesignTransmitStreamProcessor::generatePacketData (
269     unsigned char *data, unsigned int *length)
270 {
271
272     // This method should fill the "length" bytes of "data" with streaming
273     // data from the devices ports.  Similar to the receive side, this
274     // method calls the object's data buffer readFrames() method which takes
275     // care of calling the encoding functions to facilitate this data copy.
276
277     // Treat the packet data as being in quadlets.
278     quadlet_t *quadlet = (quadlet_t *)data;
279
280     // If there's a CIP header included, skip past it.  Otherwise don't
281     // do this step.
282     quadlet += 2;
283
284     signed n_events = getNominalFramesPerPacket();
285     // unsigned dbs = m_event_size / 4;
286
287     // Encode data into packet.  If a CIP header is to be placed at the
288     // start of "data", the pointer passed to readFrames() should be
289     // (data+8) so audio data isn't copied to that location.
290     if (m_data_buffer->readFrames(n_events, (char *)(data + 8))) {
291
292         // If audio data was successfully copied, deal with timestamps
293         // embedded in the ISO stream if relevant.  How this is done depends
294         // on what the device expects.  Some devices like the MOTUs
295         // timestamp each frame, while others have a single timestamp
296         // somewhere in the packet which applies to a particular
297         // representative frame within the packet.
298         //
299         // The timestamps are usually in terms of iso cycle timer ticks, and
300         // it's therefore often useful to know how many ticks correspond to
301         // the interval between two frames (as deduced by the rate at which
302         // data is arriving from the device).  This can be accessed from
303         // here with something like:
304         //
305         //   float ticks_per_frame = m_Parent.getDeviceManager().getStreamProcessorManager().getSyncSource().getTicksPerFrame();
306         //
307         // "quadlet" starts out pointing to the start of the first frame,
308         // and it can be advanced to the next frame by adding dbs to it.
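        //
        // As a purely illustrative sketch, borrowing the MOTU convention of
        // an SPH-style timestamp in the first quadlet of each frame (which
        // the Digidesign hardware may well not use):
        //
        //   float ticks_per_frame = m_Parent.getDeviceManager().getStreamProcessorManager().getSyncSource().getTicksPerFrame();
        //   unsigned dbs = m_event_size / 4;
        //   for (signed int i = 0; i < n_events; i++, quadlet += dbs) {
        //       uint64_t ts = addTicks(m_last_timestamp, (unsigned int)lrintf(i * ticks_per_frame));
        //       // low 25 bits: 13-bit cycle count plus 12-bit offset (3072 ticks per cycle)
        //       *quadlet = CondSwapToBus32((TICKS_TO_CYCLES(ts) << 12) | (uint32_t)(ts % 3072));
        //   }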
309
310         return eCRV_OK;
311     }
312     else return eCRV_XRun;
313
314 }
315
316 enum StreamProcessor::eChildReturnValue
317 DigidesignTransmitStreamProcessor::generateEmptyPacketHeader (
318     unsigned char *data, unsigned int *length,
319     unsigned char *tag, unsigned char *sy,
320     uint32_t pkt_ctr )
321 {
322     debugOutput ( DEBUG_LEVEL_VERY_VERBOSE, "XMIT EMPTY: CY=%04d, TSP=%011" PRIu64 " (%04u)\n",
323                 (int)CYCLE_TIMER_GET_CYCLES(pkt_ctr), m_last_timestamp,
324                 ( unsigned int ) TICKS_TO_CYCLES ( m_last_timestamp ) );
325
326     // An "empty" packet is one which contains no audio data.  It is used
327     // when it is determined that no audio data needs to be sent to the
328     // device in a given iso cycle.  Whether one sends empty packets or just
329     // skips the cycle entirely depends on the device's protocol.  Some
330     // expect empty packets while others are happy if nothing is sent.
331    
332
333     // The following sets up an "empty" packet assuming that a CIP header is
334     // included in such a packet.  If this isn't the case, "tag" should be 0
335     // and the "length" will be 0.
336     *sy = 0x00;
337     *tag = 1;
338     *length = 8;
339
340     fillNoDataPacketHeader ( (quadlet_t *)data, length );
341     return eCRV_OK;
342 }
343
344 enum StreamProcessor::eChildReturnValue
345 DigidesignTransmitStreamProcessor::generateEmptyPacketData (
346     unsigned char *data, unsigned int *length)
347 {
348     // By definition an empty packet doesn't contain any audio data, so
349     // normally this method doesn't have to do anything.
350     return eCRV_OK;
351 }
352
353 enum StreamProcessor::eChildReturnValue
354 DigidesignTransmitStreamProcessor::generateSilentPacketHeader (
355     unsigned char *data, unsigned int *length,
356     unsigned char *tag, unsigned char *sy,
357     uint32_t pkt_ctr )
358 {
359     // This method generates a silent packet - that is, one which contains
360     // nothing but zeros in the audio data fields.  All other aspects of the
361     // packet are the same as a regular data packet so much of the logic
362     // from generatePacketHeader() is needed here too.  The main difference
363     // between the two methods is the source of audio data - here we just
364     // need zeros.
365     //
366     // Note that not all devices require "silent" packets.  If the
367     // Digidesign interfaces don't, this function may ultimately be removed.
368
369     unsigned int cycle = CYCLE_TIMER_GET_CYCLES(pkt_ctr);
370
371     debugOutput( DEBUG_LEVEL_VERY_VERBOSE, "XMIT SILENT: CY=%04u, TSP=%011" PRIu64 " (%04u)\n",
372                  cycle, m_last_timestamp,
373                  ( unsigned int ) TICKS_TO_CYCLES ( m_last_timestamp ) );
374
375     signed n_events = getNominalFramesPerPacket();
376
377     // Again, here's the housekeeping.  If there's no CIP header needed, set "tag"
378     // to 0 and remove the "+8" from the setting of "length" below.
379     *sy = 0x00;
380     *tag = 1;
381
382     /* Assume the packet will have audio data.  If it turns out an empty packet
383      * is needed instead, the length will be overridden in generateEmptyPacketHeader().
384      */
385     *length = n_events*m_event_size + 8;
386
387     uint64_t presentation_time;
388     unsigned int presentation_cycle;
389     int cycles_until_presentation;
390            
391     uint64_t transmit_at_time;
392     unsigned int transmit_at_cycle;
393     int cycles_until_transmit;
394
395     /* The sample buffer is not necessarily running when silent packets are
396      * needed, so use m_last_timestamp (the timestamp of the previously sent
397      * data packet) as the basis for the presentation time of the next
398      * packet.  Since we're only writing zeros we don't have to deal with
399      * buffer xruns.
400      */
401     float ticks_per_frame = m_Parent.getDeviceManager().getStreamProcessorManager().getSyncSource().getTicksPerFrame();
402     presentation_time = addTicks(m_last_timestamp, (unsigned int)lrintf(n_events * ticks_per_frame));
403
404     transmit_at_time = substractTicks(presentation_time, DIGIDESIGN_TRANSMIT_TRANSFER_DELAY);
405     presentation_cycle = (unsigned int)(TICKS_TO_CYCLES(presentation_time));
406     transmit_at_cycle = (unsigned int)(TICKS_TO_CYCLES(transmit_at_time));
407     cycles_until_presentation = diffCycles(presentation_cycle, cycle);
408     cycles_until_transmit = diffCycles(transmit_at_cycle, cycle);
409
410     if (cycles_until_transmit < 0)
411     {
412         if (cycles_until_presentation >= DIGIDESIGN_MIN_CYCLES_BEFORE_PRESENTATION)
413         {
414             m_last_timestamp = presentation_time;
415             fillDataPacketHeader((quadlet_t *)data, length, m_last_timestamp);
416             return eCRV_Packet;
417         }
418         else
419         {
420             return eCRV_XRun;
421         }
422     }
423     else if (cycles_until_transmit <= DIGIDESIGN_MAX_CYCLES_TO_TRANSMIT_EARLY)
424     {
425         m_last_timestamp = presentation_time;
426         fillDataPacketHeader((quadlet_t *)data, length, m_last_timestamp);
427         return eCRV_Packet;
428     }
429     else
430     {
431         return eCRV_EmptyPacket;
432     }
433     return eCRV_Invalid;
434 }
435
436 enum StreamProcessor::eChildReturnValue
437 DigidesignTransmitStreamProcessor::generateSilentPacketData (
438     unsigned char *data, unsigned int *length )
439 {
440     // Simply set all audio data to zero since that's what's meant by
441     // a "silent" packet.  Note that for the example code given below
442     // m_event_size is in bytes.
443
444     quadlet_t *quadlet = (quadlet_t *)data;
445     quadlet += 2; // skip the header - remove if no CIP header is used
446
447     // Size of a single data frame in quadlets
448     // unsigned dbs = m_event_size / 4;
449
450     signed n_events = getNominalFramesPerPacket();
451
452     memset(quadlet, 0, n_events*m_event_size);
453
454     // If there are per-frame timestamps to set up (or other things), it's
455     // done here.  "quadlet" starts out pointing to the start of the first
456     // frame, and it can be advanced to the next frame by adding dbs to it.
457     //
458     // Obtaining the "ticks per frame" is sometimes useful when constructing
459     // timestamps:
460     //   float ticks_per_frame = m_Parent.getDeviceManager().getStreamProcessorManager().getSyncSource().getTicksPerFrame();
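    //
    // If per-frame timestamps are needed here, the same illustrative
    // MOTU-style sketch shown in generatePacketData() could be reused, e.g.
    // (again assuming an SPH-style layout which the Digidesign hardware may
    // not actually use):
    //
    //   unsigned dbs = m_event_size / 4;
    //   for (signed int i = 0; i < n_events; i++, quadlet += dbs) {
    //       uint64_t ts = addTicks(m_last_timestamp, (unsigned int)lrintf(i * ticks_per_frame));
    //       *quadlet = CondSwapToBus32((TICKS_TO_CYCLES(ts) << 12) | (uint32_t)(ts % 3072));
    //   }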
461
462     return eCRV_OK;
463 }
464
465 unsigned int DigidesignTransmitStreamProcessor::fillDataPacketHeader (
466     quadlet_t *data, unsigned int* length,
467     uint32_t ts )
468 {
469     // If there's a CIP header (or a similar per-packet header distinct from
470     // the standard iso header) this method is used to construct it.  The return
471     // value is the number of events (aka frames) to be included in the packet.
472
473     quadlet_t *quadlet = (quadlet_t *)data;
474     // Size of a single data frame in quadlets.
475     unsigned dbs = m_event_size / 4;
476
477     signed n_events = getNominalFramesPerPacket();
478
479     // Depending on the device this might have to be set to something sensible.
480     unsigned int tx_dbc = 0;
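    // If the device expects an AMDTP-style running data block count, a
    // member counter would be needed.  For example, with a hypothetical
    // m_tx_dbc member (initialised to 0 in the constructor; not part of
    // this class yet):
    //
    //   tx_dbc = m_tx_dbc & 0xff;
    //   m_tx_dbc += n_events;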
481
482     // The following shows how a CIP header can be constructed.  This is
483     // taken directly from the MOTU driver and therefore contains some
484     // hard-coded fields as they are used by the MOTU devices.  Most
485     // importantly, the MOTUs don't always follow the IEEE 1394 standard when
486     // it comes to fields in the CIP header, so this code is at best a guide
487     // as to how things might be done.
488     //
489     // The entire thing can be omitted if CIP headers aren't used by the
490     // Digidesign devices.
491     *quadlet = CondSwapToBus32(0x00000400 | ((m_Parent.get1394Service().getLocalNodeId()&0x3f)<<24) | tx_dbc | (dbs<<16));
492     quadlet++;
493     *quadlet = CondSwapToBus32(0x8222ffff);
494     quadlet++;
495
496     return n_events;
497 }
498
499 unsigned int DigidesignTransmitStreamProcessor::fillNoDataPacketHeader (
500     quadlet_t *data, unsigned int* length )
501 {
502     // This constructs any per-packet headers required in packets containing
503     // no audio data.  As for fillDataPacketHeader(), this is an example
504     // lifted from the MOTU code to show how it might be done.
505     // fillNoDataPacketHeader() should return the number of frames to be
506     // transmitted in the packet, which is 0 by definition.
507
508     quadlet_t *quadlet = (quadlet_t *)data;
509
510     // construct the packet CIP-like header.  Even if this is a data-less
511     // packet the dbs field is still set as if there were data blocks
512     // present.  For data-less packets the tx_dbc is the same as the previously
513     // transmitted block.
514
515     unsigned dbs = m_event_size / 4;
516
517     // Depending on the device this might have to be set to something sensible.
518     unsigned int tx_dbc = m_event_size / 4;
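    // If the hypothetical m_tx_dbc counter sketched in fillDataPacketHeader()
    // were adopted, a no-data packet would simply reuse it without
    // incrementing, since no data blocks are transmitted:
    //
    //   tx_dbc = m_tx_dbc & 0xff;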
519
520     *quadlet = CondSwapToBus32(0x00000400 | ((m_Parent.get1394Service().getLocalNodeId()&0x3f)<<24) | tx_dbc | (dbs<<16));
521     quadlet++;
522     *quadlet = CondSwapToBus32(0x8222ffff);
523     quadlet++;
524     *length = 8;
525     return 0;
526 }
527
528 bool DigidesignTransmitStreamProcessor::prepareChild()
529 {
530     debugOutput ( DEBUG_LEVEL_VERBOSE, "Preparing (%p)...\n", this );
531
532     // Additional setup can be done here if necessary.  Normally this
533     // method doesn't do anything but it's provided in case it proves useful
534     // for some device.
535
536     return true;
537 }
538
539 /*
540 * Compose the event streams for the packets from the port buffers
541 */
542 bool DigidesignTransmitStreamProcessor::processWriteBlock(char *data,
543                        unsigned int nevents, unsigned int offset) {
544     bool no_problem=true;
545
546     // This function is the transmit equivalent of
547     // DigidesignReceiveStreamProcessor::processReadBlock().  It iterates
548     // over the ports registered with the device and calls the applicable
549     // encoding methods to transfer data from the port buffers into the
550     // packet data which is pointed to by "data".  "nevents" is the number
551     // of events (aka frames) to transfer and "offset" is the position
552     // within the port ring buffers to take data from.
553
554     for ( PortVectorIterator it = m_Ports.begin();
555       it != m_Ports.end();
556       ++it ) {
557         // If this port is disabled, unconditionally send it silence.
558         if((*it)->isDisabled()) {
559           if (encodeSilencePortToDigidesignEvents(static_cast<DigidesignAudioPort *>(*it), (quadlet_t *)data, offset, nevents)) {
560             debugWarning("Could not encode silence for disabled port %s to Digidesign events\n",(*it)->getName().c_str());
561             // Don't treat this as a fatal error at this point
562           }
563           continue;
564         }
565
566         Port *port=(*it);
567
568         switch(port->getPortType()) {
569
570         case Port::E_Audio:
571             if (encodePortToDigidesignEvents(static_cast<DigidesignAudioPort *>(*it), (quadlet_t *)data, offset, nevents)) {
572                 debugWarning("Could not encode port %s to Digidesign events\n",(*it)->getName().c_str());
573                 no_problem=false;
574             }
575             break;
576         case Port::E_Midi:
577              if (encodePortToDigidesignMidiEvents(static_cast<DigidesignMidiPort *>(*it), (quadlet_t *)data, offset, nevents)) {
578                  debugWarning("Could not encode port %s to Midi events\n",(*it)->getName().c_str());
579                  no_problem=false;
580              }
581             break;
582         default: // ignore
583             break;
584         }
585     }
586     return no_problem;
587 }
588
589 bool
590 DigidesignTransmitStreamProcessor::transmitSilenceBlock(char *data,
591                        unsigned int nevents, unsigned int offset) {
592     // This is the same as the non-silence version, except that it
593     // doesn't read from the port buffers.
594     bool no_problem = true;
595     for ( PortVectorIterator it = m_Ports.begin();
596       it != m_Ports.end();
597       ++it ) {
598         Port *port=(*it);
599
600         switch(port->getPortType()) {
601
602         case Port::E_Audio:
603             if (encodeSilencePortToDigidesignEvents(static_cast<DigidesignAudioPort *>(*it), (quadlet_t *)data, offset, nevents)) {
604                 debugWarning("Could not encode port %s to Digidesign events\n",(*it)->getName().c_str());
605                 no_problem = false;
606             }
607             break;
608         case Port::E_Midi:
609             if (encodeSilencePortToDigidesignMidiEvents(static_cast<DigidesignMidiPort *>(*it), (quadlet_t *)data, offset, nevents)) {
610                 debugWarning("Could not encode port %s to Midi events\n",(*it)->getName().c_str());
611                 no_problem = false;
612             }
613             break;
614         default: // ignore
615             break;
616         }
617     }
618     return no_problem;
619 }
620
621 int DigidesignTransmitStreamProcessor::encodePortToDigidesignEvents(DigidesignAudioPort *p, quadlet_t *data,
622                        unsigned int offset, unsigned int nevents) {
623 // Encodes nevents worth of data from the given port into the given buffer.  The
624 // format of the buffer is precisely that which will be sent to the Digidesign interface.
625 // The basic idea:
626 //   iterate over the ports
627 //     * get port buffer address
628 //     * loop over events
629 //         - pick right sample in event based upon PortInfo
630 //         - convert sample from Port format (E_Int24, E_Float, ..) to Digidesign
631 //           native format
632 //
633 // We include the ability to start the transfer from the given offset within
634 // the port (expressed in frames) so the 'efficient' transfer method can be
635 // utilised.
636
637 // This code assumes that the Digidesign expects its data to be packed
638 // 24-bit integers.  If this is not the case changes will be required.
639
640     unsigned int j=0;
641
642     // Use char here since the target address won't necessarily be
643     // aligned; use of an unaligned quadlet_t may cause issues on certain
644     // architectures.  Besides, the target (data going to the Digidesign device)
645     // isn't structured in quadlets anyway; it mainly consists of packed
646     // 24-bit integers.
647     unsigned char *target;
648     target = (unsigned char *)data + p->getPosition();
649
650     switch(m_StreamProcessorManager.getAudioDataType()) {
651         default:
652         case StreamProcessorManager::eADT_Int24:
653             {
654                 quadlet_t *buffer=(quadlet_t *)(p->getBufferAddress());
655
656                 assert(nevents + offset <= p->getBufferSize());
657
658                 // Offset is in frames, but each port is only a single
659                 // channel, so the number of frames is the same as the
660                 // number of quadlets to offset (assuming the port buffer
661                 // uses one quadlet per sample, which is the case currently).
662                 buffer+=offset;
663
664                 for(j = 0; j < nevents; j += 1) { // Encode nevents samples
665                     *target = (*buffer >> 16) & 0xff;
666                     *(target+1) = (*buffer >> 8) & 0xff;
667                     *(target+2) = (*buffer) & 0xff;
668
669                     buffer++;
670                     target+=m_event_size;
671                 }
672             }
673             break;
674         case StreamProcessorManager::eADT_Float:
675             {
676                 const float multiplier = (float)(0x7FFFFF);
677                 float *buffer=(float *)(p->getBufferAddress());
678
679                 assert(nevents + offset <= p->getBufferSize());
680
681                 buffer+=offset;
682
683                 for(j = 0; j < nevents; j += 1) { // encode nevents samples
684                     float in = *buffer;
685
686 #if DIGIDESIGN_CLIP_FLOATS
687                     if (unlikely(in > 1.0)) in = 1.0;
688                     if (unlikely(in < -1.0)) in = -1.0;
689 #endif
690                     unsigned int v = lrintf(in * multiplier);
691                     *target = (v >> 16) & 0xff;
692                     *(target+1) = (v >> 8) & 0xff;
693                     *(target+2) = v & 0xff;
694
695                     buffer++;
696                     target+=m_event_size;
697                 }
698             }
699             break;
700     }
701
702     return 0;
703 }
704
705 int DigidesignTransmitStreamProcessor::encodeSilencePortToDigidesignEvents(DigidesignAudioPort *p, quadlet_t *data,
706                        unsigned int offset, unsigned int nevents) {
707
708     // Encodes silence to the digidesign channel corresponding to the given
709     // audio port.  As for encodePortToDigidesignEvents() above, this
710     // assumes that each audio data sample is a packed signed 24-bit
711     // integer.  Changes will be necessary if Digidesign uses a different
712     // format in the packets.
713
714     unsigned int j=0;
715     unsigned char *target = (unsigned char *)data + p->getPosition();
716
717     switch (m_StreamProcessorManager.getAudioDataType()) {
718         default:
719         case StreamProcessorManager::eADT_Int24:
720         case StreamProcessorManager::eADT_Float:
721         for (j = 0; j < nevents; j++) {
722             *target = *(target+1) = *(target+2) = 0;
723             target += m_event_size;
724         }
725         break;
726     }
727
728     return 0;
729 }
730
731 int DigidesignTransmitStreamProcessor::encodePortToDigidesignMidiEvents(
732                        DigidesignMidiPort *p, quadlet_t *data,
733                        unsigned int offset, unsigned int nevents) {
734
735     // Encode MIDI data into the packet to be sent to the Digidesign
736     // hardware.  Depending on the way MIDI data is formatted in the packet,
737     // this function may take a similar form to
738     // encodePortToDigidesignEvents(), or it may be completely different.
739     // For example, the MOTU driver structures it quite differently due to
740     // the way MIDI is carried in the packet.
741     //
742     // The return value is zero.
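    //
    // As a purely illustrative sketch, assuming (without any protocol
    // knowledge) one MIDI byte slot per frame at the port's byte position,
    // flagged by a marker byte, and assuming the MIDI port exposes
    // canRead()/readEvent() style accessors for pulling single bytes from
    // its buffer:
    //
    //   unsigned char *target = (unsigned char *)data + p->getPosition();
    //   quadlet_t sample;
    //   for (unsigned int j = 0; j < nevents; j++, target += m_event_size) {
    //       if (p->canRead() && p->readEvent(&sample)) {
    //           *target = 0x01;                  // "MIDI byte present" marker
    //           *(target+1) = sample & 0xff;     // the MIDI data byte itself
    //       } else {
    //           *target = 0x00;                  // no MIDI byte this frame
    //       }
    //   }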
743
744     return 0;
745 }
746
747 int DigidesignTransmitStreamProcessor::encodeSilencePortToDigidesignMidiEvents(
748                        DigidesignMidiPort *p, quadlet_t *data,
749                        unsigned int offset, unsigned int nevents) {
750
751     // Write zeros to a MIDI port region of the transmit packet.
752
753     unsigned int j;
754     unsigned char *target = (unsigned char *)data + p->getPosition();
755
756     // For now, a "silent" MIDI event contains nothing but zeros.  This
757     // may have to change if this turns out to be inappropriate for some reason.
758     for (j=0; j<nevents; j++, target+=m_event_size) {
759        memset(target, 0, 3);
760     }
761
762     return 0;
763 }
764
765 } // end of namespace Streaming