Karl Reichert wrote:
> Jan Kiszka wrote:
> > What I would analyse if I were you:
> > - Is the request frame sent in the right slot according to the sender?
>
> Well, now I have another weird behavior. The slave sends a request
> calibration frame in cycle no. 45849 and sets the Reply Cycle Number to
> 97655. As I'm using a cycle length of 5 ms, this means the slave wants
> the answer more than 4 minutes in the future (97655 - 45849 = 51806
> cycles * 5 ms, i.e. about 259 s), which is for sure too far away!
>
> This behavior and the one observed before point to a problem when
> calculating or setting this reply cycle number. How can one configure
> how large this value is, i.e. how big the offset between request and
> reply should be? If this is done automatically, in which function is it
> done?
I dug deeper into the code and here are the results. Please see the attached
file for my changes; every change is marked with /* REK debug */:
My first step was to look at module tdma_worker.c, function
do_request_cal_job(). I printed the values of tdma->current_cycle and
job->period. job->period is always 1, in every call of this function.
tdma->current_cycle of course changes, but it holds an old value, for example
one from 1521 cycles ago with a 1 ms cycle length, i.e. roughly 1.5 s behind.
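To make this explicit: those two values go straight into the request frame a
few lines below my printks. This is the unmodified call from the attached
tdma_worker.c, so the requested reply cycle is simply the slave's notion of
the current cycle plus job->period:

    err = tdma_xmit_request_cal_frame(tdma,
                                      tdma->current_cycle + job->period,
                                      job->offset);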
As a quick hack, my second step was to substitute job->period with a constant
value, 2500 in my case. Now the synchronisation works, slowly of course, but
it does work.
The error must come from the "wrong" value of one of those two variables!
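For what it's worth, here is a minimal stand-alone sketch (not RTnet code, the
cycle numbers are made up) of why I suspect the +2500 hack helps, assuming the
master handles the request the way do_reply_cal_job() in the attached file
does, i.e. a reply whose requested cycle has already passed is simply freed:

#include <stdio.h>

int main(void)
{
    /* illustrative numbers only */
    unsigned int master_cycle = 45849;  /* master's current cycle */
    unsigned int slave_cycle  = 44328;  /* slave's stale tdma->current_cycle,
                                           lagging ~1521 cycles behind */

    unsigned int original = slave_cycle + 1;    /* current_cycle + job->period */
    unsigned int hacked   = slave_cycle + 2500; /* current_cycle + 2500 (my hack) */

    /* same decision as in do_reply_cal_job(): a reply whose requested
       cycle lies in the past is thrown away (kfree_rtskb) */
    printf("period=1:    reply cycle %u -> %s\n", original,
           original >= master_cycle ? "still reachable" : "already passed, dropped");
    printf("+2500 hack:  reply cycle %u -> %s\n", hacked,
           hacked >= master_cycle ? "still reachable" : "already passed, dropped");
    return 0;
}

If that reading is correct, the hack only papers over whatever makes
tdma->current_cycle lag behind on the slave.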
This leads to a few questions:
1) Is it right that job->period is always 1?
2a) If yes (which I do not believe), do you have an idea why
tdma->current_cycle holds an "old" value?
2b) If not, where is this value calculated, and what is it used for?
Thanks in advance
Karl
/***
 *
 *  rtmac/tdma/tdma_worker.c
 *
 *  RTmac - real-time networking media access control subsystem
 *  Copyright (C) 2002       Marc Kleine-Budde <[EMAIL PROTECTED]>,
 *                2003-2005  Jan Kiszka <[EMAIL PROTECTED]>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#include <rtmac/rtmac_proto.h>
#include <rtmac/tdma/tdma_proto.h>

/* REK debug */
#include <rtdm/rtdm.h>
/* REK debug */
static void do_slot_job(struct tdma_priv *tdma, struct tdma_slot *job,
                        rtdm_lockctx_t lockctx)
{
    struct rtskb *rtskb;

    if ((job->period != 1) &&
        (tdma->current_cycle % job->period != job->phasing))
        return;

    rtdm_lock_put_irqrestore(&tdma->lock, lockctx);

    /* wait for slot begin, then send one pending packet */
    rtdm_task_sleep_until(tdma->current_cycle_start + SLOT_JOB(job)->offset);

    rtdm_lock_get_irqsave(&tdma->lock, lockctx);
    rtskb = __rtskb_prio_dequeue(SLOT_JOB(job)->queue);
    if (!rtskb)
        return;
    rtdm_lock_put_irqrestore(&tdma->lock, lockctx);

    rtmac_xmit(rtskb);

    rtdm_lock_get_irqsave(&tdma->lock, lockctx);
}
static void do_xmit_sync_job(struct tdma_priv *tdma, rtdm_lockctx_t lockctx)
{
    rtdm_lock_put_irqrestore(&tdma->lock, lockctx);

    /* wait for beginning of next cycle, then send sync */
    rtdm_task_sleep_until(tdma->current_cycle_start + tdma->cycle_period);

    rtdm_lock_get_irqsave(&tdma->lock, lockctx);
    tdma->current_cycle++;
    tdma->current_cycle_start += tdma->cycle_period;
    rtdm_lock_put_irqrestore(&tdma->lock, lockctx);

    tdma_xmit_sync_frame(tdma);

    rtdm_lock_get_irqsave(&tdma->lock, lockctx);
}
static void do_backup_sync_job(struct tdma_priv *tdma, rtdm_lockctx_t lockctx)
{
    rtdm_lock_put_irqrestore(&tdma->lock, lockctx);

    /* wait for backup slot */
    rtdm_task_sleep_until(tdma->current_cycle_start + tdma->backup_sync_inc);

    /* take over sync transmission if all earlier masters failed */
    if (!test_and_clear_bit(TDMA_FLAG_RECEIVED_SYNC, &tdma->flags)) {
        rtdm_lock_get_irqsave(&tdma->lock, lockctx);
        tdma->current_cycle++;
        tdma->current_cycle_start += tdma->cycle_period;
        rtdm_lock_put_irqrestore(&tdma->lock, lockctx);

        tdma_xmit_sync_frame(tdma);

        set_bit(TDMA_FLAG_BACKUP_ACTIVE, &tdma->flags);
    } else
        clear_bit(TDMA_FLAG_BACKUP_ACTIVE, &tdma->flags);

    rtdm_lock_get_irqsave(&tdma->lock, lockctx);
}
static struct tdma_job *do_request_cal_job(struct tdma_priv *tdma,
                                           struct tdma_request_cal *job,
                                           rtdm_lockctx_t lockctx)
{
    struct rt_proc_call *call;
    struct tdma_job *prev_job;
    int err;

    if ((job->period != 1) &&
        (tdma->current_cycle % job->period != job->phasing))
        return &job->head;

    /* remove job until we get a reply */
    __list_del(job->head.entry.prev, job->head.entry.next);
    job->head.ref_count--;
    prev_job = tdma->current_job =
        list_entry(job->head.entry.prev, struct tdma_job, entry);
    prev_job->ref_count++;
    tdma->job_list_revision++;

    rtdm_lock_put_irqrestore(&tdma->lock, lockctx);

    rtdm_task_sleep_until(tdma->current_cycle_start + job->offset);

    /* REK debug */
    rtdm_printk("[REK debug] tdma->current_cycle = %u\n", tdma->current_cycle);
    rtdm_printk("[REK debug] job->period = %u\n", job->period);
    /* err = tdma_xmit_request_cal_frame(tdma,
           tdma->current_cycle + job->period, job->offset); */
    err = tdma_xmit_request_cal_frame(tdma,
                                      tdma->current_cycle + 2500, job->offset);
    /* end of REK debug */

    rtdm_lock_get_irqsave(&tdma->lock, lockctx);

    /* terminate call on error */
    if (err < 0) {
        call = tdma->calibration_call;
        tdma->calibration_call = NULL;

        if (call) {
            rtdm_lock_put_irqrestore(&tdma->lock, lockctx);
            rtpc_complete_call(call, err);
            rtdm_lock_get_irqsave(&tdma->lock, lockctx);
        }
    }

    return prev_job;
}
static struct tdma_job *do_reply_cal_job(struct tdma_priv *tdma,
                                         struct tdma_reply_cal *job,
                                         rtdm_lockctx_t lockctx)
{
    struct tdma_job *prev_job;

    if (job->reply_cycle > tdma->current_cycle)
        return &job->head;

    /* remove the job */
    __list_del(job->head.entry.prev, job->head.entry.next);
    job->head.ref_count--;
    prev_job = tdma->current_job =
        list_entry(job->head.entry.prev, struct tdma_job, entry);
    prev_job->ref_count++;
    tdma->job_list_revision++;

    rtdm_lock_put_irqrestore(&tdma->lock, lockctx);

    if (job->reply_cycle == tdma->current_cycle) {
        /* send reply in the assigned slot */
        rtdm_task_sleep_until(tdma->current_cycle_start + job->reply_offset);
        rtmac_xmit(job->reply_rtskb);
    } else {
        /* cleanup if cycle already passed */
        kfree_rtskb(job->reply_rtskb);
    }

    rtdm_lock_get_irqsave(&tdma->lock, lockctx);

    return prev_job;
}
void tdma_worker(void *arg)
{
    struct tdma_priv *tdma = (struct tdma_priv *)arg;
    struct tdma_job *job;
    rtdm_lockctx_t lockctx;

    rtdm_event_wait(&tdma->worker_wakeup);

    rtdm_lock_get_irqsave(&tdma->lock, lockctx);

    job = tdma->first_job;

    while (!test_bit(TDMA_FLAG_SHUTDOWN, &tdma->flags)) {
        job->ref_count++;
        switch (job->id) {
        case WAIT_ON_SYNC:
            rtdm_lock_put_irqrestore(&tdma->lock, lockctx);
            rtdm_event_wait(&tdma->sync_event);
            rtdm_lock_get_irqsave(&tdma->lock, lockctx);
            break;

        case XMIT_REQ_CAL:
            job = do_request_cal_job(tdma, REQUEST_CAL_JOB(job), lockctx);
            break;

#ifdef CONFIG_RTNET_TDMA_MASTER
        case XMIT_SYNC:
            do_xmit_sync_job(tdma, lockctx);
            break;

        case BACKUP_SYNC:
            do_backup_sync_job(tdma, lockctx);
            break;

        case XMIT_RPL_CAL:
            job = do_reply_cal_job(tdma, REPLY_CAL_JOB(job), lockctx);
            break;
#endif /* CONFIG_RTNET_TDMA_MASTER */

        default:
            do_slot_job(tdma, SLOT_JOB(job), lockctx);
            break;
        }
        job->ref_count--;

        job = tdma->current_job =
            list_entry(job->entry.next, struct tdma_job, entry);
    }

    rtdm_lock_put_irqrestore(&tdma->lock, lockctx);
}