Here is the patch for MDK 9.0's boxed kernel (linux-2.4.19-16mdk) to RTAI 24.1.10. Patch type: rthal5g.
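To apply it (assuming you have saved this message as a file): run patch -p1 from the top of the MDK kernel source tree, then reconfigure with CONFIG_RTHAL=y (see the config.in and Configure.help hunks below) and rebuild. The resulting kernel identifies itself as 2.4.19-rthal5 (see the Makefile hunk near the end).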
diff -urN linux-2.4.19-16mdk/arch/i386/config.in linux-rtai/arch/i386/config.in
--- linux-2.4.19-16mdk/arch/i386/config.in 2002-09-20 23:44:50.000000000 +1000
+++ linux-rtai/arch/i386/config.in 2003-01-04 13:09:41.000000000 +1100
@@ -207,6 +207,8 @@
if [ "$CONFIG_SMP" = "y" -a "$CONFIG_X86_CMPXCHG" = "y" ]; then
define_bool CONFIG_HAVE_DEC_LOCK y
fi
+comment 'CONFIG_RTHAL must be yes'
+bool 'Real-Time Hardware Abstraction Layer' CONFIG_RTHAL
endmenu
mainmenu_option next_comment
diff -urN linux-2.4.19-16mdk/arch/i386/kernel/entry.S linux-rtai/arch/i386/kernel/entry.S
--- linux-2.4.19-16mdk/arch/i386/kernel/entry.S 2002-09-20 23:44:49.000000000 +1000
+++ linux-rtai/arch/i386/kernel/entry.S 2003-01-04 13:09:41.000000000 +1100
@@ -177,6 +177,7 @@
ENTRY(ret_from_fork)
+ sti
pushl %ebx
call SYMBOL_NAME(schedule_tail)
addl $4, %esp
@@ -215,17 +216,20 @@
call *SYMBOL_NAME(sys_call_table)(,%eax,4)
movl %eax,EAX(%esp) # save the return value
ENTRY(ret_from_sys_call)
- cli # need_resched and signals atomic test
+ call *(SYMBOL_NAME(rthal) + 12) # cli
cmpl $0,need_resched(%ebx)
jne reschedule
cmpl $0,sigpending(%ebx)
jne signal_return
+ sti
+ call *(SYMBOL_NAME(rthal) + 16) # sti
restore_all:
RESTORE_ALL
ALIGN
signal_return:
- sti # we can get here from an interrupt handler
+ sti # we can get here from an interrupt handler
+ call *(SYMBOL_NAME(rthal) + 16) # sti
testl $(VM_MASK),EFLAGS(%esp)
movl %esp,%eax
jne v86_signal_return
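A reading aid for the literal offsets above (not part of the patch): they index into struct rt_hal, which this patch defines in include/asm-i386/system.h further down. With 4-byte pointers the first fields land at:

    /* struct rt_hal layout on i386:
     *    0  ret_from_intr
     *    4  __switch_to
     *    8  idt_table
     *   12  disint   <- call *(rthal + 12), the cli hook
     *   16  enint    <- call *(rthal + 16), the sti hook
     */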
diff -urN linux-2.4.19-16mdk/arch/i386/kernel/i386_ksyms.c linux-rtai/arch/i386/kernel/i386_ksyms.c
--- linux-2.4.19-16mdk/arch/i386/kernel/i386_ksyms.c 2002-08-03 10:39:42.000000000 +1000
+++ linux-rtai/arch/i386/kernel/i386_ksyms.c 2003-01-04 13:09:41.000000000 +1100
@@ -32,6 +32,18 @@
extern void dump_thread(struct pt_regs *, struct user *);
extern spinlock_t rtc_lock;
+EXPORT_SYMBOL_NOVERS(rthal);
+
+#ifdef CONFIG_VT
+ #include <linux/vt_kern.h>
+ EXPORT_SYMBOL(kd_mksound);
+#endif
+
+#include <linux/console.h>
+EXPORT_SYMBOL(console_drivers);
+extern unsigned long cpu_khz;
+EXPORT_SYMBOL(cpu_khz);
+
#if defined(CONFIG_APM) || defined(CONFIG_APM_MODULE)
extern void machine_real_restart(unsigned char *, int);
EXPORT_SYMBOL(machine_real_restart);
@@ -173,6 +185,13 @@
EXPORT_SYMBOL(atomic_dec_and_lock);
#endif
+#ifdef CONFIG_X86_REMOTE_DEBUG
+#include <linux/gdb.h>
+EXPORT_SYMBOL(linux_debug_hook);
+EXPORT_SYMBOL(gdb_irq);
+EXPORT_SYMBOL(gdb_interrupt);
+#endif
+
extern int is_sony_vaio_laptop;
EXPORT_SYMBOL(is_sony_vaio_laptop);
diff -urN linux-2.4.19-16mdk/arch/i386/kernel/i8259.c linux-rtai/arch/i386/kernel/i8259.c
--- linux-2.4.19-16mdk/arch/i386/kernel/i8259.c 2002-09-20 23:44:29.000000000 +1000
+++ linux-rtai/arch/i386/kernel/i8259.c 2003-01-04 13:09:41.000000000 +1100
@@ -290,12 +290,12 @@
handle_real_irq:
if (irq & 8) {
- inb(0xA1); /* DUMMY - (do we need this?) */
+// inb(0xA1); /* DUMMY - (do we need this?) */
outb(cached_A1,0xA1);
outb(0x60+(irq&7),0xA0);/* 'Specific EOI' to slave */
outb(0x62,0x20); /* 'Specific EOI' to master-IRQ2 */
} else {
- inb(0x21); /* DUMMY - (do we need this?) */
+// inb(0x21); /* DUMMY - (do we need this?) */
outb(cached_21,0x21);
outb(0x60+irq,0x20); /* 'Specific EOI' to master */
}
@@ -512,3 +512,17 @@
if (boot_cpu_data.hard_math && !cpu_has_fpu)
setup_irq(13, &irq13);
}
+
+void ack_8259_irq(unsigned int irq)
+{
+ spin_lock(&i8259A_lock);
+ if (irq & 8) {
+ outb(0x62,0x20);
+ outb(0x20,0xA0);
+ } else {
+ outb(0x20,0x20);
+ }
+ spin_unlock(&i8259A_lock);
+ return;
+}
+
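The new ack_8259_irq() gives RTAI a minimal EOI-only path into the PIC (note the dummy inb() reads above are also commented out to shave latency). A dispatcher on the RTAI side might use it roughly as follows (an illustrative sketch; rt_irq_trampoline is made up, only rthal.ack_8259_irq comes from this patch):

    static void rt_irq_trampoline(unsigned int irq)
    {
            rthal.ack_8259_irq(irq);  /* EOI the PIC(s) for this irq */
            /* ... run the registered hard real-time handler ... */
    }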
diff -urN linux-2.4.19-16mdk/arch/i386/kernel/io_apic.c linux-rtai/arch/i386/kernel/io_apic.c
--- linux-2.4.19-16mdk/arch/i386/kernel/io_apic.c 2002-08-03 10:39:42.000000000 +1000
+++ linux-rtai/arch/i386/kernel/io_apic.c 2003-01-04 13:09:41.000000000 +1100
@@ -35,7 +35,7 @@
#undef APIC_LOCKUP_DEBUG
-#define APIC_LOCKUP_DEBUG
+//#define APIC_LOCKUP_DEBUG
static spinlock_t ioapic_lock = SPIN_LOCK_UNLOCKED;
@@ -1243,11 +1243,10 @@
#define enable_level_ioapic_irq unmask_IO_APIC_irq
#define disable_level_ioapic_irq mask_IO_APIC_irq
+static unsigned long strange_level;
+
static void end_level_ioapic_irq (unsigned int irq)
{
- unsigned long v;
- int i;
-
/*
* It appears there is an erratum which affects at least version 0x11
* of I/O APIC (that's the 82093AA and cores integrated into various
@@ -1267,12 +1266,8 @@
* operation to prevent an edge-triggered interrupt escaping meanwhile.
* The idea is from Manfred Spraul. --macro
*/
- i = IO_APIC_VECTOR(irq);
- v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
- ack_APIC_irq();
-
- if (!(v & (1 << (i & 0x1f)))) {
+ if (test_and_clear_bit(irq, &strange_level)) {
#ifdef APIC_LOCKUP_DEBUG
struct irq_pin_list *entry;
#endif
@@ -1281,7 +1276,6 @@
atomic_inc(&irq_mis_count);
#endif
spin_lock(&ioapic_lock);
- __mask_and_edge_IO_APIC_irq(irq);
#ifdef APIC_LOCKUP_DEBUG
for (entry = irq_2_pin + irq;;) {
unsigned int reg;
@@ -1299,10 +1293,30 @@
#endif
__unmask_and_level_IO_APIC_irq(irq);
spin_unlock(&ioapic_lock);
+ } else {
+ spin_lock(&ioapic_lock);
+ __unmask_IO_APIC_irq(irq);
+ spin_unlock(&ioapic_lock);
}
}
-static void mask_and_ack_level_ioapic_irq (unsigned int irq) { /* nothing */ }
+static void mask_and_ack_level_ioapic_irq (unsigned int irq)
+{
+ unsigned long i;
+
+ i = IO_APIC_VECTOR(irq);
+ if (!(apic_read(APIC_TMR + ((i & ~0x1f) >> 1)) & (1 << (i & 0x1f)))) {
+ test_and_set_bit(irq, &strange_level);
+ spin_lock(&ioapic_lock);
+ __mask_and_edge_IO_APIC_irq(irq);
+ spin_unlock(&ioapic_lock);
+ } else {
+ spin_lock(&ioapic_lock);
+ __mask_IO_APIC_irq(irq);
+ spin_unlock(&ioapic_lock);
+ }
+ ack_APIC_irq();
+}
static void set_ioapic_affinity (unsigned int irq, unsigned long mask)
{
diff -urN linux-2.4.19-16mdk/arch/i386/kernel/irq.c linux-rtai/arch/i386/kernel/irq.c
--- linux-2.4.19-16mdk/arch/i386/kernel/irq.c 2002-09-20 23:44:29.000000000 +1000
+++ linux-rtai/arch/i386/kernel/irq.c 2003-01-04 13:09:41.000000000 +1100
@@ -1206,3 +1206,71 @@
register_irq_proc(i);
}
+static void linux_cli(void)
+{
+ hard_cli();
+}
+
+static void linux_sti(void)
+{
+ hard_sti();
+}
+
+static unsigned int linux_save_flags(void)
+{
+ int flags;
+ hard_save_flags(flags);
+ return flags;
+}
+
+static void linux_restore_flags(unsigned int flags)
+{
+ hard_restore_flags(flags);
+}
+
+static unsigned int linux_save_flags_and_cli(void)
+{
+ int flags;
+ hard_save_flags_and_cli(flags);
+ return flags;
+}
+
+#include <asm/mmu_context.h>
+
+#ifndef CONFIG_X86_IO_APIC
+int irq_vector[];
+#endif
+#ifndef CONFIG_SMP
+void smp_invalidate_interrupt(void) { }
+static unsigned long irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = ~0UL };
+static volatile int physical_apicid_2_cpu[1];
+#endif
+
+extern void *ret_from_intr;
+extern struct desc_struct idt_table[];
+extern void ack_8259_irq(unsigned int);
+extern int idle_weight;
+extern void smp_invalidate_interrupt(void);
+extern void switch_mem(struct task_struct *, struct task_struct *, int);
+extern volatile int physical_apicid_2_cpu[];
+
+struct rt_hal rthal = {
+ ret_from_intr: &ret_from_intr,
+ __switch_to: __switch_to,
+ idt_table: idt_table,
+ disint: linux_cli,
+ enint: linux_sti,
+ getflags: linux_save_flags,
+ setflags: linux_restore_flags,
+ getflags_and_cli: linux_save_flags_and_cli,
+ irq_desc: irq_desc,
+ irq_vector: irq_vector,
+ irq_affinity: irq_affinity,
+ smp_invalidate_interrupt: smp_invalidate_interrupt,
+ ack_8259_irq: ack_8259_irq,
+ idle_weight: &idle_weight,
+ lxrt_global_cli: NULL,
+ switch_mem: switch_mem,
+ init_tasks: init_tasks,
+ apicmap: physical_apicid_2_cpu,
+};
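As initialized here, rthal points at plain Linux behaviour (linux_cli()/linux_sti() and friends are just the hard instructions), so an unmodified workload runs unchanged. When an RTAI core module loads it is expected to swap its own soft handlers into the table, roughly (a hedged sketch; the rt_soft_* names are illustrative):

    rthal.disint           = rt_soft_cli;           /* __cli() now only sets a flag      */
    rthal.enint            = rt_soft_sti;           /* __sti() replays pended interrupts */
    rthal.getflags_and_cli = rt_soft_save_and_cli;  /* local_irq_save() likewise         */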
diff -urN linux-2.4.19-16mdk/arch/i386/kernel/smp.c linux-rtai/arch/i386/kernel/smp.c
--- linux-2.4.19-16mdk/arch/i386/kernel/smp.c 2002-09-20 23:44:29.000000000 +1000
+++ linux-rtai/arch/i386/kernel/smp.c 2003-01-04 13:09:41.000000000 +1100
@@ -174,8 +174,7 @@
unsigned long cfg;
unsigned long flags;
- __save_flags(flags);
- __cli();
+ hard_save_flags_and_cli(flags);
/*
@@ -199,7 +198,7 @@
*/
apic_write_around(APIC_ICR, cfg);
- __restore_flags(flags);
+ hard_restore_flags(flags);
}
static inline void send_IPI_mask_sequence(int mask, int vector)
diff -urN linux-2.4.19-16mdk/arch/i386/kernel/time.c linux-rtai/arch/i386/kernel/time.c
--- linux-2.4.19-16mdk/arch/i386/kernel/time.c 2002-02-26 06:37:53.000000000 +1100
+++ linux-rtai/arch/i386/kernel/time.c 2003-01-04 13:09:41.000000000 +1100
@@ -496,6 +496,7 @@
rdtscl(last_tsc_low);
+#if 0
spin_lock(&i8253_lock);
outb_p(0x00, 0x43); /* latch the count ASAP */
@@ -505,6 +506,7 @@
count = ((LATCH-1) - count) * TICK_SIZE;
delay_at_last_interrupt = (count + LATCH/2) / LATCH;
+#endif
}
do_timer_interrupt(irq, NULL, regs);
diff -urN linux-2.4.19-16mdk/arch/i386/mm/fault.c linux-rtai/arch/i386/mm/fault.c
--- linux-2.4.19-16mdk/arch/i386/mm/fault.c 2002-09-20 23:44:50.000000000 +1000
+++ linux-rtai/arch/i386/mm/fault.c 2003-01-04 13:09:41.000000000 +1100
@@ -596,7 +596,7 @@
/* It's safe to allow irq's after cr2 has been saved */
if (regs->eflags & X86_EFLAGS_IF)
- local_irq_enable();
+ hard_sti();
if ((error_code & 5) != 5 || address >= TASK_SIZE || regs->xcs != __USER_CS || (VM_MASK & regs->eflags))
goto chain;
diff -urN linux-2.4.19-16mdk/arch/i386/mm/ioremap.c linux-rtai/arch/i386/mm/ioremap.c
--- linux-2.4.19-16mdk/arch/i386/mm/ioremap.c 2002-08-03 10:39:42.000000000 +1000
+++ linux-rtai/arch/i386/mm/ioremap.c 2003-01-04 13:09:41.000000000 +1100
@@ -81,6 +81,7 @@
if (remap_area_pmd(pmd, address, end - address,
phys_addr + address, flags))
break;
+ set_pgdir(address, *dir);
error = 0;
address = (address + PGDIR_SIZE) & PGDIR_MASK;
dir++;
diff -urN linux-2.4.19-16mdk/arch/ppc/config.in linux-rtai/arch/ppc/config.in
--- linux-2.4.19-16mdk/arch/ppc/config.in 2002-09-20 23:44:50.000000000 +1000
+++ linux-rtai/arch/ppc/config.in 2003-01-04 13:09:41.000000000 +1100
@@ -109,6 +109,9 @@
bool ' Distribute interrupts on all CPUs by default' CONFIG_IRQ_ALL_CPUS
fi
+#bool 'Real-Time Hardware Abstraction Layer' CONFIG_RTHAL
+define_bool CONFIG_RTHAL y
+
if [ "$CONFIG_6xx" = "y" -a "$CONFIG_8260" = "n" ];then
bool 'AltiVec Support' CONFIG_ALTIVEC
bool 'Thermal Management Support' CONFIG_TAU
diff -urN linux-2.4.19-16mdk/arch/ppc/kernel/entry.S linux-rtai/arch/ppc/kernel/entry.S
--- linux-2.4.19-16mdk/arch/ppc/kernel/entry.S 2002-02-26 06:37:55.000000000 +1100
+++ linux-rtai/arch/ppc/kernel/entry.S 2003-01-04 13:09:41.000000000 +1100
@@ -294,6 +294,7 @@
bl do_signal
.globl do_signal_ret
do_signal_ret:
+ bl do_soft_sti
.globl ret_to_user_hook
ret_to_user_hook:
nop
diff -urN linux-2.4.19-16mdk/arch/ppc/kernel/irq.c linux-rtai/arch/ppc/kernel/irq.c
--- linux-2.4.19-16mdk/arch/ppc/kernel/irq.c 2002-08-03 10:39:43.000000000 +1000
+++ linux-rtai/arch/ppc/kernel/irq.c 2003-01-04 13:09:41.000000000 +1100
@@ -534,6 +534,17 @@
spin_unlock(&desc->lock);
}
+void do_soft_cli(void)
+{
+}
+
+void (*rtai_soft_sti)(void);
+
+void do_soft_sti(void)
+{
+ if(rtai_soft_sti)rtai_soft_sti();
+}
+
int do_IRQ(struct pt_regs *regs)
{
int cpu = smp_processor_id();
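The PPC hookup is lighter than the i386 one: instead of a full function table, the patch exports two pointers (rtai_srq_bckdr and rtai_soft_sti, see the ppc_ksyms.c and traps.c hunks below) that default to NULL and are filled in by RTAI at load time, e.g. (illustrative; the handler name is made up):

    extern void (*rtai_soft_sti)(void);

    rtai_soft_sti = rt_replay_pended_irqs;  /* invoked from do_signal_ret via do_soft_sti() */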
diff -urN linux-2.4.19-16mdk/arch/ppc/kernel/ppc_ksyms.c linux-rtai/arch/ppc/kernel/ppc_ksyms.c
--- linux-2.4.19-16mdk/arch/ppc/kernel/ppc_ksyms.c 2002-09-20 23:44:40.000000000 +1000
+++ linux-rtai/arch/ppc/kernel/ppc_ksyms.c 2003-01-04 13:09:41.000000000 +1100
@@ -220,6 +220,12 @@
EXPORT_SYMBOL(synchronize_irq);
#endif
+extern int (*rtai_srq_bckdr)(struct pt_regs *);
+EXPORT_SYMBOL(rtai_srq_bckdr);
+
+extern void (*rtai_soft_sti)(void);
+EXPORT_SYMBOL(rtai_soft_sti);
+
EXPORT_SYMBOL(ppc_md);
#ifdef CONFIG_ADB
diff -urN linux-2.4.19-16mdk/arch/ppc/kernel/traps.c linux-rtai/arch/ppc/kernel/traps.c
--- linux-2.4.19-16mdk/arch/ppc/kernel/traps.c 2001-11-03 12:43:54.000000000 +1100
+++ linux-rtai/arch/ppc/kernel/traps.c 2003-01-04 13:09:41.000000000 +1100
@@ -269,9 +269,14 @@
return(retval);
}
+int (*rtai_srq_bckdr)(struct pt_regs *regs) = NULL;
+
void
ProgramCheckException(struct pt_regs *regs)
{
+ if (rtai_srq_bckdr && !rtai_srq_bckdr(regs)) {
+ return;
+ }
#if defined(CONFIG_4xx)
unsigned int esr = mfspr(SPRN_ESR);
diff -urN linux-2.4.19-16mdk/Documentation/Configure.help linux-rtai/Documentation/Configure.help
--- linux-2.4.19-16mdk/Documentation/Configure.help 2002-09-20 23:44:49.000000000 +1000
+++ linux-rtai/Documentation/Configure.help 2003-01-04 13:09:40.000000000 +1100
@@ -241,6 +241,13 @@
You will need a new lynxer.elf file to flash your firmware with - send
email to Martin.Bligh@us.ibm.com
+Real-Time Hardware Abstraction
+CONFIG_RTHAL
+ The Real-Time Hardware Abstraction Layer (RTHAL) is used by
+ the Real-Time Application Interface (RTAI) to provide a
+ hard real-time environment as part of Linux. This feature
+ cannot be turned off, so say Y.
+
IO-APIC support on uniprocessors
CONFIG_X86_UP_IOAPIC
An IO-APIC (I/O Advanced Programmable Interrupt Controller) is an
diff -urN linux-2.4.19-16mdk/include/asm-i386/hw_irq.h linux-rtai/include/asm-i386/hw_irq.h
--- linux-2.4.19-16mdk/include/asm-i386/hw_irq.h 2002-09-21 02:15:08.000000000 +1000
+++ linux-rtai/include/asm-i386/hw_irq.h 2003-01-05 09:13:49.000000000 +1100
@@ -38,19 +38,31 @@
*
* Vectors 0xf0-0xfa are free (reserved for future Linux use).
*/
+#ifdef CONFIG_RTHAL
+/* the standard definitions conflict with LXRT */
+#define SPURIOUS_APIC_VECTOR 0xdf
+#define ERROR_APIC_VECTOR 0xde
+#define INVALIDATE_TLB_VECTOR 0xdd
+#define RESCHEDULE_VECTOR 0xdc
+#define CALL_FUNCTION_VECTOR 0xdb
+#else
#define SPURIOUS_APIC_VECTOR 0xff
#define ERROR_APIC_VECTOR 0xfe
#define INVALIDATE_TLB_VECTOR 0xfd
#define RESCHEDULE_VECTOR 0xfc
#define CALL_FUNCTION_VECTOR 0xfb
#define KDB_VECTOR 0xfa
-
+#endif
/*
* Local APIC timer IRQ vector is on a different priority level,
* to work around the 'lost local interrupt if more than 2 IRQ
* sources per level' errata.
*/
+#ifdef CONFIG_RTHAL
+#define LOCAL_TIMER_VECTOR 0xcf
+#else
#define LOCAL_TIMER_VECTOR 0xef
+#endif
/*
* First APIC vector available to drivers: (vectors 0x30-0xee)
@@ -58,7 +70,11 @@
* levels. (0x80 is the syscall vector)
*/
#define FIRST_DEVICE_VECTOR 0x31
+#ifdef CONFIG_RTHAL
+#define FIRST_SYSTEM_VECTOR 0xcf
+#else
#define FIRST_SYSTEM_VECTOR 0xef
+#endif
extern int irq_vector[NR_IRQS];
#define IO_APIC_VECTOR(irq) irq_vector[irq]
diff -urN linux-2.4.19-16mdk/include/asm-i386/irq.h linux-rtai/include/asm-i386/irq.h
--- linux-2.4.19-16mdk/include/asm-i386/irq.h 2002-09-21 02:15:07.000000000 +1000
+++ linux-rtai/include/asm-i386/irq.h 2003-01-05 09:13:49.000000000 +1100
@@ -26,7 +26,7 @@
#ifdef CONFIG_X86_IO_APIC
#define NR_IRQS 224
#else
-#define NR_IRQS 16
+#define NR_IRQS 32 /* 2.4.19 vanilla has 16; 32 kept for RTAI backward compatibility */
#endif
static __inline__ int irq_cannonicalize(int irq)
diff -urN linux-2.4.19-16mdk/include/asm-i386/pgalloc.h linux-rtai/include/asm-i386/pgalloc.h
--- linux-2.4.19-16mdk/include/asm-i386/pgalloc.h 2002-09-21 02:15:08.000000000 +1000
+++ linux-rtai/include/asm-i386/pgalloc.h 2003-01-06 19:39:53.000000000 +1100
@@ -163,6 +163,33 @@
extern int do_check_pgt_cache(int, int);
+extern inline void set_pgdir(unsigned long address, pgd_t entry)
+{
+ struct task_struct * p;
+ pgd_t *pgd;
+#ifdef CONFIG_SMP
+ int i;
+#endif
+
+ read_lock(&tasklist_lock);
+ for_each_task(p) {
+ if (!p->mm)
+ continue;
+ *pgd_offset(p->mm,address) = entry;
+ }
+ read_unlock(&tasklist_lock);
+#ifndef CONFIG_SMP
+ for (pgd = (pgd_t *)pgd_quicklist; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
+ pgd[address >> PGDIR_SHIFT] = entry;
+#else
+ /* To pgd_alloc/pgd_free, one holds master kernel lock and so does our callee, so we can
+ modify pgd caches of other CPUs as well. -jj */
+ for (i = 0; i < NR_CPUS; i++)
+ for (pgd = (pgd_t *)cpu_data[i].pgd_quick; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
+ pgd[address >> PGDIR_SHIFT] = entry;
+#endif
+}
+
/*
* TLB flushing:
*
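set_pgdir() - reinstating a helper that older kernels had - broadcasts a newly created kernel page-directory entry into every task's pgd and into the pgd quicklists. Together with the ioremap.c hunk above and the vmalloc.c hunk at the end, the effect is that fresh vmalloc/ioremap mappings become visible to all contexts immediately instead of being faulted in lazily; presumably the point is that a hard real-time context must never take a Linux page fault. ioremap.c calls it unconditionally, while vmalloc.c guards it:

    if (pgd_val(olddir) != pgd_val(*dir))
            set_pgdir(address, *dir);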
diff -urN linux-2.4.19-16mdk/include/asm-i386/system.h linux-rtai/include/asm-i386/system.h
--- linux-2.4.19-16mdk/include/asm-i386/system.h 2002-09-21 02:15:08.000000000 +1000
+++ linux-rtai/include/asm-i386/system.h 2003-01-05 09:13:49.000000000 +1100
@@ -12,7 +12,12 @@
struct task_struct; /* one of the stranger aspects of C forward declarations.. */
extern void FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));
-#define prepare_to_switch() do { } while(0)
+#define prepare_to_switch() do { \
+ if (rthal.lxrt_global_cli) { \
+ rthal.lxrt_global_cli(); \
+ } \
+} while(0)
+
#define switch_to(prev,next,last) do { \
asm volatile("pushl %%esi\n\t" \
"pushl %%edi\n\t" \
@@ -23,6 +28,7 @@
"pushl %4\n\t" /* restore EIP */ \
"jmp __switch_to\n" \
"1:\t" \
+ "sti\n\t" \
"popl %%ebp\n\t" \
"popl %%edi\n\t" \
"popl %%esi\n\t" \
@@ -315,29 +321,54 @@
#define set_wmb(var, value) do { var = value; wmb(); } while (0)
/* interrupt control.. */
-#define __save_flags(x) __asm__ __volatile__("pushfl ; popl %0":"=g" (x): /* no input */)
-#define __restore_flags(x) __asm__ __volatile__("pushl %0 ; popfl": /* no output */ :"g" (x):"memory", "cc")
-#define __cli() __asm__ __volatile__("cli": : :"memory")
-#define __sti() __asm__ __volatile__("sti": : :"memory")
+#define hard_save_flags(x) __asm__ __volatile__("pushfl ; popl %0":"=g" (x): /* no input */)
+#define hard_restore_flags(x) __asm__ __volatile__("pushl %0 ; popfl": /* no output */ :"g" (x):"memory", "cc")
+#define hard_cli() __asm__ __volatile__("cli": : :"memory")
+#define hard_sti() __asm__ __volatile__("sti": : :"memory")
+#define hard_save_flags_and_cli(x) __asm__ __volatile__("pushfl ; popl %0 ; cli":"=g" (x): /* no input */)
/* used in the idle loop; sti takes one instruction cycle to complete */
-#define safe_halt() __asm__ __volatile__("sti; hlt": : :"memory")
-
-#define __save_and_cli(x) do { __save_flags(x); __cli(); } while(0);
-#define __save_and_sti(x) do { __save_flags(x); __sti(); } while(0);
+#define safe_halt() __asm__ __volatile__("call *"SYMBOL_NAME_STR(rthal + 16)"; hlt": : :"memory")
/* For spinlocks etc */
-#if 0
-#define local_irq_save(x) __asm__ __volatile__("pushfl ; popl %0 ; cli":"=g" (x): /* no input */ :"memory")
-#define local_irq_set(x) __asm__ __volatile__("pushfl ; popl %0 ; sti":"=g" (x): /* no input */ :"memory")
-#else
-#define local_irq_save(x) __save_and_cli(x)
-#define local_irq_set(x) __save_and_sti(x)
-#endif
-
-#define local_irq_restore(x) __restore_flags(x)
-#define local_irq_disable() __cli()
-#define local_irq_enable() __sti()
+//#define local_irq_save(x) __asm__ __volatile__("pushfl ; popl %0 ; cli":"=g" (x): /* no input */ :"memory")
+//#define local_irq_restore(x) __restore_flags(x)
+//#define local_irq_disable() __cli()
+//#define local_irq_enable() __sti()
+
+struct rt_hal {
+ void *ret_from_intr;
+ void *__switch_to;
+ struct desc_struct *idt_table;
+ void (*disint)(void);
+ void (*enint)(void);
+ unsigned int (*getflags)(void);
+ void (*setflags)(unsigned int flags);
+ unsigned int (*getflags_and_cli)(void);
+ void *irq_desc;
+ int *irq_vector;
+ unsigned long *irq_affinity;
+ void (*smp_invalidate_interrupt)(void);
+ void (*ack_8259_irq)(unsigned int);
+ int *idle_weight;
+ void (*lxrt_global_cli)(void);
+ void (*switch_mem)(struct task_struct *, struct task_struct *, int);
+ struct task_struct **init_tasks;
+ unsigned int *apicmap;
+};
+
+extern struct rt_hal rthal;
+
+#define __cli() (rthal.disint())
+#define __sti() (rthal.enint())
+#define __save_flags(x) ((x) = rthal.getflags())
+#define __restore_flags(x) (rthal.setflags(x))
+
+#define local_irq_disable() (rthal.disint())
+#define local_irq_enable() (rthal.enint())
+#define local_irq_save(x) ((x) = rthal.getflags_and_cli())
+#define local_irq_restore(x) (rthal.setflags(x))
+
#ifdef CONFIG_SMP
extern void __global_cli(void);
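The net effect of this hunk: generic code using __cli()/__sti()/local_irq_save() now dispatches through the HAL pointers, while the new hard_* macros keep the real instructions for the few places that must truly mask the CPU. A minimal illustration (not from the patch):

    unsigned long flags;

    local_irq_save(flags);    /* flags = rthal.getflags_and_cli(): soft, RTAI can still preempt */
    /* ... Linux-level critical section ... */
    local_irq_restore(flags); /* rthal.setflags(flags) */

    hard_cli();               /* real cli: masks even RTAI's interrupts */
    hard_sti();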
diff -urN linux-2.4.19-16mdk/include/asm-i386/system.h.org linux-rtai/include/asm-i386/system.h.org
--- linux-2.4.19-16mdk/include/asm-i386/system.h.org 1970-01-01 10:00:00.000000000 +1000
+++ linux-rtai/include/asm-i386/system.h.org 2003-01-05 08:33:39.000000000 +1100
@@ -0,0 +1,384 @@
+#ifndef __ASM_SYSTEM_H
+#define __ASM_SYSTEM_H
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <asm/segment.h>
+#include <linux/bitops.h> /* for LOCK_PREFIX */
+
+#ifdef __KERNEL__
+
+struct task_struct; /* one of the stranger aspects of C forward declarations.. */
+extern void FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));
+
+#define prepare_to_switch() do { \
+ if (rthal.lxrt_global_cli) { \
+ rthal.lxrt_global_cli(); \
+ } \
+} while(0)
+
+#define switch_to(prev,next,last) do { \
+ asm volatile("pushl %%esi\n\t" \
+ "pushl %%edi\n\t" \
+ "pushl %%ebp\n\t" \
+ "movl %%esp,%0\n\t" /* save ESP */ \
+ "movl %3,%%esp\n\t" /* restore ESP */ \
+ "movl $1f,%1\n\t" /* save EIP */ \
+ "pushl %4\n\t" /* restore EIP */ \
+ "jmp __switch_to\n" \
+ "1:\t" \
+ "sti\n\t" \
+ "popl %%ebp\n\t" \
+ "popl %%edi\n\t" \
+ "popl %%esi\n\t" \
+ :"=m" (prev->thread.esp),"=m" (prev->thread.eip), \
+ "=b" (last) \
+ :"m" (next->thread.esp),"m" (next->thread.eip), \
+ "a" (prev), "d" (next), \
+ "b" (prev)); \
+} while (0)
+
+#define _set_base(addr,base) do { unsigned long __pr; \
+__asm__ __volatile__ ("movw %%dx,%1\n\t" \
+ "rorl $16,%%edx\n\t" \
+ "movb %%dl,%2\n\t" \
+ "movb %%dh,%3" \
+ :"=&d" (__pr) \
+ :"m" (*((addr)+2)), \
+ "m" (*((addr)+4)), \
+ "m" (*((addr)+7)), \
+ "0" (base) \
+ ); } while(0)
+
+#define _set_limit(addr,limit) do { unsigned long __lr; \
+__asm__ __volatile__ ("movw %%dx,%1\n\t" \
+ "rorl $16,%%edx\n\t" \
+ "movb %2,%%dh\n\t" \
+ "andb $0xf0,%%dh\n\t" \
+ "orb %%dh,%%dl\n\t" \
+ "movb %%dl,%2" \
+ :"=&d" (__lr) \
+ :"m" (*(addr)), \
+ "m" (*((addr)+6)), \
+ "0" (limit) \
+ ); } while(0)
+
+#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) )
+#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1)>>12 )
+
+static inline unsigned long _get_base(char * addr)
+{
+ unsigned long __base;
+ __asm__("movb %3,%%dh\n\t"
+ "movb %2,%%dl\n\t"
+ "shll $16,%%edx\n\t"
+ "movw %1,%%dx"
+ :"=&d" (__base)
+ :"m" (*((addr)+2)),
+ "m" (*((addr)+4)),
+ "m" (*((addr)+7)));
+ return __base;
+}
+
+#define get_base(ldt) _get_base( ((char *)&(ldt)) )
+
+/*
+ * Load a segment. Fall back on loading the zero
+ * segment if something goes wrong..
+ */
+#define loadsegment(seg,value) \
+ asm volatile("\n" \
+ "1:\t" \
+ "movl %0,%%" #seg "\n" \
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3:\t" \
+ "pushl $0\n\t" \
+ "popl %%" #seg "\n\t" \
+ "jmp 2b\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n\t" \
+ ".align 4\n\t" \
+ ".long 1b,3b\n" \
+ ".previous" \
+ : :"m" (*(unsigned int *)&(value)))
+
+/*
+ * Clear and set 'TS' bit respectively
+ */
+#define clts() __asm__ __volatile__ ("clts")
+#define read_cr0() ({ \
+ unsigned int __dummy; \
+ __asm__( \
+ "movl %%cr0,%0\n\t" \
+ :"=r" (__dummy)); \
+ __dummy; \
+})
+#define write_cr0(x) \
+ __asm__("movl %0,%%cr0": :"r" (x));
+
+#define read_cr4() ({ \
+ unsigned int __dummy; \
+ __asm__( \
+ "movl %%cr4,%0\n\t" \
+ :"=r" (__dummy)); \
+ __dummy; \
+})
+#define write_cr4(x) \
+ __asm__("movl %0,%%cr4": :"r" (x));
+#define stts() write_cr0(8 | read_cr0())
+
+#endif /* __KERNEL__ */
+
+#define wbinvd() \
+ __asm__ __volatile__ ("wbinvd": : :"memory");
+
+static inline unsigned long get_limit(unsigned long segment)
+{
+ unsigned long __limit;
+ __asm__("lsll %1,%0"
+ :"=r" (__limit):"r" (segment));
+ return __limit+1;
+}
+
+#define nop() __asm__ __volatile__ ("nop")
+
+#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
+
+#define tas(ptr) (xchg((ptr),1))
+
+struct __xchg_dummy { unsigned long a[100]; };
+#define __xg(x) ((struct __xchg_dummy *)(x))
+
+
+/*
+ * The semantics of XCHGCMP8B are a bit strange, this is why
+ * there is a loop and the loading of %%eax and %%edx has to
+ * be inside. This inlines well in most cases, the cached
+ * cost is around ~38 cycles. (in the future we might want
+ * to do an SIMD/3DNOW!/MMX/FPU 64-bit store here, but that
+ * might have an implicit FPU-save as a cost, so it's not
+ * clear which path to go.)
+ *
+ * chmxchg8b must be used with the lock prefix here to allow
+ * the instruction to be executed atomically, see page 3-102
+ * of the instruction set reference 24319102.pdf. We need
+ * the reader side to see the coherent 64bit value.
+ */
+static inline void __set_64bit (unsigned long long * ptr,
+ unsigned int low, unsigned int high)
+{
+ __asm__ __volatile__ (
+ "\n1:\t"
+ "movl (%0), %%eax\n\t"
+ "movl 4(%0), %%edx\n\t"
+ "lock cmpxchg8b (%0)\n\t"
+ "jnz 1b"
+ : /* no outputs */
+ : "D"(ptr),
+ "b"(low),
+ "c"(high)
+ : "ax","dx","memory");
+}
+
+static inline void __set_64bit_constant (unsigned long long *ptr,
+ unsigned long long value)
+{
+ __set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL));
+}
+#define ll_low(x) *(((unsigned int*)&(x))+0)
+#define ll_high(x) *(((unsigned int*)&(x))+1)
+
+static inline void __set_64bit_var (unsigned long long *ptr,
+ unsigned long long value)
+{
+ __set_64bit(ptr,ll_low(value), ll_high(value));
+}
+
+#define set_64bit(ptr,value) \
+(__builtin_constant_p(value) ? \
+ __set_64bit_constant(ptr, value) : \
+ __set_64bit_var(ptr, value) )
+
+#define _set_64bit(ptr,value) \
+(__builtin_constant_p(value) ? \
+ __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
+ __set_64bit(ptr, ll_low(value), ll_high(value)) )
+
+/*
+ * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
+ * Note 2: xchg has side effect, so that attribute volatile is necessary,
+ * but generally the primitive is invalid, *ptr is output argument. --ANK
+ */
+static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+{
+ switch (size) {
+ case 1:
+ __asm__ __volatile__("xchgb %b0,%1"
+ :"=q" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ case 2:
+ __asm__ __volatile__("xchgw %w0,%1"
+ :"=r" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ case 4:
+ __asm__ __volatile__("xchgl %0,%1"
+ :"=r" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ }
+ return x;
+}
+
+/*
+ * Atomic compare and exchange. Compare OLD with MEM, if identical,
+ * store NEW in MEM. Return the initial value in MEM. Success is
+ * indicated by comparing RETURN with OLD.
+ */
+
+#ifdef CONFIG_X86_CMPXCHG
+#define __HAVE_ARCH_CMPXCHG 1
+
+static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
+ unsigned long new, int size)
+{
+ unsigned long prev;
+ switch (size) {
+ case 1:
+ __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
+ : "=a"(prev)
+ : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ case 2:
+ __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
+ : "=a"(prev)
+ : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ case 4:
+ __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
+ : "=a"(prev)
+ : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ }
+ return old;
+}
+
+#define cmpxchg(ptr,o,n)\
+ ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
+ (unsigned long)(n),sizeof(*(ptr))))
+
+#else
+/* Compiling for a 386 proper. Is it worth implementing via cli/sti? */
+#endif
+
+/*
+ * Force strict CPU ordering.
+ * And yes, this is required on UP too when we're talking
+ * to devices.
+ *
+ * For now, "wmb()" doesn't actually do anything, as all
+ * Intel CPU's follow what Intel calls a *Processor Order*,
+ * in which all writes are seen in the program order even
+ * outside the CPU.
+ *
+ * I expect future Intel CPU's to have a weaker ordering,
+ * but I'd also expect them to finally get their act together
+ * and add some real memory barriers if so.
+ *
+ * Some non intel clones support out of order store. wmb() ceases to be a
+ * nop for these.
+ */
+
+#define mb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
+#define rmb() mb()
+
+#ifdef CONFIG_X86_OOSTORE
+#define wmb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
+#else
+#define wmb() __asm__ __volatile__ ("": : :"memory")
+#endif
+
+#ifdef CONFIG_SMP
+#define smp_mb() mb()
+#define smp_rmb() rmb()
+#define smp_wmb() wmb()
+#else
+#define smp_mb() barrier()
+#define smp_rmb() barrier()
+#define smp_wmb() barrier()
+#endif
+
+#define set_mb(var, value) do { xchg(&var, value); } while (0)
+#define set_wmb(var, value) do { var = value; wmb(); } while (0)
+
+/* interrupt control.. */
+#define __save_flags(x) __asm__ __volatile__("pushfl ; popl %0":"=g" (x): /* no input */)
+#define __restore_flags(x) __asm__ __volatile__("pushl %0 ; popfl": /* no output */ :"g" (x):"memory", "cc")
+#define __cli() __asm__ __volatile__("cli": : :"memory")
+#define __sti() __asm__ __volatile__("sti": : :"memory")
+/* used in the idle loop; sti takes one instruction cycle to complete */
+#define safe_halt() __asm__ __volatile__("sti; hlt": : :"memory")
+
+#define __save_and_cli(x) do { __save_flags(x); __cli(); } while(0);
+#define __save_and_sti(x) do { __save_flags(x); __sti(); } while(0);
+
+/* For spinlocks etc */
+#if 0
+#define local_irq_save(x) __asm__ __volatile__("pushfl ; popl %0 ; cli":"=g" (x): /* no input */ :"memory")
+#define local_irq_set(x) __asm__ __volatile__("pushfl ; popl %0 ; sti":"=g" (x): /* no input */ :"memory")
+#else
+#define local_irq_save(x) __save_and_cli(x)
+#define local_irq_set(x) __save_and_sti(x)
+#endif
+
+#define local_irq_restore(x) __restore_flags(x)
+#define local_irq_disable() __cli()
+#define local_irq_enable() __sti()
+
+#ifdef CONFIG_SMP
+
+extern void __global_cli(void);
+extern void __global_sti(void);
+extern unsigned long __global_save_flags(void);
+extern void __global_restore_flags(unsigned long);
+#define cli() __global_cli()
+#define sti() __global_sti()
+#define save_flags(x) ((x)=__global_save_flags())
+#define restore_flags(x) __global_restore_flags(x)
+#define save_and_cli(x) do { save_flags(x); cli(); } while(0);
+#define save_and_sti(x) do { save_flags(x); sti(); } while(0);
+
+#else
+
+#define cli() __cli()
+#define sti() __sti()
+#define save_flags(x) __save_flags(x)
+#define restore_flags(x) __restore_flags(x)
+#define save_and_cli(x) __save_and_cli(x)
+#define save_and_sti(x) __save_and_sti(x)
+
+#endif
+
+/*
+ * disable hlt during certain critical i/o operations
+ */
+#define HAVE_DISABLE_HLT
+void disable_hlt(void);
+void enable_hlt(void);
+
+extern unsigned long dmi_broken;
+extern int is_sony_vaio_laptop;
+
+#define BROKEN_ACPI_Sx 0x0001
+#define BROKEN_INIT_AFTER_S1 0x0002
+
+#endif
diff -urN linux-2.4.19-16mdk/include/asm-ppc/system.h linux-rtai/include/asm-ppc/system.h
--- linux-2.4.19-16mdk/include/asm-ppc/system.h 2002-09-20 23:44:22.000000000 +1000
+++ linux-rtai/include/asm-ppc/system.h 2003-01-04 13:09:41.000000000 +1100
@@ -85,6 +85,7 @@
struct task_struct;
#define prepare_to_switch() do { } while(0)
+#define end_switch() do { } while(0)
#define switch_to(prev,next,last) _switch_to((prev),(next),&(last))
extern void _switch_to(struct task_struct *, struct task_struct *,
struct task_struct **);
diff -urN linux-2.4.19-16mdk/include/linux/sched.h linux-rtai/include/linux/sched.h
--- linux-2.4.19-16mdk/include/linux/sched.h 2002-09-21 02:15:09.000000000 +1000
+++ linux-rtai/include/linux/sched.h 2003-01-06 19:39:53.000000000 +1100
@@ -452,6 +452,8 @@
/* journalling filesystem info */
void *journal_info;
+
+ void *this_rt_task[2];
};
/*
@@ -565,6 +567,7 @@
blocked: {{0}}, \
alloc_lock: SPIN_LOCK_UNLOCKED, \
journal_info: NULL, \
+ this_rt_task: {0,0}, \
GRSEC_INIT_TASK(tsk) \
}
#ifndef INIT_TASK_SIZE
diff -urN linux-2.4.19-16mdk/kernel/exit.c linux-rtai/kernel/exit.c
--- linux-2.4.19-16mdk/kernel/exit.c 2002-08-03 10:39:46.000000000 +1000
+++ linux-rtai/kernel/exit.c 2003-01-04 13:09:42.000000000 +1100
@@ -436,6 +436,71 @@
write_unlock_irq(&tasklist_lock);
}
+//
+// PGGC added these lines to callback rtai when a task dies.
+// A list of functions allows different rt_modules to be informed.
+//
+static struct t_callback {
+ void (*rtai_callback)(struct task_struct *tsk);
+ struct t_callback *next;
+ } *rtai_callback_list;
+
+extern int set_rtai_callback( void (*fun)(struct task_struct *tsk));
+extern void remove_rtai_callback( void (*fun)(struct task_struct *tsk));
+
+void inform_rtai(void)
+{
+ struct t_callback *pt;
+
+ pt = rtai_callback_list;
+ while (pt) {
+ (*(pt->rtai_callback))(current);
+ pt = pt->next;
+ }
+//printk( "Task pid %d going down\n", current->pid);
+}
+
+int set_rtai_callback( void (*pt)(struct task_struct *tsk))
+{
+ struct t_callback *ptn;
+
+ ptn = kmalloc(sizeof(struct t_callback), GFP_KERNEL);
+ if (!ptn) {
+ return -ENOMEM;
+ }
+ ptn->rtai_callback = pt;
+ ptn->next = rtai_callback_list ? rtai_callback_list : 0;
+ rtai_callback_list = ptn;
+ return 0;
+}
+
+void remove_rtai_callback(void (*pt)(struct task_struct *tsk))
+{
+ struct t_callback *pto, *ptoo, *ptd;
+
+ pto = rtai_callback_list;
+ ptoo = 0;
+ while (pto) {
+ if (pto->rtai_callback == pt) {
+ if (!ptoo) {
+ rtai_callback_list = pto->next;
+ } else {
+ ptoo->next = pto->next;
+ }
+ ptd = pto;
+ pto = pto->next;
+ kfree(ptd);
+ } else {
+ ptoo = pto;
+ pto = pto->next;
+ }
+ }
+//printk("rtai_callback_list %X\n", rtai_callback_list);
+}
+//
+//
+//
+
NORET_TYPE void do_exit(long code)
{
struct task_struct *tsk = current;
@@ -453,6 +518,18 @@
#ifdef CONFIG_BSD_PROCESS_ACCT
acct_process(code);
#endif
+
+/*
+ * PGGC added these lines to callback rtai when a task dies.
+ * This assumes that a LXRT task should/will always set its
+ * scheduling policy to SCHED_FIFO or SCHED_RR.
+ * We may want to enforce this in rt_task_init(...).
+ * (For the moment it is not so, thus let's inform LXRT anyhow (Paolo))
+ */
+ if(tsk->this_rt_task[0]) {
+ inform_rtai();
+ }
+
__exit_mm(tsk);
lock_kernel();
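Module-side usage of the new exit hook might look like this (a hedged sketch; my_exit_hook is illustrative, the two API calls are from this patch - set_rtai_callback() returns 0 or -ENOMEM):

    static void my_exit_hook(struct task_struct *tsk)
    {
            /* release RT objects still owned by the dying task */
    }

    /* module init:    */  set_rtai_callback(my_exit_hook);
    /* module cleanup: */  remove_rtai_callback(my_exit_hook);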
diff -urN linux-2.4.19-16mdk/kernel/fork.c linux-rtai/kernel/fork.c
--- linux-2.4.19-16mdk/kernel/fork.c 2002-09-20 23:44:50.000000000 +1000
+++ linux-rtai/kernel/fork.c 2003-01-04 13:09:42.000000000 +1100
@@ -253,7 +253,9 @@
atomic_set(&mm->mm_count, 1);
init_rwsem(&mm->mmap_sem);
mm->page_table_lock = SPIN_LOCK_UNLOCKED;
+ lock_kernel();
mm->pgd = pgd_alloc(mm);
+ unlock_kernel();
mm->def_flags = 0;
if (mm->pgd)
return mm;
@@ -285,7 +287,9 @@
inline void __mmdrop(struct mm_struct *mm)
{
BUG_ON(mm == &init_mm);
+ lock_kernel();
pgd_free(mm->pgd);
+ unlock_kernel();
destroy_context(mm);
free_mm(mm);
}
diff -urN linux-2.4.19-16mdk/kernel/ksyms.c linux-rtai/kernel/ksyms.c
--- linux-2.4.19-16mdk/kernel/ksyms.c 2002-09-20 23:44:50.000000000 +1000
+++ linux-rtai/kernel/ksyms.c 2003-01-05 09:46:00.000000000 +1100
@@ -618,6 +618,45 @@
EXPORT_SYMBOL(tasklist_lock);
EXPORT_SYMBOL(pidhash);
+/*
+ * used to inform rtai a task is about to die.
+ */
+extern int set_rtai_callback( void (*fun)(struct task_struct *tsk));
+extern void remove_rtai_callback(void (*fun)(struct task_struct *tsk));
+extern NORET_TYPE void do_exit(long code);
+EXPORT_SYMBOL(set_rtai_callback);
+EXPORT_SYMBOL(remove_rtai_callback);
+EXPORT_SYMBOL(do_exit);
+
+/*
+ * used to inform RTAI LXRT a task should deal with a Linux signal, and for rt_lxrt_fork()
+ */
+extern int (*rtai_signal_handler)(struct task_struct *lnxt, int sig);
+EXPORT_SYMBOL(rtai_signal_handler);
+extern int do_fork(unsigned long clone_flags, unsigned long stack_start, struct pt_regs *regs, unsigned long stack_size);
+EXPORT_SYMBOL(do_fork);
+
+/*
+ * used to provide async io support (aio) to RTAI LXRT.
+ */
+extern ssize_t sys_read(unsigned int fd, char * buf, size_t count);
+extern ssize_t sys_write(unsigned int fd, const char * buf, size_t count);
+extern ssize_t sys_pread(unsigned int fd, char * buf, size_t count, loff_t pos);
+extern ssize_t sys_pwrite(unsigned int fd, const char * buf, size_t count, loff_t pos);
+extern long sys_fsync(unsigned int fd);
+extern long sys_fdatasync(unsigned int fd);
+extern long sys_open(const char * filename, int flags, int mode);
+extern long sys_close(unsigned int fd);
+
+EXPORT_SYMBOL(sys_read);
+EXPORT_SYMBOL(sys_write);
+EXPORT_SYMBOL(sys_open);
+//EXPORT_SYMBOL(sys_close);
+EXPORT_SYMBOL(sys_pread);
+EXPORT_SYMBOL(sys_pwrite);
+EXPORT_SYMBOL(sys_fsync);
+EXPORT_SYMBOL(sys_fdatasync);
+
/* debug */
EXPORT_SYMBOL(dump_stack);
diff -urN linux-2.4.19-16mdk/kernel/sched.c linux-rtai/kernel/sched.c
--- linux-2.4.19-16mdk/kernel/sched.c 2002-09-20 23:44:50.000000000 +1000
+++ linux-rtai/kernel/sched.c 2003-01-04 13:09:42.000000000 +1100
@@ -549,6 +549,68 @@
* tasks can run. It can not be killed, and it cannot sleep. The 'state'
* information in task[0] is never used.
*/
+
+int idle_weight = -1000;
+#define MAX_MM 4096 // How large should it be?
+static struct smm_t { int in, out; struct mm_struct *mm[MAX_MM]; } smm[NR_CPUS];
+#define incpnd(x) do { x = (x + 1) & (MAX_MM - 1); } while(0)
+
+#ifdef CONFIG_X86
+static inline void pend_mm(struct mm_struct *mm, int cpu)
+{
+ if (rthal.lxrt_global_cli) {
+ struct smm_t *p = smm + cpu;
+ p->mm[p->in] = mm;
+ incpnd(p->in);
+ } else {
+ mmdrop(mm);
+ }
+}
+
+static inline void drop_mm(void)
+{
+ if (rthal.lxrt_global_cli) {
+ struct smm_t *p = smm + smp_processor_id();
+ while (p->out != p->in) {
+ mmdrop(p->mm[p->out]);
+ incpnd(p->out);
+ }
+ }
+}
+
+void switch_mem(struct task_struct *prevp, struct task_struct *nextp, int cpuid)
+{
+ if (cpuid < 0) {
+ struct mm_struct *next = nextp->active_mm;
+ if (prevp->active_mm != next || (cpuid & 0x40000000)) {
+ if ((prevp->active_mm)->context.segments != next->context.segments) {
+ load_LDT(next);
+ }
+ set_bit(cpuid & 0xFFFFFFF, &next->context.cpuvalid);
+ asm volatile("movl %0,%%cr3": :"r" (__pa(next->pgd)));
+ }
+#ifdef CONFIG_SMP
+ else if (!test_and_set_bit(cpuid & 0xFFFFFFF, &next->context.cpuvalid)) {
+ load_LDT(next);
+ }
+#endif
+ } else {
+ struct mm_struct *oldmm = prevp->active_mm;
+ switch_mm(oldmm, nextp->active_mm, nextp, cpuid);
+ if (!nextp->mm) {
+ enter_lazy_tlb(oldmm, nextp, cpuid);
+ }
+ }
+}
+#else
+static inline void pend_mm(struct mm_struct *mm, int cpu)
+{
+ mmdrop(mm);
+}
+static inline void drop_mm(void) {}
+void switch_mem(struct task_struct *prevp, struct task_struct *nextp, int cpuid) {}
+#endif
+
asmlinkage void schedule(void)
{
struct schedule_data * sched_data;
@@ -607,7 +669,7 @@
* Default process to select..
*/
next = idle_task(this_cpu);
- c = -1000;
+ c = idle_weight;
list_for_each(tmp, &runqueue_head) {
p = list_entry(tmp, struct task_struct, run_list);
if (can_schedule(p, this_cpu)) {
@@ -689,7 +751,7 @@
if (!prev->mm) {
prev->active_mm = NULL;
- mmdrop(oldmm);
+ pend_mm(oldmm, this_cpu);
}
}
@@ -698,6 +760,7 @@
* stack.
*/
switch_to(prev, next, prev);
+ drop_mm();
__schedule_tail(prev);
same_process:
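A note on pend_mm()/drop_mm(): while LXRT is active (rthal.lxrt_global_cli is set), the final mmdrop() of a lazy-TLB mm is not performed inside the scheduling path but queued in a per-CPU ring and drained only after switch_to() completes; presumably this is because RTAI may be driving the task switch from a context where freeing an mm is unsafe. With LXRT inactive both helpers degenerate to a plain mmdrop().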
diff -urN linux-2.4.19-16mdk/kernel/signal.c linux-rtai/kernel/signal.c
--- linux-2.4.19-16mdk/kernel/signal.c 2002-09-20 23:44:50.000000000 +1000
+++ linux-rtai/kernel/signal.c 2003-01-04 13:09:42.000000000 +1100
@@ -1091,9 +1091,30 @@
return ret;
}
+//
+// Add this pointer to the RTAI signal handler.
+//
+int (*rtai_signal_handler)(struct task_struct *lnxt, int sig);
+
asmlinkage long
sys_kill(int pid, int sig)
{
+// Add this section to call the RTAI signal handler.
+//
+ {
+ struct task_struct *p;
+ int ret;
+
+ if (rtai_signal_handler) {
+ p = find_task_by_pid(pid);
+ if(p && (p->policy == SCHED_FIFO || p->policy == SCHED_RR) && p->this_rt_task[0]) {
+ ret = rtai_signal_handler(p, sig);
+ if(!ret) return 0; // 0: RTAI consumed it; nonzero falls through to Linux.
+ }
+ }
+ }
+
+ {
struct siginfo info;
info.si_signo = sig;
@@ -1103,6 +1124,7 @@
info.si_uid = current->uid;
return kill_something_info(sig, &info, pid);
+ }
}
/*
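LXRT is expected to plug in something along these lines (an illustrative sketch; the return convention follows the code above - returning 0 makes sys_kill() return at once, anything else falls through to normal Linux delivery):

    static int lxrt_signal_handler(struct task_struct *lnxt, int sig)
    {
            /* wake/notify the hard RT side owning this task ... */
            return 0;  /* 0: consumed here; nonzero: let Linux deliver it */
    }

    /* at LXRT init: */
    rtai_signal_handler = lxrt_signal_handler;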
diff -urN linux-2.4.19-16mdk/Makefile linux-rtai/Makefile
--- linux-2.4.19-16mdk/Makefile 2002-09-21 03:04:04.000000000 +1000
+++ linux-rtai/Makefile 2003-01-04 13:12:03.000000000 +1100
@@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 4
SUBLEVEL = 19
-EXTRAVERSION = -16mdkcustom
+EXTRAVERSION = -rthal5
KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
diff -urN linux-2.4.19-16mdk/mm/vmalloc.c linux-rtai/mm/vmalloc.c
--- linux-2.4.19-16mdk/mm/vmalloc.c 2002-09-20 23:44:45.000000000 +1000
+++ linux-rtai/mm/vmalloc.c 2003-01-04 13:09:42.000000000 +1100
@@ -162,6 +162,9 @@
spin_lock(&init_mm.page_table_lock);
do {
pmd_t *pmd;
+#ifdef CONFIG_X86
+ pgd_t olddir = *dir;
+#endif
pmd = pmd_alloc(&init_mm, dir, address);
ret = -ENOMEM;
@@ -171,6 +174,10 @@
ret = -ENOMEM;
if (alloc_area_pmd(pmd, address, end - address, gfp_mask, prot, pages))
break;
+#ifdef CONFIG_X86
+ if (pgd_val(olddir) != pgd_val(*dir))
+ set_pgdir(address, *dir);
+#endif
address = (address + PGDIR_SIZE) & PGDIR_MASK;
dir++;