arch/i386/kernel/irq.c
/*
 *	linux/arch/i386/kernel/irq.c
 *
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ
 * numbers shouldn't result in any weird surprises, and installing
 * new handlers should be easier.
 */

/*
 * IRQs are in fact implemented a bit like signal handlers for the
 * kernel. Naturally it's not a 1:1 relation, but there are
 * similarities.
 */

#include <linux/config.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/malloc.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/tasks.h>
#include <linux/smp_lock.h>
#include <linux/init.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/bitops.h>
#include <asm/smp.h>
#include <asm/pgtable.h>
#include <asm/delay.h>
#include <asm/desc.h>

#include "irq.h"

unsigned int local_bh_count[NR_CPUS];
unsigned int local_irq_count[NR_CPUS];

atomic_t nmi_counter;

/*
 * Linux has a controller-independent x86 interrupt architecture.
 * Every controller has a 'controller-template' that is used by the
 * main code to do the right thing. Each driver-visible interrupt
 * source is transparently wired to the appropriate controller, so
 * drivers need not be aware of the interrupt-controller.
 *
 * Various interrupt controllers we handle: 8259 PIC, SMP IO-APIC,
 * PIIX4's internal 8259 PIC and SGI's Visual Workstation Cobalt
 * (IO-)APIC. (IO-APICs are assumed to be messaging to Pentium
 * local-APICs.)
 *
 * The code is designed to be easily extended with new/different
 * interrupt controllers, without having to do assembly magic.
 */

/*
 * Micro-access to controllers is serialized over the whole system.
 * We never hold this lock when we call the actual IRQ handler.
 */
spinlock_t irq_controller_lock;

/*
 * Dummy controller type for unused interrupts
 */
static void do_none(unsigned int irq, struct pt_regs * regs)
{
	/*
	 * We are careful: while for ISA irqs spurious interrupts
	 * commonly happen outside of any driver (think autodetection),
	 * this is not at all nice for PCI interrupts. So we are
	 * stricter and print a warning when such spurious interrupts
	 * happen. Spurious interrupts can confuse other drivers if
	 * the PCI IRQ line is shared.
	 *
	 * Such spurious interrupts are either driver bugs, or
	 * sometimes hw (chipset) bugs.
	 */
	printk("unexpected IRQ vector %d on CPU#%d!\n", irq,
						smp_processor_id());

#ifdef __SMP__
	/*
	 * [currently unexpected vectors happen only on SMP and APIC.
	 *  If we want to have non-APIC and non-8259A controllers
	 *  in the future with unexpected vectors, this ack should
	 *  probably be made controller-specific.]
	 */
	ack_APIC_irq();
#endif
}
static void enable_none(unsigned int irq) { }
static void disable_none(unsigned int irq) { }

/* startup is the same as "enable", shutdown is the same as "disable" */
#define startup_none	enable_none
#define shutdown_none	disable_none

struct hw_interrupt_type no_irq_type = {
	"none",
	startup_none,
	shutdown_none,
	do_none,
	enable_none,
	disable_none
};

/*
 * This is the 'legacy' 8259A Programmable Interrupt Controller,
 * present in the majority of PC/AT boxes.
 */

static void do_8259A_IRQ(unsigned int irq, struct pt_regs * regs);
static void enable_8259A_irq(unsigned int irq);
void disable_8259A_irq(unsigned int irq);

/* startup is the same as "enable", shutdown is the same as "disable" */
#define startup_8259A_irq	enable_8259A_irq
#define shutdown_8259A_irq	disable_8259A_irq

static struct hw_interrupt_type i8259A_irq_type = {
	"XT-PIC",
	startup_8259A_irq,
	shutdown_8259A_irq,
	do_8259A_IRQ,
	enable_8259A_irq,
	disable_8259A_irq
};

/*
 * Controller mappings for all interrupt sources:
 */
irq_desc_t irq_desc[NR_IRQS] = {
	[0 ... NR_IRQS-1] = { 0, &no_irq_type, }
};


/*
 * 8259A PIC functions to handle ISA devices:
 */

/*
 * This contains the irq mask for both 8259A irq controllers.
 */
static unsigned int cached_irq_mask = 0xffff;

#define __byte(x,y)	(((unsigned char *)&(y))[x])
#define __word(x,y)	(((unsigned short *)&(y))[x])
#define __long(x,y)	(((unsigned int *)&(y))[x])

#define cached_21	(__byte(0,cached_irq_mask))
#define cached_A1	(__byte(1,cached_irq_mask))

/*
 * Not all IRQs can be routed through the IO-APIC, e.g. on certain
 * (older) boards the timer interrupt is not connected to any IO-APIC
 * pin, it's fed to the CPU IRQ line directly.
 *
 * Any '1' bit in this mask means the IRQ is routed through the
 * IO-APIC. This 'mixed mode' IRQ handling costs nothing because it's
 * only used at IRQ setup time.
 */
unsigned long io_apic_irqs = 0;

/*
 * These have to be protected by the irq controller spinlock before
 * being called.
 */
void disable_8259A_irq(unsigned int irq)
{
	unsigned int mask = 1 << irq;
	cached_irq_mask |= mask;
	if (irq & 8) {
		outb(cached_A1,0xA1);
	} else {
		outb(cached_21,0x21);
	}
}

static void enable_8259A_irq(unsigned int irq)
{
	unsigned int mask = ~(1 << irq);
	cached_irq_mask &= mask;
	if (irq & 8) {
		outb(cached_A1,0xA1);
	} else {
		outb(cached_21,0x21);
	}
}

int i8259A_irq_pending(unsigned int irq)
{
	unsigned int mask = 1<<irq;

	if (irq < 8)
		return (inb(0x20) & mask);
	return (inb(0xA0) & (mask >> 8));
}

void make_8259A_irq(unsigned int irq)
{
	disable_irq(irq);
	__long(0,io_apic_irqs) &= ~(1<<irq);
	irq_desc[irq].handler = &i8259A_irq_type;
	enable_irq(irq);
}

/*
 * Careful! The 8259A is a fragile beast, it pretty much _has_ to be
 * done exactly like this (mask it first, _then_ send the EOI, and
 * the order of EOI to the two 8259s is important!)
 */
static inline void mask_and_ack_8259A(unsigned int irq)
{
	cached_irq_mask |= 1 << irq;
	if (irq & 8) {
		inb(0xA1);		/* DUMMY */
		outb(cached_A1,0xA1);
		outb(0x62,0x20);	/* Specific EOI to cascade */
		outb(0x20,0xA0);
	} else {
		inb(0x21);		/* DUMMY */
		outb(cached_21,0x21);
		outb(0x20,0x20);
	}
}

static void do_8259A_IRQ(unsigned int irq, struct pt_regs * regs)
{
	struct irqaction * action;
	irq_desc_t *desc = irq_desc + irq;

	spin_lock(&irq_controller_lock);
	{
		unsigned int status;
		mask_and_ack_8259A(irq);
		status = desc->status & ~IRQ_REPLAY;
		action = NULL;
		if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))
			action = desc->action;
		desc->status = status | IRQ_INPROGRESS;
	}
	spin_unlock(&irq_controller_lock);

	/* Exit early if we had no action or it was disabled */
	if (!action)
		return;

	handle_IRQ_event(irq, regs, action);

	spin_lock(&irq_controller_lock);
	{
		unsigned int status = desc->status & ~IRQ_INPROGRESS;
		desc->status = status;
		if (!(status & IRQ_DISABLED))
			enable_8259A_irq(irq);
	}
	spin_unlock(&irq_controller_lock);
}

/*
 * This builds up the IRQ handler stubs using some ugly macros in
 * irq.h.
 *
 * These macros create the low-level assembly IRQ routines that save
 * register context and call do_IRQ(). do_IRQ() then does all the
 * operations that are needed to keep the AT (or SMP IOAPIC)
 * interrupt-controller happy.
 */
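
/*
 * Roughly, each generated stub only pushes a biased irq number and
 * jumps to common code (a sketch of the expansion, not the literal
 * macro text -- see irq.h for the real thing):
 *
 *	IRQ0x01_interrupt:
 *		pushl $0x01-256		# biased irq number
 *		jmp common_interrupt	# save regs, call do_IRQ()
 *
 * The -256 bias is why do_IRQ() below recovers the irq number with
 * "regs.orig_eax & 0xff".
 */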

BUILD_COMMON_IRQ()

#define BI(x,y) \
	BUILD_IRQ(x##y)

#define BUILD_16_IRQS(x) \
	BI(x,0) BI(x,1) BI(x,2) BI(x,3) \
	BI(x,4) BI(x,5) BI(x,6) BI(x,7) \
	BI(x,8) BI(x,9) BI(x,a) BI(x,b) \
	BI(x,c) BI(x,d) BI(x,e) BI(x,f)

/*
 * ISA PIC or low IO-APIC triggered (INTA-cycle or APIC) interrupts:
 * (these are usually mapped to vectors 0x20-0x30)
 */
BUILD_16_IRQS(0x0)

#ifdef CONFIG_X86_IO_APIC
/*
 * The IO-APIC gives us many more interrupt sources. Most of these
 * are unused but an SMP system is supposed to have enough memory ...
 * sometimes (mostly wrt. hw bugs) we get corrupted vectors all
 * across the spectrum, so we really want to be prepared to get all
 * of these. Plus, more powerful systems might have more than 64
 * IO-APIC registers.
 *
 * (these are usually mapped into the 0x30-0xff vector range)
 */
BUILD_16_IRQS(0x1) BUILD_16_IRQS(0x2) BUILD_16_IRQS(0x3)
BUILD_16_IRQS(0x4) BUILD_16_IRQS(0x5) BUILD_16_IRQS(0x6)
BUILD_16_IRQS(0x7) BUILD_16_IRQS(0x8) BUILD_16_IRQS(0x9)
BUILD_16_IRQS(0xa) BUILD_16_IRQS(0xb) BUILD_16_IRQS(0xc)
BUILD_16_IRQS(0xd)
#endif

#undef BUILD_16_IRQS
#undef BI


#ifdef __SMP__
/*
 * The following vectors are part of the Linux architecture, there is
 * no hardware IRQ pin equivalent for them, they are triggered
 * through the ICC by us (IPIs)
 */
BUILD_SMP_INTERRUPT(reschedule_interrupt)
BUILD_SMP_INTERRUPT(invalidate_interrupt)
BUILD_SMP_INTERRUPT(stop_cpu_interrupt)
BUILD_SMP_INTERRUPT(mtrr_interrupt)
BUILD_SMP_INTERRUPT(spurious_interrupt)

/*
 * Every Pentium local APIC has two 'local interrupts', with a
 * soft-definable vector attached to both interrupts, one of which is
 * a timer interrupt, the other one is error counter overflow. Linux
 * uses the local APIC timer interrupt to get a much simpler SMP time
 * architecture:
 */
BUILD_SMP_TIMER_INTERRUPT(apic_timer_interrupt)

#endif

#define IRQ(x,y) \
	IRQ##x##y##_interrupt

#define IRQLIST_16(x) \
	IRQ(x,0), IRQ(x,1), IRQ(x,2), IRQ(x,3), \
	IRQ(x,4), IRQ(x,5), IRQ(x,6), IRQ(x,7), \
	IRQ(x,8), IRQ(x,9), IRQ(x,a), IRQ(x,b), \
	IRQ(x,c), IRQ(x,d), IRQ(x,e), IRQ(x,f)

static void (*interrupt[NR_IRQS])(void) = {
	IRQLIST_16(0x0),

#ifdef CONFIG_X86_IO_APIC
	IRQLIST_16(0x1), IRQLIST_16(0x2), IRQLIST_16(0x3),
	IRQLIST_16(0x4), IRQLIST_16(0x5), IRQLIST_16(0x6),
	IRQLIST_16(0x7), IRQLIST_16(0x8), IRQLIST_16(0x9),
	IRQLIST_16(0xa), IRQLIST_16(0xb), IRQLIST_16(0xc),
	IRQLIST_16(0xd)
#endif
};

#undef IRQ
#undef IRQLIST_16


/*
 * Special irq handlers.
 */

void no_action(int cpl, void *dev_id, struct pt_regs *regs)
{
}

#ifndef CONFIG_VISWS
/*
 * Note that on a 486, we don't want to do a SIGFPE on an irq13 as
 * the irq is unreliable, and exception 16 works correctly (i.e. as
 * explained in the Intel literature). On a 386, you can't use
 * exception 16 due to bad IBM design, so we have to rely on the less
 * exact irq13.
 *
 * Careful.. Not only is IRQ13 unreliable, it also leads to races.
 * The IBM designers who came up with it should be shot.
 */
static void math_error_irq(int cpl, void *dev_id, struct pt_regs *regs)
{
	outb(0,0xF0);
	if (ignore_irq13 || !boot_cpu_data.hard_math)
		return;
	math_error();
}

static struct irqaction irq13 =
	{ math_error_irq, 0, 0, "fpu", NULL, NULL };

/*
 * IRQ2 is cascade interrupt to second interrupt controller
 */
static struct irqaction irq2 =
	{ no_action, 0, 0, "cascade", NULL, NULL };
#endif

/*
 * Generic, controller-independent functions:
 */

int get_irq_list(char *buf)
{
	int i, j;
	struct irqaction * action;
	char *p = buf;

	p += sprintf(p, "           ");
	for (j=0; j<smp_num_cpus; j++)
		p += sprintf(p, "CPU%d       ",j);
	*p++ = '\n';

	for (i = 0 ; i < NR_IRQS ; i++) {
		action = irq_desc[i].action;
		if (!action)
			continue;
		p += sprintf(p, "%3d: ",i);
#ifndef __SMP__
		p += sprintf(p, "%10u ", kstat_irqs(i));
#else
		for (j=0; j<smp_num_cpus; j++)
			p += sprintf(p, "%10u ",
				kstat.irqs[cpu_logical_map(j)][i]);
#endif
		p += sprintf(p, " %14s", irq_desc[i].handler->typename);
		p += sprintf(p, "  %s", action->name);

		for (action=action->next; action; action = action->next) {
			p += sprintf(p, ", %s", action->name);
		}
		*p++ = '\n';
	}
	p += sprintf(p, "NMI: %10u\n", atomic_read(&nmi_counter));
#ifdef __SMP__
	p += sprintf(p, "ERR: %10lu\n", ipi_count);
#endif
	return p - buf;
}
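
/*
 * get_irq_list() is what generates /proc/interrupts. On a dual-CPU
 * box the output looks roughly like this (the counts and device
 * names here are made up for illustration):
 *
 *	           CPU0       CPU1
 *	  0:     276799     272180    IO-APIC-edge  timer
 *	  1:       2014       1923    IO-APIC-edge  keyboard
 *	  2:          0          0          XT-PIC  cascade
 *	 13:          1          0          XT-PIC  fpu
 *	NMI:          0
 *	ERR:          0
 */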

/*
 * Global interrupt locks for SMP. Allow interrupts to come in on any
 * CPU, yet make cli/sti act globally to protect critical regions..
 */
#ifdef __SMP__
unsigned char global_irq_holder = NO_PROC_ID;
unsigned volatile int global_irq_lock;
atomic_t global_irq_count;

atomic_t global_bh_count;
atomic_t global_bh_lock;

/*
 * "global_cli()" is a special case, in that it can hold the
 * interrupts disabled for a longish time, and also because we may be
 * doing TLB invalidates when holding the global IRQ lock for
 * historical reasons. Thus we may need to check SMP invalidate
 * events specially by hand here (but not in any normal spinlocks)
 */
static inline void check_smp_invalidate(int cpu)
{
	if (test_bit(cpu, &smp_invalidate_needed)) {
		clear_bit(cpu, &smp_invalidate_needed);
		local_flush_tlb();
	}
}

static void show(char * str)
{
	int i;
	unsigned long *stack;
	int cpu = smp_processor_id();
	extern char *get_options(char *str, int *ints);

	printk("\n%s, CPU %d:\n", str, cpu);
	printk("irq:  %d [%d %d]\n",
		atomic_read(&global_irq_count), local_irq_count[0],
		local_irq_count[1]);
	printk("bh:   %d [%d %d]\n",
		atomic_read(&global_bh_count), local_bh_count[0],
		local_bh_count[1]);
	stack = (unsigned long *) &stack;
	for (i = 40; i ; i--) {
		unsigned long x = *++stack;
		if (x > (unsigned long) &get_options &&
		    x < (unsigned long) &vsprintf) {
			printk("<[%08lx]> ", x);
		}
	}
}

#define MAXCOUNT 100000000

static inline void wait_on_bh(void)
{
	int count = MAXCOUNT;
	do {
		if (!--count) {
			show("wait_on_bh");
			count = ~0;
		}
		/* nothing .. wait for the other bh's to go away */
	} while (atomic_read(&global_bh_count) != 0);
}

/*
 * I had a lockup scenario where a tight loop doing
 * spin_unlock()/spin_lock() on CPU#1 was racing with spin_lock() on
 * CPU#0. CPU#0 should have noticed spin_unlock(), but apparently the
 * spin_unlock() information did not make it through to CPU#0 ...
 * nasty. Is this by design? Do we have to limit 'memory update
 * oscillation frequency' artificially like here?
 *
 * Such 'high frequency update' races can be avoided by careful
 * design, but some of our major constructs like spinlocks use
 * similar techniques, so it would be nice to clarify this issue. Set
 * this define to 0 if you want to check whether your system freezes.
 * I suspect the delay done by SYNC_OTHER_CORES() is in correlation
 * with 'snooping latency', but I thought that such things are
 * guaranteed by design, since we use the 'LOCK' prefix.
 */
#define SUSPECTED_CPU_OR_CHIPSET_BUG_WORKAROUND 1

#if SUSPECTED_CPU_OR_CHIPSET_BUG_WORKAROUND
# define SYNC_OTHER_CORES(x) udelay(x+1)
#else
/*
 * We have to allow irqs to arrive between __sti and __cli
 */
# define SYNC_OTHER_CORES(x) __asm__ __volatile__ ("nop")
#endif

static inline void wait_on_irq(int cpu)
{
	int count = MAXCOUNT;

	for (;;) {

		/*
		 * Wait until all interrupts are gone. Wait for bottom
		 * half handlers unless we're already executing in
		 * one..
		 */
		if (!atomic_read(&global_irq_count)) {
			if (local_bh_count[cpu] ||
			    !atomic_read(&global_bh_count))
				break;
		}

		/*
		 * Duh, we have to loop. Release the lock to avoid
		 * deadlocks
		 */
		clear_bit(0, &global_irq_lock);

		for (;;) {
			if (!--count) {
				show("wait_on_irq");
				count = ~0;
			}
			__sti();
			SYNC_OTHER_CORES(cpu);
			__cli();
			check_smp_invalidate(cpu);
			if (atomic_read(&global_irq_count))
				continue;
			if (global_irq_lock)
				continue;
			if (!local_bh_count[cpu] &&
			    atomic_read(&global_bh_count))
				continue;
			if (!test_and_set_bit(0,&global_irq_lock))
				break;
		}
	}
}

/*
 * This is called when we want to synchronize with bottom half
 * handlers. We need to wait until no other CPU is executing any
 * bottom half handler.
 *
 * Don't wait if we're already running in an interrupt context or are
 * inside a bh handler.
 */
void synchronize_bh(void)
{
	if (atomic_read(&global_bh_count) && !in_interrupt())
		wait_on_bh();
}

/*
 * This is called when we want to synchronize with interrupts. We may
 * for example tell a device to stop sending interrupts: but to make
 * sure there are no interrupts that are executing on another CPU we
 * need to call this function.
 */
void synchronize_irq(void)
{
	if (atomic_read(&global_irq_count)) {
		/* Stupid approach */
		cli();
		sti();
	}
}

static inline void get_irqlock(int cpu)
{
	if (test_and_set_bit(0,&global_irq_lock)) {
		/* do we already hold the lock? */
		if ((unsigned char) cpu == global_irq_holder)
			return;
		/* Uhhuh.. Somebody else got it. Wait.. */
		do {
			do {
				check_smp_invalidate(cpu);
			} while (test_bit(0,&global_irq_lock));
		} while (test_and_set_bit(0,&global_irq_lock));
	}
	/*
	 * We also need to make sure that nobody else is running in an
	 * interrupt context.
	 */
	wait_on_irq(cpu);

	/*
	 * Ok, finally..
	 */
	global_irq_holder = cpu;
}

#define EFLAGS_IF_SHIFT 9

/*
 * A global "cli()" while in an interrupt context turns into just a
 * local cli(). Interrupts should use spinlocks for the (very
 * unlikely) case that they ever want to protect against each other.
 *
 * If we already have local interrupts disabled, this will not turn a
 * local disable into a global one (problems with spinlocks: this
 * makes save_flags+cli+sti usable inside a spinlock).
 */
void __global_cli(void)
{
	unsigned int flags;

	__save_flags(flags);
	if (flags & (1 << EFLAGS_IF_SHIFT)) {
		int cpu = smp_processor_id();
		__cli();
		if (!local_irq_count[cpu])
			get_irqlock(cpu);
	}
}

void __global_sti(void)
{
	int cpu = smp_processor_id();

	if (!local_irq_count[cpu])
		release_irqlock(cpu);
	__sti();
}

/*
 * SMP flags value to restore to:
 * 0 - global cli
 * 1 - global sti
 * 2 - local cli
 * 3 - local sti
 */
unsigned long __global_save_flags(void)
{
	int retval;
	int local_enabled;
	unsigned long flags;

	__save_flags(flags);
	local_enabled = (flags >> EFLAGS_IF_SHIFT) & 1;
	/* default to local */
	retval = 2 + local_enabled;

	/* check for global flags if we're not in an interrupt */
	if (!local_irq_count[smp_processor_id()]) {
		if (local_enabled)
			retval = 1;
		if (global_irq_holder == (unsigned char) smp_processor_id())
			retval = 0;
	}
	return retval;
}

void __global_restore_flags(unsigned long flags)
{
	switch (flags) {
	case 0:
		__global_cli();
		break;
	case 1:
		__global_sti();
		break;
	case 2:
		__cli();
		break;
	case 3:
		__sti();
		break;
	default:
		printk("global_restore_flags: %08lx (%08lx)\n",
			flags, (&flags)[-1]);
	}
}
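
/*
 * The net effect: old-style driver code can keep using the classic
 * uniprocessor sequence (a minimal sketch of the usual pattern):
 *
 *	unsigned long flags;
 *
 *	save_flags(flags);
 *	cli();
 *	... touch data shared with an interrupt handler ...
 *	restore_flags(flags);
 *
 * On SMP kernels these map to the __global_* functions above, and
 * the 0-3 encoding lets restore_flags() pick the right way back.
 */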

#endif

/*
 * This should really return information about whether we should do
 * bottom half handling etc. Right now we end up _always_ checking
 * the bottom half, which is a waste of time and is not what some
 * drivers would prefer.
 */
int handle_IRQ_event(unsigned int irq, struct pt_regs * regs,
					struct irqaction * action)
{
	int status;
	int cpu = smp_processor_id();

	irq_enter(cpu, irq);

	status = 1;	/* Force the "do bottom halves" bit */

	if (!(action->flags & SA_INTERRUPT))
		__sti();

	do {
		status |= action->flags;
		action->handler(irq, action->dev_id, regs);
		action = action->next;
	} while (action);
	if (status & SA_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);
	__cli();

	irq_exit(cpu, irq);

	return status;
}

/*
 * Generic enable/disable code: this just calls down into the
 * PIC-specific version for the actual hardware disable after having
 * gotten the irq controller lock.
 */
void disable_irq(unsigned int irq)
{
	unsigned long flags;

	spin_lock_irqsave(&irq_controller_lock, flags);
	if (!irq_desc[irq].depth++) {
		irq_desc[irq].status |= IRQ_DISABLED;
		irq_desc[irq].handler->disable(irq);
	}
	spin_unlock_irqrestore(&irq_controller_lock, flags);

	if (irq_desc[irq].status & IRQ_INPROGRESS)
		synchronize_irq();
}

void enable_irq(unsigned int irq)
{
	unsigned long flags;

	spin_lock_irqsave(&irq_controller_lock, flags);
	switch (irq_desc[irq].depth) {
	case 1:
		irq_desc[irq].status &= ~(IRQ_DISABLED | IRQ_INPROGRESS);
		irq_desc[irq].handler->enable(irq);
		/* fall through */
	default:
		irq_desc[irq].depth--;
		break;
	case 0:
		printk("enable_irq() unbalanced from %p\n",
					__builtin_return_address(0));
	}
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}
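
/*
 * The depth counter above makes disable/enable nest properly. For
 * illustration, a paired sequence on some irq n behaves like this:
 *
 *	disable_irq(n);		depth 0 -> 1, line masked in hardware
 *	disable_irq(n);		depth 1 -> 2, no hardware access
 *	enable_irq(n);		depth 2 -> 1, still masked
 *	enable_irq(n);		depth 1 -> 0, line unmasked again
 *
 * One more unmatched enable_irq(n) would hit the "unbalanced"
 * printk above.
 */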

/*
 * do_IRQ handles all normal device IRQs (the special SMP cross-CPU
 * interrupts have their own specific handlers).
 */
asmlinkage void do_IRQ(struct pt_regs regs)
{
	/*
	 * We ack quickly, we don't want the irq controller thinking
	 * we're snobs just because some other CPU has disabled global
	 * interrupts (we have already done the INT_ACK cycles, it's
	 * too late to try to pretend to the controller that we aren't
	 * taking the interrupt).
	 *
	 * 0 return value means that this irq is already being handled
	 * by some other CPU. (or is disabled)
	 */
	int irq = regs.orig_eax & 0xff;	/* subtle, see irq.h */
	int cpu = smp_processor_id();

	kstat.irqs[cpu][irq]++;
	irq_desc[irq].handler->handle(irq, &regs);

	/*
	 * This should be conditional: we should really get a return
	 * code from the irq handler to tell us whether the handler
	 * wants us to do software bottom half handling or not..
	 */
	if (1) {
		if (bh_active & bh_mask)
			do_bottom_half();
	}
}

int setup_x86_irq(unsigned int irq, struct irqaction * new)
{
	int shared = 0;
	struct irqaction *old, **p;
	unsigned long flags;

	/*
	 * Some drivers like serial.c use request_irq() heavily, so we
	 * have to be careful not to interfere with a running system.
	 */
	if (new->flags & SA_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, we want to call it first,
		 * outside of the atomic block. Yes, this might clear
		 * the entropy pool if the wrong driver is attempted
		 * to be loaded, without actually installing a new
		 * handler, but is this really a problem? Only the
		 * sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	/*
	 * The following block of code has to be executed atomically
	 */
	spin_lock_irqsave(&irq_controller_lock,flags);
	p = &irq_desc[irq].action;
	if ((old = *p) != NULL) {
		/* Can't share interrupts unless both agree to */
		if (!(old->flags & new->flags & SA_SHIRQ)) {
			spin_unlock_irqrestore(&irq_controller_lock,flags);
			return -EBUSY;
		}

		/* add new interrupt at end of irq queue */
		do {
			p = &old->next;
			old = *p;
		} while (old);
		shared = 1;
	}

	*p = new;

	if (!shared) {
		irq_desc[irq].depth = 0;
		irq_desc[irq].status &= ~(IRQ_DISABLED | IRQ_INPROGRESS);
		irq_desc[irq].handler->startup(irq);
	}
	spin_unlock_irqrestore(&irq_controller_lock,flags);
	return 0;
}

int request_irq(unsigned int irq,
		void (*handler)(int, void *, struct pt_regs *),
		unsigned long irqflags,
		const char * devname,
		void *dev_id)
{
	int retval;
	struct irqaction * action;

	if (irq >= NR_IRQS)
		return -EINVAL;
	if (!handler)
		return -EINVAL;

	action = (struct irqaction *)
			kmalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = irqflags;
	action->mask = 0;
	action->name = devname;
	action->next = NULL;
	action->dev_id = dev_id;

	retval = setup_x86_irq(irq, action);

	if (retval)
		kfree(action);
	return retval;
}
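
/*
 * Typical use from a driver (a minimal sketch; the handler name, irq
 * number and device pointer are made-up illustrations):
 *
 *	static void my_intr(int irq, void *dev_id, struct pt_regs *regs)
 *	{
 *		struct my_dev *dev = dev_id;
 *		... acknowledge the board, do the urgent work ...
 *	}
 *
 *	if (request_irq(11, my_intr, SA_SHIRQ, "mydev", dev))
 *		goto no_irq;
 *	...
 *	free_irq(11, dev);
 *
 * On a shared (SA_SHIRQ) line, dev_id is what lets free_irq() below
 * pick this driver's action out of the chain.
 */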

void free_irq(unsigned int irq, void *dev_id)
{
	struct irqaction * action, **p;
	unsigned long flags;

	if (irq >= NR_IRQS)
		return;

	spin_lock_irqsave(&irq_controller_lock,flags);
	for (p = &irq_desc[irq].action; (action = *p) != NULL;
						p = &action->next) {
		if (action->dev_id != dev_id)
			continue;

		/* Found it - now free it */
		*p = action->next;
		kfree(action);
		if (!irq_desc[irq].action) {
			irq_desc[irq].status |= IRQ_DISABLED;
			irq_desc[irq].handler->shutdown(irq);
		}
		goto out;
	}
	printk("Trying to free free IRQ%d\n",irq);
out:
	spin_unlock_irqrestore(&irq_controller_lock,flags);
}

/*
 * IRQ autodetection code..
 *
 * This depends on the fact that any interrupt that comes in on to an
 * unassigned handler will get stuck with "IRQ_INPROGRESS" asserted
 * and the interrupt disabled.
 */
unsigned long probe_irq_on(void)
{
	unsigned int i;
	unsigned long delay;

	/*
	 * first, enable any unassigned irqs
	 */
	spin_lock_irq(&irq_controller_lock);
	for (i = NR_IRQS-1; i > 0; i--) {
		if (!irq_desc[i].action) {
			unsigned int status =
				irq_desc[i].status | IRQ_AUTODETECT;
			irq_desc[i].status = status & ~IRQ_INPROGRESS;
			irq_desc[i].handler->startup(i);
		}
	}
	spin_unlock_irq(&irq_controller_lock);

	/*
	 * Wait for spurious interrupts to trigger
	 */
	for (delay = jiffies + HZ/10; time_after(delay, jiffies); )
		/* about 100ms delay */ synchronize_irq();

	/*
	 * Now filter out any obviously spurious interrupts
	 */
	spin_lock_irq(&irq_controller_lock);
	for (i=0; i<NR_IRQS; i++) {
		unsigned int status = irq_desc[i].status;

		if (!(status & IRQ_AUTODETECT))
			continue;

		/* It triggered already - consider it spurious. */
		if (status & IRQ_INPROGRESS) {
			irq_desc[i].status = status & ~IRQ_AUTODETECT;
			irq_desc[i].handler->shutdown(i);
		}
	}
	spin_unlock_irq(&irq_controller_lock);

	return 0x12345678;
}

int probe_irq_off(unsigned long unused)
{
	int i, irq_found, nr_irqs;

	if (unused != 0x12345678)
		printk("Bad IRQ probe from %lx\n", (&unused)[-1]);

	nr_irqs = 0;
	irq_found = 0;
	spin_lock_irq(&irq_controller_lock);
	for (i=0; i<NR_IRQS; i++) {
		unsigned int status = irq_desc[i].status;

		if (!(status & IRQ_AUTODETECT))
			continue;

		if (status & IRQ_INPROGRESS) {
			if (!nr_irqs)
				irq_found = i;
			nr_irqs++;
		}
		irq_desc[i].status = status & ~IRQ_AUTODETECT;
		irq_desc[i].handler->shutdown(i);
	}
	spin_unlock_irq(&irq_controller_lock);

	if (nr_irqs > 1)
		irq_found = -irq_found;
	return irq_found;
}
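
/*
 * The classic probing sequence in a driver looks roughly like this
 * (a sketch; "poke the card" stands for whatever device-specific
 * action makes it raise its interrupt line):
 *
 *	unsigned long cookie = probe_irq_on();
 *	... poke the card so it asserts its IRQ ...
 *	irq = probe_irq_off(cookie);
 *
 * A positive result is the probed irq, 0 means nothing triggered,
 * and a negative value means several lines fired (minus the lowest
 * one found).
 */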

void init_ISA_irqs (void)
{
	int i;

	for (i = 0; i < NR_IRQS; i++) {
		irq_desc[i].status = IRQ_DISABLED;
		irq_desc[i].action = 0;
		irq_desc[i].depth = 0;

		if (i < 16) {
			/*
			 * 16 old-style INTA-cycle interrupts:
			 */
			irq_desc[i].handler = &i8259A_irq_type;
		} else {
			/*
			 * 'high' PCI IRQs filled in on demand
			 */
			irq_desc[i].handler = &no_irq_type;
		}
	}
}

__initfunc(void init_IRQ(void))
{
	int i;

#ifndef CONFIG_X86_VISWS_APIC
	init_ISA_irqs();
#else
	init_VISWS_APIC_irqs();
#endif
	/*
	 * Cover the whole vector space, no vector can escape us. (some
	 * of these will be overridden and become 'special' SMP
	 * interrupts)
	 */
	for (i = 0; i < NR_IRQS; i++) {
		int vector = FIRST_EXTERNAL_VECTOR + i;
		if (vector != SYSCALL_VECTOR)
			set_intr_gate(vector, interrupt[i]);
	}

#ifdef __SMP__

	/*
	 * IRQ0 must be given a fixed assignment and initialized
	 * before init_IRQ_SMP.
	 */
	set_intr_gate(IRQ0_TRAP_VECTOR, interrupt[0]);

	/*
	 * The reschedule interrupt is a CPU-to-CPU reschedule-helper
	 * IPI, driven by wakeup.
	 */
	set_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);

	/* IPI for invalidation */
	set_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt);

	/* IPI for CPU halt */
	set_intr_gate(STOP_CPU_VECTOR, stop_cpu_interrupt);

	/* self generated IPI for local APIC timer */
	set_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);

	/* IPI for MTRR control */
	set_intr_gate(MTRR_CHANGE_VECTOR, mtrr_interrupt);

	/* IPI vector for APIC spurious interrupts */
	set_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
#endif
	request_region(0x20,0x20,"pic1");
	request_region(0xa0,0x20,"pic2");

	/*
	 * Set the clock to 100 Hz, we already have a valid vector
	 * now:
	 */
	outb_p(0x34,0x43);		/* binary, mode 2, LSB/MSB, ch 0 */
	outb_p(LATCH & 0xff , 0x40);	/* LSB */
	outb(LATCH >> 8 , 0x40);	/* MSB */
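
	/*
	 * For reference: LATCH (from <linux/timex.h>) is the 8254's
	 * 1193180 Hz input clock divided by HZ, rounded:
	 * (1193180 + HZ/2) / HZ. With HZ=100 that is 11932, i.e. one
	 * timer tick every ~10 ms -- the 100 Hz clock set up above.
	 */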

#ifndef CONFIG_VISWS
	setup_x86_irq(2, &irq2);
	setup_x86_irq(13, &irq13);
#endif
}

#ifdef CONFIG_X86_IO_APIC
__initfunc(void init_IRQ_SMP(void))
{
	int i;
	for (i = 0; i < NR_IRQS ; i++)
		if (IO_APIC_VECTOR(i) > 0)
			set_intr_gate(IO_APIC_VECTOR(i), interrupt[i]);
}
#endif