
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mattst88/alpha

Pull alpha updates from Matt Turner:
 "It contains a few fixes and some work from Richard to make alpha
  emulation under QEMU much more usable"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mattst88/alpha:
  alpha: Prevent a NULL ptr dereference in csum_partial_copy.
  alpha: perf: fix out-of-bounds array access triggered from raw event
  alpha: Use qemu+cserve provided high-res clock and alarm.
  alpha: Switch to GENERIC_CLOCKEVENTS
  alpha: Enable the rpcc clocksource for single processor
  alpha: Reorganize rtc handling
  alpha: Primitive support for CPU power down.
  alpha: Allow HZ to be configured
  alpha: Notice if we're being run under QEMU
  alpha: Eliminate compiler warning from memset macro
Linus Torvalds, 11 years ago
commit d5bdaf4f68

+ 73 - 3
arch/alpha/Kconfig

@@ -16,8 +16,8 @@ config ALPHA
 	select ARCH_WANT_IPC_PARSE_VERSION
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
+	select GENERIC_CLOCKEVENTS
 	select GENERIC_SMP_IDLE_THREAD
-	select GENERIC_CMOS_UPDATE
 	select GENERIC_STRNCPY_FROM_USER
 	select GENERIC_STRNLEN_USER
 	select HAVE_MOD_ARCH_SPECIFIC
@@ -488,6 +488,20 @@ config VGA_HOSE
 	  which always have multiple hoses, and whose consoles support it.
 
 
 
 
+config ALPHA_QEMU
+	bool "Run under QEMU emulation"
+	depends on !ALPHA_GENERIC
+	---help---
+	  Assume the presence of special features supported by QEMU PALcode
+	  that reduce the overhead of system emulation.
+
+	  Generic kernels will auto-detect QEMU.  But when building a
+	  system-specific kernel, the assumption is that we want to
+	  eliminate as many runtime tests as possible.
+
+	  If unsure, say N.
+
+
 config ALPHA_SRM
 	bool "Use SRM as bootloader" if ALPHA_CABRIOLET || ALPHA_AVANTI_CH || ALPHA_EB64P || ALPHA_PC164 || ALPHA_TAKARA || ALPHA_EB164 || ALPHA_ALCOR || ALPHA_MIATA || ALPHA_LX164 || ALPHA_SX164 || ALPHA_NAUTILUS || ALPHA_NONAME
 	depends on TTY
@@ -572,6 +586,30 @@ config NUMA
 	  Access).  This option is for configuring high-end multiprocessor
 	  server machines.  If in doubt, say N.
 
 
+config ALPHA_WTINT
+	bool "Use WTINT" if ALPHA_SRM || ALPHA_GENERIC
+	default y if ALPHA_QEMU
+	default n if ALPHA_EV5 || ALPHA_EV56 || (ALPHA_EV4 && !ALPHA_LCA)
+	default n if !ALPHA_SRM && !ALPHA_GENERIC
+	default y if SMP
+	---help---
+	  The Wait for Interrupt (WTINT) PALcall attempts to put the CPU
+	  to sleep until the next interrupt.  This may reduce the power
+	  consumed, and the heat produced by the computer.  However, it has
+	  the side effect of making the cycle counter unreliable as a timing
+	  device across the sleep.
+
+	  For emulation under QEMU, definitely say Y here, as we have other
+	  mechanisms for measuring time than the cycle counter.
+
+	  For EV4 (but not LCA), EV5 and EV56 systems, or for systems running
+	  MILO, sleep mode is not supported so you might as well say N here.
+
+	  For SMP systems we cannot use the cycle counter for timing anyway,
+	  so you might as well say Y here.
+
+	  If unsure, say N.
+
 config NODES_SHIFT
 	int
 	default "7"
@@ -613,9 +651,41 @@ config VERBOSE_MCHECK_ON
 
 
 	  Take the default (1) unless you want more control or more info.
 
 
+choice
+	prompt "Timer interrupt frequency (HZ)?"
+	default HZ_128 if ALPHA_QEMU
+	default HZ_1200 if ALPHA_RAWHIDE
+	default HZ_1024
+	---help---
+	  The frequency at which timer interrupts occur.  A high frequency
+	  minimizes latency, whereas a low frequency minimizes overhead of
+	  process accounting.  The latter effect is especially significant
+	  when being run under QEMU.
+
+	  Note that some Alpha hardware cannot change the interrupt frequency
+	  of the timer.  If unsure, say 1024 (or 1200 for Rawhide).
+
+	config HZ_32
+		bool "32 Hz"
+	config HZ_64
+		bool "64 Hz"
+	config HZ_128
+		bool "128 Hz"
+	config HZ_256
+		bool "256 Hz"
+	config HZ_1024
+		bool "1024 Hz"
+	config HZ_1200
+		bool "1200 Hz"
+endchoice
+
 config HZ
-	int
-	default 1200 if ALPHA_RAWHIDE
+	int 
+	default 32 if HZ_32
+	default 64 if HZ_64
+	default 128 if HZ_128
+	default 256 if HZ_256
+	default 1200 if HZ_1200
 	default 1024
 
 
 source "drivers/pci/Kconfig"
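
For a concrete feel for the new HZ choice: for 32-256 Hz the patch later derives the MC146818 rate-select value as RTC_REF_CLCK_32KHZ + __builtin_ffs(32768 / CONFIG_HZ) (see common_init_rtc() in arch/alpha/kernel/time.c below), while 1024/1200 Hz keep the historical 0x26 setting. The following standalone sketch is not part of the patch; it only tabulates that mapping, with ffs() from <strings.h> standing in for __builtin_ffs and RTC_REF_CLCK_32KHZ (0x20) taken from <linux/mc146818rtc.h>.

#include <stdio.h>
#include <strings.h>

#define RTC_REF_CLCK_32KHZ	0x20	/* divider bits, as in <linux/mc146818rtc.h> */

int main(void)
{
	int hz[] = { 32, 64, 128, 256 };

	for (int i = 0; i < 4; i++) {
		int sel = RTC_REF_CLCK_32KHZ + ffs(32768 / hz[i]);
		/* MC146818 periodic rate is 65536 >> rate-select, so the
		   requested HZ is reproduced exactly.  */
		printf("HZ=%4d -> RTC_FREQ_SELECT=0x%02x -> %d Hz\n",
		       hz[i], sel, 65536 >> (sel & 0x0f));
	}
	return 0;
}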

+ 13 - 9
arch/alpha/include/asm/machvec.h

@@ -33,6 +33,7 @@ struct alpha_machine_vector
 
 
 	int nr_irqs;
 	int rtc_port;
+	int rtc_boot_cpu_only;
 	unsigned int max_asn;
 	unsigned long max_isa_dma_address;
 	unsigned long irq_probe_mask;
@@ -95,9 +96,6 @@ struct alpha_machine_vector
 
 
 	struct _alpha_agp_info *(*agp_info)(void);
 
 
-	unsigned int (*rtc_get_time)(struct rtc_time *);
-	int (*rtc_set_time)(struct rtc_time *);
-
 	const char *vector_name;
 
 
 	/* NUMA information */
@@ -126,13 +124,19 @@ extern struct alpha_machine_vector alpha_mv;
 
 
 #ifdef CONFIG_ALPHA_GENERIC
 extern int alpha_using_srm;
+extern int alpha_using_qemu;
 #else
-#ifdef CONFIG_ALPHA_SRM
-#define alpha_using_srm 1
-#else
-#define alpha_using_srm 0
-#endif
+# ifdef CONFIG_ALPHA_SRM
+#  define alpha_using_srm 1
+# else
+#  define alpha_using_srm 0
+# endif
+# ifdef CONFIG_ALPHA_QEMU
+#  define alpha_using_qemu 1
+# else
+#  define alpha_using_qemu 0
+# endif
 #endif /* GENERIC */
 
 
-#endif
+#endif /* __KERNEL__ */
 #endif /* __ALPHA_MACHVEC_H */

+ 71 - 0
arch/alpha/include/asm/pal.h

@@ -89,6 +89,7 @@ __CALL_PAL_W1(wrmces, unsigned long);
 __CALL_PAL_RW2(wrperfmon, unsigned long, unsigned long, unsigned long);
 __CALL_PAL_W1(wrusp, unsigned long);
 __CALL_PAL_W1(wrvptptr, unsigned long);
+__CALL_PAL_RW1(wtint, unsigned long, unsigned long);
 
 
 /*
  * TB routines..
@@ -111,5 +112,75 @@ __CALL_PAL_W1(wrvptptr, unsigned long);
 #define tbiap()		__tbi(-1, /* no second argument */)
 #define tbia()		__tbi(-2, /* no second argument */)
 
 
+/*
+ * QEMU Cserv routines..
+ */
+
+static inline unsigned long
+qemu_get_walltime(void)
+{
+	register unsigned long v0 __asm__("$0");
+	register unsigned long a0 __asm__("$16") = 3;
+
+	asm("call_pal %2 # cserve get_time"
+	    : "=r"(v0), "+r"(a0)
+	    : "i"(PAL_cserve)
+	    : "$17", "$18", "$19", "$20", "$21");
+
+	return v0;
+}
+
+static inline unsigned long
+qemu_get_alarm(void)
+{
+	register unsigned long v0 __asm__("$0");
+	register unsigned long a0 __asm__("$16") = 4;
+
+	asm("call_pal %2 # cserve get_alarm"
+	    : "=r"(v0), "+r"(a0)
+	    : "i"(PAL_cserve)
+	    : "$17", "$18", "$19", "$20", "$21");
+
+	return v0;
+}
+
+static inline void
+qemu_set_alarm_rel(unsigned long expire)
+{
+	register unsigned long a0 __asm__("$16") = 5;
+	register unsigned long a1 __asm__("$17") = expire;
+
+	asm volatile("call_pal %2 # cserve set_alarm_rel"
+		     : "+r"(a0), "+r"(a1)
+		     : "i"(PAL_cserve)
+		     : "$0", "$18", "$19", "$20", "$21");
+}
+
+static inline void
+qemu_set_alarm_abs(unsigned long expire)
+{
+	register unsigned long a0 __asm__("$16") = 6;
+	register unsigned long a1 __asm__("$17") = expire;
+
+	asm volatile("call_pal %2 # cserve set_alarm_abs"
+		     : "+r"(a0), "+r"(a1)
+		     : "i"(PAL_cserve)
+		     : "$0", "$18", "$19", "$20", "$21");
+}
+
+static inline unsigned long
+qemu_get_vmtime(void)
+{
+	register unsigned long v0 __asm__("$0");
+	register unsigned long a0 __asm__("$16") = 7;
+
+	asm("call_pal %2 # cserve get_time"
+	    : "=r"(v0), "+r"(a0)
+	    : "i"(PAL_cserve)
+	    : "$17", "$18", "$19", "$20", "$21");
+
+	return v0;
+}
+
 #endif /* !__ASSEMBLY__ */
 #endif /* __ALPHA_PAL_H */

+ 0 - 11
arch/alpha/include/asm/rtc.h

@@ -1,12 +1 @@
-#ifndef _ALPHA_RTC_H
-#define _ALPHA_RTC_H
-
-#if defined(CONFIG_ALPHA_MARVEL) && defined(CONFIG_SMP) \
- || defined(CONFIG_ALPHA_GENERIC)
-# define get_rtc_time		alpha_mv.rtc_get_time
-# define set_rtc_time		alpha_mv.rtc_set_time
-#endif
-
 #include <asm-generic/rtc.h>
-
-#endif

+ 18 - 6
arch/alpha/include/asm/string.h

@@ -22,15 +22,27 @@ extern void * __memcpy(void *, const void *, size_t);
 
 
 #define __HAVE_ARCH_MEMSET
 extern void * __constant_c_memset(void *, unsigned long, size_t);
+extern void * ___memset(void *, int, size_t);
 extern void * __memset(void *, int, size_t);
 extern void * memset(void *, int, size_t);
 
 
-#define memset(s, c, n)							    \
-(__builtin_constant_p(c)						    \
- ? (__builtin_constant_p(n) && (c) == 0					    \
-    ? __builtin_memset((s),0,(n)) 					    \
-    : __constant_c_memset((s),0x0101010101010101UL*(unsigned char)(c),(n))) \
- : __memset((s),(c),(n)))
+/* For gcc 3.x, we cannot have the inline function named "memset" because
+   the __builtin_memset will attempt to resolve to the inline as well,
+   leading to a "sorry" about unimplemented recursive inlining.  */
+extern inline void *__memset(void *s, int c, size_t n)
+{
+	if (__builtin_constant_p(c)) {
+		if (__builtin_constant_p(n)) {
+			return __builtin_memset(s, c, n);
+		} else {
+			unsigned long c8 = (c & 0xff) * 0x0101010101010101UL;
+			return __constant_c_memset(s, c8, n);
+		}
+	}
+	return ___memset(s, c, n);
+}
+
+#define memset __memset
 
 
 #define __HAVE_ARCH_STRCPY
 extern char * strcpy(char *,const char *);
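
What the new inline buys in practice (illustrative only; 'fill' and 'runtime_len' are hypothetical names, not identifiers from the patch): a call site now resolves to one of three implementations depending on what the compiler can prove constant.

	extern size_t runtime_len;	/* hypothetical: not a compile-time constant */
	extern int fill;		/* hypothetical: not a compile-time constant */
	char buf[64];

	memset(buf, 0, sizeof(buf));    /* c and n constant: __builtin_memset        */
	memset(buf, 0, runtime_len);    /* c constant, n not: __constant_c_memset    */
	memset(buf, fill, runtime_len); /* nothing constant:  ___memset, out of line */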

+ 1 - 0
arch/alpha/include/uapi/asm/pal.h

@@ -46,6 +46,7 @@
 #define PAL_rdusp	58
 #define PAL_whami	60
 #define PAL_retsys	61
+#define PAL_wtint	62
 #define PAL_rti		63
 
 
 
 

+ 1 - 0
arch/alpha/kernel/Makefile

@@ -16,6 +16,7 @@ obj-$(CONFIG_PCI)	+= pci.o pci_iommu.o pci-sysfs.o
 obj-$(CONFIG_SRM_ENV)	+= srm_env.o
 obj-$(CONFIG_MODULES)	+= module.o
 obj-$(CONFIG_PERF_EVENTS) += perf_event.o
+obj-$(CONFIG_RTC_DRV_ALPHA) += rtc.o
 
 
 ifdef CONFIG_ALPHA_GENERIC
 
 

+ 1 - 0
arch/alpha/kernel/alpha_ksyms.c

@@ -40,6 +40,7 @@ EXPORT_SYMBOL(strrchr);
 EXPORT_SYMBOL(memmove);
 EXPORT_SYMBOL(__memcpy);
 EXPORT_SYMBOL(__memset);
+EXPORT_SYMBOL(___memset);
 EXPORT_SYMBOL(__memsetw);
 EXPORT_SYMBOL(__constant_c_memset);
 EXPORT_SYMBOL(copy_page);

+ 1 - 15
arch/alpha/kernel/irq_alpha.c

@@ -66,21 +66,7 @@ do_entInt(unsigned long type, unsigned long vector,
 		break;
 	case 1:
 		old_regs = set_irq_regs(regs);
-#ifdef CONFIG_SMP
-	  {
-		long cpu;
-
-		smp_percpu_timer_interrupt(regs);
-		cpu = smp_processor_id();
-		if (cpu != boot_cpuid) {
-		        kstat_incr_irqs_this_cpu(RTC_IRQ, irq_to_desc(RTC_IRQ));
-		} else {
-			handle_irq(RTC_IRQ);
-		}
-	  }
-#else
 		handle_irq(RTC_IRQ);
-#endif
 		set_irq_regs(old_regs);
 		return;
 	case 2:
@@ -228,7 +214,7 @@ process_mcheck_info(unsigned long vector, unsigned long la_ptr,
  */
 
 
 struct irqaction timer_irqaction = {
-	.handler	= timer_interrupt,
+	.handler	= rtc_timer_interrupt,
 	.name		= "timer",
 };
 
 

+ 1 - 4
arch/alpha/kernel/machvec_impl.h

@@ -43,10 +43,7 @@
 #define CAT1(x,y)  x##y
 #define CAT(x,y)   CAT1(x,y)
 
 
-#define DO_DEFAULT_RTC \
-	.rtc_port = 0x70, \
-	.rtc_get_time = common_get_rtc_time, \
-	.rtc_set_time = common_set_rtc_time
+#define DO_DEFAULT_RTC			.rtc_port = 0x70
 
 
 #define DO_EV4_MMU							\
 	.max_asn =			EV4_MAX_ASN,			\

+ 13 - 2
arch/alpha/kernel/perf_event.c

@@ -83,6 +83,8 @@ struct alpha_pmu_t {
 	long pmc_left[3];
 	 /* Subroutine for allocation of PMCs.  Enforces constraints. */
 	int (*check_constraints)(struct perf_event **, unsigned long *, int);
+	/* Subroutine for checking validity of a raw event for this PMU. */
+	int (*raw_event_valid)(u64 config);
 };
 
 
 /*
@@ -203,6 +205,12 @@ success:
 }
 
 
 
 
+static int ev67_raw_event_valid(u64 config)
+{
+	return config >= EV67_CYCLES && config < EV67_LAST_ET;
+};
+
+
 static const struct alpha_pmu_t ev67_pmu = {
 	.event_map = ev67_perfmon_event_map,
 	.max_events = ARRAY_SIZE(ev67_perfmon_event_map),
@@ -211,7 +219,8 @@ static const struct alpha_pmu_t ev67_pmu = {
 	.pmc_count_mask = {EV67_PCTR_0_COUNT_MASK,  EV67_PCTR_1_COUNT_MASK,  0},
 	.pmc_max_period = {(1UL<<20) - 1, (1UL<<20) - 1, 0},
 	.pmc_left = {16, 4, 0},
-	.check_constraints = ev67_check_constraints
+	.check_constraints = ev67_check_constraints,
+	.raw_event_valid = ev67_raw_event_valid,
 };
 
 
 
 
@@ -609,7 +618,9 @@ static int __hw_perf_event_init(struct perf_event *event)
 	} else if (attr->type == PERF_TYPE_HW_CACHE) {
 		return -EOPNOTSUPP;
 	} else if (attr->type == PERF_TYPE_RAW) {
-		ev = attr->config & 0xff;
+		if (!alpha_pmu->raw_event_valid(attr->config))
+			return -EINVAL;
+		ev = attr->config;
 	} else {
 		return -EOPNOTSUPP;
 	}

+ 17 - 0
arch/alpha/kernel/process.c

@@ -46,6 +46,23 @@
 void (*pm_power_off)(void) = machine_power_off;
 EXPORT_SYMBOL(pm_power_off);
 
 
+#ifdef CONFIG_ALPHA_WTINT
+/*
+ * Sleep the CPU.
+ * EV6, LCA45 and QEMU know how to power down, skipping N timer interrupts.
+ */
+void arch_cpu_idle(void)
+{
+	wtint(0);
+	local_irq_enable();
+}
+
+void arch_cpu_idle_dead(void)
+{
+	wtint(INT_MAX);
+}
+#endif /* ALPHA_WTINT */
+
 struct halt_info {
 	int mode;
 	char *restart_cmd;

+ 2 - 4
arch/alpha/kernel/proto.h

@@ -135,17 +135,15 @@ extern void unregister_srm_console(void);
 /* smp.c */
 extern void setup_smp(void);
 extern void handle_ipi(struct pt_regs *);
-extern void smp_percpu_timer_interrupt(struct pt_regs *);
 
 
 /* bios32.c */
 /* extern void reset_for_srm(void); */
 
 
 /* time.c */
-extern irqreturn_t timer_interrupt(int irq, void *dev);
+extern irqreturn_t rtc_timer_interrupt(int irq, void *dev);
+extern void init_clockevent(void);
 extern void common_init_rtc(void);
 extern unsigned long est_cycle_freq;
-extern unsigned int common_get_rtc_time(struct rtc_time *time);
-extern int common_set_rtc_time(struct rtc_time *time);
 
 
 /* smc37c93x.c */
 extern void SMC93x_Init(void);

+ 323 - 0
arch/alpha/kernel/rtc.c

@@ -0,0 +1,323 @@
+/*
+ *  linux/arch/alpha/kernel/rtc.c
+ *
+ *  Copyright (C) 1991, 1992, 1995, 1999, 2000  Linus Torvalds
+ *
+ * This file contains date handling.
+ */
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/string.h>
+#include <linux/mc146818rtc.h>
+#include <linux/bcd.h>
+#include <linux/rtc.h>
+#include <linux/platform_device.h>
+
+#include <asm/rtc.h>
+
+#include "proto.h"
+
+
+/*
+ * Support for the RTC device.
+ *
+ * We don't want to use the rtc-cmos driver, because we don't want to support
+ * alarms, as that would be indistinguishable from timer interrupts.
+ *
+ * Further, generic code is really, really tied to a 1900 epoch.  This is
+ * true in __get_rtc_time as well as the users of struct rtc_time e.g.
+ * rtc_tm_to_time.  Thankfully all of the other epochs in use are later
+ * than 1900, and so it's easy to adjust.
+ */
+
+static unsigned long rtc_epoch;
+
+static int __init
+specifiy_epoch(char *str)
+{
+	unsigned long epoch = simple_strtoul(str, NULL, 0);
+	if (epoch < 1900)
+		printk("Ignoring invalid user specified epoch %lu\n", epoch);
+	else
+		rtc_epoch = epoch;
+	return 1;
+}
+__setup("epoch=", specifiy_epoch);
+
+static void __init
+init_rtc_epoch(void)
+{
+	int epoch, year, ctrl;
+
+	if (rtc_epoch != 0) {
+		/* The epoch was specified on the command-line.  */
+		return;
+	}
+
+	/* Detect the epoch in use on this computer.  */
+	ctrl = CMOS_READ(RTC_CONTROL);
+	year = CMOS_READ(RTC_YEAR);
+	if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
+		year = bcd2bin(year);
+
+	/* PC-like is standard; used for year >= 70 */
+	epoch = 1900;
+	if (year < 20) {
+		epoch = 2000;
+	} else if (year >= 20 && year < 48) {
+		/* NT epoch */
+		epoch = 1980;
+	} else if (year >= 48 && year < 70) {
+		/* Digital UNIX epoch */
+		epoch = 1952;
+	}
+	rtc_epoch = epoch;
+
+	printk(KERN_INFO "Using epoch %d for rtc year %d\n", epoch, year);
+}
+
+static int
+alpha_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+	__get_rtc_time(tm);
+
+	/* Adjust for non-default epochs.  It's easier to depend on the
+	   generic __get_rtc_time and adjust the epoch here than create
+	   a copy of __get_rtc_time with the edits we need.  */
+	if (rtc_epoch != 1900) {
+		int year = tm->tm_year;
+		/* Undo the century adjustment made in __get_rtc_time.  */
+		if (year >= 100)
+			year -= 100;
+		year += rtc_epoch - 1900;
+		/* Redo the century adjustment with the epoch in place.  */
+		if (year <= 69)
+			year += 100;
+		tm->tm_year = year;
+	}
+
+	return rtc_valid_tm(tm);
+}
+
+static int
+alpha_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+	struct rtc_time xtm;
+
+	if (rtc_epoch != 1900) {
+		xtm = *tm;
+		xtm.tm_year -= rtc_epoch - 1900;
+		tm = &xtm;
+	}
+
+	return __set_rtc_time(tm);
+}
+
+static int
+alpha_rtc_set_mmss(struct device *dev, unsigned long nowtime)
+{
+	int retval = 0;
+	int real_seconds, real_minutes, cmos_minutes;
+	unsigned char save_control, save_freq_select;
+
+	/* Note: This code only updates minutes and seconds.  Comments
+	   indicate this was to avoid messing with unknown time zones,
+	   and with the epoch nonsense described above.  In order for
+	   this to work, the existing clock cannot be off by more than
+	   15 minutes.
+
+	   ??? This choice may be out of date.  The x86 port does
+	   not have problems with timezones, and the epoch processing has
+	   now been fixed in alpha_set_rtc_time.
+
+	   In either case, one can always force a full rtc update with
+	   the userland hwclock program, so surely 15 minute accuracy
+	   is no real burden.  */
+
+	/* In order to set the CMOS clock precisely, we have to be called
+	   500 ms after the second nowtime has started, because when
+	   nowtime is written into the registers of the CMOS clock, it will
+	   jump to the next second precisely 500 ms later. Check the Motorola
+	   MC146818A or Dallas DS12887 data sheet for details.  */
+
+	/* irq are locally disabled here */
+	spin_lock(&rtc_lock);
+	/* Tell the clock it's being set */
+	save_control = CMOS_READ(RTC_CONTROL);
+	CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
+
+	/* Stop and reset prescaler */
+	save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
+	CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
+
+	cmos_minutes = CMOS_READ(RTC_MINUTES);
+	if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
+		cmos_minutes = bcd2bin(cmos_minutes);
+
+	real_seconds = nowtime % 60;
+	real_minutes = nowtime / 60;
+	if (((abs(real_minutes - cmos_minutes) + 15) / 30) & 1) {
+		/* correct for half hour time zone */
+		real_minutes += 30;
+	}
+	real_minutes %= 60;
+
+	if (abs(real_minutes - cmos_minutes) < 30) {
+		if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
+			real_seconds = bin2bcd(real_seconds);
+			real_minutes = bin2bcd(real_minutes);
+		}
+		CMOS_WRITE(real_seconds,RTC_SECONDS);
+		CMOS_WRITE(real_minutes,RTC_MINUTES);
+	} else {
+		printk_once(KERN_NOTICE
+			    "set_rtc_mmss: can't update from %d to %d\n",
+			    cmos_minutes, real_minutes);
+		retval = -1;
+	}
+
+	/* The following flags have to be released exactly in this order,
+	 * otherwise the DS12887 (popular MC146818A clone with integrated
+	 * battery and quartz) will not reset the oscillator and will not
+	 * update precisely 500 ms later. You won't find this mentioned in
+	 * the Dallas Semiconductor data sheets, but who believes data
+	 * sheets anyway ...                           -- Markus Kuhn
+	 */
+	CMOS_WRITE(save_control, RTC_CONTROL);
+	CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
+	spin_unlock(&rtc_lock);
+
+	return retval;
+}
+
+static int
+alpha_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
+{
+	switch (cmd) {
+	case RTC_EPOCH_READ:
+		return put_user(rtc_epoch, (unsigned long __user *)arg);
+	case RTC_EPOCH_SET:
+		if (arg < 1900)
+			return -EINVAL;
+		rtc_epoch = arg;
+		return 0;
+	default:
+		return -ENOIOCTLCMD;
+	}
+}
+
+static const struct rtc_class_ops alpha_rtc_ops = {
+	.read_time = alpha_rtc_read_time,
+	.set_time = alpha_rtc_set_time,
+	.set_mmss = alpha_rtc_set_mmss,
+	.ioctl = alpha_rtc_ioctl,
+};
+
+/*
+ * Similarly, except do the actual CMOS access on the boot cpu only.
+ * This requires marshalling the data across an interprocessor call.
+ */
+
+#if defined(CONFIG_SMP) && \
+    (defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_MARVEL))
+# define HAVE_REMOTE_RTC 1
+
+union remote_data {
+	struct rtc_time *tm;
+	unsigned long now;
+	long retval;
+};
+
+static void
+do_remote_read(void *data)
+{
+	union remote_data *x = data;
+	x->retval = alpha_rtc_read_time(NULL, x->tm);
+}
+
+static int
+remote_read_time(struct device *dev, struct rtc_time *tm)
+{
+	union remote_data x;
+	if (smp_processor_id() != boot_cpuid) {
+		x.tm = tm;
+		smp_call_function_single(boot_cpuid, do_remote_read, &x, 1);
+		return x.retval;
+	}
+	return alpha_rtc_read_time(NULL, tm);
+}
+
+static void
+do_remote_set(void *data)
+{
+	union remote_data *x = data;
+	x->retval = alpha_rtc_set_time(NULL, x->tm);
+}
+
+static int
+remote_set_time(struct device *dev, struct rtc_time *tm)
+{
+	union remote_data x;
+	if (smp_processor_id() != boot_cpuid) {
+		x.tm = tm;
+		smp_call_function_single(boot_cpuid, do_remote_set, &x, 1);
+		return x.retval;
+	}
+	return alpha_rtc_set_time(NULL, tm);
+}
+
+static void
+do_remote_mmss(void *data)
+{
+	union remote_data *x = data;
+	x->retval = alpha_rtc_set_mmss(NULL, x->now);
+}
+
+static int
+remote_set_mmss(struct device *dev, unsigned long now)
+{
+	union remote_data x;
+	if (smp_processor_id() != boot_cpuid) {
+		x.now = now;
+		smp_call_function_single(boot_cpuid, do_remote_mmss, &x, 1);
+		return x.retval;
+	}
+	return alpha_rtc_set_mmss(NULL, now);
+}
+
+static const struct rtc_class_ops remote_rtc_ops = {
+	.read_time = remote_read_time,
+	.set_time = remote_set_time,
+	.set_mmss = remote_set_mmss,
+	.ioctl = alpha_rtc_ioctl,
+};
+#endif
+
+static int __init
+alpha_rtc_init(void)
+{
+	const struct rtc_class_ops *ops;
+	struct platform_device *pdev;
+	struct rtc_device *rtc;
+	const char *name;
+
+	init_rtc_epoch();
+	name = "rtc-alpha";
+	ops = &alpha_rtc_ops;
+
+#ifdef HAVE_REMOTE_RTC
+	if (alpha_mv.rtc_boot_cpu_only)
+		ops = &remote_rtc_ops;
+#endif
+
+	pdev = platform_device_register_simple(name, -1, NULL, 0);
+	rtc = devm_rtc_device_register(&pdev->dev, name, ops, THIS_MODULE);
+	if (IS_ERR(rtc))
+		return PTR_ERR(rtc);
+
+	platform_set_drvdata(pdev, rtc);
+	return 0;
+}
+device_initcall(alpha_rtc_init);
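
To illustrate the epoch handling in alpha_rtc_read_time() above, here is a small standalone restatement of the year adjustment (not part of the patch). 'raw' is tm_year as produced by the generic __get_rtc_time(), i.e. already century-corrected against a 1900 epoch:

static int adjust_year(int raw, unsigned long rtc_epoch)
{
	int year = raw;

	if (rtc_epoch != 1900) {
		if (year >= 100)		/* undo the generic century fix */
			year -= 100;
		year += rtc_epoch - 1900;	/* shift to the detected epoch  */
		if (year <= 69)			/* redo the fix, epoch applied  */
			year += 100;
	}
	return year;
}

Example: a CMOS year of 52 under the Digital UNIX epoch of 1952 comes out of __get_rtc_time() as tm_year = 152, and adjust_year(152, 1952) returns 104, i.e. calendar year 2004.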

+ 20 - 3
arch/alpha/kernel/setup.c

@@ -115,10 +115,17 @@ unsigned long alpha_agpgart_size = DEFAULT_AGP_APER_SIZE;
 
 
 #ifdef CONFIG_ALPHA_GENERIC
 struct alpha_machine_vector alpha_mv;
+#endif
+
+#ifndef alpha_using_srm
 int alpha_using_srm;
 EXPORT_SYMBOL(alpha_using_srm);
 #endif
 
 
+#ifndef alpha_using_qemu
+int alpha_using_qemu;
+#endif
+
 static struct alpha_machine_vector *get_sysvec(unsigned long, unsigned long,
 					       unsigned long);
 static struct alpha_machine_vector *get_sysvec_byname(const char *);
@@ -529,11 +536,15 @@ setup_arch(char **cmdline_p)
 	atomic_notifier_chain_register(&panic_notifier_list,
 			&alpha_panic_block);
 
 
-#ifdef CONFIG_ALPHA_GENERIC
+#ifndef alpha_using_srm
 	/* Assume that we've booted from SRM if we haven't booted from MILO.
	   Detect the latter by looking for "MILO" in the system serial nr.  */
 	alpha_using_srm = strncmp((const char *)hwrpb->ssn, "MILO", 4) != 0;
 #endif
+#ifndef alpha_using_qemu
+	/* Similarly, look for QEMU.  */
+	alpha_using_qemu = strstr((const char *)hwrpb->ssn, "QEMU") != 0;
+#endif
 
 
 	/* If we are using SRM, we want to allow callbacks
 	   as early as possible, so do this NOW, and then
@@ -1207,6 +1218,7 @@ show_cpuinfo(struct seq_file *f, void *slot)
 	char *systype_name;
 	char *sysvariation_name;
 	int nr_processors;
+	unsigned long timer_freq;
 
 
 	cpu_index = (unsigned) (cpu->type - 1);
 	cpu_name = "Unknown";
@@ -1218,6 +1230,12 @@ show_cpuinfo(struct seq_file *f, void *slot)
 
 
 	nr_processors = get_nr_processors(cpu, hwrpb->nr_processors);
 
 
+#if CONFIG_HZ == 1024 || CONFIG_HZ == 1200
+	timer_freq = (100UL * hwrpb->intr_freq) / 4096;
+#else
+	timer_freq = 100UL * CONFIG_HZ;
+#endif
+
 	seq_printf(f, "cpu\t\t\t: Alpha\n"
 		      "cpu model\t\t: %s\n"
 		      "cpu model\t\t: %s\n"
 		      "cpu variation\t\t: %ld\n"
 		      "cpu variation\t\t: %ld\n"
@@ -1243,8 +1261,7 @@ show_cpuinfo(struct seq_file *f, void *slot)
 		       (char*)hwrpb->ssn,
 		       est_cycle_freq ? : hwrpb->cycle_freq,
 		       est_cycle_freq ? "est." : "",
-		       hwrpb->intr_freq / 4096,
-		       (100 * hwrpb->intr_freq / 4096) % 100,
+		       timer_freq / 100, timer_freq % 100,
 		       hwrpb->pagesize,
 		       hwrpb->pa_bits,
 		       hwrpb->max_asn,

+ 3 - 30
arch/alpha/kernel/smp.c

@@ -138,9 +138,11 @@ smp_callin(void)
 
 
 	/* Get our local ticker going. */
 	smp_setup_percpu_timer(cpuid);
+	init_clockevent();
 
 
 	/* Call platform-specific callin, if specified */
-	if (alpha_mv.smp_callin) alpha_mv.smp_callin();
+	if (alpha_mv.smp_callin)
+		alpha_mv.smp_callin();
 
 
 	/* All kernel threads share the same mm context.  */
 	atomic_inc(&init_mm.mm_count);
@@ -498,35 +500,6 @@ smp_cpus_done(unsigned int max_cpus)
 	       ((bogosum + 2500) / (5000/HZ)) % 100);
 }
 
 
-
-void
-smp_percpu_timer_interrupt(struct pt_regs *regs)
-{
-	struct pt_regs *old_regs;
-	int cpu = smp_processor_id();
-	unsigned long user = user_mode(regs);
-	struct cpuinfo_alpha *data = &cpu_data[cpu];
-
-	old_regs = set_irq_regs(regs);
-
-	/* Record kernel PC.  */
-	profile_tick(CPU_PROFILING);
-
-	if (!--data->prof_counter) {
-		/* We need to make like a normal interrupt -- otherwise
-		   timer interrupts ignore the global interrupt lock,
-		   which would be a Bad Thing.  */
-		irq_enter();
-
-		update_process_times(user);
-
-		data->prof_counter = data->prof_multiplier;
-
-		irq_exit();
-	}
-	set_irq_regs(old_regs);
-}
-
 int
 setup_profiling_timer(unsigned int multiplier)
 {

+ 0 - 2
arch/alpha/kernel/sys_jensen.c

@@ -224,8 +224,6 @@ struct alpha_machine_vector jensen_mv __initmv = {
 	.machine_check		= jensen_machine_check,
 	.max_isa_dma_address	= ALPHA_MAX_ISA_DMA_ADDRESS,
 	.rtc_port		= 0x170,
-	.rtc_get_time		= common_get_rtc_time,
-	.rtc_set_time		= common_set_rtc_time,
 
 
 	.nr_irqs		= 16,
 	.device_interrupt	= jensen_device_interrupt,

+ 1 - 54
arch/alpha/kernel/sys_marvel.c

@@ -22,7 +22,6 @@
 #include <asm/hwrpb.h>
 #include <asm/tlbflush.h>
 #include <asm/vga.h>
-#include <asm/rtc.h>
 
 
 #include "proto.h"
 #include "err_impl.h"
@@ -400,57 +399,6 @@ marvel_init_rtc(void)
 	init_rtc_irq();
 }
 
 
-struct marvel_rtc_time {
-	struct rtc_time *time;
-	int retval;
-};
-
-#ifdef CONFIG_SMP
-static void
-smp_get_rtc_time(void *data)
-{
-	struct marvel_rtc_time *mrt = data;
-	mrt->retval = __get_rtc_time(mrt->time);
-}
-
-static void
-smp_set_rtc_time(void *data)
-{
-	struct marvel_rtc_time *mrt = data;
-	mrt->retval = __set_rtc_time(mrt->time);
-}
-#endif
-
-static unsigned int
-marvel_get_rtc_time(struct rtc_time *time)
-{
-#ifdef CONFIG_SMP
-	struct marvel_rtc_time mrt;
-
-	if (smp_processor_id() != boot_cpuid) {
-		mrt.time = time;
-		smp_call_function_single(boot_cpuid, smp_get_rtc_time, &mrt, 1);
-		return mrt.retval;
-	}
-#endif
-	return __get_rtc_time(time);
-}
-
-static int
-marvel_set_rtc_time(struct rtc_time *time)
-{
-#ifdef CONFIG_SMP
-	struct marvel_rtc_time mrt;
-
-	if (smp_processor_id() != boot_cpuid) {
-		mrt.time = time;
-		smp_call_function_single(boot_cpuid, smp_set_rtc_time, &mrt, 1);
-		return mrt.retval;
-	}
-#endif
-	return __set_rtc_time(time);
-}
-
 static void
 marvel_smp_callin(void)
 {
@@ -492,8 +440,7 @@ struct alpha_machine_vector marvel_ev7_mv __initmv = {
 	.vector_name		= "MARVEL/EV7",
 	DO_EV7_MMU,
 	.rtc_port		= 0x70,
-	.rtc_get_time		= marvel_get_rtc_time,
-	.rtc_set_time		= marvel_set_rtc_time,
+	.rtc_boot_cpu_only	= 1,
 	DO_MARVEL_IO,
 	.machine_check		= marvel_machine_check,
 	.max_isa_dma_address	= ALPHA_MAX_ISA_DMA_ADDRESS,

+ 175 - 230
arch/alpha/kernel/time.c

@@ -3,13 +3,7 @@
  *
  *  Copyright (C) 1991, 1992, 1995, 1999, 2000  Linus Torvalds
  *
- * This file contains the PC-specific time handling details:
- * reading the RTC at bootup, etc..
- * 1994-07-02    Alan Modra
- *	fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
- * 1995-03-26    Markus Kuhn
- *      fixed 500 ms bug at call to set_rtc_mmss, fixed DS12887
- *      precision CMOS clock update
+ * This file contains the clocksource time handling.
  * 1997-09-10	Updated NTP code according to technical memorandum Jan '96
  *		"A Kernel Model for Precision Timekeeping" by Dave Mills
  * 1997-01-09    Adrian Sun
@@ -21,9 +15,6 @@
  * 1999-04-16	Thorsten Kranzkowski (dl8bcu@gmx.net)
  *	fixed algorithm in do_gettimeofday() for calculating the precise time
  *	from processor cycle counter (now taking lost_ticks into account)
- * 2000-08-13	Jan-Benedict Glaw <jbglaw@lug-owl.de>
- * 	Fixed time_init to be aware of epoches != 1900. This prevents
- * 	booting up in 2048 for me;) Code is stolen from rtc.c.
  * 2003-06-03	R. Scott Bailey <scott.bailey@eds.com>
  *	Tighten sanity in time_init from 1% (10,000 PPM) to 250 PPM
  */
@@ -46,40 +37,19 @@
 #include <asm/uaccess.h>
 #include <asm/io.h>
 #include <asm/hwrpb.h>
-#include <asm/rtc.h>
 
 
 #include <linux/mc146818rtc.h>
 #include <linux/time.h>
 #include <linux/timex.h>
 #include <linux/clocksource.h>
+#include <linux/clockchips.h>
 
 
 #include "proto.h"
 #include "irq_impl.h"
 
 
-static int set_rtc_mmss(unsigned long);
-
 DEFINE_SPINLOCK(rtc_lock);
 EXPORT_SYMBOL(rtc_lock);
 
 
-#define TICK_SIZE (tick_nsec / 1000)
-
-/*
- * Shift amount by which scaled_ticks_per_cycle is scaled.  Shifting
- * by 48 gives us 16 bits for HZ while keeping the accuracy good even
- * for large CPU clock rates.
- */
-#define FIX_SHIFT	48
-
-/* lump static variables together for more efficient access: */
-static struct {
-	/* cycle counter last time it got invoked */
-	__u32 last_time;
-	/* ticks/cycle * 2^48 */
-	unsigned long scaled_ticks_per_cycle;
-	/* partial unused tick */
-	unsigned long partial_tick;
-} state;
-
 unsigned long est_cycle_freq;
 
 
 #ifdef CONFIG_IRQ_WORK
@@ -108,109 +78,156 @@ static inline __u32 rpcc(void)
 	return __builtin_alpha_rpcc();
 }
 
 
-int update_persistent_clock(struct timespec now)
-{
-	return set_rtc_mmss(now.tv_sec);
-}
 
 
-void read_persistent_clock(struct timespec *ts)
+
+/*
+ * The RTC as a clock_event_device primitive.
+ */
+
+static DEFINE_PER_CPU(struct clock_event_device, cpu_ce);
+
+irqreturn_t
+rtc_timer_interrupt(int irq, void *dev)
 {
 {
-	unsigned int year, mon, day, hour, min, sec, epoch;
-
-	sec = CMOS_READ(RTC_SECONDS);
-	min = CMOS_READ(RTC_MINUTES);
-	hour = CMOS_READ(RTC_HOURS);
-	day = CMOS_READ(RTC_DAY_OF_MONTH);
-	mon = CMOS_READ(RTC_MONTH);
-	year = CMOS_READ(RTC_YEAR);
-
-	if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
-		sec = bcd2bin(sec);
-		min = bcd2bin(min);
-		hour = bcd2bin(hour);
-		day = bcd2bin(day);
-		mon = bcd2bin(mon);
-		year = bcd2bin(year);
-	}
+	int cpu = smp_processor_id();
+	struct clock_event_device *ce = &per_cpu(cpu_ce, cpu);
 
 
-	/* PC-like is standard; used for year >= 70 */
-	epoch = 1900;
-	if (year < 20)
-		epoch = 2000;
-	else if (year >= 20 && year < 48)
-		/* NT epoch */
-		epoch = 1980;
-	else if (year >= 48 && year < 70)
-		/* Digital UNIX epoch */
-		epoch = 1952;
+	/* Don't run the hook for UNUSED or SHUTDOWN.  */
+	if (likely(ce->mode == CLOCK_EVT_MODE_PERIODIC))
+		ce->event_handler(ce);
 
 
-	printk(KERN_INFO "Using epoch = %d\n", epoch);
+	if (test_irq_work_pending()) {
+		clear_irq_work_pending();
+		irq_work_run();
+	}
 
 
-	if ((year += epoch) < 1970)
-		year += 100;
+	return IRQ_HANDLED;
+}
 
 
-	ts->tv_sec = mktime(year, mon, day, hour, min, sec);
-	ts->tv_nsec = 0;
+static void
+rtc_ce_set_mode(enum clock_event_mode mode, struct clock_event_device *ce)
+{
+	/* The mode member of CE is updated in generic code.
+	   Since we only support periodic events, nothing to do.  */
+}
+
+static int
+rtc_ce_set_next_event(unsigned long evt, struct clock_event_device *ce)
+{
+	/* This hook is for oneshot mode, which we don't support.  */
+	return -EINVAL;
 }
 }
 
 
+static void __init
+init_rtc_clockevent(void)
+{
+	int cpu = smp_processor_id();
+	struct clock_event_device *ce = &per_cpu(cpu_ce, cpu);
+
+	*ce = (struct clock_event_device){
+		.name = "rtc",
+		.features = CLOCK_EVT_FEAT_PERIODIC,
+		.rating = 100,
+		.cpumask = cpumask_of(cpu),
+		.set_mode = rtc_ce_set_mode,
+		.set_next_event = rtc_ce_set_next_event,
+	};
 
 
+	clockevents_config_and_register(ce, CONFIG_HZ, 0, 0);
+}
 
 
+
 /*
 /*
- * timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "xtime_update()" routine every clocktick
+ * The QEMU clock as a clocksource primitive.
  */
  */
-irqreturn_t timer_interrupt(int irq, void *dev)
+
+static cycle_t
+qemu_cs_read(struct clocksource *cs)
 {
 {
-	unsigned long delta;
-	__u32 now;
-	long nticks;
+	return qemu_get_vmtime();
+}
 
 
-#ifndef CONFIG_SMP
-	/* Not SMP, do kernel PC profiling here.  */
-	profile_tick(CPU_PROFILING);
-#endif
+static struct clocksource qemu_cs = {
+	.name                   = "qemu",
+	.rating                 = 400,
+	.read                   = qemu_cs_read,
+	.mask                   = CLOCKSOURCE_MASK(64),
+	.flags                  = CLOCK_SOURCE_IS_CONTINUOUS,
+	.max_idle_ns		= LONG_MAX
+};
 
 
-	/*
-	 * Calculate how many ticks have passed since the last update,
-	 * including any previous partial leftover.  Save any resulting
-	 * fraction for the next pass.
-	 */
-	now = rpcc();
-	delta = now - state.last_time;
-	state.last_time = now;
-	delta = delta * state.scaled_ticks_per_cycle + state.partial_tick;
-	state.partial_tick = delta & ((1UL << FIX_SHIFT) - 1); 
-	nticks = delta >> FIX_SHIFT;
 
 
-	if (nticks)
-		xtime_update(nticks);
+/*
+ * The QEMU alarm as a clock_event_device primitive.
+ */
 
 
-	if (test_irq_work_pending()) {
-		clear_irq_work_pending();
-		irq_work_run();
-	}
+static void
+qemu_ce_set_mode(enum clock_event_mode mode, struct clock_event_device *ce)
+{
+	/* The mode member of CE is updated for us in generic code.
+	   Just make sure that the event is disabled.  */
+	qemu_set_alarm_abs(0);
+}
 
 
-#ifndef CONFIG_SMP
-	while (nticks--)
-		update_process_times(user_mode(get_irq_regs()));
-#endif
+static int
+qemu_ce_set_next_event(unsigned long evt, struct clock_event_device *ce)
+{
+	qemu_set_alarm_rel(evt);
+	return 0;
+}
 
 
+static irqreturn_t
+qemu_timer_interrupt(int irq, void *dev)
+{
+	int cpu = smp_processor_id();
+	struct clock_event_device *ce = &per_cpu(cpu_ce, cpu);
+
+	ce->event_handler(ce);
 	return IRQ_HANDLED;
 }
 }
 
 
+static void __init
+init_qemu_clockevent(void)
+{
+	int cpu = smp_processor_id();
+	struct clock_event_device *ce = &per_cpu(cpu_ce, cpu);
+
+	*ce = (struct clock_event_device){
+		.name = "qemu",
+		.features = CLOCK_EVT_FEAT_ONESHOT,
+		.rating = 400,
+		.cpumask = cpumask_of(cpu),
+		.set_mode = qemu_ce_set_mode,
+		.set_next_event = qemu_ce_set_next_event,
+	};
+
+	clockevents_config_and_register(ce, NSEC_PER_SEC, 1000, LONG_MAX);
+}
+
+
 void __init
 common_init_rtc(void)
 {
 {
-	unsigned char x;
+	unsigned char x, sel = 0;
 
 
 	/* Reset periodic interrupt frequency.  */
-	x = CMOS_READ(RTC_FREQ_SELECT) & 0x3f;
-        /* Test includes known working values on various platforms
-           where 0x26 is wrong; we refuse to change those. */
-	if (x != 0x26 && x != 0x25 && x != 0x19 && x != 0x06) {
-		printk("Setting RTC_FREQ to 1024 Hz (%x)\n", x);
-		CMOS_WRITE(0x26, RTC_FREQ_SELECT);
+#if CONFIG_HZ == 1024 || CONFIG_HZ == 1200
+ 	x = CMOS_READ(RTC_FREQ_SELECT) & 0x3f;
+	/* Test includes known working values on various platforms
+	   where 0x26 is wrong; we refuse to change those. */
+ 	if (x != 0x26 && x != 0x25 && x != 0x19 && x != 0x06) {
+		sel = RTC_REF_CLCK_32KHZ + 6;
 	}
 	}
+#elif CONFIG_HZ == 256 || CONFIG_HZ == 128 || CONFIG_HZ == 64 || CONFIG_HZ == 32
+	sel = RTC_REF_CLCK_32KHZ + __builtin_ffs(32768 / CONFIG_HZ);
+#else
+# error "Unknown HZ from arch/alpha/Kconfig"
+#endif
+	if (sel) {
+		printk(KERN_INFO "Setting RTC_FREQ to %d Hz (%x)\n",
+		       CONFIG_HZ, sel);
+		CMOS_WRITE(sel, RTC_FREQ_SELECT);
+ 	}
 
 
 	/* Turn on periodic interrupts.  */
 	x = CMOS_READ(RTC_CONTROL);
@@ -233,16 +250,37 @@ common_init_rtc(void)
 	init_rtc_irq();
 }
 }
 
 
-unsigned int common_get_rtc_time(struct rtc_time *time)
-{
-	return __get_rtc_time(time);
-}
+
+#ifndef CONFIG_ALPHA_WTINT
+/*
+ * The RPCC as a clocksource primitive.
+ *
+ * While we have free-running timecounters running on all CPUs, and we make
+ * a half-hearted attempt in init_rtc_rpcc_info to sync the timecounter
+ * with the wall clock, that initialization isn't kept up-to-date across
+ * different time counters in SMP mode.  Therefore we can only use this
+ * method when there's only one CPU enabled.
+ *
+ * When using the WTINT PALcall, the RPCC may shift to a lower frequency,
+ * or stop altogether, while waiting for the interrupt.  Therefore we cannot
+ * use this method when WTINT is in use.
+ */
 
 
-int common_set_rtc_time(struct rtc_time *time)
+static cycle_t read_rpcc(struct clocksource *cs)
 {
 {
-	return __set_rtc_time(time);
+	return rpcc();
 }
 }
 
 
+static struct clocksource clocksource_rpcc = {
+	.name                   = "rpcc",
+	.rating                 = 300,
+	.read                   = read_rpcc,
+	.mask                   = CLOCKSOURCE_MASK(32),
+	.flags                  = CLOCK_SOURCE_IS_CONTINUOUS
+};
+#endif /* ALPHA_WTINT */
+
+
 /* Validate a computed cycle counter result against the known bounds for
    the given processor core.  There's too much brokenness in the way of
    timing hardware for any one method to work everywhere.  :-(
@@ -353,33 +391,6 @@ rpcc_after_update_in_progress(void)
 	return rpcc();
 }
 }
 
 
-#ifndef CONFIG_SMP
-/* Until and unless we figure out how to get cpu cycle counters
-   in sync and keep them there, we can't use the rpcc.  */
-static cycle_t read_rpcc(struct clocksource *cs)
-{
-	cycle_t ret = (cycle_t)rpcc();
-	return ret;
-}
-
-static struct clocksource clocksource_rpcc = {
-	.name                   = "rpcc",
-	.rating                 = 300,
-	.read                   = read_rpcc,
-	.mask                   = CLOCKSOURCE_MASK(32),
-	.flags                  = CLOCK_SOURCE_IS_CONTINUOUS
-};
-
-static inline void register_rpcc_clocksource(long cycle_freq)
-{
-	clocksource_register_hz(&clocksource_rpcc, cycle_freq);
-}
-#else /* !CONFIG_SMP */
-static inline void register_rpcc_clocksource(long cycle_freq)
-{
-}
-#endif /* !CONFIG_SMP */
-
 void __init
 time_init(void)
 {
 {
@@ -387,6 +398,15 @@ time_init(void)
 	unsigned long cycle_freq, tolerance;
 	long diff;
 
 
+	if (alpha_using_qemu) {
+		clocksource_register_hz(&qemu_cs, NSEC_PER_SEC);
+		init_qemu_clockevent();
+
+		timer_irqaction.handler = qemu_timer_interrupt;
+		init_rtc_irq();
+		return;
+	}
+
 	/* Calibrate CPU clock -- attempt #1.  */
 	if (!est_cycle_freq)
 		est_cycle_freq = validate_cc_value(calibrate_cc_with_pit());
@@ -421,100 +441,25 @@ time_init(void)
 		       "and unable to estimate a proper value!\n");
 		       "and unable to estimate a proper value!\n");
 	}
 	}
 
 
-	/* From John Bowman <bowman@math.ualberta.ca>: allow the values
-	   to settle, as the Update-In-Progress bit going low isn't good
-	   enough on some hardware.  2ms is our guess; we haven't found 
-	   bogomips yet, but this is close on a 500Mhz box.  */
-	__delay(1000000);
-
-
-	if (HZ > (1<<16)) {
-		extern void __you_loose (void);
-		__you_loose();
-	}
-
-	register_rpcc_clocksource(cycle_freq);
-
-	state.last_time = cc1;
-	state.scaled_ticks_per_cycle
-		= ((unsigned long) HZ << FIX_SHIFT) / cycle_freq;
-	state.partial_tick = 0L;
+	/* See above for restrictions on using clocksource_rpcc.  */
+#ifndef CONFIG_ALPHA_WTINT
+	if (hwrpb->nr_processors == 1)
+		clocksource_register_hz(&clocksource_rpcc, cycle_freq);
+#endif
 
 
 	/* Startup the timer source. */
 	alpha_mv.init_rtc();
+	init_rtc_clockevent();
 }
 }
 
 
-/*
- * In order to set the CMOS clock precisely, set_rtc_mmss has to be
- * called 500 ms after the second nowtime has started, because when
- * nowtime is written into the registers of the CMOS clock, it will
- * jump to the next second precisely 500 ms later. Check the Motorola
- * MC146818A or Dallas DS12887 data sheet for details.
- *
- * BUG: This routine does not handle hour overflow properly; it just
- *      sets the minutes. Usually you won't notice until after reboot!
- */
-
-
-static int
-set_rtc_mmss(unsigned long nowtime)
+/* Initialize the clock_event_device for secondary cpus.  */
+#ifdef CONFIG_SMP
+void __init
+init_clockevent(void)
 {
 {
-	int retval = 0;
-	int real_seconds, real_minutes, cmos_minutes;
-	unsigned char save_control, save_freq_select;
-
-	/* irq are locally disabled here */
-	spin_lock(&rtc_lock);
-	/* Tell the clock it's being set */
-	save_control = CMOS_READ(RTC_CONTROL);
-	CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
-
-	/* Stop and reset prescaler */
-	save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
-	CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
-
-	cmos_minutes = CMOS_READ(RTC_MINUTES);
-	if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
-		cmos_minutes = bcd2bin(cmos_minutes);
-
-	/*
-	 * since we're only adjusting minutes and seconds,
-	 * don't interfere with hour overflow. This avoids
-	 * messing with unknown time zones but requires your
-	 * RTC not to be off by more than 15 minutes
-	 */
-	real_seconds = nowtime % 60;
-	real_minutes = nowtime / 60;
-	if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1) {
-		/* correct for half hour time zone */
-		real_minutes += 30;
-	}
-	real_minutes %= 60;
-
-	if (abs(real_minutes - cmos_minutes) < 30) {
-		if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
-			real_seconds = bin2bcd(real_seconds);
-			real_minutes = bin2bcd(real_minutes);
-		}
-		CMOS_WRITE(real_seconds,RTC_SECONDS);
-		CMOS_WRITE(real_minutes,RTC_MINUTES);
-	} else {
-		printk_once(KERN_NOTICE
-		       "set_rtc_mmss: can't update from %d to %d\n",
-		       cmos_minutes, real_minutes);
- 		retval = -1;
-	}
-
-	/* The following flags have to be released exactly in this order,
-	 * otherwise the DS12887 (popular MC146818A clone with integrated
-	 * battery and quartz) will not reset the oscillator and will not
-	 * update precisely 500 ms later. You won't find this mentioned in
-	 * the Dallas Semiconductor data sheets, but who believes data
-	 * sheets anyway ...                           -- Markus Kuhn
-	 */
-	CMOS_WRITE(save_control, RTC_CONTROL);
-	CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
-	spin_unlock(&rtc_lock);
-
-	return retval;
+	if (alpha_using_qemu)
+		init_qemu_clockevent();
+	else
+		init_rtc_clockevent();
 }
 }
+#endif
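
One detail worth calling out, inferred from the registration values above rather than stated in the patch: both the qemu_cs clocksource and the QEMU clock_event_device are registered with a frequency of NSEC_PER_SEC, which implies the cserve get_vmtime/set_alarm_* interface counts in nanoseconds. Under that assumption, arming a one-shot tick by hand would look like:

	/* sketch only: a 10 ms one-shot alarm, assuming nanosecond units */
	qemu_set_alarm_rel(10 * NSEC_PER_MSEC);	/* later delivered via qemu_timer_interrupt() */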

+ 15 - 0
arch/alpha/kernel/traps.c

@@ -241,6 +241,21 @@ do_entIF(unsigned long type, struct pt_regs *regs)
 			       (const char *)(data[1] | (long)data[2] << 32), 
 			       data[0]);
 		}
 		}
+#ifdef CONFIG_ALPHA_WTINT
+		if (type == 4) {
+			/* If CALL_PAL WTINT is totally unsupported by the
+			   PALcode, e.g. MILO, "emulate" it by overwriting
+			   the insn.  */
+			unsigned int *pinsn
+			  = (unsigned int *) regs->pc - 1;
+			if (*pinsn == PAL_wtint) {
+				*pinsn = 0x47e01400; /* mov 0,$0 */
+				imb();
+				regs->r0 = 0;
+				return;
+			}
+		}
+#endif /* ALPHA_WTINT */
 		die_if_kernel((type == 1 ? "Kernel Bug" : "Instruction fault"),
 			      regs, type, NULL);
 	}
 	}

+ 5 - 5
arch/alpha/lib/csum_partial_copy.c

@@ -130,7 +130,7 @@ csum_partial_cfu_aligned(const unsigned long __user *src, unsigned long *dst,
 		*dst = word | tmp;
 		checksum += carry;
 	}
 	}
-	if (err) *errp = err;
+	if (err && errp) *errp = err;
 	return checksum;
 }
 }
 
 
@@ -185,7 +185,7 @@ csum_partial_cfu_dest_aligned(const unsigned long __user *src,
 		*dst = word | tmp;
 		checksum += carry;
 	}
 	}
-	if (err) *errp = err;
+	if (err && errp) *errp = err;
 	return checksum;
 }
 }
 
 
@@ -242,7 +242,7 @@ csum_partial_cfu_src_aligned(const unsigned long __user *src,
 	stq_u(partial_dest | second_dest, dst);
 out:
 	checksum += carry;
-	if (err) *errp = err;
+	if (err && errp) *errp = err;
 	return checksum;
 }
 }
 
 
@@ -325,7 +325,7 @@ csum_partial_cfu_unaligned(const unsigned long __user * src,
 		stq_u(partial_dest | word | second_dest, dst);
 		checksum += carry;
 	}
 	}
-	if (err) *errp = err;
+	if (err && errp) *errp = err;
 	return checksum;
 }
 }
 
 
@@ -339,7 +339,7 @@ csum_partial_copy_from_user(const void __user *src, void *dst, int len,
 
 
 	if (len) {
 		if (!access_ok(VERIFY_READ, src, len)) {
-			*errp = -EFAULT;
+			if (errp) *errp = -EFAULT;
 			memset(dst, 0, len);
 			return sum;
 		}
 		}

+ 7 - 5
arch/alpha/lib/ev6-memset.S

@@ -30,14 +30,15 @@
 	.set noat
 	.set noreorder
 .text
+	.globl memset
 	.globl __memset
+	.globl ___memset
 	.globl __memsetw
 	.globl __constant_c_memset
-	.globl memset
 
 
-	.ent __memset
+	.ent ___memset
 .align 5
-__memset:
+___memset:
 	.frame $30,0,$26,0
 	.prologue 0
 
 
@@ -227,7 +228,7 @@ end_b:
 	nop
 	nop
 	ret $31,($26),1		# L0 :
-	.end __memset
+	.end ___memset
 
 
 	/*
 	 * This is the original body of code, prior to replication and
@@ -594,4 +595,5 @@ end_w:
 
 
 	.end __memsetw
 
 
-memset = __memset
+memset = ___memset
+__memset = ___memset

+ 7 - 4
arch/alpha/lib/memset.S

@@ -19,11 +19,13 @@
 .text
 	.globl memset
 	.globl __memset
+	.globl ___memset
 	.globl __memsetw
 	.globl __constant_c_memset
-	.ent __memset
+
+	.ent ___memset
 .align 5
-__memset:
+___memset:
 	.frame $30,0,$26,0
 	.prologue 0
 
 
@@ -103,7 +105,7 @@ within_one_quad:
 
 
 end:
 	ret $31,($26),1		/* E1 */
-	.end __memset
+	.end ___memset
 
 
 	.align 5
 	.ent __memsetw
@@ -121,4 +123,5 @@ __memsetw:
 
 
 	.end __memsetw
 
 
-memset = __memset
+memset = ___memset
+__memset = ___memset

+ 9 - 1
drivers/rtc/Kconfig

@@ -626,7 +626,7 @@ comment "Platform RTC drivers"
 
 
 config RTC_DRV_CMOS
 	tristate "PC-style 'CMOS'"
-	depends on X86 || ALPHA || ARM || M32R || ATARI || PPC || MIPS || SPARC64
+	depends on X86 || ARM || M32R || ATARI || PPC || MIPS || SPARC64
 	default y if X86
 	help
 	  Say "yes" here to get direct support for the real time clock
@@ -643,6 +643,14 @@ config RTC_DRV_CMOS
 	  This driver can also be built as a module. If so, the module
 	  will be called rtc-cmos.
 
 
+config RTC_DRV_ALPHA
+	bool "Alpha PC-style CMOS"
+	depends on ALPHA
+	default y
+	help
+	  Direct support for the real-time clock found on every Alpha
+	  system, specifically MC146818 compatibles.  If in doubt, say Y.
+
 config RTC_DRV_VRTC
 	tristate "Virtual RTC for Intel MID platforms"
 	depends on X86_INTEL_MID