/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright 2016 Joyent, Inc.
 */

#include <sys/asm_linkage.h>
#include <sys/segments.h>
#include <sys/time_impl.h>
#include <sys/tsc.h>
#include <cp_offsets.h>

#define	GETCPU_GDT_OFFSET	SEL_GDT(GDT_CPUID, SEL_UPL)

	.file	"cp_subr.s"

/*
 * These are cloned from TSC and time related code in the kernel.  They should
 * be kept in sync in the case that the source values are changed.
 * See: uts/i86pc/os/timestamp.c
 */
#define	NSEC_SHIFT	5
#define	ADJ_SHIFT	4
#define	NANOSEC		0x3b9aca00
/*
 * hrtime_t
 * __cp_tsc_read(comm_page_t *cp)
 *
 * Returns the current TSC reading, adjusted by any per-CPU offset the kernel
 * has published in the comm page.  ABI: SysV AMD64; %rdi = cp.
 *
 * Stack usage: 0 bytes
 */
	ENTRY_NP(__cp_tsc_read)
	movl	CP_TSC_TYPE(%rdi), %esi
	movl	CP_TSC_NCPU(%rdi), %r8d
	leaq	CP_TSC_SYNC_TICK_DELTA(%rdi), %r9

	cmpl	$TSC_TSCP, %esi
	jne	2f
	rdtscp
	/*
	 * When the TSC is read, the low 32 bits are placed in %eax while the
	 * high 32 bits are placed in %edx.  They are shifted and ORed together
	 * to obtain the full 64-bit value.
	 */
	shlq	$0x20, %rdx
	orq	%rdx, %rax
	/*
	 * The per-CPU offset lookup is gated on cp_tsc_ncpu (held in %r8d).
	 * Testing %esi here instead would be wrong: it is known to equal
	 * TSC_TSCP (non-zero) on this path, so that comparison would
	 * unconditionally take the offset branch and make the bare return
	 * below unreachable.
	 */
	cmpl	$0, %r8d
	jne	1f
	ret
1:
	/*
	 * When cp_tsc_ncpu is non-zero, it indicates the length of the
	 * cp_tsc_sync_tick_delta array, which contains per-CPU offsets for the
	 * TSC.  The CPU ID furnished by the IA32_TSC_AUX register via rdtscp
	 * is used to look up an offset value in that array and apply it to the
	 * TSC reading.
	 */
	movq	(%r9, %rcx, 8), %rdx
	addq	%rdx, %rax
	ret

2:
	/*
	 * Without rdtscp, there is no way to perform a TSC reading and
	 * simultaneously query the current CPU.  If tsc_ncpu indicates that
	 * per-CPU TSC offsets are present, the ID of the current CPU is
	 * queried before performing a TSC reading.  It will be later compared
	 * to a second CPU ID lookup to catch CPU migrations.
	 *
	 * This method will catch all but the most pathological scheduling.
	 */
	cmpl	$0, %r8d
	je	3f
	movl	$GETCPU_GDT_OFFSET, %edx
	lsl	%dx, %edx

3:
	/* Save the most recently queried CPU ID for later comparison. */
	movl	%edx, %r10d

	cmpl	$TSC_RDTSC_MFENCE, %esi
	jne	4f
	mfence
	rdtsc
	jmp	7f

4:
	cmpl	$TSC_RDTSC_LFENCE, %esi
	jne	5f
	lfence
	rdtsc
	jmp	7f

5:
	cmpl	$TSC_RDTSC_CPUID, %esi
	jne	6f
	/*
	 * Since the amd64 ABI dictates that %rbx is callee-saved, it must be
	 * preserved here.  Its contents will be overwritten when cpuid is used
	 * as a serializing instruction.
	 */
	movq	%rbx, %r11
	xorl	%eax, %eax
	cpuid
	rdtsc
	movq	%r11, %rbx
	jmp	7f

6:
	/*
	 * Other protections should have prevented this function from being
	 * called in the first place.  The only sane action is to abort.
	 * The easiest means in this context is via SIGILL.
	 */
	ud2a

7:
	shlq	$0x20, %rdx
	orq	%rdx, %rax

	/*
	 * Query the current CPU again if a per-CPU offset is being applied to
	 * the TSC reading.  If the result differs from the earlier reading,
	 * then a migration has occured and the TSC must be read again.
	 */
	cmpl	$0, %r8d
	je	8f
	movl	$GETCPU_GDT_OFFSET, %edx
	lsl	%dx, %edx
	cmpl	%edx, %r10d
	jne	3b
	movq	(%r9, %rdx, 8), %rdx
	addq	%rdx, %rax
8:
	ret
	SET_SIZE(__cp_tsc_read)
1462428aadPatrick Mooney
1472428aadPatrick Mooney
/*
 * uint_t
 * __cp_getcpu(comm_page_t *)
 *
 * Returns the ID of the CPU this thread is currently running on.
 * ABI: SysV AMD64; %rdi = cp.
 *
 * Stack usage: 0 bytes
 */
	ENTRY_NP(__cp_getcpu)
	/*
	 * When the kernel has marked RDTSCP as usable, the CPU ID can be
	 * pulled straight from the IA32_TSC_AUX MSR, which rdtscp deposits
	 * in %ecx.  Otherwise, fall back to deriving it from the segment
	 * limit of the dedicated GETCPU GDT entry.
	 */
	cmpl	$TSC_TSCP, CP_TSC_TYPE(%rdi)
	je	2f
	mov	$GETCPU_GDT_OFFSET, %eax
	lsl	%ax, %eax
	ret
2:
	rdtscp
	movl	%ecx, %eax
	ret
	SET_SIZE(__cp_getcpu)
1702428aadPatrick Mooney
/*
 * hrtime_t
 * __cp_gethrtime(comm_page_t *cp)
 *
 * Computes monotonic hrtime from the comm-page snapshot:
 * hrtime_base + scaled(TSC - tsc_last), retrying if the kernel updated the
 * snapshot mid-read.  ABI: SysV AMD64; %rdi = cp.
 *
 * Stack usage: 0x20 local + 0x8 call = 0x28 bytes
 *
 * %rsp+0x00 - hrtime_t tsc_last
 * %rsp+0x08 - hrtime_t hrtime_base
 * %rsp+0x10 - commpage_t *cp
 * %rsp+0x18 - int hres_lock
 */
	ENTRY_NP(__cp_gethrtime)
	subq	$0x20, %rsp
	movq	%rdi, 0x10(%rsp)
1:
	/*
	 * Snapshot hres_lock before copying tsc_last/hrtime_base so that a
	 * concurrent kernel update of those values can be detected below.
	 */
	movl	CP_HRES_LOCK(%rdi), %r9d
	movl	%r9d, 0x18(%rsp)

	movq	CP_TSC_LAST(%rdi), %rax
	movq	CP_TSC_HRTIME_BASE(%rdi), %rdx
	movq	%rax, (%rsp)
	movq	%rdx, 0x8(%rsp)

	/* %rax = raw TSC reading (with per-CPU delta applied, if any) */
	call	__cp_tsc_read
	movq	0x10(%rsp), %rdi	/* reload cp after the call */

	/*
	 * Seqlock-style validation: mask the low (update-in-progress) bit off
	 * the *saved* lock value, then compare against a fresh read.  This
	 * fails -- forcing a retry -- both when the snapshot was taken while
	 * an update was in flight (odd saved value) and when the lock changed
	 * at all since the snapshot.
	 */
	movl	0x18(%rsp), %r9d
	movl	CP_HRES_LOCK(%rdi), %edx
	andl	$0xfffffffe, %r9d
	cmpl	%r9d, %edx
	jne	1b

	/*
	 * The in-kernel logic for calculating hrtime performs several checks
	 * to protect against edge cases.  That logic is summarized as:
	 * if (tsc >= tsc_last) {
	 *         delta -= tsc_last;
	 * } else if (tsc >= tsc_last - 2*tsc_max_delta) {
	 *         delta = 0;
	 * } else {
	 *         delta = MIN(tsc, tsc_resume_cap);
	 * }
	 *
	 * The below implementation achieves the same result, although it is
	 * structured for speed and optimized for the fast path:
	 *
	 * delta = tsc - tsc_last;
	 * if (delta < 0) {
	 *         delta += (tsc_max_delta << 1);
	 *         if (delta >= 0) {
	 *                 delta = 0;
	 *         } else {
	 *                 delta = MIN(tsc, tsc_resume_cap);
	 *         }
	 * }
	 */
	movq	(%rsp), %rdx
	subq	%rdx, %rax		/* delta = tsc - tsc_last */
	jbe	3f			/* if (delta <= 0); delta == 0 also
					   takes the slow path, which then
					   harmlessly yields delta = 0 */

2:
	/*
	 * Optimized TSC_CONVERT_AND_ADD:
	 * hrtime_base += (tsc_delta * nsec_scale) >> (32 - NSEC_SHIFT)
	 *
	 * Since the multiply and shift are done in 128-bit, there is no need
	 * to worry about overflow.
	 */
	movl	CP_NSEC_SCALE(%rdi), %ecx
	mulq	%rcx			/* %rdx:%rax = delta * nsec_scale */
	shrdq	$_CONST(32 - NSEC_SHIFT), %rdx, %rax
	movq	0x8(%rsp), %r8
	addq	%r8, %rax		/* += hrtime_base */

	addq	$0x20, %rsp
	ret

3:
	movq	%rax, %r9		/* save (tsc - tsc_last) in r9 */
	movl	CP_TSC_MAX_DELTA(%rdi), %ecx
	sall	$1, %ecx
	addq	%rcx, %rax		/* delta += (tsc_max_delta << 1) */
	jae	4f			/* no carry: delta still < 0 */
	xorq	%rax, %rax		/* within slop: delta = 0 */
	jmp	2b

4:
	/*
	 * Repopulate %rax with the TSC reading by adding tsc_last to %r9
	 * (which holds tsc - tsc_last)
	 */
	movq	(%rsp), %rax
	addq	%r9, %rax

	/* delta = MIN(tsc, resume_cap) */
	movq	CP_TSC_RESUME_CAP(%rdi), %rcx
	cmpq	%rcx, %rax
	jbe	5f
	movq	%rcx, %rax
5:
	jmp	2b

	SET_SIZE(__cp_gethrtime)
2742428aadPatrick Mooney
/*
 * int
 * __cp_clock_gettime_monotonic(comm_page_t *cp, timespec_t *tsp)
 *
 * Fills *tsp with CLOCK_MONOTONIC time derived from __cp_gethrtime().
 * Always returns 0.  ABI: SysV AMD64; %rdi = cp, %rsi = tsp.
 *
 * Stack usage: 0x8 local + 0x8 call + 0x28 called func. = 0x38 bytes
 *
 * %rsp+0x00 - timespec_t *tsp
 */
	ENTRY_NP(__cp_clock_gettime_monotonic)
	subq	$0x8, %rsp
	movq	%rsi, (%rsp)		/* preserve tsp across the call */

	call	__cp_gethrtime		/* %rax = nanoseconds */

	/*
	 * Convert from hrtime_t (int64_t in nanoseconds) to timespec_t.
	 * This uses the same approach as hrt2ts, although it has been updated
	 * to utilize 64-bit math.
	 * 1 / 1,000,000,000 =
	 * 1000100101110000010111110100000100110110101101001010110110011B-26
	 * = 0x112e0be826d694b3 * 2^-26
	 *
	 * secs = (nsecs * 0x112e0be826d694b3) >> 26
	 *
	 * In order to account for the 2s-compliment of negative inputs, a
	 * final operation completes the process:
	 *
	 * secs -= (nsecs >> 63)
	 */
	movq	%rax, %r11		/* keep raw nsecs for tv_nsec below */
	movq	$0x112e0be826d694b3, %rdx
	imulq	%rdx			/* %rdx = high 64 bits of product */
	sarq	$0x1a, %rdx		/* >> 26: %rdx = secs */
	movq	%r11, %rax
	sarq	$0x3f, %rax		/* %rax = (nsecs >> 63): 0 or -1 */
	subq	%rax, %rdx		/* round toward -inf for negatives */
	movq	(%rsp), %rsi
	movq	%rdx, (%rsi)		/* tsp->tv_sec = secs */
	/*
	 * Populating tv_nsec is easier:
	 * tv_nsec = nsecs - (secs * NANOSEC)
	 */
	imulq	$NANOSEC, %rdx, %rdx
	subq	%rdx, %r11
	movq	%r11, 0x8(%rsi)		/* tsp->tv_nsec */

	xorl	%eax, %eax		/* return 0 */
	addq	$0x8, %rsp
	ret
	SET_SIZE(__cp_clock_gettime_monotonic)
3252428aadPatrick Mooney
/*
 * int
 * __cp_clock_gettime_realtime(comm_page_t *cp, timespec_t *tsp)
 *
 * Fills *tsp with CLOCK_REALTIME time: the comm-page hrestime snapshot plus
 * the nanoseconds elapsed since the last tick, with any in-progress NTP-style
 * skew (hres_adj) applied.  Always returns 0.
 * ABI: SysV AMD64; %rdi = cp, %rsi = tsp.
 *
 * Stack usage: 0x18 local + 0x8 call + 0x28 called func. = 0x48 bytes
 *
 * %rsp+0x00 - commpage_t *cp
 * %rsp+0x08 - timespec_t *tsp
 * %rsp+0x10 - int hres_lock
 */
	ENTRY_NP(__cp_clock_gettime_realtime)
	subq	$0x18, %rsp
	movq	%rdi, (%rsp)
	movq	%rsi, 0x8(%rsp)

1:
	/* Snapshot hres_lock before reading the hrestime data. */
	movl	CP_HRES_LOCK(%rdi), %eax
	movl	%eax, 0x10(%rsp)

	call	__cp_gethrtime
	movq	(%rsp), %rdi
	movq	CP_HRES_LAST_TICK(%rdi), %rdx
	subq	%rdx, %rax			/* nslt = hrtime - last_tick */
	jb	1b				/* re-read if hrtime went back */
	movq	CP_HRESTIME(%rdi), %r9		/* now.tv_sec */
	movq	_CONST(CP_HRESTIME + CP_HRESTIME_INCR)(%rdi), %r10
	movl	CP_HRESTIME_ADJ(%rdi), %r11d

	addq	%rax, %r10			/* now.tv_nsec += nslt */

	/*
	 * hres_adj is signed, so signed conditional jumps (jg/jl) are
	 * required here.  The unsigned forms (jb/ja) would be wrong: after a
	 * compare against zero CF is always clear, so jb could never be
	 * taken, leaving the positive-adjustment path unreachable and
	 * sending positive adjustments through the negation path below.
	 */
	cmpl	$0, %r11d
	jg	4f				/* hres_adj > 0 */
	jl	6f				/* hres_adj < 0 */

2:
	cmpq	$NANOSEC, %r10
	jae	8f				/* tv_nsec >= NANOSEC */

3:
	/*
	 * Seqlock-style validation, matching __cp_gethrtime: the low
	 * (update-in-progress) bit is masked off the *saved* snapshot before
	 * comparing it to a fresh read of the lock.  Masking the fresh value
	 * instead would let a snapshot taken at even value n falsely match an
	 * in-flight update (lock == n + 1), accepting torn hrestime data.
	 */
	movl	0x10(%rsp), %eax
	movl	CP_HRES_LOCK(%rdi), %edx
	andl	$0xfffffffe, %eax
	cmpl	%eax, %edx
	jne	1b

	movq	0x8(%rsp), %rsi
	movq	%r9, (%rsi)			/* tsp->tv_sec */
	movq	%r10, 0x8(%rsi)			/* tsp->tv_nsec */

	xorl	%eax, %eax			/* return 0 */
	addq	$0x18, %rsp
	ret


4:						/* hres_adj > 0 */
	/* apply MIN(nslt >> ADJ_SHIFT, hres_adj) forward */
	sarq	$ADJ_SHIFT, %rax
	cmpl	%r11d, %eax
	jbe	5f
	movl	%r11d, %eax
5:
	addq	%rax, %r10
	jmp	2b

6:						/* hres_adj < 0 */
	/* apply MIN(nslt >> ADJ_SHIFT, -hres_adj) backward */
	sarq	$ADJ_SHIFT, %rax
	negl	%r11d
	cmpl	%r11d, %eax
	jbe	7f
	movl	%r11d, %eax
7:
	subq	%rax, %r10
	jmp	2b

8:						/* tv_nsec >= NANOSEC */
	/* normalize: carry whole seconds out of tv_nsec */
	subq	$NANOSEC, %r10
	incq	%r9
	cmpq	$NANOSEC, %r10
	jae	8b
	jmp	3b

	SET_SIZE(__cp_clock_gettime_realtime)