File: //proc/self/root/usr/lib/vmware-tools/modules/source/vmmemctl.tar
File: vmmemctl-only/dbllnklst.h
/*********************************************************
* Copyright (C) 1998 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* dbllnklst.h --
*
* Double linked lists
*/
#ifndef _DBLLNKLST_H_
#define _DBLLNKLST_H_
#define INCLUDE_ALLOW_MODULE
#define INCLUDE_ALLOW_USERLEVEL
#include "includeCheck.h"
#include "vm_basic_types.h"
#define DblLnkLst_OffsetOf(type, field) ((intptr_t)&((type *)0)->field)
#define DblLnkLst_Container(addr, type, field) \
((type *)((char *)(addr) - DblLnkLst_OffsetOf(type, field)))
#define DblLnkLst_ForEach(curr, head) \
for (curr = (head)->next; curr != (head); curr = (curr)->next)
/* Safe from list element removal within loop body. */
#define DblLnkLst_ForEachSafe(curr, nextElem, head) \
for (curr = (head)->next, nextElem = (curr)->next; \
curr != (head); \
curr = nextElem, nextElem = (curr)->next)
typedef struct DblLnkLst_Links {
struct DblLnkLst_Links *prev;
struct DblLnkLst_Links *next;
} DblLnkLst_Links;
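/*
 * Example usage (an illustrative sketch added here, not part of the
 * original header; the names 'Item', 'list' and 'links' are hypothetical):
 *
 *    typedef struct Item {
 *       int value;
 *       DblLnkLst_Links links;   // links embedded in the payload struct
 *    } Item;
 *
 *    DblLnkLst_Links list;       // anchor (list head)
 *    DblLnkLst_Links *curr;
 *
 *    DblLnkLst_Init(&list);
 *    DblLnkLst_ForEach(curr, &list) {
 *       Item *item = DblLnkLst_Container(curr, Item, links);
 *       // use item->value ...
 *    }
 */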
/*
* Functions
*
* DblLnkLst_LinkFirst, DblLnkLst_LinkLast, and DblLnkLst_Swap are specific
* to anchored lists. The rest are for both circular and anchored lists.
*/
/*
*----------------------------------------------------------------------
*
* DblLnkLst_Init --
*
* Initialize a member of a doubly linked list
*
* Result
* None
*
* Side effects:
* None
*
*----------------------------------------------------------------------
*/
static INLINE void
DblLnkLst_Init(DblLnkLst_Links *l) // IN
{
l->prev = l->next = l;
}
/*
*----------------------------------------------------------------------
*
* DblLnkLst_Link --
*
* Merge two doubly linked lists into one
*
* The operation is commutative
* The operation is inversible (its inverse is DblLnkLst_Unlink)
*
* Result
* None
*
* Side effects:
* None
*
*----------------------------------------------------------------------
*/
static INLINE void
DblLnkLst_Link(DblLnkLst_Links *l1, // IN
DblLnkLst_Links *l2) // IN
{
DblLnkLst_Links *tmp;
(tmp = l1->prev)->next = l2;
(l1->prev = l2->prev)->next = l1;
l2->prev = tmp;
}
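/*
 * Worked example (added for clarity): if ring 1 is A <-> B with l1 == A,
 * and ring 2 is X <-> Y with l2 == X, then DblLnkLst_Link(A, X) splices
 * ring 2 immediately before l1, yielding the single ring
 * A -> B -> X -> Y -> A. Applying DblLnkLst_Unlink(A, X) afterwards
 * restores the two original rings.
 */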
/*
*----------------------------------------------------------------------
*
* DblLnkLst_Unlink --
*
* Split one doubly linked list into two
*
* No check is performed: the caller must ensure that both members
* belong to the same doubly linked list
*
* The operation is commutative
* The operation is inversible (its inverse is DblLnkLst_Link)
*
* Result
* None
*
* Side effects:
* None
*
*----------------------------------------------------------------------
*/
static INLINE void
DblLnkLst_Unlink(DblLnkLst_Links *l1, // IN
DblLnkLst_Links *l2) // IN
{
DblLnkLst_Links *tmp;
tmp = l1->prev;
(l1->prev = l2->prev)->next = l1;
(l2->prev = tmp)->next = l2;
}
/*
*----------------------------------------------------------------------
*
* DblLnkLst_Unlink1 --
*
* Unlink an element from its list.
*
* Result
* None
*
* Side effects:
* None
*
*----------------------------------------------------------------------
*/
static INLINE void
DblLnkLst_Unlink1(DblLnkLst_Links *l) // IN
{
DblLnkLst_Unlink(l, l->next);
}
/*
*----------------------------------------------------------------------------
*
* DblLnkLst_IsLinked --
*
* Determines whether an element is linked with any other elements.
*
* Results:
* TRUE if 'l' is linked, FALSE otherwise.
*
* Side effects:
* None.
*
*----------------------------------------------------------------------------
*/
static INLINE Bool
DblLnkLst_IsLinked(DblLnkLst_Links const *l) // IN
{
/*
* A DblLnkLst_Links is either linked to itself (not linked) or linked to
* other elements in a list (linked).
*/
return l->prev != l;
}
/*
*----------------------------------------------------------------------
*
* DblLnkLst_LinkFirst --
*
* Insert 'l' at the beginning of the list anchored at 'head'
*
* Result
* None
*
* Side effects:
* None
*
*----------------------------------------------------------------------
*/
static INLINE void
DblLnkLst_LinkFirst(DblLnkLst_Links *head, // IN
DblLnkLst_Links *l) // IN
{
DblLnkLst_Link(head->next, l);
}
/*
*----------------------------------------------------------------------
*
* DblLnkLst_LinkLast --
*
* Insert 'l' at the end of the list anchored at 'head'
*
* Result
* None
*
* Side effects:
* None
*
*----------------------------------------------------------------------
*/
static INLINE void
DblLnkLst_LinkLast(DblLnkLst_Links *head, // IN
DblLnkLst_Links *l) // IN
{
DblLnkLst_Link(head, l);
}
/*
*----------------------------------------------------------------------
*
* DblLnkLst_Swap --
*
* Swap all entries between the list anchored at 'head1' and the list
* anchored at 'head2'.
*
* The operation is commutative
* The operation is inversible (its inverse is itself)
*
* Result
* None
*
* Side effects:
* None
*
*----------------------------------------------------------------------
*/
static INLINE void
DblLnkLst_Swap(DblLnkLst_Links *head1, // IN/OUT
DblLnkLst_Links *head2) // IN/OUT
{
DblLnkLst_Links const tmp = *head1;
if (DblLnkLst_IsLinked(head2)) {
(head1->prev = head2->prev)->next = head1;
(head1->next = head2->next)->prev = head1;
} else {
DblLnkLst_Init(head1);
}
if (tmp.prev != head1) {
(head2->prev = tmp.prev)->next = head2;
(head2->next = tmp.next)->prev = head2;
} else {
DblLnkLst_Init(head2);
}
}
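/*
 * Example (an illustrative sketch, not part of the original header):
 * removing elements while iterating requires the _ForEachSafe variant,
 * because DblLnkLst_Unlink1 leaves the removed element self-linked, which
 * would make a plain _ForEach loop spin on it forever. 'list', 'Item' and
 * 'links' are the hypothetical names from the example near the top of
 * this file; 'ShouldRemove' is also hypothetical.
 *
 *    DblLnkLst_Links *curr, *next;
 *    DblLnkLst_ForEachSafe(curr, next, &list) {
 *       Item *item = DblLnkLst_Container(curr, Item, links);
 *       if (ShouldRemove(item)) {
 *          DblLnkLst_Unlink1(curr);
 *       }
 *    }
 */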
#endif /* _DBLLNKLST_H_ */
File: vmmemctl-only/backdoorGcc64.c
/*********************************************************
* Copyright (C) 2005 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* backdoorGcc64.c --
*
* Implements the real work for the guest-side backdoor for GCC, 64-bit
* target (supports inline ASM, GAS syntax). The asm sections are marked
* volatile since vmware can change the register contents without the
* compiler knowing it.
*
* See backdoorGCC32.c (from which this code was mostly copied) for
* details on why the ASM is written this way. Also note that it might be
* possible to write the asm blocks using the symbolic operand specifiers
* in such a way that the same asm would generate correct code for both
* 32-bit and 64-bit targets, but I'm too lazy to figure it all out.
* --rrdharan
*/
#ifdef __cplusplus
extern "C" {
#endif
#include "backdoor.h"
#include "backdoorInt.h"
/*
*----------------------------------------------------------------------------
*
* Backdoor_InOut --
*
* Send a low-bandwidth basic request (16 bytes) to vmware, and return its
* reply (24 bytes).
*
* Results:
* Host-side response returned in bp IN/OUT parameter.
*
* Side effects:
* Pokes the backdoor.
*
*----------------------------------------------------------------------------
*/
void
Backdoor_InOut(Backdoor_proto *myBp) // IN/OUT
{
uint64 dummy;
__asm__ __volatile__(
#ifdef __APPLE__
/*
* Save %rbx on the stack because the Mac OS GCC doesn't want us to
* clobber it - it erroneously thinks %rbx is the PIC register.
* (Radar bug 7304232)
*/
"pushq %%rbx" "\n\t"
#endif
"pushq %%rax" "\n\t"
"movq 40(%%rax), %%rdi" "\n\t"
"movq 32(%%rax), %%rsi" "\n\t"
"movq 24(%%rax), %%rdx" "\n\t"
"movq 16(%%rax), %%rcx" "\n\t"
"movq 8(%%rax), %%rbx" "\n\t"
"movq (%%rax), %%rax" "\n\t"
"inl %%dx, %%eax" "\n\t" /* NB: There is no inq instruction */
"xchgq %%rax, (%%rsp)" "\n\t"
"movq %%rdi, 40(%%rax)" "\n\t"
"movq %%rsi, 32(%%rax)" "\n\t"
"movq %%rdx, 24(%%rax)" "\n\t"
"movq %%rcx, 16(%%rax)" "\n\t"
"movq %%rbx, 8(%%rax)" "\n\t"
"popq (%%rax)" "\n\t"
#ifdef __APPLE__
"popq %%rbx" "\n\t"
#endif
: "=a" (dummy)
: "0" (myBp)
/*
* vmware can modify the whole VM state without the compiler knowing
* it. So far it does not modify EFLAGS. --hpreg
*/
:
#ifndef __APPLE__
/* %rbx is unchanged at the end of the function on Mac OS. */
"rbx",
#endif
"rcx", "rdx", "rsi", "rdi", "memory"
);
}
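/*
 * Layout note (added for clarity): the asm above treats *myBp as six
 * contiguous 64-bit register slots -- rax at offset 0, rbx at 8, rcx at
 * 16, rdx at 24, rsi at 32 and rdi at 40. The request registers are
 * loaded from the slots, "inl" on the port in %dx traps to the
 * hypervisor, and the (possibly modified) registers are stored back into
 * the same slots as the reply. By the widely documented convention
 * (defined in backdoor_def.h, not here) the rax slot carries the backdoor
 * magic 0x564D5868 ('VMXh'), the low half of the rcx slot the command
 * number, and the low 16 bits of the rdx slot the backdoor port 0x5658.
 */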
/*
*-----------------------------------------------------------------------------
*
* BackdoorHbIn --
* BackdoorHbOut --
*
* Send a high-bandwidth basic request to vmware, and return its
* reply.
*
* Results:
* Host-side response returned in bp IN/OUT parameter.
*
* Side-effects:
* Pokes the high-bandwidth backdoor port.
*
*-----------------------------------------------------------------------------
*/
void
BackdoorHbIn(Backdoor_proto_hb *myBp) // IN/OUT
{
uint64 dummy;
__asm__ __volatile__(
"pushq %%rbp" "\n\t"
#ifdef __APPLE__
/*
* Save %rbx on the stack because the Mac OS GCC doesn't want us to
* clobber it - it erroneously thinks %rbx is the PIC register.
* (Radar bug 7304232)
*/
"pushq %%rbx" "\n\t"
#endif
"pushq %%rax" "\n\t"
"movq 48(%%rax), %%rbp" "\n\t"
"movq 40(%%rax), %%rdi" "\n\t"
"movq 32(%%rax), %%rsi" "\n\t"
"movq 24(%%rax), %%rdx" "\n\t"
"movq 16(%%rax), %%rcx" "\n\t"
"movq 8(%%rax), %%rbx" "\n\t"
"movq (%%rax), %%rax" "\n\t"
"cld" "\n\t"
"rep; insb" "\n\t"
"xchgq %%rax, (%%rsp)" "\n\t"
"movq %%rbp, 48(%%rax)" "\n\t"
"movq %%rdi, 40(%%rax)" "\n\t"
"movq %%rsi, 32(%%rax)" "\n\t"
"movq %%rdx, 24(%%rax)" "\n\t"
"movq %%rcx, 16(%%rax)" "\n\t"
"movq %%rbx, 8(%%rax)" "\n\t"
"popq (%%rax)" "\n\t"
#ifdef __APPLE__
"popq %%rbx" "\n\t"
#endif
"popq %%rbp"
: "=a" (dummy)
: "0" (myBp)
/*
* vmware can modify the whole VM state without the compiler knowing
* it. --hpreg
*/
:
#ifndef __APPLE__
/* %rbx is unchanged at the end of the function on Mac OS. */
"rbx",
#endif
"rcx", "rdx", "rsi", "rdi", "memory", "cc"
);
}
void
BackdoorHbOut(Backdoor_proto_hb *myBp) // IN/OUT
{
uint64 dummy;
__asm__ __volatile__(
"pushq %%rbp" "\n\t"
#ifdef __APPLE__
/*
* Save %rbx on the stack because the Mac OS GCC doesn't want us to
* clobber it - it erroneously thinks %rbx is the PIC register.
* (Radar bug 7304232)
*/
"pushq %%rbx" "\n\t"
#endif
"pushq %%rax" "\n\t"
"movq 48(%%rax), %%rbp" "\n\t"
"movq 40(%%rax), %%rdi" "\n\t"
"movq 32(%%rax), %%rsi" "\n\t"
"movq 24(%%rax), %%rdx" "\n\t"
"movq 16(%%rax), %%rcx" "\n\t"
"movq 8(%%rax), %%rbx" "\n\t"
"movq (%%rax), %%rax" "\n\t"
"cld" "\n\t"
"rep; outsb" "\n\t"
"xchgq %%rax, (%%rsp)" "\n\t"
"movq %%rbp, 48(%%rax)" "\n\t"
"movq %%rdi, 40(%%rax)" "\n\t"
"movq %%rsi, 32(%%rax)" "\n\t"
"movq %%rdx, 24(%%rax)" "\n\t"
"movq %%rcx, 16(%%rax)" "\n\t"
"movq %%rbx, 8(%%rax)" "\n\t"
"popq (%%rax)" "\n\t"
#ifdef __APPLE__
"popq %%rbx" "\n\t"
#endif
"popq %%rbp"
: "=a" (dummy)
: "0" (myBp)
:
#ifndef __APPLE__
/* %rbx is unchanged at the end of the function on Mac OS. */
"rbx",
#endif
"rcx", "rdx", "rsi", "rdi", "memory", "cc"
);
}
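/*
 * Layout note (added for clarity): the high-bandwidth variants extend the
 * six-slot layout described above with a seventh slot for rbp at offset
 * 48. The string instructions then move the bulk data: "rep insb" reads
 * %rcx bytes from the port in %dx into the buffer at %rdi (BackdoorHbIn),
 * and "rep outsb" writes %rcx bytes from %rsi to that port
 * (BackdoorHbOut); "cld" first clears the direction flag so the buffer
 * pointers advance upward.
 */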
#ifdef __cplusplus
}
#endif
File: vmmemctl-only/vmballoon.h
/*********************************************************
* Copyright (C) 2000-2012 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* vmballoon.h: Definitions and macros for vmballoon driver.
*/
#ifndef VMBALLOON_H
#define VMBALLOON_H
#include "balloonInt.h"
#include "vm_basic_types.h"
#include "dbllnklst.h"
#include "os.h"
/*
* Page allocation flags
*/
typedef enum BalloonPageAllocType {
BALLOON_PAGE_ALLOC_NOSLEEP = 0,
BALLOON_PAGE_ALLOC_CANSLEEP = 1,
BALLOON_PAGE_ALLOC_TYPES_NR, // total number of alloc types
} BalloonPageAllocType;
/*
* Types
*/
typedef struct {
/* current status */
uint32 nPages;
uint32 nPagesTarget;
/* adjustment rates */
uint32 rateNoSleepAlloc;
uint32 rateAlloc;
uint32 rateFree;
/* high-level operations */
uint32 timer;
/* primitives */
uint32 primAlloc[BALLOON_PAGE_ALLOC_TYPES_NR];
uint32 primAllocFail[BALLOON_PAGE_ALLOC_TYPES_NR];
uint32 primFree;
uint32 primErrorPageAlloc;
uint32 primErrorPageFree;
/* monitor operations */
uint32 lock;
uint32 lockFail;
uint32 unlock;
uint32 unlockFail;
uint32 target;
uint32 targetFail;
uint32 start;
uint32 startFail;
uint32 guestType;
uint32 guestTypeFail;
} BalloonStats;
#define BALLOON_ERROR_PAGES 16
typedef struct {
PageHandle page[BALLOON_ERROR_PAGES];
uint32 pageCount;
} BalloonErrorPages;
#define BALLOON_CHUNK_PAGES 1000
typedef struct BalloonChunk {
PageHandle page[BALLOON_CHUNK_PAGES];
uint32 pageCount;
DblLnkLst_Links node;
} BalloonChunk;
struct BalloonOps;
typedef struct {
/* sets of reserved physical pages */
DblLnkLst_Links chunks;
int nChunks;
/* transient list of non-balloonable pages */
BalloonErrorPages errors;
BalloonGuest guestType;
/* balloon size */
int nPages;
int nPagesTarget;
/* reset flag */
int resetFlag;
/* adjustment rates (pages per second) */
int rateAlloc;
int rateFree;
/* slowdown page allocations for next few cycles */
int slowPageAllocationCycles;
/* statistics */
BalloonStats stats;
/* hypervisor exposed capabilities */
BalloonCapabilities hypervisorCapabilities;
/* balloon operations, tied to the capabilities */
const struct BalloonOps *balloonOps;
/* Either the batch page handle, or the page to lock on v2 */
PageHandle pageHandle;
Mapping batchPageMapping;
BalloonBatchPage *batchPage;
uint16 batchMaxPages;
BalloonChunk *fallbackChunk;
} Balloon;
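/*
 * Illustrative sketch (not part of the original header): chunks are
 * chained through their embedded 'node' links, so walking a balloon's
 * chunk list pairs DblLnkLst_ForEach with DblLnkLst_Container:
 *
 *    DblLnkLst_Links *curr;
 *    DblLnkLst_ForEach(curr, &b->chunks) {
 *       BalloonChunk *chunk = DblLnkLst_Container(curr, BalloonChunk, node);
 *       // chunk->pageCount pages are currently reserved in this chunk
 *    }
 */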
typedef struct BalloonOps {
void (*addPage)(Balloon *b, uint16 idx, PageHandle page);
int (*lock)(Balloon *b, uint16 nPages);
int (*unlock)(Balloon *b, uint16 nPages);
} BalloonOps;
/*
* Operations
*/
Bool Balloon_Init(BalloonGuest guestType);
void Balloon_Cleanup(void);
void Balloon_QueryAndExecute(void);
const BalloonStats *Balloon_GetStats(void);
#endif /* VMBALLOON_H */
File: vmmemctl-only/Makefile
#!/usr/bin/make -f
##########################################################
# Copyright (C) 1998 VMware, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation version 2 and no later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
##########################################################
####
#### VMware kernel module Makefile to be distributed externally
####
####
#### SRCROOT _must_ be a relative path.
####
SRCROOT = .
#
# open-vm-tools doesn't replicate shared source files for different modules;
# instead, files are kept in shared locations. So define a few useful macros
# to be able to handle both cases cleanly.
#
INCLUDE :=
ifdef OVT_SOURCE_DIR
AUTOCONF_DIR := $(OVT_SOURCE_DIR)/modules/linux/shared/autoconf
VMLIB_PATH = $(OVT_SOURCE_DIR)/lib/$(1)
INCLUDE += -I$(OVT_SOURCE_DIR)/modules/linux/shared
INCLUDE += -I$(OVT_SOURCE_DIR)/lib/include
else
AUTOCONF_DIR := $(SRCROOT)/shared/autoconf
INCLUDE += -I$(SRCROOT)/shared
endif
VM_UNAME = $(shell uname -r)
# Header directory for the running kernel
ifdef LINUXINCLUDE
HEADER_DIR = $(LINUXINCLUDE)
else
HEADER_DIR = /lib/modules/$(VM_UNAME)/build/include
endif
BUILD_DIR = $(HEADER_DIR)/..
DRIVER := vmmemctl
PRODUCT := tools
# Grep program
GREP = /bin/grep
vm_check_gcc = $(shell if $(CC) $(1) -S -o /dev/null -xc /dev/null \
> /dev/null 2>&1; then echo "$(1)"; else echo "$(2)"; fi)
vm_check_file = $(shell if test -f $(1); then echo "yes"; else echo "no"; fi)
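# Example (illustrative, not part of the original Makefile): vm_check_gcc
# expands to its first argument if the compiler accepts it, else to the
# second, so a flag probe looks like
#   CC_OPTS += $(call vm_check_gcc,-falign-jumps=1,-malign-jumps=1)
# vm_check_file is used the same way below to probe for $(BUILD_DIR)/Makefile.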
ifndef VM_KBUILD
VM_KBUILD := no
ifeq ($(call vm_check_file,$(BUILD_DIR)/Makefile), yes)
ifneq ($(call vm_check_file,$(BUILD_DIR)/Rules.make), yes)
VM_KBUILD := 26
endif
endif
export VM_KBUILD
endif
ifndef VM_KBUILD_SHOWN
ifeq ($(VM_KBUILD), no)
VM_DUMMY := $(shell echo >&2 "Using standalone build system.")
else
ifeq ($(VM_KBUILD), 24)
VM_DUMMY := $(shell echo >&2 "Using 2.4.x kernel build system.")
else
VM_DUMMY := $(shell echo >&2 "Using 2.6.x kernel build system.")
endif
endif
VM_KBUILD_SHOWN := yes
export VM_KBUILD_SHOWN
endif
ifneq ($(VM_KBUILD), no)
VMCCVER := $(shell $(CC) -dumpversion)
# If there is no version defined, we are in the toplevel pass, not yet in the kernel makefiles...
ifeq ($(VERSION),)
ifeq ($(VM_KBUILD), 24)
DRIVER_KO := $(DRIVER).o
else
DRIVER_KO := $(DRIVER).ko
endif
.PHONY: $(DRIVER_KO)
auto-build: $(DRIVER_KO)
cp -f $< $(SRCROOT)/../$(DRIVER).o
# $(DRIVER_KO) is a phony target, so compare file times explicitly
$(DRIVER): $(DRIVER_KO)
if [ $< -nt $@ ] || [ ! -e $@ ] ; then cp -f $< $@; fi
# Pass the gcc version down the chain, so we can detect if the kernel attempts to use an unapproved compiler
VM_CCVER := $(VMCCVER)
export VM_CCVER
VM_CC := $(CC)
export VM_CC
MAKEOVERRIDES := $(filter-out CC=%,$(MAKEOVERRIDES))
#
# Define a setup target that gets built before the actual driver.
# This target may not be used at all, but if it is then it will be defined
# in Makefile.kernel
#
prebuild:: ;
postbuild:: ;
$(DRIVER_KO): prebuild
$(MAKE) -C $(BUILD_DIR) SUBDIRS=$$PWD SRCROOT=$$PWD/$(SRCROOT) \
MODULEBUILDDIR=$(MODULEBUILDDIR) modules
$(MAKE) -C $$PWD SRCROOT=$$PWD/$(SRCROOT) \
MODULEBUILDDIR=$(MODULEBUILDDIR) postbuild
endif
vm_check_build = $(shell if $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) \
$(CPPFLAGS) $(CFLAGS) $(CFLAGS_KERNEL) $(LINUXINCLUDE) \
$(EXTRA_CFLAGS) -Iinclude2/asm/mach-default \
-DKBUILD_BASENAME=\"$(DRIVER)\" \
-Werror -S -o /dev/null -xc $(1) \
> /dev/null 2>&1; then echo "$(2)"; else echo "$(3)"; fi)
CC_WARNINGS := -Wall -Wstrict-prototypes
CC_OPTS := $(GLOBAL_DEFS) $(CC_WARNINGS) -DVMW_USING_KBUILD
ifdef VMX86_DEVEL
CC_OPTS += -DVMX86_DEVEL
endif
ifdef VMX86_DEBUG
CC_OPTS += -DVMX86_DEBUG
endif
include $(SRCROOT)/Makefile.kernel
ifdef TOPDIR
ifeq ($(VM_KBUILD), 24)
O_TARGET := $(DRIVER).o
obj-y := $($(DRIVER)-y)
include $(TOPDIR)/Rules.make
endif
endif
else
include $(SRCROOT)/Makefile.normal
endif
#.SILENT:
File: vmmemctl-only/COPYING
GNU GENERAL PUBLIC LICENSE
Version 2, June 1991
Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The licenses for most software are designed to take away your
freedom to share and change it. By contrast, the GNU General Public
License is intended to guarantee your freedom to share and change free
software--to make sure the software is free for all its users. This
General Public License applies to most of the Free Software
Foundation's software and to any other program whose authors commit to
using it. (Some other Free Software Foundation software is covered by
the GNU Lesser General Public License instead.) You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
this service if you wish), that you receive source code or can get it
if you want it, that you can change the software or use pieces of it
in new free programs; and that you know you can do these things.
To protect your rights, we need to make restrictions that forbid
anyone to deny you these rights or to ask you to surrender the rights.
These restrictions translate to certain responsibilities for you if you
distribute copies of the software, or if you modify it.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must give the recipients all the rights that
you have. You must make sure that they, too, receive or can get the
source code. And you must show them these terms so they know their
rights.
We protect your rights with two steps: (1) copyright the software, and
(2) offer you this license which gives you legal permission to copy,
distribute and/or modify the software.
Also, for each author's protection and ours, we want to make certain
that everyone understands that there is no warranty for this free
software. If the software is modified by someone else and passed on, we
want its recipients to know that what they have is not the original, so
that any problems introduced by others will not reflect on the original
authors' reputations.
Finally, any free program is threatened constantly by software
patents. We wish to avoid the danger that redistributors of a free
program will individually obtain patent licenses, in effect making the
program proprietary. To prevent this, we have made it clear that any
patent must be licensed for everyone's free use or not licensed at all.
The precise terms and conditions for copying, distribution and
modification follow.
GNU GENERAL PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. This License applies to any program or other work which contains
a notice placed by the copyright holder saying it may be distributed
under the terms of this General Public License. The "Program", below,
refers to any such program or work, and a "work based on the Program"
means either the Program or any derivative work under copyright law:
that is to say, a work containing the Program or a portion of it,
either verbatim or with modifications and/or translated into another
language. (Hereinafter, translation is included without limitation in
the term "modification".) Each licensee is addressed as "you".
Activities other than copying, distribution and modification are not
covered by this License; they are outside its scope. The act of
running the Program is not restricted, and the output from the Program
is covered only if its contents constitute a work based on the
Program (independent of having been made by running the Program).
Whether that is true depends on what the Program does.
1. You may copy and distribute verbatim copies of the Program's
source code as you receive it, in any medium, provided that you
conspicuously and appropriately publish on each copy an appropriate
copyright notice and disclaimer of warranty; keep intact all the
notices that refer to this License and to the absence of any warranty;
and give any other recipients of the Program a copy of this License
along with the Program.
You may charge a fee for the physical act of transferring a copy, and
you may at your option offer warranty protection in exchange for a fee.
2. You may modify your copy or copies of the Program or any portion
of it, thus forming a work based on the Program, and copy and
distribute such modifications or work under the terms of Section 1
above, provided that you also meet all of these conditions:
a) You must cause the modified files to carry prominent notices
stating that you changed the files and the date of any change.
b) You must cause any work that you distribute or publish, that in
whole or in part contains or is derived from the Program or any
part thereof, to be licensed as a whole at no charge to all third
parties under the terms of this License.
c) If the modified program normally reads commands interactively
when run, you must cause it, when started running for such
interactive use in the most ordinary way, to print or display an
announcement including an appropriate copyright notice and a
notice that there is no warranty (or else, saying that you provide
a warranty) and that users may redistribute the program under
these conditions, and telling the user how to view a copy of this
License. (Exception: if the Program itself is interactive but
does not normally print such an announcement, your work based on
the Program is not required to print an announcement.)
These requirements apply to the modified work as a whole. If
identifiable sections of that work are not derived from the Program,
and can be reasonably considered independent and separate works in
themselves, then this License, and its terms, do not apply to those
sections when you distribute them as separate works. But when you
distribute the same sections as part of a whole which is a work based
on the Program, the distribution of the whole must be on the terms of
this License, whose permissions for other licensees extend to the
entire whole, and thus to each and every part regardless of who wrote it.
Thus, it is not the intent of this section to claim rights or contest
your rights to work written entirely by you; rather, the intent is to
exercise the right to control the distribution of derivative or
collective works based on the Program.
In addition, mere aggregation of another work not based on the Program
with the Program (or with a work based on the Program) on a volume of
a storage or distribution medium does not bring the other work under
the scope of this License.
3. You may copy and distribute the Program (or a work based on it,
under Section 2) in object code or executable form under the terms of
Sections 1 and 2 above provided that you also do one of the following:
a) Accompany it with the complete corresponding machine-readable
source code, which must be distributed under the terms of Sections
1 and 2 above on a medium customarily used for software interchange; or,
b) Accompany it with a written offer, valid for at least three
years, to give any third party, for a charge no more than your
cost of physically performing source distribution, a complete
machine-readable copy of the corresponding source code, to be
distributed under the terms of Sections 1 and 2 above on a medium
customarily used for software interchange; or,
c) Accompany it with the information you received as to the offer
to distribute corresponding source code. (This alternative is
allowed only for noncommercial distribution and only if you
received the program in object code or executable form with such
an offer, in accord with Subsection b above.)
The source code for a work means the preferred form of the work for
making modifications to it. For an executable work, complete source
code means all the source code for all modules it contains, plus any
associated interface definition files, plus the scripts used to
control compilation and installation of the executable. However, as a
special exception, the source code distributed need not include
anything that is normally distributed (in either source or binary
form) with the major components (compiler, kernel, and so on) of the
operating system on which the executable runs, unless that component
itself accompanies the executable.
If distribution of executable or object code is made by offering
access to copy from a designated place, then offering equivalent
access to copy the source code from the same place counts as
distribution of the source code, even though third parties are not
compelled to copy the source along with the object code.
4. You may not copy, modify, sublicense, or distribute the Program
except as expressly provided under this License. Any attempt
otherwise to copy, modify, sublicense or distribute the Program is
void, and will automatically terminate your rights under this License.
However, parties who have received copies, or rights, from you under
this License will not have their licenses terminated so long as such
parties remain in full compliance.
5. You are not required to accept this License, since you have not
signed it. However, nothing else grants you permission to modify or
distribute the Program or its derivative works. These actions are
prohibited by law if you do not accept this License. Therefore, by
modifying or distributing the Program (or any work based on the
Program), you indicate your acceptance of this License to do so, and
all its terms and conditions for copying, distributing or modifying
the Program or works based on it.
6. Each time you redistribute the Program (or any work based on the
Program), the recipient automatically receives a license from the
original licensor to copy, distribute or modify the Program subject to
these terms and conditions. You may not impose any further
restrictions on the recipients' exercise of the rights granted herein.
You are not responsible for enforcing compliance by third parties to
this License.
7. If, as a consequence of a court judgment or allegation of patent
infringement or for any other reason (not limited to patent issues),
conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot
distribute so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you
may not distribute the Program at all. For example, if a patent
license would not permit royalty-free redistribution of the Program by
all those who receive copies directly or indirectly through you, then
the only way you could satisfy both it and this License would be to
refrain entirely from distribution of the Program.
If any portion of this section is held invalid or unenforceable under
any particular circumstance, the balance of the section is intended to
apply and the section as a whole is intended to apply in other
circumstances.
It is not the purpose of this section to induce you to infringe any
patents or other property right claims or to contest validity of any
such claims; this section has the sole purpose of protecting the
integrity of the free software distribution system, which is
implemented by public license practices. Many people have made
generous contributions to the wide range of software distributed
through that system in reliance on consistent application of that
system; it is up to the author/donor to decide if he or she is willing
to distribute software through any other system and a licensee cannot
impose that choice.
This section is intended to make thoroughly clear what is believed to
be a consequence of the rest of this License.
8. If the distribution and/or use of the Program is restricted in
certain countries either by patents or by copyrighted interfaces, the
original copyright holder who places the Program under this License
may add an explicit geographical distribution limitation excluding
those countries, so that distribution is permitted only in or among
countries not thus excluded. In such case, this License incorporates
the limitation as if written in the body of this License.
9. The Free Software Foundation may publish revised and/or new versions
of the General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the Program
specifies a version number of this License which applies to it and "any
later version", you have the option of following the terms and conditions
either of that version or of any later version published by the Free
Software Foundation. If the Program does not specify a version number of
this License, you may choose any version ever published by the Free Software
Foundation.
10. If you wish to incorporate parts of the Program into other free
programs whose distribution conditions are different, write to the author
to ask for permission. For software which is copyrighted by the Free
Software Foundation, write to the Free Software Foundation; we sometimes
make exceptions for this. Our decision will be guided by the two goals
of preserving the free status of all derivatives of our free software and
of promoting the sharing and reuse of software generally.
NO WARRANTY
11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
REPAIR OR CORRECTION.
12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
POSSIBILITY OF SUCH DAMAGES.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
convey the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
Also add information on how to contact you by electronic and paper mail.
If the program is interactive, make it output a short notice like this
when it starts in an interactive mode:
Gnomovision version 69, Copyright (C) year name of author
Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, the commands you use may
be called something other than `show w' and `show c'; they could even be
mouse-clicks or menu items--whatever suits your program.
You should also get your employer (if you work as a programmer) or your
school, if any, to sign a "copyright disclaimer" for the program, if
necessary. Here is a sample; alter the names:
Yoyodyne, Inc., hereby disclaims all copyright interest in the program
`Gnomovision' (which makes passes at compilers) written by James Hacker.
<signature of Ty Coon>, 1 April 1989
Ty Coon, President of Vice
This General Public License does not permit incorporating your program into
proprietary programs. If your program is a subroutine library, you may
consider it more useful to permit linking proprietary applications with the
library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License.
File: vmmemctl-only/kernelStubs.h
/*********************************************************
* Copyright (C) 2006 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* kernelStubs.h
*
* KernelStubs implements some userspace library functions in terms
* of kernel functions to allow library userspace code to be used in a
* kernel.
*/
#ifndef __KERNELSTUBS_H__
#define __KERNELSTUBS_H__
#ifdef linux
# ifndef __KERNEL__
# error "__KERNEL__ is not defined"
# endif
# include "driver-config.h" // Must be included before any other header files
# include "vm_basic_types.h"
# include <linux/kernel.h>
# include <linux/string.h>
#elif defined(_WIN32)
# include "vm_basic_types.h"
# include <ntddk.h> /* kernel memory APIs */
# include <stdio.h> /* for _vsnprintf, vsprintf */
# include <stdarg.h> /* for va_start stuff */
# include <stdlib.h> /* for min macro. */
# include "vm_assert.h" /* Our assert macros */
#elif defined(__FreeBSD__)
# include "vm_basic_types.h"
# ifndef _KERNEL
# error "_KERNEL is not defined"
# endif
# include <sys/types.h>
# include <sys/malloc.h>
# include <sys/param.h>
# include <sys/kernel.h>
# include <machine/stdarg.h>
# include <sys/libkern.h>
#elif defined(__APPLE__)
# include "vm_basic_types.h"
# ifndef KERNEL
# error "KERNEL is not defined"
# endif
# include <stdarg.h>
# include <string.h>
#elif defined(sun)
# include "vm_basic_types.h"
# include <sys/types.h>
# include <sys/varargs.h>
#endif
/*
* Function Prototypes
*/
#if defined(linux) || defined(__APPLE__) || defined (sun)
# ifdef linux /* if (linux) { */
char *strdup(const char *source);
# endif
/* Shared between Linux and Apple kernel stubs. */
void *malloc(size_t size);
void free(void *mem);
void *calloc(size_t num, size_t len);
void *realloc(void *ptr, size_t newSize);
#elif defined(_WIN32) /* } else if (_WIN32) { */
#if (_WIN32_WINNT == 0x0400)
/* The following declarations are missing on NT4. */
typedef unsigned int UINT_PTR;
typedef unsigned int SIZE_T;
/* No free with tag available on the NT4 kernel! */
#define KRNL_STUBS_FREE(P,T) ExFreePool((P))
#else /* _WIN32_WINNT */
#define KRNL_STUBS_FREE(P,T) ExFreePoolWithTag((P),(T))
/* A useful Win2K-and-later kernel function, documented but not declared! */
NTKERNELAPI VOID ExFreePoolWithTag(IN PVOID P, IN ULONG Tag);
#endif /* _WIN32_WINNT */
#elif defined(__FreeBSD__) /* } else if (FreeBSD) { */
/* Kernel memory on FreeBSD is tagged for statistics and sanity checking. */
MALLOC_DECLARE(M_VMWARE_TEMP);
/*
* On FreeBSD, the general memory allocator for both userland and the kernel is named
* malloc, but the kernel malloc() takes more arguments. The following alias & macros
* work around this, to provide the standard malloc() API for userspace code that is
* being used in the kernel.
*/
# undef malloc
static INLINE void *
__compat_malloc(unsigned long size, struct malloc_type *type, int flags) {
return malloc(size, type, flags);
}
# define malloc(size) __compat_malloc(size, M_VMWARE_TEMP, M_NOWAIT)
# define calloc(count, size) __compat_malloc((count) * (size), \
M_VMWARE_TEMP, M_NOWAIT|M_ZERO)
# define realloc(buf, size) realloc(buf, size, M_VMWARE_TEMP, M_NOWAIT)
# define free(buf) free(buf, M_VMWARE_TEMP)
# define strchr(s,c) index(s,c)
# define strrchr(s,c) rindex(s,c)
#endif /* } */
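/*
 * Illustrative note (added, not part of the original header): with the
 * aliases above, portable userspace code such as
 *
 *    char *buf = malloc(bufSize);   // 'bufSize' is hypothetical
 *    ...
 *    free(buf);
 *
 * compiles unchanged in the FreeBSD kernel: malloc() expands to
 * __compat_malloc(bufSize, M_VMWARE_TEMP, M_NOWAIT) and free() to the
 * tagged kernel free(buf, M_VMWARE_TEMP).
 */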
/*
* Stub functions we provide.
*/
void Panic(const char *fmt, ...);
char *Str_Strcpy(char *buf, const char *src, size_t maxSize);
int Str_Vsnprintf(char *str, size_t size, const char *format,
va_list arguments);
char *Str_Vasprintf(size_t *length, const char *format,
va_list arguments);
char *Str_Asprintf(size_t *length, const char *Format, ...);
/*
* Functions the driver must implement for the stubs.
*/
EXTERN void Debug(const char *fmt, ...);
#endif /* __KERNELSTUBS_H__ */
File: vmmemctl-only/os.c
/*********************************************************
* Copyright (C) 2000 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* os.c --
*
* Wrappers for Linux system functions required by "vmmemctl".
*/
/*
* Compile-Time Options
*/
#define OS_DISABLE_UNLOAD 0
#define OS_DEBUG 1
/*
* Includes
*/
#include "driver-config.h"
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/timer.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#if defined(CONFIG_PROC_FS) || defined (CONFIG_DEBUG_FS)
#include <linux/stat.h>
#include <linux/seq_file.h>
#endif /* CONFIG_PROC_FS || CONFIG_DEBUG_FS*/
#include "compat_sched.h"
#include <asm/uaccess.h>
#include <asm/page.h>
#include "vmmemctl_version.h"
#include "os.h"
#include "vmballoon.h"
/*
* Constants
*/
/*
* Use __GFP_HIGHMEM to allow pages from HIGHMEM zone. We don't
* allow wait (__GFP_WAIT) for NOSLEEP page allocations. Use
* __GFP_NOWARN, to suppress page allocation failure warnings.
*/
#define OS_PAGE_ALLOC_NOSLEEP (__GFP_HIGHMEM|__GFP_NOWARN)
/*
* GFP_ATOMIC allocations dig deep for free pages. That is probably
* acceptable here because the balloon driver uses OS_Malloc() to allocate
* only a few bytes, and the allocation requires a new page only
* occasionally. Still, if the __GFP_NOMEMALLOC flag is available, use it
* to tell the guest's page allocator not to dip into emergency pools.
*/
#ifdef __GFP_NOMEMALLOC
#define OS_KMALLOC_NOSLEEP (GFP_ATOMIC|__GFP_NOMEMALLOC|__GFP_NOWARN)
#else
#define OS_KMALLOC_NOSLEEP (GFP_ATOMIC|__GFP_NOWARN)
#endif
/*
* Use GFP_HIGHUSER when executing in a separate kernel thread
* context and allocation can sleep. This is less stressful to
* the guest memory system, since it allows the thread to block
* while memory is reclaimed, and won't take pages from emergency
* low-memory pools.
*/
#define OS_PAGE_ALLOC_CANSLEEP (GFP_HIGHUSER)
/*
* Globals
*/
static struct task_struct *vmballoon_task;
/*
*-----------------------------------------------------------------------------
*
* OS_Malloc --
*
* Allocates kernel memory.
*
* Results:
* On success: Pointer to allocated memory
* On failure: NULL
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
void *
OS_Malloc(size_t size) // IN
{
return kmalloc(size, OS_KMALLOC_NOSLEEP);
}
/*
*-----------------------------------------------------------------------------
*
* OS_Free --
*
* Free allocated kernel memory.
*
* Results:
* None
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
void
OS_Free(void *ptr, // IN
size_t size) // IN
{
kfree(ptr);
}
/*
*-----------------------------------------------------------------------------
*
* OS_MemZero --
*
* Fill a memory location with 0s.
*
* Results:
* None
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
void
OS_MemZero(void *ptr, // OUT
size_t size) // IN
{
memset(ptr, 0, size);
}
/*
*-----------------------------------------------------------------------------
*
* OS_MemCopy --
*
* Copy a memory portion into another location.
*
* Results:
* None
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
void
OS_MemCopy(void *dest, // OUT
const void *src, // IN
size_t size) // IN
{
memcpy(dest, src, size);
}
/*
*-----------------------------------------------------------------------------
*
* OS_ReservedPageGetLimit --
*
* Predict the maximum achievable balloon size.
*
* In 2.4.x and 2.6.x kernels, the balloon driver can guess the number of pages
* that can be ballooned. But for now, let us just report the totalram size as
* the maximum achievable balloon size. Note that normally (unless the guest
* kernel is booted with a mem=XX parameter) the totalram size is equal to
* alloc.max.
*
* Results:
* The maximum achievable balloon size in pages.
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
unsigned long
OS_ReservedPageGetLimit(void)
{
struct sysinfo info;
/*
* si_meminfo() is cheap. Moreover, we want to provide dynamic
* max balloon size later. So let us call si_meminfo() every
* iteration.
*/
si_meminfo(&info);
/* info.totalram is in pages */
return info.totalram;
}
/*
*-----------------------------------------------------------------------------
*
* OS_ReservedPageGetPA --
*
* Convert a page handle (of a physical page previously reserved with
* OS_ReservedPageAlloc()) to a pa.
*
* Results:
* The pa.
*
* Side effects:
* None.
*
*-----------------------------------------------------------------------------
*/
PA64
OS_ReservedPageGetPA(PageHandle handle) // IN: A valid page handle
{
struct page *page = (struct page *)handle;
return PPN_2_PA(page_to_pfn(page));
}
/*
*-----------------------------------------------------------------------------
*
* OS_ReservedPageGetHandle --
*
* Convert a pa (of a physical page previously reserved with
* OS_ReservedPageAlloc()) to a page handle.
*
* Results:
* The page handle.
*
* Side effects:
* None.
*
*-----------------------------------------------------------------------------
*/
PageHandle
OS_ReservedPageGetHandle(PA64 pa) // IN
{
return (PageHandle)pfn_to_page(PA_2_PPN(pa));
}
/*
*-----------------------------------------------------------------------------
*
* OS_ReservedPageAlloc --
*
* Reserve a physical page for the exclusive use of this driver.
*
* Results:
* On success: A valid page handle that can be passed to OS_ReservedPageGetPA()
* or OS_ReservedPageFree().
* On failure: PAGE_HANDLE_INVALID
*
* Side effects:
* None.
*
*-----------------------------------------------------------------------------
*/
PageHandle
OS_ReservedPageAlloc(int canSleep) // IN
{
struct page *page;
page = alloc_page(canSleep ? OS_PAGE_ALLOC_CANSLEEP : OS_PAGE_ALLOC_NOSLEEP);
if (page == NULL) {
return PAGE_HANDLE_INVALID;
}
return (PageHandle)page;
}
/*
*-----------------------------------------------------------------------------
*
* OS_ReservedPageFree --
*
* Unreserve a physical page previously reserved with OS_ReservedPageAlloc().
*
* Results:
* None.
*
* Side effects:
* None.
*
*-----------------------------------------------------------------------------
*/
void
OS_ReservedPageFree(PageHandle handle) // IN: A valid page handle
{
struct page *page = (struct page *)handle;
__free_page(page);
}
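/*
 * Illustrative pairing (added, not part of the original file): the
 * balloon's inflate path uses these primitives roughly as
 *
 *    PageHandle h = OS_ReservedPageAlloc(canSleep);
 *    if (h == PAGE_HANDLE_INVALID) {
 *       // back off: no page could be reserved right now
 *    }
 *    PA64 pa = OS_ReservedPageGetPA(h);   // address handed to the hypervisor
 *    ...
 *    OS_ReservedPageFree(h);              // on deflate or on lock failure
 */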
/*
*-----------------------------------------------------------------------------
*
* OS_Yield --
*
* Yield the CPU, if needed.
*
* Results:
* None
*
* Side effects:
* This thread might get descheduled, other threads might get scheduled.
*
*-----------------------------------------------------------------------------
*/
void
OS_Yield(void)
{
cond_resched();
}
/*
*-----------------------------------------------------------------------------
*
* OS_MapPageHandle --
*
* Map a page handle into kernel address space, and return the
* mapping to that page handle.
*
* Results:
* The mapping.
*
* Side effects:
* None.
*
*-----------------------------------------------------------------------------
*/
Mapping
OS_MapPageHandle(PageHandle handle) // IN
{
struct page *page = (struct page *)handle;
return (Mapping)vmap(&page, 1, VM_MAP, PAGE_KERNEL);
}
/*
*-----------------------------------------------------------------------------
*
* OS_Mapping2Addr --
*
* Return the address of a previously mapped page handle (with
* OS_MapPageHandle).
*
* Results:
* The mapping address.
*
* Side effects:
* None.
*
*-----------------------------------------------------------------------------
*/
void *
OS_Mapping2Addr(Mapping mapping) // IN
{
return (void *)mapping;
}
/*
*-----------------------------------------------------------------------------
*
* OS_UnmapPage --
*
* Unmap a previously mapped page handle.
*
* Results:
* None.
*
* Side effects:
* None.
*
*-----------------------------------------------------------------------------
*/
void
OS_UnmapPage(Mapping mapping) // IN
{
vunmap((void *)mapping);
}
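/*
 * Illustrative pairing (added): OS_MapPageHandle, OS_Mapping2Addr and
 * OS_UnmapPage are used together to access a reserved page's contents,
 * e.g. for the balloon's batch page:
 *
 *    Mapping m = OS_MapPageHandle(h);   // a NULL check is advisable;
 *                                       // vmap() can fail
 *    void *va = OS_Mapping2Addr(m);
 *    // ... read/write the page through va ...
 *    OS_UnmapPage(m);
 */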
/*
*-----------------------------------------------------------------------------
*
* vmballoon_poll_loop --
*
* Periodically (BALLOON_POLL_PERIOD - 1 sec) calls into common balloon
* code (Balloon_QueryAndExecute) to fetch the new ballooning target and
* adjust balloon size accordingly.
*
* Results:
* Always 0.
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
static int
vmballoon_poll_loop(void *unused)
{
static wait_queue_head_t vmballoon_waitq;
init_waitqueue_head(&vmballoon_waitq);
/* Allow the kernel to freeze this thread during OS suspend/hibernate */
compat_set_freezable();
/* main loop */
while (1) {
/* sleep for specified period */
wait_event_interruptible_timeout(vmballoon_waitq,
compat_wait_check_freezing() ||
kthread_should_stop(),
BALLOON_POLL_PERIOD * HZ);
compat_try_to_freeze();
if (kthread_should_stop()) {
break;
}
/* execute registered handler */
Balloon_QueryAndExecute();
}
return 0;
}
#if defined(CONFIG_PROC_FS) || defined(CONFIG_DEBUG_FS)
static int
vmballoon_stats_show(struct seq_file *f, // IN
void *data) // IN: Unused
{
const BalloonStats *stats = Balloon_GetStats();
/* format size info */
seq_printf(f,
"target: %8d pages\n"
"current: %8d pages\n",
stats->nPagesTarget,
stats->nPages);
seq_printf(f,
"rateNoSleepAlloc: %8d pages/sec\n"
"rateSleepAlloc: %8d pages/sec\n"
"rateFree: %8d pages/sec\n",
stats->rateNoSleepAlloc,
stats->rateAlloc,
stats->rateFree);
seq_printf(f,
"\n"
"timer: %8u\n"
"start: %8u (%4u failed)\n"
"guestType: %8u (%4u failed)\n"
"lock: %8u (%4u failed)\n"
"unlock: %8u (%4u failed)\n"
"target: %8u (%4u failed)\n"
"primNoSleepAlloc: %8u (%4u failed)\n"
"primCanSleepAlloc: %8u (%4u failed)\n"
"primFree: %8u\n"
"errAlloc: %8u\n"
"errFree: %8u\n",
stats->timer,
stats->start, stats->startFail,
stats->guestType, stats->guestTypeFail,
stats->lock, stats->lockFail,
stats->unlock, stats->unlockFail,
stats->target, stats->targetFail,
stats->primAlloc[BALLOON_PAGE_ALLOC_NOSLEEP],
stats->primAllocFail[BALLOON_PAGE_ALLOC_NOSLEEP],
stats->primAlloc[BALLOON_PAGE_ALLOC_CANSLEEP],
stats->primAllocFail[BALLOON_PAGE_ALLOC_CANSLEEP],
stats->primFree,
stats->primErrorPageAlloc,
stats->primErrorPageFree);
return 0;
}
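/*
 * Example output (illustrative, values hypothetical) as rendered by
 * vmballoon_stats_show() through /proc/vmmemctl or the debugfs file:
 *
 *    target:      16384 pages
 *    current:      8192 pages
 *    rateNoSleepAlloc:    16384 pages/sec
 *    rateSleepAlloc:       2048 pages/sec
 *    rateFree:            16384 pages/sec
 *
 *    timer:         120
 *    start:           1 (   0 failed)
 *    ...
 */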
static int
vmballoon_stats_open(struct inode *inode, // IN: Unused
struct file *file) // IN
{
return single_open(file, vmballoon_stats_show, NULL);
}
#endif
#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
static struct file_operations vmballoon_proc_fops = {
.open = vmballoon_stats_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static void
vmballoon_procfs_init(void)
{
struct proc_dir_entry *pde;
pde = create_proc_entry("vmmemctl", S_IFREG | S_IRUGO, NULL);
if (pde) {
pde->proc_fops = &vmballoon_proc_fops;
}
}
static void
vmballoon_procfs_exit(void)
{
remove_proc_entry("vmmemctl", NULL);
}
#else
static void
vmballoon_procfs_init(void)
{
}
static void
vmballoon_procfs_exit(void)
{
}
#endif /* CONFIG_PROC_FS */
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
/*
* Note that vmballoon_debug_fops can't be const because early versions of
* debugfs_create_file() used non-const fops argument.
*/
static struct file_operations vmballoon_debug_fops = {
.owner = THIS_MODULE,
.open = vmballoon_stats_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static struct dentry *vmballoon_dbg_entry;
static void vmballoon_debugfs_init(void)
{
vmballoon_dbg_entry = debugfs_create_file("vmmemctl", S_IRUGO, NULL, NULL,
&vmballoon_debug_fops);
if (IS_ERR(vmballoon_dbg_entry)) {
int error = PTR_ERR(vmballoon_dbg_entry);
printk(KERN_ERR "vmmemctl: failed to create debugfs entry, error: %d\n", error);
vmballoon_dbg_entry = NULL;
}
}
static void vmballoon_debugfs_exit(void)
{
if (vmballoon_dbg_entry)
debugfs_remove(vmballoon_dbg_entry);
}
#else
static void
vmballoon_debugfs_init(void)
{
}
static void
vmballoon_debugfs_exit(void)
{
}
#endif /* CONFIG_DEBUG_FS */
/*
*-----------------------------------------------------------------------------
*
* vmballoon_init --
*
* Called at driver startup, initializes the balloon state and structures.
*
* Results:
* On success: 0
* On failure: standard error code
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
static int
vmballoon_init(void)
{
/* initialize global state */
if (!Balloon_Init(BALLOON_GUEST_LINUX)) {
return -EIO;
}
/* create kernel thread */
vmballoon_task = kthread_run(vmballoon_poll_loop, NULL, BALLOON_NAME);
if (IS_ERR(vmballoon_task)) {
int error = PTR_ERR(vmballoon_task);
printk(KERN_WARNING BALLOON_NAME
": unable to create kernel thread, error: %d\n", error);
Balloon_Cleanup();
return error;
}
if (OS_DEBUG) {
printk(KERN_DEBUG BALLOON_NAME ": started kernel thread pid=%d\n",
vmballoon_task->pid);
}
vmballoon_procfs_init();
vmballoon_debugfs_init();
/* prevent module unload with extra reference */
if (OS_DISABLE_UNLOAD) {
try_module_get(THIS_MODULE);
}
/* log device load */
printk(KERN_INFO BALLOON_NAME_VERBOSE " initialized\n");
return 0;
}
module_init(vmballoon_init);
/*
*-----------------------------------------------------------------------------
*
* vmballoon_exit --
*
* Called when the driver is terminating, cleanup initialized structures.
*
* Results:
* None
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
static void
vmballoon_exit(void)
{
vmballoon_procfs_exit();
vmballoon_debugfs_exit();
kthread_stop(vmballoon_task);
Balloon_Cleanup();
/* log device unload */
printk(KERN_INFO BALLOON_NAME_VERBOSE " unloaded\n");
}
module_exit(vmballoon_exit);
/* Module information. */
MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Memory Control Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(VMMEMCTL_DRIVER_VERSION_STRING);
MODULE_ALIAS("vmware_vmmemctl");
/*
* Starting with SLE10sp2, Novell requires that IHVs sign a support agreement
* with them and mark their kernel modules as externally supported via a
* change to the module header. If this isn't done, the module will not load
* by default (i.e., neither mkinitrd nor modprobe will accept it).
*/
MODULE_INFO(supported, "external");
vmmemctl-only/shared/ 0000755 0000000 0000000 00000000000 12275351017 013630 5 ustar root root vmmemctl-only/shared/dbllnklst.h 0000444 0000000 0000000 00000014744 12275350060 015777 0 ustar root root /*********************************************************
* Copyright (C) 1998 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* dbllnklst.h --
*
* Double linked lists
*/
#ifndef _DBLLNKLST_H_
#define _DBLLNKLST_H_
#define INCLUDE_ALLOW_MODULE
#define INCLUDE_ALLOW_USERLEVEL
#include "includeCheck.h"
#include "vm_basic_types.h"
#define DblLnkLst_OffsetOf(type, field) ((intptr_t)&((type *)0)->field)
#define DblLnkLst_Container(addr, type, field) \
((type *)((char *)(addr) - DblLnkLst_OffsetOf(type, field)))
#define DblLnkLst_ForEach(curr, head) \
for (curr = (head)->next; curr != (head); curr = (curr)->next)
/* Safe from list element removal within loop body. */
#define DblLnkLst_ForEachSafe(curr, nextElem, head) \
for (curr = (head)->next, nextElem = (curr)->next; \
curr != (head); \
curr = nextElem, nextElem = (curr)->next)
typedef struct DblLnkLst_Links {
struct DblLnkLst_Links *prev;
struct DblLnkLst_Links *next;
} DblLnkLst_Links;
/*
* Functions
*
* DblLnkLst_LinkFirst, DblLnkLst_LinkLast, and DblLnkLst_Swap are specific
* to anchored lists. The rest are for both circular and anchored lists.
*/
/*
*----------------------------------------------------------------------
*
* DblLnkLst_Init --
*
* Initialize a member of a doubly linked list
*
* Result
* None
*
* Side effects:
* None
*
*----------------------------------------------------------------------
*/
static INLINE void
DblLnkLst_Init(DblLnkLst_Links *l) // IN
{
l->prev = l->next = l;
}
/*
*----------------------------------------------------------------------
*
* DblLnkLst_Link --
*
* Merge two doubly linked lists into one
*
* The operation is commutative
* The operation is inversible (its inverse is DblLnkLst_Unlink)
*
* Result
* None
*
* Side effects:
* None
*
*----------------------------------------------------------------------
*/
static INLINE void
DblLnkLst_Link(DblLnkLst_Links *l1, // IN
DblLnkLst_Links *l2) // IN
{
DblLnkLst_Links *tmp;
(tmp = l1->prev)->next = l2;
(l1->prev = l2->prev)->next = l1;
l2->prev = tmp;
}
/*
*----------------------------------------------------------------------
*
* DblLnkLst_Unlink --
*
* Split one doubly linked list into two
*
* No check is performed: the caller must ensure that both members
* belong to the same doubly linked list
*
* The operation is commutative
* The operation is inversible (its inverse is DblLnkLst_Link)
*
* Result
* None
*
* Side effects:
* None
*
*----------------------------------------------------------------------
*/
static INLINE void
DblLnkLst_Unlink(DblLnkLst_Links *l1, // IN
DblLnkLst_Links *l2) // IN
{
DblLnkLst_Links *tmp;
tmp = l1->prev;
(l1->prev = l2->prev)->next = l1;
(l2->prev = tmp)->next = l2;
}
/*
*----------------------------------------------------------------------
*
* DblLnkLst_Unlink1 --
*
* Unlink an element from its list.
*
* Result
* None
*
* Side effects:
* None
*
*----------------------------------------------------------------------
*/
static INLINE void
DblLnkLst_Unlink1(DblLnkLst_Links *l) // IN
{
DblLnkLst_Unlink(l, l->next);
}
/*
*----------------------------------------------------------------------------
*
* DblLnkLst_IsLinked --
*
* Determines whether an element is linked with any other elements.
*
* Results:
* TRUE if the element is linked, FALSE otherwise.
*
* Side effects:
* None.
*
*----------------------------------------------------------------------------
*/
static INLINE Bool
DblLnkLst_IsLinked(DblLnkLst_Links const *l) // IN
{
/*
* A DblLnkLst_Links is either linked to itself (not linked) or linked to
* other elements in a list (linked).
*/
return l->prev != l;
}
/*
*----------------------------------------------------------------------
*
* DblLnkLst_LinkFirst --
*
* Insert 'l' at the beginning of the list anchored at 'head'
*
* Result
* None
*
* Side effects:
* None
*
*----------------------------------------------------------------------
*/
static INLINE void
DblLnkLst_LinkFirst(DblLnkLst_Links *head, // IN
DblLnkLst_Links *l) // IN
{
DblLnkLst_Link(head->next, l);
}
/*
*----------------------------------------------------------------------
*
* DblLnkLst_LinkLast --
*
* Insert 'l' at the end of the list anchored at 'head'
*
* Result
* None
*
* Side effects:
* None
*
*----------------------------------------------------------------------
*/
static INLINE void
DblLnkLst_LinkLast(DblLnkLst_Links *head, // IN
DblLnkLst_Links *l) // IN
{
DblLnkLst_Link(head, l);
}
/*
*----------------------------------------------------------------------
*
* DblLnkLst_Swap --
*
* Swap all entries between the list anchored at 'head1' and the list
* anchored at 'head2'.
*
* The operation is commutative
* The operation is inversible (its inverse is itself)
*
* Result
* None
*
* Side effects:
* None
*
*----------------------------------------------------------------------
*/
static INLINE void
DblLnkLst_Swap(DblLnkLst_Links *head1, // IN/OUT
DblLnkLst_Links *head2) // IN/OUT
{
DblLnkLst_Links const tmp = *head1;
if (DblLnkLst_IsLinked(head2)) {
(head1->prev = head2->prev)->next = head1;
(head1->next = head2->next)->prev = head1;
} else {
DblLnkLst_Init(head1);
}
if (tmp.prev != head1) {
(head2->prev = tmp.prev)->next = head2;
(head2->next = tmp.next)->prev = head2;
} else {
DblLnkLst_Init(head2);
}
}
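/*
 * Usage sketch (illustrative only; 'MyItem', 'head' and 'item' are
 * hypothetical). An anchored list keeps a dedicated head node; payload
 * structures embed the links and are recovered with DblLnkLst_Container():
 *
 *    typedef struct MyItem {
 *       int value;
 *       DblLnkLst_Links links;
 *    } MyItem;
 *
 *    DblLnkLst_Links head, *curr;
 *    MyItem item;
 *
 *    DblLnkLst_Init(&head);
 *    DblLnkLst_Init(&item.links);
 *    item.value = 42;
 *    DblLnkLst_LinkLast(&head, &item.links);
 *    DblLnkLst_ForEach(curr, &head) {
 *       MyItem *i = DblLnkLst_Container(curr, MyItem, links);
 *       // i->value == 42
 *    }
 */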
#endif /* _DBLLNKLST_H_ */
vmmemctl-only/shared/compat_module.h 0000444 0000000 0000000 00000004372 12275350061 016633 0 ustar root root /*********************************************************
* Copyright (C) 2007 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* compat_module.h --
*/
#ifndef __COMPAT_MODULE_H__
# define __COMPAT_MODULE_H__
#include <linux/module.h>
/*
* Modules wishing to use the GPL license are required to include a
* MODULE_LICENSE definition in their module source as of 2.4.10.
*/
#ifndef MODULE_LICENSE
#define MODULE_LICENSE(license)
#endif
/*
* To make use of our own home-brewed MODULE_INFO, we need macros to
* concatenate two expressions to "__mod_", and to convert an
* expression into a string. I'm sure we've got these in our codebase,
* but I'd rather not introduce such a dependency in a compat header.
*/
#ifndef __module_cat
#define __module_cat_1(a, b) __mod_ ## a ## b
#define __module_cat(a, b) __module_cat_1(a, b)
#endif
#ifndef __stringify
#define __stringify_1(x) #x
#define __stringify(x) __stringify_1(x)
#endif
/*
* MODULE_INFO was born in 2.5.69.
*/
#ifndef MODULE_INFO
#define MODULE_INFO(tag, info) \
static const char __module_cat(tag, __LINE__)[] \
__attribute__((section(".modinfo"), unused)) = __stringify(tag) "=" info
#endif
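/*
 * For example, assuming the wrapper above is in effect and __LINE__ is 42,
 * MODULE_INFO(supported, "external") expands to roughly:
 *
 *    static const char __mod_supported42[]
 *       __attribute__((section(".modinfo"), unused)) = "supported" "=" "external";
 */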
/*
* MODULE_VERSION was born in 2.6.4. The earlier form appends a long "\0xxx"
* string to the module's version, but that was removed in 2.6.10, so we'll
* ignore it in our wrapper.
*/
#ifndef MODULE_VERSION
#define MODULE_VERSION(_version) MODULE_INFO(version, _version)
#endif
#endif /* __COMPAT_MODULE_H__ */
vmmemctl-only/shared/vm_device_version.h 0000444 0000000 0000000 00000025174 12275350062 017515 0 ustar root root /*********************************************************
* Copyright (C) 1998 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef VM_DEVICE_VERSION_H
#define VM_DEVICE_VERSION_H
#define INCLUDE_ALLOW_USERLEVEL
#define INCLUDE_ALLOW_DISTRIBUTE
#define INCLUDE_ALLOW_VMKDRIVERS
#define INCLUDE_ALLOW_MODULE
#define INCLUDE_ALLOW_VMKERNEL
#define INCLUDE_ALLOW_VMCORE
#include "includeCheck.h"
#ifdef _WIN32
#ifdef __MINGW32__
#include "initguid.h"
#else
#include "guiddef.h"
#endif
#endif
/* LSILogic 53C1030 Parallel SCSI controller
* LSILogic SAS1068 SAS controller
*/
#define PCI_VENDOR_ID_LSILOGIC 0x1000
#define PCI_DEVICE_ID_LSI53C1030 0x0030
#define PCI_DEVICE_ID_LSISAS1068 0x0054
/* Our own PCI IDs
* VMware SVGA II (Unified VGA)
* VMware SVGA (PCI Accelerator)
* VMware vmxnet (Idealized NIC)
* VMware vmxscsi (Abortive idealized SCSI controller)
* VMware chipset (Subsystem ID for our motherboards)
* VMware e1000 (Subsystem ID)
* VMware vmxnet3 (Uniform Pass Through NIC)
* VMware HD Audio codec
* VMware HD Audio controller
*/
#define PCI_VENDOR_ID_VMWARE 0x15AD
#define PCI_DEVICE_ID_VMWARE_SVGA2 0x0405
#define PCI_DEVICE_ID_VMWARE_SVGA 0x0710
#define PCI_DEVICE_ID_VMWARE_VGA 0x0711
#define PCI_DEVICE_ID_VMWARE_NET 0x0720
#define PCI_DEVICE_ID_VMWARE_SCSI 0x0730
#define PCI_DEVICE_ID_VMWARE_VMCI 0x0740
#define PCI_DEVICE_ID_VMWARE_CHIPSET 0x1976
#define PCI_DEVICE_ID_VMWARE_82545EM 0x0750 /* single port */
#define PCI_DEVICE_ID_VMWARE_82546EB 0x0760 /* dual port */
#define PCI_DEVICE_ID_VMWARE_EHCI 0x0770
#define PCI_DEVICE_ID_VMWARE_UHCI 0x0774
#define PCI_DEVICE_ID_VMWARE_XHCI_0096 0x0778
#define PCI_DEVICE_ID_VMWARE_XHCI_0100 0x0779
#define PCI_DEVICE_ID_VMWARE_1394 0x0780
#define PCI_DEVICE_ID_VMWARE_BRIDGE 0x0790
#define PCI_DEVICE_ID_VMWARE_ROOTPORT 0x07A0
#define PCI_DEVICE_ID_VMWARE_VMXNET3 0x07B0
#define PCI_DEVICE_ID_VMWARE_VMXWIFI 0x07B8
#define PCI_DEVICE_ID_VMWARE_PVSCSI 0x07C0
#define PCI_DEVICE_ID_VMWARE_82574 0x07D0
#define PCI_DEVICE_ID_VMWARE_AHCI 0x07E0
#define PCI_DEVICE_ID_VMWARE_HDAUDIO_CODEC 0x1975
#define PCI_DEVICE_ID_VMWARE_HDAUDIO_CONTROLLER 0x1977
/* The hypervisor device might grow. Please leave room
* for 7 more subfunctions.
*/
#define PCI_DEVICE_ID_VMWARE_HYPER 0x0800
#define PCI_DEVICE_ID_VMWARE_VMI 0x0801
#define PCI_DEVICE_VMI_CLASS 0x05
#define PCI_DEVICE_VMI_SUBCLASS 0x80
#define PCI_DEVICE_VMI_INTERFACE 0x00
#define PCI_DEVICE_VMI_REVISION 0x01
#define PCI_DEVICE_ID_VMWARE_DUMMY 0x0809
/* From linux/pci_ids.h:
* AMD Lance Ethernet controller
* BusLogic SCSI controller
* Ensoniq ES1371 sound controller
*/
#define PCI_VENDOR_ID_AMD 0x1022
#define PCI_DEVICE_ID_AMD_VLANCE 0x2000
#define PCI_VENDOR_ID_BUSLOGIC 0x104B
#define PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER_NC 0x0140
#define PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER 0x1040
#define PCI_VENDOR_ID_ENSONIQ 0x1274
#define PCI_DEVICE_ID_ENSONIQ_ES1371 0x1371
/* From linux/pci_ids.h:
* Intel 82439TX (430 HX North Bridge)
* Intel 82371AB (PIIX4 South Bridge)
* Intel 82443BX (440 BX North Bridge and AGP Bridge)
* Intel 82545EM (e1000, server adapter, single port)
* Intel 82546EB (e1000, server adapter, dual port)
* Intel HECI (as embedded in ich9m)
* Intel XHCI (as embedded in PANTHERPOINT)
*/
#define PCI_VENDOR_ID_INTEL 0x8086
#define PCI_DEVICE_ID_INTEL_82439TX 0x7100
#define PCI_DEVICE_ID_INTEL_82371AB_0 0x7110
#define PCI_DEVICE_ID_INTEL_82371AB_2 0x7112
#define PCI_DEVICE_ID_INTEL_82371AB_3 0x7113
#define PCI_DEVICE_ID_INTEL_82371AB 0x7111
#define PCI_DEVICE_ID_INTEL_82443BX 0x7190
#define PCI_DEVICE_ID_INTEL_82443BX_1 0x7191
#define PCI_DEVICE_ID_INTEL_82443BX_2 0x7192 /* Used when no AGP support */
#define PCI_DEVICE_ID_INTEL_82545EM 0x100f
#define PCI_DEVICE_ID_INTEL_82546EB 0x1010
#define PCI_DEVICE_ID_INTEL_82574 0x10d3
#define PCI_DEVICE_ID_INTEL_82574_APPLE 0x10f6
#define PCI_DEVICE_ID_INTEL_HECI 0x2a74
#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI 0x1e31
#define E1000E_PCI_DEVICE_ID_CONFIG_STR "e1000e.pci.deviceID"
#define E1000E_PCI_SUB_VENDOR_ID_CONFIG_STR "e1000e.pci.subVendorID"
#define E1000E_PCI_SUB_DEVICE_ID_CONFIG_STR "e1000e.pci.subDeviceID"
/*
* Intel HD Audio controller and Realtek ALC885 codec.
*/
#define PCI_DEVICE_ID_INTEL_631XESB_632XESB 0x269a
#define PCI_VENDOR_ID_REALTEK 0x10ec
#define PCI_DEVICE_ID_REALTEK_ALC885 0x0885
/*
* Fresco Logic xHCI (USB 3.0) Controller
*/
#define PCI_VENDOR_ID_FRESCO 0x1B73
#define PCI_DEVICE_ID_FRESCO_FL1000 0x1000 // Original 1-port chip
#define PCI_DEVICE_ID_FRESCO_FL1009 0x1009 // New 2-port chip (Driver 3.0.98+)
#define PCI_DEVICE_ID_FRESCO_FL1400 0x1400 // Unknown (4-port? Dev hardware?)
/*
* NEC/Renesas xHCI (USB 3.0) Controller
*/
#define PCI_VENDOR_ID_NEC 0x1033
#define PCI_DEVICE_ID_NEC_UPD720200 0x0194
#define PCI_REVISION_NEC_UPD720200 0x03
#define PCI_FIRMWARE_NEC_UPD720200 0x3015
#define SATA_ID_SERIAL_STR "00000000000000000001" /* Must be 20 Bytes */
#define SATA_ID_FIRMWARE_STR "00000001" /* Must be 8 Bytes */
#define AHCI_ATA_MODEL_STR PRODUCT_GENERIC_NAME " Virtual SATA Hard Drive"
#define AHCI_ATAPI_MODEL_STR PRODUCT_GENERIC_NAME " Virtual SATA CDRW Drive"
/************* Strings for IDE Identity Fields **************************/
#define VIDE_ID_SERIAL_STR "00000000000000000001" /* Must be 20 Bytes */
#define VIDE_ID_FIRMWARE_STR "00000001" /* Must be 8 Bytes */
/* No longer than 40 Bytes */
#define VIDE_ATA_MODEL_STR PRODUCT_GENERIC_NAME " Virtual IDE Hard Drive"
#define VIDE_ATAPI_MODEL_STR PRODUCT_GENERIC_NAME " Virtual IDE CDROM Drive"
#define ATAPI_VENDOR_ID "NECVMWar" /* Must be 8 Bytes */
#define ATAPI_PRODUCT_ID PRODUCT_GENERIC_NAME " IDE CDROM" /* Must be 16 Bytes */
#define ATAPI_REV_LEVEL "1.00" /* Must be 4 Bytes */
#define IDE_NUM_INTERFACES 2 /* support for two interfaces */
#define IDE_DRIVES_PER_IF 2
/************* Strings for SCSI Identity Fields **************************/
#define SCSI_DISK_MODEL_STR PRODUCT_GENERIC_NAME " Virtual SCSI Hard Drive"
#define SCSI_DISK_VENDOR_NAME COMPANY_NAME
#define SCSI_DISK_REV_LEVEL "1.0"
#define SCSI_CDROM_MODEL_STR PRODUCT_GENERIC_NAME " Virtual SCSI CDROM Drive"
#define SCSI_CDROM_VENDOR_NAME COMPANY_NAME
#define SCSI_CDROM_REV_LEVEL "1.0"
/************* SCSI implementation limits ********************************/
#define SCSI_MAX_CONTROLLERS 4 // Need more than 1 for MSCS clustering
#define SCSI_MAX_DEVICES 16 // BT-958 emulates only 16
#define PVSCSI_MAX_DEVICES 255 // 255 (including the controller)
/************* SATA implementation limits ********************************/
#define SATA_MAX_CONTROLLERS 4
#define SATA_MAX_DEVICES 30
#define AHCI_MIN_PORTS 1
#define AHCI_MAX_PORTS SATA_MAX_DEVICES
/*
* VSCSI_BV_INTS is the number of uint32's needed for a bit vector
* to cover all scsi devices per target.
*/
#define VSCSI_BV_INTS CEILING(PVSCSI_MAX_DEVICES, 8 * sizeof (uint32))
#define SCSI_IDE_CHANNEL SCSI_MAX_CONTROLLERS
#define SCSI_IDE_HOSTED_CHANNEL (SCSI_MAX_CONTROLLERS + 1)
#define SCSI_SATA_CHANNEL_FIRST (SCSI_IDE_HOSTED_CHANNEL + 1)
#define SCSI_MAX_CHANNELS (SCSI_SATA_CHANNEL_FIRST + SATA_MAX_CONTROLLERS)
/************* SCSI-SATA channel IDs********************************/
#define SATA_ID_TO_SCSI_ID(sataId) \
(SCSI_SATA_CHANNEL_FIRST + (sataId))
#define SCSI_ID_TO_SATA_ID(scsiId) \
((scsiId) - SCSI_SATA_CHANNEL_FIRST)
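/*
 * Worked example with the limits above: SCSI_MAX_CONTROLLERS is 4, so
 * SCSI_IDE_CHANNEL == 4, SCSI_IDE_HOSTED_CHANNEL == 5 and
 * SCSI_SATA_CHANNEL_FIRST == 6. Hence SATA_ID_TO_SCSI_ID(0) == 6,
 * SCSI_ID_TO_SATA_ID(9) == 3, and SCSI_MAX_CHANNELS == 10.
 */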
/************* Strings for the VESA BIOS Identity Fields *****************/
#define VBE_OEM_STRING COMPANY_NAME " SVGA"
#define VBE_VENDOR_NAME COMPANY_NAME
#define VBE_PRODUCT_NAME PRODUCT_GENERIC_NAME
/************* PCI implementation limits ********************************/
#define PCI_MAX_BRIDGES 15
/************* Ethernet implementation limits ***************************/
#define MAX_ETHERNET_CARDS 10
/********************** Floppy limits ***********************************/
#define MAX_FLOPPY_DRIVES 2
/************* PCI Passthrough implementation limits ********************/
#define MAX_PCI_PASSTHRU_DEVICES 16
/************* Test device implementation limits ********************/
#define MAX_PCI_TEST_DEVICES 16
/************* USB implementation limits ********************************/
#define MAX_USB_DEVICES_PER_HOST_CONTROLLER 127
/************* Strings for Host USB Driver *******************************/
#ifdef _WIN32
/*
* Globally unique ID for the VMware device interface. Define INITGUID before including
* this header file to instantiate the variable.
*/
DEFINE_GUID(GUID_DEVICE_INTERFACE_VMWARE_USB_DEVICES,
0x2da1fe75, 0xaab3, 0x4d2c, 0xac, 0xdf, 0x39, 0x8, 0x8c, 0xad, 0xa6, 0x65);
/*
* Globally unique ID for the VMware device setup class.
*/
DEFINE_GUID(GUID_CLASS_VMWARE_USB_DEVICES,
0x3b3e62a5, 0x3556, 0x4d7e, 0xad, 0xad, 0xf5, 0xfa, 0x3a, 0x71, 0x2b, 0x56);
/*
* This string defines the device ID string of a VMware USB device.
* The format is USB\Vid_XXXX&Pid_YYYY, where XXXX and YYYY are the
* hexadecimal representations of the vendor and product ids, respectively.
*
* The official vendor ID for VMware, Inc. is 0x0E0F.
* The product id for USB generic devices is 0x0001.
*/
#define USB_VMWARE_DEVICE_ID_WIDE L"USB\\Vid_0E0F&Pid_0001"
#define USB_DEVICE_ID_LENGTH (sizeof(USB_VMWARE_DEVICE_ID_WIDE) / sizeof(WCHAR))
#ifdef UNICODE
#define USB_PNP_SETUP_CLASS_NAME L"VMwareUSBDevices"
#define USB_PNP_DRIVER_NAME L"vmusb"
#else
#define USB_PNP_SETUP_CLASS_NAME "VMwareUSBDevices"
#define USB_PNP_DRIVER_NAME "vmusb"
#endif
#endif
#endif /* VM_DEVICE_VERSION_H */
vmmemctl-only/shared/vm_basic_asm.h 0000444 0000000 0000000 00000102214 12275350062 016421 0 ustar root root /*********************************************************
* Copyright (C) 2003-2011 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* vm_basic_asm.h
*
* Basic asm macros
*
* ARM not implemented.
*/
#ifndef _VM_BASIC_ASM_H_
#define _VM_BASIC_ASM_H_
#define INCLUDE_ALLOW_USERLEVEL
#define INCLUDE_ALLOW_MODULE
#define INCLUDE_ALLOW_VMMON
#define INCLUDE_ALLOW_VMK_MODULE
#define INCLUDE_ALLOW_VMKERNEL
#define INCLUDE_ALLOW_DISTRIBUTE
#define INCLUDE_ALLOW_VMCORE
#define INCLUDE_ALLOW_VMIROM
#include "includeCheck.h"
#include "vm_basic_types.h"
#if defined VM_X86_64
#include "vm_basic_asm_x86_64.h"
#elif defined __i386__
#include "vm_basic_asm_x86.h"
#endif
/*
* x86-64 windows doesn't support inline asm so we have to use these
* intrinsic functions defined in the compiler. Not all of these are well
* documented. There is an array in the compiler dll (c1.dll) which has
* an array of the names of all the intrinsics minus the leading
* underscore. Searching around in the ntddk.h file can also be helpful.
*
* The declarations for the intrinsic functions were taken from the DDK.
* Our declarations must match the ddk's otherwise the 64-bit c++ compiler
* will complain about second linkage of the intrinsic functions.
* We define the intrinsic using the basic types corresponding to the
* Windows typedefs. This avoids having to include windows header files
* to get to the windows types.
*/
#ifdef _MSC_VER
#ifdef __cplusplus
extern "C" {
#endif
/*
* It seems x86 & x86-64 Windows still implement these intrinsic
* functions. The x86-64 documentation suggests the __inbyte/__outbyte
* intrinsics even though the _in/_out variants work fine, and
* __inbyte/__outbyte aren't supported on x86.
*/
int _inp(unsigned short);
unsigned short _inpw(unsigned short);
unsigned long _inpd(unsigned short);
int _outp(unsigned short, int);
unsigned short _outpw(unsigned short, unsigned short);
unsigned long _outpd(unsigned short, unsigned long);
#pragma intrinsic(_inp, _inpw, _inpd, _outp, _outpw, _outpd)
/*
* Prevents compiler from re-ordering reads, writes and reads&writes.
* These functions do not add any instructions thus only affect
* the compiler ordering.
*
* See:
* `Lockless Programming Considerations for Xbox 360 and Microsoft Windows'
* http://msdn.microsoft.com/en-us/library/bb310595(VS.85).aspx
*/
void _ReadBarrier(void);
void _WriteBarrier(void);
void _ReadWriteBarrier(void);
#pragma intrinsic(_ReadBarrier, _WriteBarrier, _ReadWriteBarrier)
void _mm_mfence(void);
void _mm_lfence(void);
#pragma intrinsic(_mm_mfence, _mm_lfence)
unsigned int __getcallerseflags(void);
#pragma intrinsic(__getcallerseflags)
#ifdef VM_X86_64
/*
* intrinsic functions only supported by x86-64 windows as of 2k3sp1
*/
unsigned __int64 __rdtsc(void);
void __stosw(unsigned short *, unsigned short, size_t);
void __stosd(unsigned long *, unsigned long, size_t);
void _mm_pause(void);
#pragma intrinsic(__rdtsc, __stosw, __stosd, _mm_pause)
unsigned char _BitScanForward64(unsigned long *, unsigned __int64);
unsigned char _BitScanReverse64(unsigned long *, unsigned __int64);
#pragma intrinsic(_BitScanForward64, _BitScanReverse64)
#endif /* VM_X86_64 */
unsigned char _BitScanForward(unsigned long *, unsigned long);
unsigned char _BitScanReverse(unsigned long *, unsigned long);
#pragma intrinsic(_BitScanForward, _BitScanReverse)
unsigned char _bittest(const long *, long);
unsigned char _bittestandset(long *, long);
unsigned char _bittestandreset(long *, long);
unsigned char _bittestandcomplement(long *, long);
#pragma intrinsic(_bittest, _bittestandset, _bittestandreset, _bittestandcomplement)
#ifdef VM_X86_64
unsigned char _bittestandset64(__int64 *, __int64);
unsigned char _bittestandreset64(__int64 *, __int64);
#pragma intrinsic(_bittestandset64, _bittestandreset64)
#endif /* VM_X86_64 */
#ifdef __cplusplus
}
#endif
#endif /* _MSC_VER */
#ifdef __GNUC__ // {
#if defined(__i386__) || defined(__x86_64__) // Only on x86*
/*
* Checked against the Intel manual and GCC --hpreg
*
* volatile because reading from port can modify the state of the underlying
* hardware.
*
* Note: The undocumented %z construct doesn't work (internal compiler error)
* with gcc-2.95.1
*/
#define __GCC_IN(s, type, name) \
static INLINE type \
name(uint16 port) \
{ \
type val; \
\
__asm__ __volatile__( \
"in" #s " %w1, %0" \
: "=a" (val) \
: "Nd" (port) \
); \
\
return val; \
}
__GCC_IN(b, uint8, INB)
__GCC_IN(w, uint16, INW)
__GCC_IN(l, uint32, IN32)
/*
* Checked against the Intel manual and GCC --hpreg
*
* Note: The undocumented %z construct doesn't work (internal compiler error)
* with gcc-2.95.1
*/
#define __GCC_OUT(s, s2, port, val) do { \
__asm__( \
"out" #s " %" #s2 "1, %w0" \
: \
: "Nd" (port), "a" (val) \
); \
} while (0)
#define OUTB(port, val) __GCC_OUT(b, b, port, val)
#define OUTW(port, val) __GCC_OUT(w, w, port, val)
#define OUT32(port, val) __GCC_OUT(l, , port, val)
#define GET_CURRENT_EIP(_eip) \
__asm__ __volatile("call 0\n\tpopl %0" : "=r" (_eip): );
static INLINE unsigned int
GetCallerEFlags(void)
{
unsigned long flags;
asm volatile("pushf; pop %0" : "=r"(flags));
return flags;
}
#endif // x86*
#elif defined(_MSC_VER) // } {
static INLINE uint8
INB(uint16 port)
{
return (uint8)_inp(port);
}
static INLINE void
OUTB(uint16 port, uint8 value)
{
_outp(port, value);
}
static INLINE uint16
INW(uint16 port)
{
return _inpw(port);
}
static INLINE void
OUTW(uint16 port, uint16 value)
{
_outpw(port, value);
}
static INLINE uint32
IN32(uint16 port)
{
return _inpd(port);
}
static INLINE void
OUT32(uint16 port, uint32 value)
{
_outpd(port, value);
}
#ifndef VM_X86_64
#ifdef NEAR
#undef NEAR
#endif
#define GET_CURRENT_EIP(_eip) do { \
__asm call NEAR PTR $+5 \
__asm pop eax \
__asm mov _eip, eax \
} while (0)
#endif // VM_X86_64
static INLINE unsigned int
GetCallerEFlags(void)
{
return __getcallerseflags();
}
#else // } {
#error
#endif // }
/* Sequence recommended by Intel for the Pentium 4. */
#define INTEL_MICROCODE_VERSION() ( \
__SET_MSR(MSR_BIOS_SIGN_ID, 0), \
__GET_EAX_FROM_CPUID(1), \
__GET_MSR(MSR_BIOS_SIGN_ID))
/*
* Locate most and least significant bit set functions. Use our own name
* space to avoid namespace collisions. The new names follow a pattern,
* <prefix><size><option>, where:
*
* <prefix> is [lm]ssb (least/most significant bit set)
* <size> is size of the argument: 32 (32-bit), 64 (64-bit) or Ptr (pointer)
* <option> is for alternative versions of the functions
*
* NAME FUNCTION BITS FUNC(0)
*----- -------- ---- -------
* lssb32_0 LSB set (uint32) 0..31 -1
* mssb32_0 MSB set (uint32) 0..31 -1
* lssb64_0 LSB set (uint64) 0..63 -1
* mssb64_0 MSB set (uint64) 0..63 -1
* lssbPtr_0 LSB set (uintptr_t;32-bit) 0..31 -1
* lssbPtr_0 LSB set (uintptr_t;64-bit) 0..63 -1
* mssbPtr_0 MSB set (uintptr_t;32-bit) 0..31 -1
* mssbPtr_0 MSB set (uintptr_t;64-bit) 0..63 -1
* lssbPtr LSB set (uintptr_t;32-bit) 1..32 0
* lssbPtr LSB set (uintptr_t;64-bit) 1..64 0
* mssbPtr MSB set (uintptr_t;32-bit) 1..32 0
* mssbPtr MSB set (uintptr_t;64-bit) 1..64 0
* lssb32 LSB set (uint32) 1..32 0
* mssb32 MSB set (uint32) 1..32 0
* lssb64 LSB set (uint64) 1..64 0
* mssb64 MSB set (uint64) 1..64 0
*/
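/*
 * Worked examples (illustrative): lssb32_0(0x00000001) == 0,
 * lssb32_0(0x00000008) == 3 and mssb32_0(0x80000000) == 31, while the
 * unsuffixed 1-based forms shift by one: lssb32(0x00000008) == 4 and
 * lssb32(0) == 0.
 */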
#if defined(_MSC_VER)
static INLINE int
lssb32_0(const uint32 value)
{
unsigned long idx;
if (UNLIKELY(value == 0)) {
return -1;
}
_BitScanForward(&idx, (unsigned long) value);
return idx;
}
static INLINE int
mssb32_0(const uint32 value)
{
unsigned long idx;
if (UNLIKELY(value == 0)) {
return -1;
}
_BitScanReverse(&idx, (unsigned long) value);
return idx;
}
static INLINE int
lssb64_0(const uint64 value)
{
if (UNLIKELY(value == 0)) {
return -1;
} else {
#if defined(VM_X86_64)
unsigned long idx;
_BitScanForward64(&idx, (unsigned __int64) value);
return idx;
#else
/* The coding was chosen to minimize conditionals and operations */
int lowFirstBit = lssb32_0((uint32) value);
if (lowFirstBit == -1) {
lowFirstBit = lssb32_0((uint32) (value >> 32));
if (lowFirstBit != -1) {
return lowFirstBit + 32;
}
}
return lowFirstBit;
#endif
}
}
static INLINE int
mssb64_0(const uint64 value)
{
if (UNLIKELY(value == 0)) {
return -1;
} else {
#if defined(VM_X86_64)
unsigned long idx;
_BitScanReverse64(&idx, (unsigned __int64) value);
return idx;
#else
/* The coding was chosen to minimize conditionals and operations */
if (value > 0xFFFFFFFFULL) {
return 32 + mssb32_0((uint32) (value >> 32));
}
return mssb32_0((uint32) value);
#endif
}
}
#endif
#if defined(__GNUC__)
#if defined(__i386__) || defined(__x86_64__) // Only on x86*
#define USE_ARCH_X86_CUSTOM
#endif
/* **********************************************************
* GCC's intrinsics for the lssb and mssb family produce sub-optimal code,
* so we use inline assembly to improve matters. However, GCC cannot
* propagate constants through inline assembly, so we help GCC out by
* allowing it to use its intrinsics for compile-time constant values.
* Some day, GCC will make better code and these can collapse to intrinsics.
*
* For example, in Decoder_AddressSize, inlined into VVT_GetVTInstrInfo:
* __builtin_ffs(a) compiles to:
* mov $0xffffffff, %esi
* bsf %eax, %eax
* cmovz %esi, %eax
* sub $0x1, %eax
* and $0x7, %eax
*
* While the code below compiles to:
* bsf %eax, %eax
* sub $0x1, %eax
*
* Ideally, GCC should have recognized non-zero input in the first case.
* Other instances of the intrinsic produce code like
* sub $1, %eax; add $1, %eax; clts
* **********************************************************
*/
#if __GNUC__ < 4
#define FEWER_BUILTINS
#endif
static INLINE int
lssb32_0(uint32 value)
{
#if defined(USE_ARCH_X86_CUSTOM)
if (!__builtin_constant_p(value)) {
if (UNLIKELY(value == 0)) {
return -1;
} else {
int pos;
__asm__ ("bsfl %1, %0\n" : "=r" (pos) : "rm" (value) : "cc");
return pos;
}
}
#endif
return __builtin_ffs(value) - 1;
}
#ifndef FEWER_BUILTINS
static INLINE int
mssb32_0(uint32 value)
{
/*
* We must keep the UNLIKELY(...) outside the #if defined ...
* because __builtin_clz(0) is undefined according to gcc's
* documentation.
*/
if (UNLIKELY(value == 0)) {
return -1;
} else {
int pos;
#if defined(USE_ARCH_X86_CUSTOM)
if (!__builtin_constant_p(value)) {
__asm__ ("bsrl %1, %0\n" : "=r" (pos) : "rm" (value) : "cc");
return pos;
}
#endif
pos = 32 - __builtin_clz(value) - 1;
return pos;
}
}
static INLINE int
lssb64_0(const uint64 value)
{
#if defined(USE_ARCH_X86_CUSTOM)
if (!__builtin_constant_p(value)) {
if (UNLIKELY(value == 0)) {
return -1;
} else {
intptr_t pos;
#if defined(VM_X86_64)
__asm__ ("bsf %1, %0\n" : "=r" (pos) : "rm" (value) : "cc");
#else
/* The coding was chosen to minimize conditionals and operations */
pos = lssb32_0((uint32) value);
if (pos == -1) {
pos = lssb32_0((uint32) (value >> 32));
if (pos != -1) {
return pos + 32;
}
}
#endif
return pos;
}
}
#endif
return __builtin_ffsll(value) - 1;
}
#endif /* !FEWER_BUILTINS */
#if defined(FEWER_BUILTINS)
/* GCC 3.3.x does not like __builtin_clz or __builtin_ffsll. */
static INLINE int
mssb32_0(uint32 value)
{
if (UNLIKELY(value == 0)) {
return -1;
} else {
int pos;
__asm__ __volatile__("bsrl %1, %0\n" : "=r" (pos) : "rm" (value) : "cc");
return pos;
}
}
static INLINE int
lssb64_0(const uint64 value)
{
if (UNLIKELY(value == 0)) {
return -1;
} else {
intptr_t pos;
#if defined(VM_X86_64)
__asm__ __volatile__("bsf %1, %0\n" : "=r" (pos) : "rm" (value) : "cc");
#else
/* The coding was chosen to minimize conditionals and operations */
pos = lssb32_0((uint32) value);
if (pos == -1) {
pos = lssb32_0((uint32) (value >> 32));
if (pos != -1) {
return pos + 32;
}
}
#endif /* VM_X86_64 */
return pos;
}
}
#endif /* FEWER_BUILTINS */
static INLINE int
mssb64_0(const uint64 value)
{
if (UNLIKELY(value == 0)) {
return -1;
} else {
intptr_t pos;
#if defined(USE_ARCH_X86_CUSTOM)
#if defined(VM_X86_64)
__asm__ ("bsr %1, %0\n" : "=r" (pos) : "rm" (value) : "cc");
#else
/* The coding was chosen to minimize conditionals and operations */
if (value > 0xFFFFFFFFULL) {
pos = 32 + mssb32_0((uint32) (value >> 32));
} else {
pos = mssb32_0((uint32) value);
}
#endif
#else
pos = 64 - __builtin_clzll(value) - 1;
#endif
return pos;
}
}
#if defined(USE_ARCH_X86_CUSTOM)
#undef USE_ARCH_X86_CUSTOM
#endif
#endif
static INLINE int
lssbPtr_0(const uintptr_t value)
{
#if defined(VM_X86_64)
return lssb64_0((uint64) value);
#else
return lssb32_0((uint32) value);
#endif
}
static INLINE int
lssbPtr(const uintptr_t value)
{
return lssbPtr_0(value) + 1;
}
static INLINE int
mssbPtr_0(const uintptr_t value)
{
#if defined(VM_X86_64)
return mssb64_0((uint64) value);
#else
return mssb32_0((uint32) value);
#endif
}
static INLINE int
mssbPtr(const uintptr_t value)
{
return mssbPtr_0(value) + 1;
}
static INLINE int
lssb32(const uint32 value)
{
return lssb32_0(value) + 1;
}
static INLINE int
mssb32(const uint32 value)
{
return mssb32_0(value) + 1;
}
static INLINE int
lssb64(const uint64 value)
{
return lssb64_0(value) + 1;
}
static INLINE int
mssb64(const uint64 value)
{
return mssb64_0(value) + 1;
}
#ifdef __GNUC__
#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
static INLINE void *
uint16set(void *dst, uint16 val, size_t count)
{
#ifdef __arm__
if (count <= 0)
return dst;
__asm__ __volatile__ ("\t"
"1: strh %0, [%1] \n\t"
" subs %2, %2, #1 \n\t"
" bne 1b "
:: "r" (val), "r" (dst), "r" (count)
: "memory"
);
return dst;
#else
size_t dummy0;
void *dummy1;
__asm__ __volatile__("\t"
"cld" "\n\t"
"rep ; stosw" "\n"
: "=c" (dummy0), "=D" (dummy1)
: "0" (count), "1" (dst), "a" (val)
: "memory", "cc"
);
return dst;
#endif
}
static INLINE void *
uint32set(void *dst, uint32 val, size_t count)
{
#ifdef __arm__
if (count <= 0)
return dst;
__asm__ __volatile__ ("\t"
"1: str %0, [%1] \n\t"
" subs %2, %2, #1 \n\t"
" bne 1b "
:: "r" (val), "r" (dst), "r" (count)
: "memory"
);
return dst;
#else
size_t dummy0;
void *dummy1;
__asm__ __volatile__("\t"
"cld" "\n\t"
"rep ; stosl" "\n"
: "=c" (dummy0), "=D" (dummy1)
: "0" (count), "1" (dst), "a" (val)
: "memory", "cc"
);
return dst;
#endif
}
#else /* unknown system: rely on C to write */
static INLINE void *
uint16set(void *dst, uint16 val, size_t count)
{
size_t i;
for (i = 0; i < count; i++) {
((uint16 *) dst)[i] = val;
}
return dst;
}
static INLINE void *
uint32set(void *dst, uint32 val, size_t count)
{
size_t i;
for (i = 0; i < count; i++) {
((uint32 *) dst)[i] = val;
}
return dst;
}
#endif // defined(__i386__) || defined(__x86_64__) || defined(__arm__)
#elif defined(_MSC_VER)
static INLINE void *
uint16set(void *dst, uint16 val, size_t count)
{
#ifdef VM_X86_64
__stosw((uint16*)dst, val, count);
#else
__asm { pushf;
mov ax, val;
mov ecx, count;
mov edi, dst;
cld;
rep stosw;
popf;
}
#endif
return dst;
}
static INLINE void *
uint32set(void *dst, uint32 val, size_t count)
{
#ifdef VM_X86_64
__stosd((unsigned long*)dst, (unsigned long)val, count);
#else
__asm { pushf;
mov eax, val;
mov ecx, count;
mov edi, dst;
cld;
rep stosd;
popf;
}
#endif
return dst;
}
#else
#error "No compiler defined for uint*set"
#endif
/*
*-----------------------------------------------------------------------------
*
* Bswap16 --
*
* Swap the 2 bytes of "v" as follows: 10 -> 01.
*
*-----------------------------------------------------------------------------
*/
static INLINE uint16
Bswap16(uint16 v)
{
return ((v >> 8) & 0x00ff) | ((v << 8) & 0xff00);
}
/*
*-----------------------------------------------------------------------------
*
* Bswap32 --
*
* Swap the 4 bytes of "v" as follows: 3210 -> 0123.
*
*-----------------------------------------------------------------------------
*/
static INLINE uint32
Bswap32(uint32 v) // IN
{
#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) || \
(defined(__arm__) && !defined(__ANDROID__)) // {
#ifdef __arm__
__asm__("rev %0, %0" : "+r"(v));
return v;
#else // __arm__
/* Checked against the Intel manual and GCC. --hpreg */
__asm__(
"bswap %0"
: "=r" (v)
: "0" (v)
);
return v;
#endif // !__arm__
#else // } {
return (v >> 24)
| ((v >> 8) & 0xFF00)
| ((v & 0xFF00) << 8)
| (v << 24) ;
#endif // }
}
#define Bswap Bswap32
/*
*-----------------------------------------------------------------------------
*
* Bswap64 --
*
* Swap the 8 bytes of "v" as follows: 76543210 -> 01234567.
*
*-----------------------------------------------------------------------------
*/
static INLINE uint64
Bswap64(uint64 v) // IN
{
return ((uint64)Bswap((uint32)v) << 32) | Bswap((uint32)(v >> 32));
}
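/*
 * Worked examples: Bswap16(0x1234) == 0x3412,
 * Bswap32(0x12345678) == 0x78563412, and
 * Bswap64(CONST64U(0x0123456789ABCDEF)) == CONST64U(0xEFCDAB8967452301).
 */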
#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
/*
* COMPILER_MEM_BARRIER prevents the compiler from re-ordering memory
* references across the barrier. NOTE: It does not generate any
* instruction, so the CPU is free to do whatever it wants to...
*/
#ifdef __GNUC__ // {
#define COMPILER_MEM_BARRIER() __asm__ __volatile__ ("": : :"memory")
#define COMPILER_READ_BARRIER() COMPILER_MEM_BARRIER()
#define COMPILER_WRITE_BARRIER() COMPILER_MEM_BARRIER()
#elif defined(_MSC_VER)
#define COMPILER_MEM_BARRIER() _ReadWriteBarrier()
#define COMPILER_READ_BARRIER() _ReadBarrier()
#define COMPILER_WRITE_BARRIER() _WriteBarrier()
#endif // }
/*
*----------------------------------------------------------------------
*
* COMPILER_FORCED_LOAD_AND_MEM_BARRIER --
*
* This macro prevents the compiler from re-ordering memory references
* across the barrier. In addition it emits a forced load from the given
* memory reference. The memory reference has to be either 1, 2, 4 or 8
* bytes wide.
* The forced load of a memory reference can be used to exploit details of a
* given CPU's memory model. For example, x86 CPUs won't reorder stores to
* a memory location x with loads from a memory location x.
* NOTE: It does not generate any fencing instruction, so the CPU is free
* to reorder instructions according to its memory model.
*
* Results:
* None
*
* Side Effects:
* None.
*
*----------------------------------------------------------------------
*/
#ifdef VM_X86_64
#ifdef __GNUC__
#define COMPILER_FORCED_LOAD_AND_MEM_BARRIER(_memory_reference) \
do { \
typeof(_memory_reference) _dummy; \
\
asm volatile("mov %1, %0\n\t" \
: "=r" (_dummy) /* Let compiler choose reg for _dummy */ \
: "m" (_memory_reference) \
: "memory"); \
} while(0)
#endif /* __GNUC__ */
#endif /* VM_X86_64 */
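/*
 * Usage sketch (illustrative; 'sharedFlag' is a hypothetical shared uint32,
 * and the macro is only available where defined above):
 *
 *    sharedFlag = 1;                                   // Store, then a
 *    COMPILER_FORCED_LOAD_AND_MEM_BARRIER(sharedFlag); // forced load that x86
 *                                                      // won't reorder ahead
 *                                                      // of the store to the
 *                                                      // same location.
 */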
/*
* PAUSE is a P4 instruction that improves spinlock power+performance;
* on non-P4 IA32 systems, the encoding is interpreted as a REPZ-NOP.
* Use volatile to avoid NOP removal.
*/
static INLINE void
PAUSE(void)
#ifdef __GNUC__
{
#ifdef __arm__
/*
* ARM has no spin-wait hint instruction, so just leave this
* empty.
*/
#else
__asm__ __volatile__( "pause" :);
#endif
}
#elif defined(_MSC_VER)
#ifdef VM_X86_64
{
_mm_pause();
}
#else /* VM_X86_64 */
#pragma warning( disable : 4035)
{
__asm _emit 0xf3 __asm _emit 0x90
}
#pragma warning (default: 4035)
#endif /* VM_X86_64 */
#else /* __GNUC__ */
#error No compiler defined for PAUSE
#endif
/*
* Checked against the Intel manual and GCC --hpreg
*
* volatile because the tsc always changes without the compiler knowing it.
*/
static INLINE uint64
RDTSC(void)
#ifdef __GNUC__
{
#ifdef VM_X86_64
uint64 tscLow;
uint64 tscHigh;
__asm__ __volatile__(
"rdtsc"
: "=a" (tscLow), "=d" (tscHigh)
);
return tscHigh << 32 | tscLow;
#elif defined(__i386__)
uint64 tim;
__asm__ __volatile__(
"rdtsc"
: "=A" (tim)
);
return tim;
#else
/*
* For platforms without a cheap timer, just return 0.
*/
return 0;
#endif
}
#elif defined(_MSC_VER)
#ifdef VM_X86_64
{
return __rdtsc();
}
#else
#pragma warning( disable : 4035)
{
__asm _emit 0x0f __asm _emit 0x31
}
#pragma warning (default: 4035)
#endif /* VM_X86_64 */
#else /* __GNUC__ */
#error No compiler defined for RDTSC
#endif /* __GNUC__ */
#if defined(__i386__) || defined(__x86_64__)
/*
*-----------------------------------------------------------------------------
*
* RDTSC_BARRIER --
*
* Implements an RDTSC fence. Instructions executed prior to the
* fence will have completed before the fence and all stores to
* memory are flushed from the store buffer.
*
* On AMD, MFENCE is sufficient. On Intel, only LFENCE is
* documented to fence RDTSC, but LFENCE won't drain the store
* buffer. So, use MFENCE;LFENCE, which will work on both AMD and
* Intel.
*
* It is the caller's responsibility to check for SSE2 before
* calling this function.
*
* Results:
* None.
*
* Side effects:
* Cause loads and stores prior to this to be globally visible, and
* RDTSC will not pass.
*
*-----------------------------------------------------------------------------
*/
static INLINE void
RDTSC_BARRIER(void)
{
#ifdef __GNUC__
__asm__ __volatile__(
"mfence \n\t"
"lfence \n\t"
::: "memory"
);
#elif defined _MSC_VER
/* Prevent compiler from moving code across mfence/lfence. */
_ReadWriteBarrier();
_mm_mfence();
_mm_lfence();
_ReadWriteBarrier();
#else
#error No compiler defined for RDTSC_BARRIER
#endif
}
#endif // __i386 || __x86_64__
/*
*-----------------------------------------------------------------------------
*
* DEBUGBREAK --
*
* Does an int3 for MSVC / GCC. This is a macro to make sure int3 is
* always inlined.
*
*-----------------------------------------------------------------------------
*/
#ifdef __arm__
#define DEBUGBREAK() __asm__("bkpt")
#else
#ifdef _MSC_VER
#define DEBUGBREAK() __debugbreak()
#else
#define DEBUGBREAK() __asm__("int $3")
#endif
#endif // __arm__
#endif // defined(__i386__) || defined(__x86_64__) || defined(__arm__)
/*
*-----------------------------------------------------------------------------
*
* {Clear,Set,Test}Bit{32,64} --
*
* Sets or clears a specified single bit in the provided variable.
* The index input value specifies which bit to modify and is 0-based.
* Index is truncated by hardware to a 5-bit or 6-bit offset for the
* 32 and 64-bit flavors, respectively, but input values are not validated
* with asserts to avoid include dependencies.
* 64-bit flavors are not provided for 32-bit builds because the inlined
* version can defeat user or compiler optimizations.
*
*-----------------------------------------------------------------------------
*/
static INLINE void
SetBit32(uint32 *var, uint32 index)
{
#ifdef __GNUC__
__asm__ (
"bts %1, %0"
: "+mr" (*var)
: "rI" (index)
: "cc"
);
#elif defined(_MSC_VER)
_bittestandset((long *)var, index);
#endif
}
static INLINE void
ClearBit32(uint32 *var, uint32 index)
{
#ifdef __GNUC__
__asm__ (
"btr %1, %0"
: "+mr" (*var)
: "rI" (index)
: "cc"
);
#elif defined(_MSC_VER)
_bittestandreset((long *)var, index);
#endif
}
#if defined(VM_X86_64)
static INLINE void
SetBit64(uint64 *var, uint64 index)
{
#ifdef __GNUC__
__asm__ (
"bts %1, %0"
: "+mr" (*var)
: "rJ" (index)
: "cc"
);
#elif defined _MSC_VER
_bittestandset64((__int64 *)var, index);
#endif
}
static INLINE void
ClearBit64(uint64 *var, uint64 index)
{
#ifdef __GNUC__
__asm__ (
"btrq %1, %0"
: "+mr" (*var)
: "rJ" (index)
: "cc"
);
#elif defined _MSC_VER
_bittestandreset64((__int64 *)var, index);
#endif
}
#endif /* VM_X86_64 */
static INLINE Bool
TestBit32(const uint32 *var, uint32 index)
{
#ifdef __GNUC__
Bool bit;
__asm__ (
"bt %[index], %[var] \n"
"setc %[bit]"
: [bit] "=qQm" (bit)
: [index] "rI" (index), [var] "r" (*var)
: "cc"
);
return bit;
#else
return (*var & (1 << index)) != 0;
#endif
}
static INLINE Bool
TestBit64(const uint64 *var, uint64 index)
{
#if defined __GNUC__ && defined VM_X86_64
Bool bit;
__asm__ (
"bt %[index], %[var] \n"
"setc %[bit]"
: [bit] "=qQm" (bit)
: [index] "rJ" (index), [var] "r" (*var)
: "cc"
);
return bit;
#else
return (*var & (CONST64U(1) << index)) != 0;
#endif
}
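/*
 * Usage sketch (illustrative):
 *
 *    uint32 flags = 0;
 *
 *    SetBit32(&flags, 5);      // flags == 0x20
 *    if (TestBit32(&flags, 5)) {
 *       ClearBit32(&flags, 5); // flags == 0
 *    }
 */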
/*
*-----------------------------------------------------------------------------
*
* {Clear,Set,Complement,Test}BitVector --
*
* Sets, clears, complements, or tests a specified single bit in the
* provided array. The index input value specifies which bit to modify
* and is 0-based. Bit number can be +-2Gb (+-128MB) relative from 'var'
* variable.
*
* All functions return value of the bit before modification was performed.
*
*-----------------------------------------------------------------------------
*/
static INLINE Bool
SetBitVector(void *var, int32 index)
{
#ifdef __GNUC__
Bool bit;
__asm__ (
"bts %2, %1;"
"setc %0"
: "=qQm" (bit), "+m" (*(volatile uint32 *)var)
: "rI" (index)
: "memory", "cc"
);
return bit;
#elif defined(_MSC_VER)
return _bittestandset((long *)var, index) != 0;
#else
#error No compiler defined for SetBitVector
#endif
}
static INLINE Bool
ClearBitVector(void *var, int32 index)
{
#ifdef __GNUC__
Bool bit;
__asm__ (
"btr %2, %1;"
"setc %0"
: "=qQm" (bit), "+m" (*(volatile uint32 *)var)
: "rI" (index)
: "cc"
);
return bit;
#elif defined(_MSC_VER)
return _bittestandreset((long *)var, index) != 0;
#else
#error No compiler defined for ClearBitVector
#endif
}
static INLINE Bool
ComplementBitVector(void *var, int32 index)
{
#ifdef __GNUC__
Bool bit;
__asm__ (
"btc %2, %1;"
"setc %0"
: "=qQm" (bit), "+m" (*(volatile uint32 *)var)
: "rI" (index)
: "cc"
);
return bit;
#elif defined(_MSC_VER)
return _bittestandcomplement((long *)var, index) != 0;
#else
#error No compiler defined for ComplementBitVector
#endif
}
static INLINE Bool
TestBitVector(const void *var, int32 index)
{
#ifdef __GNUC__
Bool bit;
__asm__ (
"bt %2, %1;"
"setc %0"
: "=qQm" (bit)
: "m" (*(const uint32 *)var), "rI" (index)
: "cc"
);
return bit;
#elif defined _MSC_VER
return _bittest((long *)var, index) != 0;
#else
#error No compiler defined for TestBitVector
#endif
}
/*
*-----------------------------------------------------------------------------
* RoundUpPow2_{64,32} --
*
* Rounds a value up to the next higher power of 2. Returns the original
* value if it is a power of 2. The next power of 2 for inputs {0, 1} is 1.
* The result is undefined for inputs above {2^63, 2^31} (but equal to 1
* in this implementation).
*-----------------------------------------------------------------------------
*/
static INLINE uint64
RoundUpPow2C64(uint64 value)
{
if (value <= 1 || value > (CONST64U(1) << 63)) {
return 1; // Match the assembly's undefined value for large inputs.
} else {
return (CONST64U(2) << mssb64_0(value - 1));
}
}
#if defined(VM_X86_64) && defined(__GNUC__)
static INLINE uint64
RoundUpPow2Asm64(uint64 value)
{
uint64 out = 2;
__asm__("lea -1(%[in]), %%rcx;" // rcx = value - 1. Preserve original.
"bsr %%rcx, %%rcx;" // rcx = log2(value - 1) if value != 1
// if value == 0, then rcx = 63
// if value == 1 then zf = 1, else zf = 0.
"rol %%cl, %[out];" // out = 2 << rcx (if rcx != -1)
// = 2^(log2(value - 1) + 1)
// if rcx == -1 (value == 0), out = 1
// zf is always unmodified.
"cmovz %[in], %[out]" // if value == 1 (zf == 1), write 1 to out.
: [out]"+r"(out) : [in]"r"(value) : "%rcx", "cc");
return out;
}
#endif
static INLINE uint64
RoundUpPow2_64(uint64 value)
{
#if defined(VM_X86_64) && defined(__GNUC__)
if (__builtin_constant_p(value)) {
return RoundUpPow2C64(value);
} else {
return RoundUpPow2Asm64(value);
}
#else
return RoundUpPow2C64(value);
#endif
}
static INLINE uint32
RoundUpPow2C32(uint32 value)
{
if (value <= 1 || value > (1U << 31)) {
return 1; // Match the assembly's undefined value for large inputs.
} else {
return (2 << mssb32_0(value - 1));
}
}
#ifdef __GNUC__
static INLINE uint32
RoundUpPow2Asm32(uint32 value)
{
#ifdef __arm__
uint32 out = 1;
// Note: ARM mode only, not Thumb!
// The value of the argument "value"
// is clobbered!
__asm__("sub %[in], %[in], #1;" // r1 = value - 1 . if value == 0 then r1 = 0xFFFFFFFF
"clz %[in], %[in];" // r1 = log2(value - 1) if value != 1
// if value == 0 then r1 = 0
// if value == 1 then r1 = 32
"mov %[out], %[out], ror %[in]" // out = 2^(32 - r1)
// if out == 2^32 then out = 1 as it is right rotate
: [in]"+r"(value),[out]"+r"(out));
#else
uint32 out = 2;
__asm__("lea -1(%[in]), %%ecx;" // ecx = value - 1. Preserve original.
"bsr %%ecx, %%ecx;" // ecx = log2(value - 1) if value != 1
// if value == 0, then ecx = 31
// if value == 1 then zf = 1, else zf = 0.
"rol %%cl, %[out];" // out = 2 << ecx (if ecx != -1)
// = 2^(log2(value - 1) + 1).
// if ecx == -1 (value == 0), out = 1
// zf is always unmodified
"cmovz %[in], %[out]" // if value == 1 (zf == 1), write 1 to out.
: [out]"+r"(out) : [in]"r"(value) : "%ecx", "cc");
#endif
return out;
}
#endif // __GNUC__
static INLINE uint32
RoundUpPow2_32(uint32 value)
{
#ifdef __GNUC__
if (__builtin_constant_p(value)) {
return RoundUpPow2C32(value);
} else {
return RoundUpPow2Asm32(value);
}
#else
return RoundUpPow2C32(value);
#endif
}
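/*
 * Worked examples: RoundUpPow2_32(0) == 1, RoundUpPow2_32(1) == 1,
 * RoundUpPow2_32(3) == 4, RoundUpPow2_32(4) == 4 and
 * RoundUpPow2_32(5) == 8.
 */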
#endif // _VM_BASIC_ASM_H_
vmmemctl-only/shared/compat_uaccess.h 0000444 0000000 0000000 00000006062 12275350061 016772 0 ustar root root /*********************************************************
* Copyright (C) 2002 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef __COMPAT_UACCESS_H__
# define __COMPAT_UACCESS_H__
/* User space access functions moved in 2.1.7 to asm/uaccess.h --hpreg */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 1, 7)
# include <asm/uaccess.h>
#else
# include <asm/segment.h>
#endif
/* get_user() API modified in 2.1.4 to take 2 arguments --hpreg */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 1, 4)
# define compat_get_user get_user
#else
/*
* We assign 0 to the variable in case of failure to prevent "`_var' might be
* used uninitialized in this function" compiler warnings. I think it is OK,
* because the hardware-based version in newer kernels probably has the same
* semantics and does not guarantee that the value of _var will not be
* modified, should the access fail --hpreg
*/
# define compat_get_user(_var, _uvAddr) ({ \
int _status; \
\
_status = verify_area(VERIFY_READ, _uvAddr, sizeof(*(_uvAddr))); \
if (_status == 0) { \
(_var) = get_user(_uvAddr); \
} else { \
(_var) = 0; \
} \
_status; \
})
#endif
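/*
 * Usage sketch (illustrative; 'uptr' is a hypothetical user-space pointer):
 *
 *    int value;
 *
 *    if (compat_get_user(value, uptr)) {
 *       return -EFAULT;   // Fault while reading user memory.
 *    }
 */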
/*
* The copy_from_user() API appeared in 2.1.4
*
* The emulation is not perfect here, but it is conservative: on failure, we
* always return the total size, instead of the potentially smaller faulty
* size --hpreg
*
* Since 2.5.55 copy_from_user() is no longer macro.
*/
#if !defined(copy_from_user) && LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 0)
# define copy_from_user(_to, _from, _size) ( \
verify_area(VERIFY_READ, _from, _size) \
? (_size) \
: (memcpy_fromfs(_to, _from, _size), 0) \
)
# define copy_to_user(_to, _from, _size) ( \
verify_area(VERIFY_WRITE, _to, _size) \
? (_size) \
: (memcpy_tofs(_to, _from, _size), 0) \
)
#endif
#endif /* __COMPAT_UACCESS_H__ */
vmmemctl-only/shared/vmware.h 0000444 0000000 0000000 00000003502 12275350062 015277 0 ustar root root /*********************************************************
* Copyright (C) 2003 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* vmware.h --
*
* Standard include file for VMware source code.
*/
#ifndef _VMWARE_H_
#define _VMWARE_H_
#define INCLUDE_ALLOW_USERLEVEL
#define INCLUDE_ALLOW_VMCORE
#define INCLUDE_ALLOW_MODULE
#define INCLUDE_ALLOW_VMMON
#define INCLUDE_ALLOW_VMKERNEL
#define INCLUDE_ALLOW_VMK_MODULE
#define INCLUDE_ALLOW_DISTRIBUTE
#include "includeCheck.h"
#include "vm_basic_types.h"
#include "vm_basic_defs.h"
#include "vm_assert.h"
/*
* Global error codes. Currently used internally, but may be exported
* to customers one day, like VM_E_XXX in vmcontrol_constants.h
*/
typedef enum VMwareStatus {
VMWARE_STATUS_SUCCESS, /* success */
VMWARE_STATUS_ERROR, /* generic error */
VMWARE_STATUS_NOMEM, /* generic memory allocation error */
VMWARE_STATUS_INSUFFICIENT_RESOURCES, /* internal or system resource limit exceeded */
VMWARE_STATUS_INVALID_ARGS /* invalid arguments */
} VMwareStatus;
#define VMWARE_SUCCESS(s) ((s) == VMWARE_STATUS_SUCCESS)
#endif // ifndef _VMWARE_H_
vmmemctl-only/shared/driverLog.c 0000444 0000000 0000000 00000011112 12275350061 015721 0 ustar root root /*********************************************************
* Copyright (C) 2007 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* driverLog.c --
*
* Common logging functions for Linux kernel modules.
*/
#include "driver-config.h"
#include "compat_kernel.h"
#include "compat_sched.h"
#include <asm/current.h>
#include "driverLog.h"
#define LINUXLOG_BUFFER_SIZE 1024
static const char *driverLogPrefix = "";
/*
* vsnprintf was born in 2.4.10. Fall back on vsprintf if we're
* an older kernel.
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 10)
# define vsnprintf(str, size, fmt, args) vsprintf(str, fmt, args)
#endif
/*
*----------------------------------------------------------------------------
*
* DriverLog_Init --
*
* Initializes the Linux logging.
*
* Results:
* None.
*
* Side effects:
* None.
*
*----------------------------------------------------------------------------
*/
void
DriverLog_Init(const char *prefix) // IN
{
driverLogPrefix = prefix ? prefix : "";
}
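/*
 * Usage sketch (illustrative): a module typically sets the prefix once at
 * load time, then uses the wrappers below:
 *
 *    DriverLog_Init("vmmemctl");
 *    Warning("unable to create kernel thread, error: %d\n", error);
 */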
/*
*----------------------------------------------------------------------
*
* DriverLogPrint --
*
* Log error message from a Linux module.
*
* Results:
* None.
*
* Side effects:
* None.
*
*----------------------------------------------------------------------
*/
static void
DriverLogPrint(const char *level, // IN: KERN_* constant
const char *fmt, // IN: error format string
va_list args) // IN: arguments for format string
{
static char staticBuf[LINUXLOG_BUFFER_SIZE];
char stackBuf[128];
va_list args2;
const char *buf;
/*
* By default, use a small buffer on the stack (thread safe). If it is too
* small, fall back to a larger static buffer (not thread safe).
*/
va_copy(args2, args);
if (vsnprintf(stackBuf, sizeof stackBuf, fmt, args2) < sizeof stackBuf) {
buf = stackBuf;
} else {
vsnprintf(staticBuf, sizeof staticBuf, fmt, args);
buf = staticBuf;
}
va_end(args2);
printk("%s%s[%d]: %s", level, driverLogPrefix, current->pid, buf);
}
/*
*----------------------------------------------------------------------
*
* Warning --
*
* Warning messages from kernel module: logged into kernel log
* as warnings.
*
* Results:
* None.
*
* Side effects:
* None.
*
*----------------------------------------------------------------------
*/
void
Warning(const char *fmt, ...) // IN: warning format string
{
va_list args;
va_start(args, fmt);
DriverLogPrint(KERN_WARNING, fmt, args);
va_end(args);
}
/*
*----------------------------------------------------------------------
*
* Log --
*
* Log messages from kernel module: logged into kernel log
* as debug information.
*
* Results:
* None.
*
* Side effects:
* None.
*
*----------------------------------------------------------------------
*/
void
Log(const char *fmt, ...) // IN: log format string
{
va_list args;
/*
* Use the kernel log with at least a KERN_DEBUG level
* so it doesn't clutter the screen at (re)boot time on RedHat 6.0.
*/
va_start(args, fmt);
DriverLogPrint(KERN_DEBUG, fmt, args);
va_end(args);
}
/*
*----------------------------------------------------------------------
*
* Panic --
*
* ASSERTION failures and Panics from kernel module get here.
* Message is logged to the kernel log and on console.
*
* Results:
* None.
*
* Side effects:
* Never returns
*
*----------------------------------------------------------------------
*/
void
Panic(const char *fmt, ...) // IN: panic format string
{
va_list args;
va_start(args, fmt);
DriverLogPrint(KERN_EMERG, fmt, args);
va_end(args);
#ifdef BUG
BUG();
#else
/* Should die with %cs unwritable, or at least with page fault. */
asm volatile("movb $0, %cs:(0)");
#endif
while (1);
}
vmmemctl-only/shared/vmware_pack_init.h 0000444 0000000 0000000 00000003644 12275350062 017327 0 ustar root root /*********************************************************
* Copyright (C) 2002 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef __VMWARE_PACK_INIT_H__
# define __VMWARE_PACK_INIT_H__
/*
* vmware_pack_init.h --
*
 * Platform-independent code to make the compiler pack structure
 * definitions (i.e. have them occupy the smallest possible space). The
 * following constructs are known to work --hpreg
*
* #include "vmware_pack_begin.h"
* struct foo {
* ...
* }
* #include "vmware_pack_end.h"
* ;
*
* typedef
* #include "vmware_pack_begin.h"
* struct foo {
* ...
* }
* #include "vmware_pack_end.h"
* foo;
*/
#ifdef _MSC_VER
/*
* MSVC 6.0 emits warning 4103 when the pack push and pop pragma pairing is
* not balanced within 1 included file. That is annoying because our scheme
* is based on the pairing being balanced between 2 included files.
*
 * So we disable this warning, but this is safe because the compiler will also
 * emit warning 4161 when there are more pops than pushes within 1 main
 * file --hpreg
*/
# pragma warning(disable:4103)
#elif __GNUC__
#else
# error No compiler defined for structure packing
#endif
#endif /* __VMWARE_PACK_INIT_H__ */
vmmemctl-only/shared/vmciKernelAPI2.h 0000444 0000000 0000000 00000004104 12275350062 016510 0 ustar root root /*********************************************************
* Copyright (C) 2010 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* vmciKernelAPI2.h --
*
* Kernel API (v2) exported from the VMCI host and guest drivers.
*/
#ifndef __VMCI_KERNELAPI_2_H__
#define __VMCI_KERNELAPI_2_H__
#define INCLUDE_ALLOW_MODULE
#define INCLUDE_ALLOW_VMK_MODULE
#define INCLUDE_ALLOW_VMKERNEL
#include "includeCheck.h"
#include "vmciKernelAPI1.h"
/* Define version 2. */
#undef VMCI_KERNEL_API_VERSION
#define VMCI_KERNEL_API_VERSION_2 2
#define VMCI_KERNEL_API_VERSION VMCI_KERNEL_API_VERSION_2
/* VMCI Doorbell API. */
#define VMCI_FLAG_DELAYED_CB 0x01
typedef void (*VMCICallback)(void *clientData);
int vmci_doorbell_create(VMCIHandle *handle, uint32 flags,
VMCIPrivilegeFlags privFlags, VMCICallback notifyCB,
void *clientData);
int vmci_doorbell_destroy(VMCIHandle handle);
int vmci_doorbell_notify(VMCIHandle handle, VMCIPrivilegeFlags privFlags);
/* Typedefs for all of the above, used by the IOCTLs and the kernel library. */
typedef int (VMCIDoorbell_CreateFct)(VMCIHandle *, uint32, VMCIPrivilegeFlags,
VMCICallback, void *);
typedef int (VMCIDoorbell_DestroyFct)(VMCIHandle);
typedef int (VMCIDoorbell_NotifyFct)(VMCIHandle, VMCIPrivilegeFlags);
#endif /* !__VMCI_KERNELAPI_2_H__ */
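/*
 * Illustrative sketch of the doorbell API above (MyCallback and myData are
 * hypothetical caller-supplied names; error handling is elided):
 *
 *    static void MyCallback(void *clientData) { ... }
 *
 *    VMCIHandle h;
 *    if (vmci_doorbell_create(&h, VMCI_FLAG_DELAYED_CB,
 *                             VMCI_NO_PRIVILEGE_FLAGS,
 *                             MyCallback, myData) == VMCI_SUCCESS) {
 *       ...
 *       vmci_doorbell_destroy(h);
 *    }
 */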
vmmemctl-only/shared/compat_highmem.h 0000444 0000000 0000000 00000002423 12275350061 016757 0 ustar root root /*********************************************************
* Copyright (C) 2012 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef __COMPAT_HIGHMEM_H__
# define __COMPAT_HIGHMEM_H__
#include <linux/highmem.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
# define compat_kmap_atomic(_page) kmap_atomic(_page)
# define compat_kunmap_atomic(_page) kunmap_atomic(_page)
#else
# define compat_kmap_atomic(_page) kmap_atomic((_page), KM_USER0)
# define compat_kunmap_atomic(_page) kunmap_atomic((_page), KM_USER0)
#endif
#endif /* __COMPAT_HIGHMEM_H__ */
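/*
 * Usage sketch (illustrative only, assuming page is a valid struct page *):
 * map a page, touch it, unmap it. The wrappers above make this work both
 * before and after the 3.5 removal of the KM_USER0 argument.
 *
 *    void *va = compat_kmap_atomic(page);
 *    memset(va, 0, PAGE_SIZE);
 *    compat_kunmap_atomic(va);
 */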
vmmemctl-only/shared/vm_basic_asm_x86.h 0000444 0000000 0000000 00000044321 12275350062 017132 0 ustar root root /*********************************************************
* Copyright (C) 1998-2003 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* vm_basic_asm_x86.h
*
* Basic IA32 asm macros
*/
#ifndef _VM_BASIC_ASM_X86_H_
#define _VM_BASIC_ASM_X86_H_
#define INCLUDE_ALLOW_USERLEVEL
#define INCLUDE_ALLOW_MODULE
#define INCLUDE_ALLOW_VMMON
#define INCLUDE_ALLOW_VMK_MODULE
#define INCLUDE_ALLOW_VMKERNEL
#define INCLUDE_ALLOW_DISTRIBUTE
#define INCLUDE_ALLOW_VMCORE
#define INCLUDE_ALLOW_VMIROM
#include "includeCheck.h"
#ifdef VM_X86_64
/*
 * The gcc inline asm uses the "A" constraint, which differs between 32 and
 * 64 bit mode: in 32-bit mode it means the edx:eax pair, while in 64-bit
 * mode it means rax or rdx.
*/
#error "x86-64 not supported"
#endif
/*
* XTEST
* Return TRUE if processor is in transaction region.
*
*/
#if defined(__GNUC__) && (defined(VMM) || defined(VMKERNEL) || defined(FROBOS))
static INLINE Bool
xtest(void)
{
uint8 al;
__asm__ __volatile__(".byte 0x0f, 0x01, 0xd6 # xtest \n"
"setnz %%al\n"
: "=a"(al) : : "cc");
return al;
}
#endif /* __GNUC__ */
/*
* FXSAVE/FXRSTOR
* save/restore SIMD/MMX fpu state
*
* The pointer passed in must be 16-byte aligned.
*
* Intel and AMD processors behave differently w.r.t. fxsave/fxrstor. Intel
* processors unconditionally save the exception pointer state (instruction
* ptr., data ptr., and error instruction opcode). FXSAVE_ES1 and FXRSTOR_ES1
* work correctly for Intel processors.
*
* AMD processors only save the exception pointer state if ES=1. This leads to a
 * security hole whereby one process/VM can inspect the state of another
 * process/VM. The AMD-recommended workaround involves clobbering the exception pointer
* state unconditionally, and this is implemented in FXRSTOR_AMD_ES0. Note that
* FXSAVE_ES1 will only save the exception pointer state for AMD processors if
* ES=1.
*
* The workaround (FXRSTOR_AMD_ES0) only costs 1 cycle more than just doing an
* fxrstor, on both AMD Opteron and Intel Core CPUs.
*/
#if defined(__GNUC__)
static INLINE void
FXSAVE_ES1(void *save)
{
__asm__ __volatile__ ("fxsave %0\n" : "=m" (*(uint8 *)save) : : "memory");
}
static INLINE void
FXRSTOR_ES1(const void *load)
{
__asm__ __volatile__ ("fxrstor %0\n"
: : "m" (*(const uint8 *)load) : "memory");
}
static INLINE void
FXRSTOR_AMD_ES0(const void *load)
{
uint64 dummy = 0;
__asm__ __volatile__
("fnstsw %%ax \n" // Grab x87 ES bit
"bt $7,%%ax \n" // Test ES bit
"jnc 1f \n" // Jump if ES=0
"fnclex \n" // ES=1. Clear it so fild doesn't trap
"1: \n"
"ffree %%st(7) \n" // Clear tag bit - avoid poss. stack overflow
"fildl %0 \n" // Dummy Load from "safe address" changes all
// x87 exception pointers.
"fxrstor %1 \n"
:
: "m" (dummy), "m" (*(const uint8 *)load)
: "ax", "memory");
}
#endif /* __GNUC__ */
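/*
 * Illustrative sketch: the fxsave area is 512 bytes and must be 16-byte
 * aligned, so a caller might declare the buffer like this (gcc syntax,
 * matching the guard above; the buffer name is hypothetical):
 *
 *    static uint8 fpuState[512] __attribute__((aligned(16)));
 *
 *    FXSAVE_ES1(fpuState);
 *    ...
 *    FXRSTOR_ES1(fpuState);
 */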
/*
* XSAVE/XRSTOR
* save/restore GSSE/SIMD/MMX fpu state
*
* The pointer passed in must be 64-byte aligned.
* See above comment for more information.
*/
#if defined(__GNUC__) && (defined(VMM) || defined(VMKERNEL) || defined(FROBOS))
static INLINE void
XSAVE_ES1(void *save, uint64 mask)
{
#if __GNUC__ < 4 || __GNUC__ == 4 && __GNUC_MINOR__ == 1
__asm__ __volatile__ (
".byte 0x0f, 0xae, 0x21 \n"
:
: "c" ((uint8 *)save), "a" ((uint32)mask), "d" ((uint32)(mask >> 32))
: "memory");
#else
__asm__ __volatile__ (
"xsave %0 \n"
: "=m" (*(uint8 *)save)
: "a" ((uint32)mask), "d" ((uint32)(mask >> 32))
: "memory");
#endif
}
static INLINE void
XSAVEOPT_ES1(void *save, uint64 mask)
{
__asm__ __volatile__ (
".byte 0x0f, 0xae, 0x31 \n"
:
: "c" ((uint8 *)save), "a" ((uint32)mask), "d" ((uint32)(mask >> 32))
: "memory");
}
static INLINE void
XRSTOR_ES1(const void *load, uint64 mask)
{
#if __GNUC__ < 4 || __GNUC__ == 4 && __GNUC_MINOR__ == 1
__asm__ __volatile__ (
".byte 0x0f, 0xae, 0x29 \n"
:
: "c" ((const uint8 *)load),
"a" ((uint32)mask), "d" ((uint32)(mask >> 32))
: "memory");
#else
__asm__ __volatile__ (
"xrstor %0 \n"
:
: "m" (*(const uint8 *)load),
"a" ((uint32)mask), "d" ((uint32)(mask >> 32))
: "memory");
#endif
}
static INLINE void
XRSTOR_AMD_ES0(const void *load, uint64 mask)
{
uint64 dummy = 0;
__asm__ __volatile__
("fnstsw %%ax \n" // Grab x87 ES bit
"bt $7,%%ax \n" // Test ES bit
"jnc 1f \n" // Jump if ES=0
"fnclex \n" // ES=1. Clear it so fild doesn't trap
"1: \n"
"ffree %%st(7) \n" // Clear tag bit - avoid poss. stack overflow
"fildl %0 \n" // Dummy Load from "safe address" changes all
// x87 exception pointers.
"mov %%ebx, %%eax \n"
#if __GNUC__ < 4 || __GNUC__ == 4 && __GNUC_MINOR__ == 1
".byte 0x0f, 0xae, 0x29 \n"
:
: "m" (dummy), "c" ((const uint8 *)load),
"b" ((uint32)mask), "d" ((uint32)(mask >> 32))
#else
"xrstor %1 \n"
:
: "m" (dummy), "m" (*(const uint8 *)load),
"b" ((uint32)mask), "d" ((uint32)(mask >> 32))
#endif
: "eax", "memory");
}
#endif /* __GNUC__ */
/*
*-----------------------------------------------------------------------------
*
* Div643232 --
*
* Unsigned integer division:
* The dividend is 64-bit wide
* The divisor is 32-bit wide
* The quotient is 32-bit wide
*
* Use this function if you are certain that:
* o Either the quotient will fit in 32 bits,
* o Or your code is ready to handle a #DE exception indicating overflow.
* If that is not the case, then use Div643264(). --hpreg
*
* Results:
* Quotient and remainder
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
#if defined(__GNUC__)
static INLINE void
Div643232(uint64 dividend, // IN
uint32 divisor, // IN
uint32 *quotient, // OUT
uint32 *remainder) // OUT
{
/* Checked against the Intel manual and GCC --hpreg */
__asm__(
"divl %4"
: "=a" (*quotient),
"=d" (*remainder)
: "0" ((uint32)dividend),
"1" ((uint32)(dividend >> 32)),
"rm" (divisor)
: "cc"
);
}
#elif defined _MSC_VER
static INLINE void
Div643232(uint64 dividend, // IN
uint32 divisor, // IN
uint32 *quotient, // OUT
uint32 *remainder) // OUT
{
/* Written and tested by mann, checked by dbudko and hpreg */
__asm {
mov eax, DWORD PTR [dividend]
mov edx, DWORD PTR [dividend+4]
div DWORD PTR [divisor]
mov edi, DWORD PTR [quotient]
mov [edi], eax
mov edi, DWORD PTR [remainder]
mov [edi], edx
}
}
#else
#error No compiler defined for Div643232
#endif
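/*
 * Worked example (illustrative): dividing 0x100000005 by 2 yields quotient
 * 0x80000002 and remainder 1; the quotient fits in 32 bits, so Div643232()
 * is safe here.
 *
 *    uint32 q, r;
 *    Div643232(CONST64U(0x100000005), 2, &q, &r); // q == 0x80000002, r == 1
 */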
#if defined(__GNUC__)
/*
*-----------------------------------------------------------------------------
*
* Div643264 --
*
* Unsigned integer division:
* The dividend is 64-bit wide
* The divisor is 32-bit wide
* The quotient is 64-bit wide --hpreg
*
* Results:
* Quotient and remainder
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
static INLINE void
Div643264(uint64 dividend, // IN
uint32 divisor, // IN
uint64 *quotient, // OUT
uint32 *remainder) // OUT
{
uint32 hQuotient;
uint32 lQuotient;
/* Checked against the Intel manual and GCC --hpreg */
__asm__(
"divl %5" "\n\t"
"movl %%eax, %0" "\n\t"
"movl %4, %%eax" "\n\t"
"divl %5"
: "=&rm" (hQuotient),
"=a" (lQuotient),
"=d" (*remainder)
: "1" ((uint32)(dividend >> 32)),
"g" ((uint32)dividend),
"rm" (divisor),
"2" (0)
: "cc"
);
*quotient = (uint64)hQuotient << 32 | lQuotient;
}
#endif
/*
*-----------------------------------------------------------------------------
*
* Mul64x3264 --
*
* Unsigned integer by fixed point multiplication:
* Unsigned 64-bit integer multiplicand.
* Unsigned 32-bit fixed point multiplier, represented as
* multiplier >> shift, where shift < 64.
* Unsigned 64-bit integer product.
*
* Implementation:
* Multiply 64x32 bits to yield a full 96-bit product.
* Shift right by shift.
* Return the low-order 64 bits of the result.
*
* Result:
* Product
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
#if defined(__GNUC__)
static INLINE uint64
Mul64x3264(uint64 multiplicand, uint32 multiplier, uint32 shift)
{
uint64 result;
uint32 tmp1, tmp2;
// ASSERT(shift >= 0 && shift < 64);
/*
* Written and tested by mann, improved with suggestions by hpreg.
*
* The main improvement over the previous version is that the test
* of shift against 32 is moved out of the asm and into C code.
* This lets the compiler delete the test and one of the
* alternative code sequences in the case where shift is a
* constant. It also lets us use the best code sequence in each
* alternative, rather than a compromise. The downside is that in
* the non-constant case, this version takes slightly more code
* space.
*
* Note on the constraints: We don't really want multiplicand to
* start in %edx:%eax as the =A constraint dictates; in fact, we'd
* prefer any *other* two registers. But gcc doesn't have
* constraint syntax for any other register pair, and trying to
* constrain ((uint32) multiplicand) to one place and (multiplicand
* >> 32) to another generates *really* bad code -- gcc is just not
* smart enough, at least in the version we are currently using.
*/
if (shift < 32) {
__asm__("mov %%eax, %2 \n\t" // Save lo(multiplicand) in tmp2
"mov %%edx, %%eax \n\t" // Get hi(multiplicand)
"mull %4 \n\t" // p2 = hi(multiplicand) * multiplier
"xchg %%eax, %2 \n\t" // Save lo(p2) in tmp2, get lo(multiplicand)
"mov %%edx, %1 \n\t" // Save hi(p2) in tmp1
"mull %4 \n\t" // p1 = lo(multiplicand) * multiplier
"addl %2, %%edx \n\t" // hi(p1) += lo(p2)
"adcl $0, %1 \n\t" // hi(p2) += carry from previous step
"shrdl %%edx, %%eax \n\t" // result = hi(p2):hi(p1):lo(p1) >> shift
"shrdl %1, %%edx"
: "=A" (result),
"=&r" (tmp1), // use in shrdl requires it to be a register
"=&r" (tmp2) // could be "=&rm" but "m" is slower
: "0" (multiplicand),
"rm" (multiplier),
"c" (shift)
: "cc"
);
} else {
__asm__("mov %%edx, %2 \n\t" // Save hi(multiplicand) in tmp2
"mull %4 \n\t" // p1 = lo(multiplicand) * multiplier
"mov %%edx, %1 \n\t" // Save hi(p1) in tmp1
"mov %2, %%eax \n\t" // Discard lo(p1), get hi(multiplicand)
"mull %4 \n\t" // p2 = hi(multiplicand) * multiplier
"addl %1, %%eax \n\t" // lo(p2) += hi(p1)
"adcl $0, %%edx \n\t" // hi(p2) += carry from previous step
"shrdl %%edx, %%eax \n\t" // result = p2 >> (shift & 31)
"shrl %%cl, %%edx"
: "=A" (result),
"=&r" (tmp1), // could be "=&rm" but "m" is slower
"=&r" (tmp2) // could be "=&rm" but "m" is slower
: "0" (multiplicand),
"rm" (multiplier),
"c" (shift)
: "cc"
);
}
return result;
}
#elif defined _MSC_VER
#pragma warning(disable: 4035)
static INLINE uint64
Mul64x3264(uint64 multiplicand, uint32 multiplier, uint32 shift)
{
// ASSERT(shift >= 0 && shift < 64);
/* Written and tested by mann, checked by dbudko and hpreg */
__asm {
mov eax, DWORD PTR [multiplicand+4] // Get hi(multiplicand)
mul DWORD PTR [multiplier] // p2 = hi(multiplicand) * multiplier
mov ecx, eax // Save lo(p2)
mov ebx, edx // Save hi(p2)
mov eax, DWORD PTR [multiplicand] // Get lo(multiplicand)
mul DWORD PTR [multiplier+0] // p1 = lo(multiplicand) * multiplier
add edx, ecx // hi(p1) += lo(p2)
adc ebx, 0 // hi(p2) += carry from previous step
mov ecx, DWORD PTR [shift] // Get shift
cmp ecx, 32 // shift < 32?
jl SHORT l2 // Go if so
mov eax, edx // result = hi(p2):hi(p1) >> (shift & 31)
mov edx, ebx
shrd eax, edx, cl
shr edx, cl
jmp SHORT l3
l2:
shrd eax, edx, cl // result = hi(p2):hi(p1):lo(p1) >> shift
shrd edx, ebx, cl
l3:
}
// return with result in edx:eax
}
#pragma warning(default: 4035)
#else
#error No compiler defined for Mul64x3264
#endif
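/*
 * Worked example (illustrative): with multiplier 0x80000000 and shift 32 the
 * fixed point factor is 2^31 / 2^32 = 0.5, so the product is half the
 * multiplicand.
 *
 *    uint64 half = Mul64x3264(CONST64U(1000000), 0x80000000, 32);
 *    // half == 500000
 */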
/*
*-----------------------------------------------------------------------------
*
* Muls64x32s64 --
*
* Signed integer by fixed point multiplication:
* Signed 64-bit integer multiplicand.
* Unsigned 32-bit fixed point multiplier, represented as
* multiplier >> shift, where shift < 64.
* Signed 64-bit integer product.
*
* Implementation:
* Multiply 64x32 bits to yield a full 96-bit product.
* Shift right by the location of the binary point.
* Return the low-order 64 bits of the result.
*
* Result:
* Product
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
#if defined(__GNUC__)
static INLINE int64
Muls64x32s64(int64 multiplicand, uint32 multiplier, uint32 shift)
{
int64 result;
uint32 tmp1, tmp2;
// ASSERT(shift >= 0 && shift < 64);
/* Written and tested by mann, checked by dbudko and hpreg */
/* XXX hpreg suggested some improvements that we haven't converged on yet */
__asm__("mov %%eax, %2\n\t" // Save lo(multiplicand)
"mov %%edx, %%eax\n\t" // Get hi(multiplicand)
"test %%eax, %%eax\n\t" // Check sign of multiplicand
"jl 0f\n\t" // Go if negative
"mull %4\n\t" // p2 = hi(multiplicand) * multiplier
"jmp 1f\n"
"0:\n\t"
"mull %4\n\t" // p2 = hi(multiplicand) * multiplier
"sub %4, %%edx\n" // hi(p2) += -1 * multiplier
"1:\n\t"
"xchg %%eax, %2\n\t" // Save lo(p2), get lo(multiplicand)
"mov %%edx, %1\n\t" // Save hi(p2)
"mull %4\n\t" // p1 = lo(multiplicand) * multiplier
"addl %2, %%edx\n\t" // hi(p1) += lo(p2)
"adcl $0, %1\n\t" // hi(p2) += carry from previous step
"cmpl $32, %%ecx\n\t" // shift < 32?
"jl 2f\n\t" // Go if so
"mov %%edx, %%eax\n\t" // result = hi(p2):hi(p1) >> (shift & 31)
"mov %1, %%edx\n\t"
"shrdl %%edx, %%eax\n\t"
"sarl %%cl, %%edx\n\t"
"jmp 3f\n"
"2:\n\t"
"shrdl %%edx, %%eax\n\t" // result = hi(p2):hi(p1):lo(p1) >> shift
"shrdl %1, %%edx\n"
"3:\n\t"
: "=A" (result), "=&r" (tmp1), "=&rm" (tmp2)
: "0" (multiplicand), "rm" (multiplier), "c" (shift)
: "cc");
return result;
}
#elif defined _MSC_VER
#pragma warning(disable: 4035)
static INLINE int64
Muls64x32s64(int64 multiplicand, uint32 multiplier, uint32 shift)
{
//ASSERT(shift >= 0 && shift < 64);
/* Written and tested by mann, checked by dbudko and hpreg */
__asm {
mov eax, DWORD PTR [multiplicand+4] // Get hi(multiplicand)
test eax, eax // Check sign of multiplicand
jl SHORT l0 // Go if negative
mul DWORD PTR [multiplier] // p2 = hi(multiplicand) * multiplier
jmp SHORT l1
l0:
mul DWORD PTR [multiplier] // p2 = hi(multiplicand) * multiplier
sub edx, DWORD PTR [multiplier] // hi(p2) += -1 * multiplier
l1:
mov ecx, eax // Save lo(p2)
mov ebx, edx // Save hi(p2)
mov eax, DWORD PTR [multiplicand] // Get lo(multiplicand)
mul DWORD PTR [multiplier] // p1 = lo(multiplicand) * multiplier
add edx, ecx // hi(p1) += lo(p2)
adc ebx, 0 // hi(p2) += carry from previous step
mov ecx, DWORD PTR [shift] // Get shift
cmp ecx, 32 // shift < 32?
jl SHORT l2 // Go if so
mov eax, edx // result = hi(p2):hi(p1) >> (shift & 31)
mov edx, ebx
shrd eax, edx, cl
sar edx, cl
jmp SHORT l3
l2:
      shrd eax, edx, cl // result = hi(p2):hi(p1):lo(p1) >> shift
shrd edx, ebx, cl
l3:
}
// return with result in edx:eax
}
#pragma warning(default: 4035)
#else
#error No compiler defined for Muls64x32s64
#endif
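/*
 * Worked example (illustrative): the signed variant handles a negative
 * multiplicand, e.g. -1000 * (3 / 2^1) == -1500.
 *
 *    int64 p = Muls64x32s64(-1000, 3, 1); // p == -1500
 */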
#endif
vmmemctl-only/shared/vmci_defs.h 0000444 0000000 0000000 00000065447 12275350062 015755 0 ustar root root /*********************************************************
* Copyright (C) 2005-2012 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef _VMCI_DEF_H_
#define _VMCI_DEF_H_
#define INCLUDE_ALLOW_USERLEVEL
#define INCLUDE_ALLOW_VMMEXT
#define INCLUDE_ALLOW_MODULE
#define INCLUDE_ALLOW_VMMON
#define INCLUDE_ALLOW_VMCORE
#define INCLUDE_ALLOW_VMK_MODULE
#define INCLUDE_ALLOW_VMKERNEL
#define INCLUDE_ALLOW_DISTRIBUTE
#include "includeCheck.h"
#include "vm_basic_types.h"
#include "vm_atomic.h"
#include "vm_assert.h"
/* Register offsets. */
#define VMCI_STATUS_ADDR 0x00
#define VMCI_CONTROL_ADDR 0x04
#define VMCI_ICR_ADDR 0x08
#define VMCI_IMR_ADDR 0x0c
#define VMCI_DATA_OUT_ADDR 0x10
#define VMCI_DATA_IN_ADDR 0x14
#define VMCI_CAPS_ADDR 0x18
#define VMCI_RESULT_LOW_ADDR 0x1c
#define VMCI_RESULT_HIGH_ADDR 0x20
/* Max number of devices. */
#define VMCI_MAX_DEVICES 1
/* Status register bits. */
#define VMCI_STATUS_INT_ON 0x1
/* Control register bits. */
#define VMCI_CONTROL_RESET 0x1
#define VMCI_CONTROL_INT_ENABLE 0x2
#define VMCI_CONTROL_INT_DISABLE 0x4
/* Capabilities register bits. */
#define VMCI_CAPS_HYPERCALL 0x1
#define VMCI_CAPS_GUESTCALL 0x2
#define VMCI_CAPS_DATAGRAM 0x4
#define VMCI_CAPS_NOTIFICATIONS 0x8
/* Interrupt Cause register bits. */
#define VMCI_ICR_DATAGRAM 0x1
#define VMCI_ICR_NOTIFICATION 0x2
/* Interrupt Mask register bits. */
#define VMCI_IMR_DATAGRAM 0x1
#define VMCI_IMR_NOTIFICATION 0x2
/* Interrupt type. */
typedef enum VMCIIntrType {
VMCI_INTR_TYPE_INTX = 0,
VMCI_INTR_TYPE_MSI = 1,
VMCI_INTR_TYPE_MSIX = 2
} VMCIIntrType;
/*
* Maximum MSI/MSI-X interrupt vectors in the device.
*/
#define VMCI_MAX_INTRS 2
/*
* Supported interrupt vectors. There is one for each ICR value above,
* but here they indicate the position in the vector array/message ID.
*/
#define VMCI_INTR_DATAGRAM 0
#define VMCI_INTR_NOTIFICATION 1
/*
* A single VMCI device has an upper limit of 128 MiB on the amount of
* memory that can be used for queue pairs.
*/
#define VMCI_MAX_GUEST_QP_MEMORY (128 * 1024 * 1024)
/*
* Queues with pre-mapped data pages must be small, so that we don't pin
* too much kernel memory (especially on vmkernel). We limit a queuepair to
* 32 KB, or 16 KB per queue for symmetrical pairs.
*
* XXX, we are raising this limit to 4MB to support high-throughput workloads
* with vioi-filter. Once we switch to rings instead of queuepairs for the
* page channel, we will drop this limit again. See PR 852983.
*/
#define VMCI_MAX_PINNED_QP_MEMORY (4 * 1024 * 1024)
/*
* We have a fixed set of resource IDs available in the VMX.
* This allows us to have a very simple implementation since we statically
* know how many will create datagram handles. If a new caller arrives and
* we have run out of slots we can manually increment the maximum size of
* available resource IDs.
*/
typedef uint32 VMCI_Resource;
/* VMCI reserved hypervisor datagram resource IDs. */
#define VMCI_RESOURCES_QUERY 0
#define VMCI_GET_CONTEXT_ID 1
#define VMCI_SET_NOTIFY_BITMAP 2
#define VMCI_DOORBELL_LINK 3
#define VMCI_DOORBELL_UNLINK 4
#define VMCI_DOORBELL_NOTIFY 5
/*
* VMCI_DATAGRAM_REQUEST_MAP and VMCI_DATAGRAM_REMOVE_MAP are
* obsoleted by the removal of VM to VM communication.
*/
#define VMCI_DATAGRAM_REQUEST_MAP 6
#define VMCI_DATAGRAM_REMOVE_MAP 7
#define VMCI_EVENT_SUBSCRIBE 8
#define VMCI_EVENT_UNSUBSCRIBE 9
#define VMCI_QUEUEPAIR_ALLOC 10
#define VMCI_QUEUEPAIR_DETACH 11
/*
* VMCI_VSOCK_VMX_LOOKUP was assigned to 12 for Fusion 3.0/3.1,
* WS 7.0/7.1 and ESX 4.1
*/
#define VMCI_HGFS_TRANSPORT 13
#define VMCI_UNITY_PBRPC_REGISTER 14
/*
* The next two resources are for RPC calls from guest Tools, to replace the
* backdoor calls we used previously. Privileged is for admin/root RPCs,
* unprivileged is for RPCs from any user.
*/
#define VMCI_RPC_PRIVILEGED 15
#define VMCI_RPC_UNPRIVILEGED 16
#define VMCI_RESOURCE_MAX 17
/*
* The core VMCI device functionality only requires the resource IDs of
* VMCI_QUEUEPAIR_DETACH and below.
*/
#define VMCI_CORE_DEVICE_RESOURCE_MAX VMCI_QUEUEPAIR_DETACH
/*
* VMCI reserved host datagram resource IDs.
* vsock control channel has resource id 1.
*/
#define VMCI_DVFILTER_DATA_PATH_DATAGRAM 2
/* VMCI Ids. */
typedef uint32 VMCIId;
typedef struct VMCIIdRange {
int8 action; // VMCI_FA_X, for use in filters.
VMCIId begin; // Beginning of range
VMCIId end; // End of range
} VMCIIdRange;
typedef struct VMCIHandle {
VMCIId context;
VMCIId resource;
} VMCIHandle;
static INLINE VMCIHandle
VMCI_MAKE_HANDLE(VMCIId cid, // IN:
VMCIId rid) // IN:
{
VMCIHandle h;
h.context = cid;
h.resource = rid;
return h;
}
/*
*----------------------------------------------------------------------
*
* VMCI_HANDLE_TO_UINT64 --
*
* Helper for VMCI handle to uint64 conversion.
*
* Results:
* The uint64 value.
*
* Side effects:
* None.
*
*----------------------------------------------------------------------
*/
static INLINE uint64
VMCI_HANDLE_TO_UINT64(VMCIHandle handle) // IN:
{
uint64 handle64;
handle64 = handle.context;
handle64 <<= 32;
handle64 |= handle.resource;
return handle64;
}
/*
*----------------------------------------------------------------------
*
* VMCI_UINT64_TO_HANDLE --
*
* Helper for uint64 to VMCI handle conversion.
*
* Results:
* The VMCI handle value.
*
* Side effects:
* None.
*
*----------------------------------------------------------------------
*/
static INLINE VMCIHandle
VMCI_UINT64_TO_HANDLE(uint64 handle64) // IN:
{
VMCIId context = (VMCIId)(handle64 >> 32);
VMCIId resource = (VMCIId)handle64;
return VMCI_MAKE_HANDLE(context, resource);
}
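/*
 * Illustrative round trip: the two conversions above are inverses, so
 * packing and unpacking preserve both ids.
 *
 *    VMCIHandle h = VMCI_MAKE_HANDLE(3, 42);
 *    uint64 u = VMCI_HANDLE_TO_UINT64(h);   // 0x000000030000002A
 *    VMCIHandle h2 = VMCI_UINT64_TO_HANDLE(u);
 *    // VMCI_HANDLE_EQUAL(h, h2) is TRUE
 */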
#define VMCI_HANDLE_TO_CONTEXT_ID(_handle) ((_handle).context)
#define VMCI_HANDLE_TO_RESOURCE_ID(_handle) ((_handle).resource)
#define VMCI_HANDLE_EQUAL(_h1, _h2) ((_h1).context == (_h2).context && \
(_h1).resource == (_h2).resource)
#define VMCI_INVALID_ID 0xFFFFFFFF
static const VMCIHandle VMCI_INVALID_HANDLE = {VMCI_INVALID_ID,
VMCI_INVALID_ID};
#define VMCI_HANDLE_INVALID(_handle) \
VMCI_HANDLE_EQUAL((_handle), VMCI_INVALID_HANDLE)
/*
* The below defines can be used to send anonymous requests.
* This also indicates that no response is expected.
*/
#define VMCI_ANON_SRC_CONTEXT_ID VMCI_INVALID_ID
#define VMCI_ANON_SRC_RESOURCE_ID VMCI_INVALID_ID
#define VMCI_ANON_SRC_HANDLE VMCI_MAKE_HANDLE(VMCI_ANON_SRC_CONTEXT_ID, \
VMCI_ANON_SRC_RESOURCE_ID)
/* The lowest 16 context ids are reserved for internal use. */
#define VMCI_RESERVED_CID_LIMIT 16
/*
* Hypervisor context id, used for calling into hypervisor
* supplied services from the VM.
*/
#define VMCI_HYPERVISOR_CONTEXT_ID 0
/*
* Well-known context id, a logical context that contains a set of
* well-known services. This context ID is now obsolete.
*/
#define VMCI_WELL_KNOWN_CONTEXT_ID 1
/*
* Context ID used by host endpoints.
*/
#define VMCI_HOST_CONTEXT_ID 2
#define VMCI_CONTEXT_IS_VM(_cid) (VMCI_INVALID_ID != _cid && \
_cid > VMCI_HOST_CONTEXT_ID)
/*
* The VMCI_CONTEXT_RESOURCE_ID is used together with VMCI_MAKE_HANDLE to make
* handles that refer to a specific context.
*/
#define VMCI_CONTEXT_RESOURCE_ID 0
/*
*-----------------------------------------------------------------------------
*
* VMCI error codes.
*
*-----------------------------------------------------------------------------
*/
#define VMCI_SUCCESS_QUEUEPAIR_ATTACH 5
#define VMCI_SUCCESS_QUEUEPAIR_CREATE 4
#define VMCI_SUCCESS_LAST_DETACH 3
#define VMCI_SUCCESS_ACCESS_GRANTED 2
#define VMCI_SUCCESS_ENTRY_DEAD 1
#define VMCI_SUCCESS 0LL
#define VMCI_ERROR_INVALID_RESOURCE (-1)
#define VMCI_ERROR_INVALID_ARGS (-2)
#define VMCI_ERROR_NO_MEM (-3)
#define VMCI_ERROR_DATAGRAM_FAILED (-4)
#define VMCI_ERROR_MORE_DATA (-5)
#define VMCI_ERROR_NO_MORE_DATAGRAMS (-6)
#define VMCI_ERROR_NO_ACCESS (-7)
#define VMCI_ERROR_NO_HANDLE (-8)
#define VMCI_ERROR_DUPLICATE_ENTRY (-9)
#define VMCI_ERROR_DST_UNREACHABLE (-10)
#define VMCI_ERROR_PAYLOAD_TOO_LARGE (-11)
#define VMCI_ERROR_INVALID_PRIV (-12)
#define VMCI_ERROR_GENERIC (-13)
#define VMCI_ERROR_PAGE_ALREADY_SHARED (-14)
#define VMCI_ERROR_CANNOT_SHARE_PAGE (-15)
#define VMCI_ERROR_CANNOT_UNSHARE_PAGE (-16)
#define VMCI_ERROR_NO_PROCESS (-17)
#define VMCI_ERROR_NO_DATAGRAM (-18)
#define VMCI_ERROR_NO_RESOURCES (-19)
#define VMCI_ERROR_UNAVAILABLE (-20)
#define VMCI_ERROR_NOT_FOUND (-21)
#define VMCI_ERROR_ALREADY_EXISTS (-22)
#define VMCI_ERROR_NOT_PAGE_ALIGNED (-23)
#define VMCI_ERROR_INVALID_SIZE (-24)
#define VMCI_ERROR_REGION_ALREADY_SHARED (-25)
#define VMCI_ERROR_TIMEOUT (-26)
#define VMCI_ERROR_DATAGRAM_INCOMPLETE (-27)
#define VMCI_ERROR_INCORRECT_IRQL (-28)
#define VMCI_ERROR_EVENT_UNKNOWN (-29)
#define VMCI_ERROR_OBSOLETE (-30)
#define VMCI_ERROR_QUEUEPAIR_MISMATCH (-31)
#define VMCI_ERROR_QUEUEPAIR_NOTSET (-32)
#define VMCI_ERROR_QUEUEPAIR_NOTOWNER (-33)
#define VMCI_ERROR_QUEUEPAIR_NOTATTACHED (-34)
#define VMCI_ERROR_QUEUEPAIR_NOSPACE (-35)
#define VMCI_ERROR_QUEUEPAIR_NODATA (-36)
#define VMCI_ERROR_BUSMEM_INVALIDATION (-37)
#define VMCI_ERROR_MODULE_NOT_LOADED (-38)
#define VMCI_ERROR_DEVICE_NOT_FOUND (-39)
#define VMCI_ERROR_QUEUEPAIR_NOT_READY (-40)
#define VMCI_ERROR_WOULD_BLOCK (-41)
/* VMCI clients should return error codes within this range. */
#define VMCI_ERROR_CLIENT_MIN (-500)
#define VMCI_ERROR_CLIENT_MAX (-550)
/* Internal error codes. */
#define VMCI_SHAREDMEM_ERROR_BAD_CONTEXT (-1000)
#define VMCI_PATH_MAX 256
/* VMCI reserved events. */
typedef uint32 VMCI_Event;
#define VMCI_EVENT_CTX_ID_UPDATE 0 // Only applicable to guest endpoints
#define VMCI_EVENT_CTX_REMOVED 1 // Applicable to guest and host
#define VMCI_EVENT_QP_RESUMED 2 // Only applicable to guest endpoints
#define VMCI_EVENT_QP_PEER_ATTACH 3 // Applicable to guest and host
#define VMCI_EVENT_QP_PEER_DETACH 4 // Applicable to guest and host
#define VMCI_EVENT_MEM_ACCESS_ON 5 // Applicable to VMX and vmk. On vmk,
// this event has the Context payload type.
#define VMCI_EVENT_MEM_ACCESS_OFF 6 // Applicable to VMX and vmk. Same as
// above for the payload type.
#define VMCI_EVENT_MAX 7
/*
* Of the above events, a few are reserved for use in the VMX, and
* other endpoints (guest and host kernel) should not use them. For
* the rest of the events, we allow both host and guest endpoints to
* subscribe to them, to maintain the same API for host and guest
* endpoints.
*/
#define VMCI_EVENT_VALID_VMX(_event) (_event == VMCI_EVENT_MEM_ACCESS_ON || \
_event == VMCI_EVENT_MEM_ACCESS_OFF)
#if defined(VMX86_SERVER)
#define VMCI_EVENT_VALID(_event) (_event < VMCI_EVENT_MAX)
#else // VMX86_SERVER
#define VMCI_EVENT_VALID(_event) (_event < VMCI_EVENT_MAX && \
!VMCI_EVENT_VALID_VMX(_event))
#endif // VMX86_SERVER
/* Reserved guest datagram resource ids. */
#define VMCI_EVENT_HANDLER 0
/* VMCI privileges. */
typedef enum VMCIResourcePrivilegeType {
VMCI_PRIV_CH_PRIV,
VMCI_PRIV_DESTROY_RESOURCE,
VMCI_PRIV_ASSIGN_CLIENT,
VMCI_PRIV_DG_CREATE,
VMCI_PRIV_DG_SEND,
VMCI_PRIV_NOTIFY,
VMCI_NUM_PRIVILEGES,
} VMCIResourcePrivilegeType;
/*
 * VMCI coarse-grained privileges (per context or host
 * process/endpoint). An entity with the restricted flag is only
* allowed to interact with the hypervisor and trusted entities.
*/
typedef uint32 VMCIPrivilegeFlags;
#define VMCI_PRIVILEGE_FLAG_RESTRICTED 0x01
#define VMCI_PRIVILEGE_FLAG_TRUSTED 0x02
#define VMCI_PRIVILEGE_ALL_FLAGS (VMCI_PRIVILEGE_FLAG_RESTRICTED | \
VMCI_PRIVILEGE_FLAG_TRUSTED)
#define VMCI_NO_PRIVILEGE_FLAGS 0x00
#define VMCI_DEFAULT_PROC_PRIVILEGE_FLAGS VMCI_NO_PRIVILEGE_FLAGS
#define VMCI_LEAST_PRIVILEGE_FLAGS VMCI_PRIVILEGE_FLAG_RESTRICTED
#define VMCI_MAX_PRIVILEGE_FLAGS VMCI_PRIVILEGE_FLAG_TRUSTED
#define VMCI_PUBLIC_GROUP_NAME "vmci public group"
/* 0 through VMCI_RESERVED_RESOURCE_ID_MAX are reserved. */
#define VMCI_RESERVED_RESOURCE_ID_MAX 1023
#define VMCI_DOMAIN_NAME_MAXLEN 32
#define VMCI_LGPFX "VMCI: "
/*
* VMCIQueueHeader
*
* A Queue cannot stand by itself as designed. Each Queue's header
* contains a pointer into itself (the producerTail) and into its peer
* (consumerHead). The reason for the separation is one of
* accessibility: Each end-point can modify two things: where the next
* location to enqueue is within its produceQ (producerTail); and
* where the next dequeue location is in its consumeQ (consumerHead).
*
* An end-point cannot modify the pointers of its peer (guest to
* guest; NOTE that in the host both queue headers are mapped r/w).
* But, each end-point needs read access to both Queue header
* structures in order to determine how much space is used (or left)
* in the Queue. This is because for an end-point to know how full
* its produceQ is, it needs to use the consumerHead that points into
* the produceQ but -that- consumerHead is in the Queue header for
 * that end-point's consumeQ.
*
* Thoroughly confused? Sorry.
*
* producerTail: the point to enqueue new entrants. When you approach
* a line in a store, for example, you walk up to the tail.
*
* consumerHead: the point in the queue from which the next element is
* dequeued. In other words, who is next in line is he who is at the
* head of the line.
*
* Also, producerTail points to an empty byte in the Queue, whereas
* consumerHead points to a valid byte of data (unless producerTail ==
* consumerHead in which case consumerHead does not point to a valid
* byte of data).
*
* For a queue of buffer 'size' bytes, the tail and head pointers will be in
* the range [0, size-1].
*
* If produceQHeader->producerTail == consumeQHeader->consumerHead
* then the produceQ is empty.
*/
typedef struct VMCIQueueHeader {
/* All fields are 64bit and aligned. */
VMCIHandle handle; /* Identifier. */
Atomic_uint64 producerTail; /* Offset in this queue. */
Atomic_uint64 consumerHead; /* Offset in peer queue. */
} VMCIQueueHeader;
/*
* If one client of a QueuePair is a 32bit entity, we restrict the QueuePair
* size to be less than 4GB, and use 32bit atomic operations on the head and
* tail pointers. 64bit atomic read on a 32bit entity involves cmpxchg8b which
* is an atomic read-modify-write. This will cause traces to fire when a 32bit
* consumer tries to read the producer's tail pointer, for example, because the
* consumer has read-only access to the producer's tail pointer.
*
* We provide the following macros to invoke 32bit or 64bit atomic operations
* based on the architecture the code is being compiled on.
*/
/* Architecture independent maximum queue size. */
#define QP_MAX_QUEUE_SIZE_ARCH_ANY CONST64U(0xffffffff)
#ifdef __x86_64__
# define QP_MAX_QUEUE_SIZE_ARCH CONST64U(0xffffffffffffffff)
# define QPAtomic_ReadOffset(x) Atomic_Read64(x)
# define QPAtomic_WriteOffset(x, y) Atomic_Write64(x, y)
#else
/*
* Wrappers below are being used to call Atomic_Read32 because of the
* 'type punned' compilation warning received when Atomic_Read32 is
 * called with an Atomic_uint64 pointer typecast to an Atomic_uint32
* pointer from QPAtomic_ReadOffset. Ditto with QPAtomic_WriteOffset.
*/
static INLINE uint32
TypeSafe_Atomic_Read32(void *var) // IN:
{
return Atomic_Read32((Atomic_uint32 *)(var));
}
static INLINE void
TypeSafe_Atomic_Write32(void *var, uint32 val) // IN:
{
Atomic_Write32((Atomic_uint32 *)(var), (uint32)(val));
}
# define QP_MAX_QUEUE_SIZE_ARCH CONST64U(0xffffffff)
# define QPAtomic_ReadOffset(x) TypeSafe_Atomic_Read32((void *)(x))
# define QPAtomic_WriteOffset(x, y) \
TypeSafe_Atomic_Write32((void *)(x), (uint32)(y))
#endif /* __x86_64__ */
/*
*-----------------------------------------------------------------------------
*
* QPAddPointer --
*
* Helper to add a given offset to a head or tail pointer. Wraps the value
* of the pointer around the max size of the queue.
*
* Results:
* None.
*
* Side effects:
* None.
*
*-----------------------------------------------------------------------------
*/
static INLINE void
QPAddPointer(Atomic_uint64 *var, // IN:
size_t add, // IN:
uint64 size) // IN:
{
uint64 newVal = QPAtomic_ReadOffset(var);
if (newVal >= size - add) {
newVal -= size;
}
newVal += add;
QPAtomic_WriteOffset(var, newVal);
}
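/*
 * Worked example (illustrative): with size 16, a pointer at offset 14
 * advanced by 4 wraps to offset 2, since 14 >= 16 - 4 triggers the
 * subtraction before the addition (the uint64 arithmetic is modular).
 *
 *    Atomic_uint64 off = { 14 };
 *    QPAddPointer(&off, 4, 16); // off.value == 2
 */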
/*
*-----------------------------------------------------------------------------
*
* VMCIQueueHeader_ProducerTail() --
*
* Helper routine to get the Producer Tail from the supplied queue.
*
* Results:
* The contents of the queue's producer tail.
*
* Side effects:
* None.
*
*-----------------------------------------------------------------------------
*/
static INLINE uint64
VMCIQueueHeader_ProducerTail(const VMCIQueueHeader *qHeader) // IN:
{
VMCIQueueHeader *qh = (VMCIQueueHeader *)qHeader;
return QPAtomic_ReadOffset(&qh->producerTail);
}
/*
*-----------------------------------------------------------------------------
*
* VMCIQueueHeader_ConsumerHead() --
*
* Helper routine to get the Consumer Head from the supplied queue.
*
* Results:
 * The contents of the queue's consumer head.
*
* Side effects:
* None.
*
*-----------------------------------------------------------------------------
*/
static INLINE uint64
VMCIQueueHeader_ConsumerHead(const VMCIQueueHeader *qHeader) // IN:
{
VMCIQueueHeader *qh = (VMCIQueueHeader *)qHeader;
return QPAtomic_ReadOffset(&qh->consumerHead);
}
/*
*-----------------------------------------------------------------------------
*
* VMCIQueueHeader_AddProducerTail() --
*
* Helper routine to increment the Producer Tail. Fundamentally,
* QPAddPointer() is used to manipulate the tail itself.
*
* Results:
* None.
*
* Side effects:
* None.
*
*-----------------------------------------------------------------------------
*/
static INLINE void
VMCIQueueHeader_AddProducerTail(VMCIQueueHeader *qHeader, // IN/OUT:
size_t add, // IN:
uint64 queueSize) // IN:
{
QPAddPointer(&qHeader->producerTail, add, queueSize);
}
/*
*-----------------------------------------------------------------------------
*
* VMCIQueueHeader_AddConsumerHead() --
*
* Helper routine to increment the Consumer Head. Fundamentally,
* QPAddPointer() is used to manipulate the head itself.
*
* Results:
* None.
*
* Side effects:
* None.
*
*-----------------------------------------------------------------------------
*/
static INLINE void
VMCIQueueHeader_AddConsumerHead(VMCIQueueHeader *qHeader, // IN/OUT:
size_t add, // IN:
uint64 queueSize) // IN:
{
QPAddPointer(&qHeader->consumerHead, add, queueSize);
}
/*
*-----------------------------------------------------------------------------
*
* VMCIQueueHeader_CheckAlignment --
*
 * Checks whether the given queue header is aligned to a page boundary.
 * Returns TRUE if the alignment is good.
*
* Results:
* TRUE or FALSE.
*
* Side effects:
* None.
*
*-----------------------------------------------------------------------------
*/
static INLINE Bool
VMCIQueueHeader_CheckAlignment(const VMCIQueueHeader *qHeader) // IN:
{
uintptr_t hdr, offset;
hdr = (uintptr_t) qHeader;
   offset = hdr & (PAGE_SIZE - 1);
return offset == 0;
}
/*
*-----------------------------------------------------------------------------
*
* VMCIQueueHeader_GetPointers --
*
* Helper routine for getting the head and the tail pointer for a queue.
* Both the VMCIQueues are needed to get both the pointers for one queue.
*
* Results:
* None.
*
* Side effects:
* None.
*
*-----------------------------------------------------------------------------
*/
static INLINE void
VMCIQueueHeader_GetPointers(const VMCIQueueHeader *produceQHeader, // IN:
const VMCIQueueHeader *consumeQHeader, // IN:
uint64 *producerTail, // OUT:
uint64 *consumerHead) // OUT:
{
if (producerTail) {
*producerTail = VMCIQueueHeader_ProducerTail(produceQHeader);
}
if (consumerHead) {
*consumerHead = VMCIQueueHeader_ConsumerHead(consumeQHeader);
}
}
/*
*-----------------------------------------------------------------------------
*
* VMCIQueueHeader_ResetPointers --
*
* Reset the tail pointer (of "this" queue) and the head pointer (of
* "peer" queue).
*
* Results:
* None.
*
* Side effects:
* None.
*
*-----------------------------------------------------------------------------
*/
static INLINE void
VMCIQueueHeader_ResetPointers(VMCIQueueHeader *qHeader) // IN/OUT:
{
QPAtomic_WriteOffset(&qHeader->producerTail, CONST64U(0));
QPAtomic_WriteOffset(&qHeader->consumerHead, CONST64U(0));
}
/*
*-----------------------------------------------------------------------------
*
* VMCIQueueHeader_Init --
*
* Initializes a queue's state (head & tail pointers).
*
* Results:
* None.
*
* Side effects:
* None.
*
*-----------------------------------------------------------------------------
*/
static INLINE void
VMCIQueueHeader_Init(VMCIQueueHeader *qHeader, // IN/OUT:
const VMCIHandle handle) // IN:
{
qHeader->handle = handle;
VMCIQueueHeader_ResetPointers(qHeader);
}
/*
*-----------------------------------------------------------------------------
*
* VMCIQueueHeader_FreeSpace --
*
* Finds available free space in a produce queue to enqueue more
* data or reports an error if queue pair corruption is detected.
*
* Results:
* Free space size in bytes or an error code.
*
* Side effects:
* None.
*
*-----------------------------------------------------------------------------
*/
static INLINE int64
VMCIQueueHeader_FreeSpace(const VMCIQueueHeader *produceQHeader, // IN:
const VMCIQueueHeader *consumeQHeader, // IN:
const uint64 produceQSize) // IN:
{
uint64 tail;
uint64 head;
uint64 freeSpace;
tail = VMCIQueueHeader_ProducerTail(produceQHeader);
head = VMCIQueueHeader_ConsumerHead(consumeQHeader);
if (tail >= produceQSize || head >= produceQSize) {
return VMCI_ERROR_INVALID_SIZE;
}
/*
* Deduct 1 to avoid tail becoming equal to head which causes ambiguity. If
* head and tail are equal it means that the queue is empty.
*/
if (tail >= head) {
freeSpace = produceQSize - (tail - head) - 1;
} else {
freeSpace = head - tail - 1;
}
return freeSpace;
}
/*
*-----------------------------------------------------------------------------
*
* VMCIQueueHeader_BufReady --
*
 * VMCIQueueHeader_FreeSpace() does all the heavy lifting of
 * determining the number of free bytes in a Queue. This routine
 * then subtracts that size from the full size of the Queue so
 * the caller knows how many bytes are ready to be dequeued.
*
* Results:
* On success, available data size in bytes (up to MAX_INT64).
* On failure, appropriate error code.
*
* Side effects:
* None.
*
*-----------------------------------------------------------------------------
*/
static INLINE int64
VMCIQueueHeader_BufReady(const VMCIQueueHeader *consumeQHeader, // IN:
const VMCIQueueHeader *produceQHeader, // IN:
const uint64 consumeQSize) // IN:
{
int64 freeSpace;
freeSpace = VMCIQueueHeader_FreeSpace(consumeQHeader,
produceQHeader,
consumeQSize);
if (freeSpace < VMCI_SUCCESS) {
return freeSpace;
} else {
return consumeQSize - freeSpace - 1;
}
}
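/*
 * Worked example (illustrative): for a 16 byte produce queue with
 * producerTail == 10 and consumerHead == 4, 6 bytes are in flight, so
 * VMCIQueueHeader_FreeSpace() reports 16 - 6 - 1 == 9 free bytes, and the
 * consumer side's VMCIQueueHeader_BufReady() reports 16 - 9 - 1 == 6 bytes
 * ready to be dequeued.
 */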
/*
* Defines for the VMCI traffic filter:
* - VMCI_FA_<name> defines the filter action values
* - VMCI_FP_<name> defines the filter protocol values
* - VMCI_FD_<name> defines the direction values (guest or host)
* - VMCI_FT_<name> are the type values (allow or deny)
*/
#define VMCI_FA_INVALID -1
#define VMCI_FA_ALLOW 0
#define VMCI_FA_DENY (VMCI_FA_ALLOW + 1)
#define VMCI_FA_MAX (VMCI_FA_DENY + 1)
#define VMCI_FP_INVALID -1
#define VMCI_FP_HYPERVISOR 0
#define VMCI_FP_QUEUEPAIR (VMCI_FP_HYPERVISOR + 1)
#define VMCI_FP_DOORBELL (VMCI_FP_QUEUEPAIR + 1)
#define VMCI_FP_DATAGRAM (VMCI_FP_DOORBELL + 1)
#define VMCI_FP_STREAMSOCK (VMCI_FP_DATAGRAM + 1)
#define VMCI_FP_ANY (VMCI_FP_STREAMSOCK + 1)
#define VMCI_FP_MAX (VMCI_FP_ANY + 1)
#define VMCI_FD_INVALID -1
#define VMCI_FD_GUEST 0
#define VMCI_FD_HOST (VMCI_FD_GUEST + 1)
#define VMCI_FD_ANY (VMCI_FD_HOST + 1)
#define VMCI_FD_MAX (VMCI_FD_ANY + 1)
/*
* The filter list tracks VMCI Id ranges for a given filter.
*/
typedef struct {
uint32 len;
VMCIIdRange *list;
} VMCIFilterList;
/*
* The filter info is used to communicate the filter configuration
* from the VMX to the host kernel.
*/
typedef struct {
VA64 list; // List of VMCIIdRange
uint32 len; // Length of list
uint8 dir; // VMCI_FD_X
uint8 proto; // VMCI_FP_X
} VMCIFilterInfo;
/*
* In the host kernel, the ingoing and outgoing filters are
* separated. The VMCIProtoFilters type captures all filters in one
* direction. The VMCIFilters type captures all filters.
*/
typedef VMCIFilterList VMCIProtoFilters[VMCI_FP_MAX];
typedef VMCIProtoFilters VMCIFilters[VMCI_FD_MAX];
#endif // _VMCI_DEF_H_
vmmemctl-only/shared/compat_dcache.h 0000444 0000000 0000000 00000004511 12275350061 016550 0 ustar root root /*********************************************************
* Copyright (C) 2006-2013 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef __COMPAT_DCACHE_H__
# define __COMPAT_DCACHE_H__
#include <linux/dcache.h>
/*
* per-dentry locking was born in 2.5.62.
*/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 62)
#define compat_lock_dentry(dentry) spin_lock(&dentry->d_lock)
#define compat_unlock_dentry(dentry) spin_unlock(&dentry->d_lock)
#else
#define compat_lock_dentry(dentry) do {} while (0)
#define compat_unlock_dentry(dentry) do {} while (0)
#endif
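/*
 * Usage sketch (illustrative): bracket dentry state inspection with the
 * wrappers above; on pre-2.5.62 kernels they compile away to no-ops.
 *
 *    compat_lock_dentry(dentry);
 *    ... inspect or update dentry fields ...
 *    compat_unlock_dentry(dentry);
 */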
/*
* d_alloc_name was born in 2.6.10.
*/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 10)
#define compat_d_alloc_name(parent, s) d_alloc_name(parent, s)
#else
#define compat_d_alloc_name(parent, s) \
({ \
struct qstr q; \
q.name = s; \
q.len = strlen(s); \
q.hash = full_name_hash(q.name, q.len); \
d_alloc(parent, &q); \
})
#endif
/*
* d_count field was removed in 3.11.0.
*/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)
#define compat_d_count(dentry) d_count(dentry)
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 38)
#define compat_d_count(dentry) dentry->d_count
#else
#define compat_d_count(dentry) atomic_read(&dentry->d_count)
#endif
#endif /* __COMPAT_DCACHE_H__ */
vmmemctl-only/shared/vm_atomic.h 0000444 0000000 0000000 00000215200 12275350062 015754 0 ustar root root /*********************************************************
* Copyright (C) 1998 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* vm_atomic.h --
*
* Atomic power
*
* Note: Only partially tested on ARM processors: Works for View Open
* Client, which shouldn't have threads.
*
* In ARM, GCC intrinsics (__sync*) compile but might not
* work, while MS intrinsics (_Interlocked*) do not compile,
* and ARM has no equivalent to the "lock" instruction prior to
* ARMv6; the current ARM target is ARMv5. According to glibc
* documentation, ARMv5 cannot have atomic code in user space.
* Instead a Linux system call to kernel code referenced in
* entry-armv.S is used to achieve atomic functions. See bug
* 478054 for details.
*/
#ifndef _ATOMIC_H_
#define _ATOMIC_H_
//#define FAKE_ATOMIC /* defined if true atomic not needed */
#define INCLUDE_ALLOW_USERLEVEL
#define INCLUDE_ALLOW_MODULE
#define INCLUDE_ALLOW_VMMON
#define INCLUDE_ALLOW_VMKDRIVERS
#define INCLUDE_ALLOW_VMK_MODULE
#define INCLUDE_ALLOW_VMKERNEL
#define INCLUDE_ALLOW_DISTRIBUTE
#define INCLUDE_ALLOW_VMCORE
#define INCLUDE_ALLOW_VMIROM
#include "includeCheck.h"
#include "vm_basic_types.h"
/* Basic atomic type: 32 bits */
typedef struct Atomic_uint32 {
volatile uint32 value;
} Atomic_uint32;
/* Basic atomic type: 64 bits */
typedef struct Atomic_uint64 {
volatile uint64 value;
} Atomic_uint64 ALIGNED(8);
/*
* Prototypes for msft atomics. These are defined & inlined by the
* compiler so no function definition is needed. The prototypes are
 * needed for c++. Since the amd64 compiler doesn't support inline asm we
* have to use these. Unfortunately, we still have to use some inline asm
* for the 32 bit code since the and/or/xor implementations didn't show up
 * until xp or 2k3.
*
* The declarations for the intrinsic functions were taken from ntddk.h
 * in the DDK. The declarations must match, otherwise the 64-bit c++
* compiler will complain about second linkage of the intrinsic functions.
* We define the intrinsic using the basic types corresponding to the
* Windows typedefs. This avoids having to include windows header files
* to get to the windows types.
*/
#if defined(_MSC_VER) && _MSC_VER >= 1310
#ifdef __cplusplus
extern "C" {
#endif
long _InterlockedExchange(long volatile*, long);
long _InterlockedCompareExchange(long volatile*, long, long);
long _InterlockedExchangeAdd(long volatile*, long);
long _InterlockedDecrement(long volatile*);
long _InterlockedIncrement(long volatile*);
void _ReadWriteBarrier(void);
#pragma intrinsic(_InterlockedExchange, _InterlockedCompareExchange)
#pragma intrinsic(_InterlockedExchangeAdd, _InterlockedDecrement)
#pragma intrinsic(_InterlockedIncrement, _ReadWriteBarrier)
#if defined(VM_X86_64)
long _InterlockedAnd(long volatile*, long);
__int64 _InterlockedAnd64(__int64 volatile*, __int64);
long _InterlockedOr(long volatile*, long);
__int64 _InterlockedOr64(__int64 volatile*, __int64);
long _InterlockedXor(long volatile*, long);
__int64 _InterlockedXor64(__int64 volatile*, __int64);
__int64 _InterlockedExchangeAdd64(__int64 volatile*, __int64);
__int64 _InterlockedIncrement64(__int64 volatile*);
__int64 _InterlockedDecrement64(__int64 volatile*);
__int64 _InterlockedExchange64(__int64 volatile*, __int64);
__int64 _InterlockedCompareExchange64(__int64 volatile*, __int64, __int64);
#if !defined(_WIN64)
#pragma intrinsic(_InterlockedAnd, _InterlockedAnd64)
#pragma intrinsic(_InterlockedOr, _InterlockedOr64)
#pragma intrinsic(_InterlockedXor, _InterlockedXor64)
#pragma intrinsic(_InterlockedExchangeAdd64, _InterlockedIncrement64)
#pragma intrinsic(_InterlockedDecrement64, _InterlockedExchange64)
#pragma intrinsic(_InterlockedCompareExchange64)
#endif /* !_WIN64 */
#endif /* VM_X86_64 */
#ifdef __cplusplus
}
#endif
#endif /* _MSC_VER */
#if defined(__arm__) && !defined(FAKE_ATOMIC)
/*
* LDREX without STREX or CLREX may cause problems in environments where the
 * context switch may not clear the reference monitor - according to the ARM
 * manual the reference monitor should be cleared after a context switch, but
 * some environments (such as the Linux kernel's non-preemptive context switch
 * path) may not do so. So use of these ARM routines in kernel code may not be
 * safe.
*/
# if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || \
defined(__ARM_ARCH_7R__)|| defined(__ARM_ARCH_7M__)
# define VM_ARM_V7
# ifdef __KERNEL__
# warning LDREX/STREX may not be safe in linux kernel, since it \
does not issue CLREX on context switch (as of 2011-09-29).
# endif
# else
# error Only ARMv7 extends the synchronization primitives ldrex/strex. \
For lower ARM versions, please implement the atomic functions \
using kernel APIs.
# endif
#endif
/* Data Memory Barrier */
#ifdef VM_ARM_V7
#define dmb() __asm__ __volatile__("dmb" : : : "memory")
#endif
/* Convert a volatile uint32 to Atomic_uint32. */
static INLINE Atomic_uint32 *
Atomic_VolatileToAtomic(volatile uint32 *var)
{
return (Atomic_uint32 *)var;
}
/* Convert a volatile uint64 to Atomic_uint64. */
static INLINE Atomic_uint64 *
Atomic_VolatileToAtomic64(volatile uint64 *var)
{
return (Atomic_uint64 *)var;
}
/*
*-----------------------------------------------------------------------------
*
* Atomic_Init, Atomic_SetFence, AtomicUseFence --
*
 * Determine whether an lfence instruction is executed after
* every locked instruction.
*
* Certain AMD processors have a bug (see bug 107024) that
* requires an lfence after every locked instruction.
*
* The global variable AtomicUseFence controls whether lfence
* is used (see AtomicEpilogue).
*
* Atomic_SetFence sets AtomicUseFence to the given value.
*
* Atomic_Init computes and sets AtomicUseFence for x86.
* It does not take into account the number of processors.
*
* The rationale for all this complexity is that Atomic_Init
* is the easy-to-use interface. It can be called a number
* of times cheaply, and does not depend on other libraries.
* However, because the number of CPUs is difficult to compute,
* it does without it and always assumes there are more than one.
*
* For programs that care or have special requirements,
* Atomic_SetFence can be called directly, in addition to Atomic_Init.
* It overrides the effect of Atomic_Init, and can be called
* before, after, or between calls to Atomic_Init.
*
*-----------------------------------------------------------------------------
*/
// The freebsd assembler doesn't know the lfence instruction
#if defined(__GNUC__) && \
__GNUC__ >= 3 && \
(defined(__VMKERNEL__) || !defined(__FreeBSD__)) && \
(!defined(MODULE) || defined(__VMKERNEL_MODULE__)) && \
!defined(__APPLE__) && \
(defined(__i386__) || defined(__x86_64__)) /* PR136775 */
#define ATOMIC_USE_FENCE
#endif
#if defined(VMATOMIC_IMPORT_DLLDATA)
VMX86_EXTERN_DATA Bool AtomicUseFence;
#else
EXTERN Bool AtomicUseFence;
#endif
EXTERN Bool atomicFenceInitialized;
void AtomicInitFence(void);
static INLINE void
Atomic_Init(void)
{
#ifdef ATOMIC_USE_FENCE
if (!atomicFenceInitialized) {
AtomicInitFence();
}
#endif
}
static INLINE void
Atomic_SetFence(Bool fenceAfterLock) /* IN: TRUE to enable lfence */
/* FALSE to disable. */
{
AtomicUseFence = fenceAfterLock;
#if defined(__VMKERNEL__)
extern void Atomic_SetFenceVMKAPI(Bool fenceAfterLock);
Atomic_SetFenceVMKAPI(fenceAfterLock);
#endif
atomicFenceInitialized = TRUE;
}
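/*
 * Usage sketch (illustrative): most callers just invoke Atomic_Init() at
 * startup; a program that knows its environment could instead force the
 * fence setting explicitly.
 *
 *    Atomic_Init();            // compute AtomicUseFence automatically
 *    // or, explicitly:
 *    Atomic_SetFence(FALSE);   // caller knows no lfence is needed
 */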
/* Conditionally execute fence after interlocked instruction. */
static INLINE void
AtomicEpilogue(void)
{
#ifdef ATOMIC_USE_FENCE
#ifdef VMM
/* The monitor conditionally patches out the lfence when not needed.*/
/* Construct a MonitorPatchTextEntry in the .patchtext section. */
asm volatile ("1:\n\t"
"lfence\n\t"
"2:\n\t"
".pushsection .patchtext\n\t"
".quad 1b\n\t"
".quad 2b\n\t"
".popsection\n\t" ::: "memory");
#else
if (UNLIKELY(AtomicUseFence)) {
asm volatile ("lfence" ::: "memory");
}
#endif
#endif
}
/*
* All the assembly code is tricky and written conservatively.
* For example, to make sure gcc won't introduce copies,
* we force the addressing mode like this:
*
* "xchgl %0, (%1)"
* : "=r" (val)
* : "r" (&var->value),
* "0" (val)
* : "memory"
*
* - edward
*
* Actually - turns out that gcc never generates memory aliases (it
* still does generate register aliases though), so we can be a bit
 * more aggressive with the memory constraints. The code above can be
* modified like this:
*
* "xchgl %0, %1"
 * : "=r" (val),
 *   "=m" (var->value)
* : "0" (val),
* "1" (var->value)
*
* The advantages are that gcc can use whatever addressing mode it
* likes to access the memory value, and that we dont have to use a
* way-too-generic "memory" clobber as there is now an explicit
* declaration that var->value is modified.
*
* see also /usr/include/asm/atomic.h to convince yourself this is a
* valid optimization.
*
* - walken
*/
/*
*-----------------------------------------------------------------------------
*
* Atomic_Read --
*
* Read
*
* Results:
* The value of the atomic variable.
*
* Side effects:
* None.
*
*-----------------------------------------------------------------------------
*/
static INLINE uint32
Atomic_Read(Atomic_uint32 const *var) // IN
{
return var->value;
}
#define Atomic_Read32 Atomic_Read
/*
*-----------------------------------------------------------------------------
*
* Atomic_Write --
*
* Write
*
* Results:
* None.
*
* Side effects:
* None.
*
*-----------------------------------------------------------------------------
*/
static INLINE void
Atomic_Write(Atomic_uint32 *var, // IN
uint32 val) // IN
{
var->value = val;
}
#define Atomic_Write32 Atomic_Write
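/*
 * Usage sketch (illustrative only; 'ready' is a hypothetical flag):
 * Atomic_Read and Atomic_Write compile to plain (untorn) loads and stores,
 * so they make the single access atomic but impose no ordering on
 * surrounding accesses.
 *
 *    static Atomic_uint32 ready;
 *
 *    // Consumer: poll until a producer does Atomic_Write(&ready, 1).
 *    while (Atomic_Read(&ready) == 0) {
 *       // Spin.
 *    }
 */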
/*
*-----------------------------------------------------------------------------
*
* Atomic_ReadWrite --
*
* Read followed by write
*
* Results:
* The value of the atomic variable before the write.
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
static INLINE uint32
Atomic_ReadWrite(Atomic_uint32 *var, // IN
uint32 val) // IN
{
#ifdef FAKE_ATOMIC
uint32 retval = var->value;
var->value = val;
return retval;
#elif defined(__GNUC__)
#ifdef VM_ARM_V7
register volatile uint32 retVal;
register volatile uint32 res;
dmb();
__asm__ __volatile__(
"1: ldrex %[retVal], [%[var]] \n\t"
"strex %[res], %[val], [%[var]] \n\t"
"teq %[res], #0 \n\t"
"bne 1b"
: [retVal] "=&r" (retVal), [res] "=&r" (res)
: [var] "r" (&var->value), [val] "r" (val)
: "cc"
);
dmb();
return retVal;
#else // VM_ARM_V7 (assume x86*)
/* Checked against the Intel manual and GCC --walken */
__asm__ __volatile__(
"xchgl %0, %1"
: "=r" (val),
"+m" (var->value)
: "0" (val)
);
AtomicEpilogue();
return val;
#endif // VM_ARM_V7
#elif defined _MSC_VER
#if _MSC_VER >= 1310
return _InterlockedExchange((long *)&var->value, (long)val);
#else
#pragma warning(push)
#pragma warning(disable : 4035) // disable no-return warning
{
__asm mov eax, val
__asm mov ebx, var
__asm xchg [ebx]Atomic_uint32.value, eax
// eax is the return value, this is documented to work - edward
}
#pragma warning(pop)
#endif // _MSC_VER >= 1310
#else
#error No compiler defined for Atomic_ReadWrite
#endif // __GNUC__
}
#define Atomic_ReadWrite32 Atomic_ReadWrite
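/*
 * Usage sketch (illustrative only; TASLock, TAS_Lock and TAS_Unlock are
 * hypothetical names): a minimal test-and-set spinlock built on the xchg
 * above.
 *
 *    typedef struct TASLock {
 *       Atomic_uint32 held;
 *    } TASLock;
 *
 *    static INLINE void
 *    TAS_Lock(TASLock *lock)
 *    {
 *       // xchg returns the previous value; 0 means we took the lock.
 *       while (Atomic_ReadWrite(&lock->held, 1) != 0) {
 *          // Spin. A production lock would also pause and back off.
 *       }
 *    }
 *
 *    static INLINE void
 *    TAS_Unlock(TASLock *lock)
 *    {
 *       Atomic_Write(&lock->held, 0);
 *    }
 */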
/*
*-----------------------------------------------------------------------------
*
* Atomic_ReadIfEqualWrite --
*
* Compare exchange: Read variable, if equal to oldVal, write newVal
*
* Results:
* The value of the atomic variable before the write.
*
* Side effects:
* The variable may be modified.
*
*-----------------------------------------------------------------------------
*/
static INLINE uint32
Atomic_ReadIfEqualWrite(Atomic_uint32 *var, // IN
uint32 oldVal, // IN
uint32 newVal) // IN
{
#ifdef FAKE_ATOMIC
uint32 readVal = var->value;
if (oldVal == readVal) {
var->value = newVal;
}
return readVal; // Return the value read before the (possible) write.
#elif defined(__GNUC__)
#ifdef VM_ARM_V7
register uint32 retVal;
register uint32 res;
dmb();
__asm__ __volatile__(
"1: ldrex %[retVal], [%[var]] \n\t"
"mov %[res], #0 \n\t"
"teq %[retVal], %[oldVal] \n\t"
"strexeq %[res], %[newVal], [%[var]] \n\t"
"teq %[res], #0 \n\t"
"bne 1b"
: [retVal] "=&r" (retVal), [res] "=&r" (res)
: [var] "r" (&var->value), [oldVal] "r" (oldVal), [newVal] "r" (newVal)
: "cc"
);
dmb();
return retVal;
#else // VM_ARM_V7 (assume x86*)
uint32 val;
/* Checked against the Intel manual and GCC --walken */
__asm__ __volatile__(
"lock; cmpxchgl %2, %1"
: "=a" (val),
"+m" (var->value)
: "r" (newVal),
"0" (oldVal)
: "cc"
);
AtomicEpilogue();
return val;
#endif // VM_ARM_V7
#elif defined _MSC_VER
#if _MSC_VER >= 1310
return _InterlockedCompareExchange((long *)&var->value,
(long)newVal,
(long)oldVal);
#else
#pragma warning(push)
#pragma warning(disable : 4035) // disable no-return warning
{
__asm mov eax, oldVal
__asm mov ebx, var
__asm mov ecx, newVal
__asm lock cmpxchg [ebx]Atomic_uint32.value, ecx
// eax is the return value, this is documented to work - edward
}
#pragma warning(pop)
#endif
#else
#error No compiler defined for Atomic_ReadIfEqualWrite
#endif
}
#define Atomic_ReadIfEqualWrite32 Atomic_ReadIfEqualWrite
#if defined(__x86_64__) || defined(VM_ARM_V7)
/*
*-----------------------------------------------------------------------------
*
* Atomic_ReadIfEqualWrite64 --
*
* Compare exchange: Read variable, if equal to oldVal, write newVal
*
* Results:
* The value of the atomic variable before the write.
*
* Side effects:
* The variable may be modified.
*
*-----------------------------------------------------------------------------
*/
static INLINE uint64
Atomic_ReadIfEqualWrite64(Atomic_uint64 *var, // IN
uint64 oldVal, // IN
uint64 newVal) // IN
{
#if defined(__GNUC__)
#ifdef VM_ARM_V7
register uint64 retVal;
register uint32 res;
dmb();
__asm__ __volatile__(
"1: ldrexd %[retVal], %H[retVal], [%[var]] \n\t"
"mov %[res], #0 \n\t"
"teq %[retVal], %[oldVal] \n\t"
"teqeq %H[retVal], %H[oldVal] \n\t"
"strexdeq %[res], %[newVal], %H[newVal], [%[var]] \n\t"
"teq %[res], #0 \n\t"
"bne 1b"
: [retVal] "=&r" (retVal), [res] "=&r" (res)
: [var] "r" (&var->value), [oldVal] "r" (oldVal), [newVal] "r" (newVal)
: "cc"
);
dmb();
return retVal;
#else // VM_ARM_V7 (assume x86*)
uint64 val;
/* Checked against the AMD manual and GCC --hpreg */
__asm__ __volatile__(
"lock; cmpxchgq %2, %1"
: "=a" (val),
"+m" (var->value)
: "r" (newVal),
"0" (oldVal)
: "cc"
);
AtomicEpilogue();
return val;
#endif //VM_ARM_V7
#elif defined _MSC_VER
return _InterlockedCompareExchange64((__int64 *)&var->value,
(__int64)newVal,
(__int64)oldVal);
#else
#error No compiler defined for Atomic_ReadIfEqualWrite64
#endif
}
#endif
/*
*-----------------------------------------------------------------------------
*
* Atomic_And --
*
* Atomic read, bitwise AND with a value, write.
*
* Results:
* None
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
static INLINE void
Atomic_And(Atomic_uint32 *var, // IN
uint32 val) // IN
{
#ifdef FAKE_ATOMIC
var->value &= val;
#elif defined(__GNUC__)
#ifdef VM_ARM_V7
register volatile uint32 res;
register volatile uint32 tmp;
dmb();
__asm__ __volatile__(
"1: ldrex %[tmp], [%[var]] \n\t"
"and %[tmp], %[tmp], %[val] \n\t"
"strex %[res], %[tmp], [%[var]] \n\t"
"teq %[res], #0 \n\t"
"bne 1b"
: [res] "=&r" (res), [tmp] "=&r" (tmp)
: [var] "r" (&var->value), [val] "r" (val)
: "cc"
);
dmb();
#else /* VM_ARM_V7 */
/* Checked against the Intel manual and GCC --walken */
__asm__ __volatile__(
"lock; andl %1, %0"
: "+m" (var->value)
: "ri" (val)
: "cc"
);
AtomicEpilogue();
#endif // VM_ARM_V7
#elif defined _MSC_VER
#if defined(__x86_64__)
_InterlockedAnd((long *)&var->value, (long)val);
#else
__asm mov eax, val
__asm mov ebx, var
__asm lock and [ebx]Atomic_uint32.value, eax
#endif
#else
#error No compiler defined for Atomic_And
#endif
}
#define Atomic_And32 Atomic_And
/*
*-----------------------------------------------------------------------------
*
* Atomic_Or --
*
* Atomic read, bitwise OR with a value, write.
*
* Results:
* None
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
static INLINE void
Atomic_Or(Atomic_uint32 *var, // IN
uint32 val) // IN
{
#ifdef FAKE_ATOMIC
var->value |= val;
#elif defined(__GNUC__)
#ifdef VM_ARM_V7
register volatile uint32 res;
register volatile uint32 tmp;
dmb();
__asm__ __volatile__(
"1: ldrex %[tmp], [%[var]] \n\t"
"orr %[tmp], %[tmp], %[val] \n\t"
"strex %[res], %[tmp], [%[var]] \n\t"
"teq %[res], #0 \n\t"
"bne 1b"
: [res] "=&r" (res), [tmp] "=&r" (tmp)
: [var] "r" (&var->value), [val] "r" (val)
: "cc"
);
dmb();
#else // VM_ARM_V7
/* Checked against the Intel manual and GCC --walken */
__asm__ __volatile__(
"lock; orl %1, %0"
: "+m" (var->value)
: "ri" (val)
: "cc"
);
AtomicEpilogue();
#endif // VM_ARM_V7
#elif defined _MSC_VER
#if defined(__x86_64__)
_InterlockedOr((long *)&var->value, (long)val);
#else
__asm mov eax, val
__asm mov ebx, var
__asm lock or [ebx]Atomic_uint32.value, eax
#endif
#else
#error No compiler defined for Atomic_Or
#endif
}
#define Atomic_Or32 Atomic_Or
/*
*-----------------------------------------------------------------------------
*
* Atomic_Xor --
*
* Atomic read, bitwise XOR with a value, write.
*
* Results:
* None
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
static INLINE void
Atomic_Xor(Atomic_uint32 *var, // IN
uint32 val) // IN
{
#ifdef FAKE_ATOMIC
var->value ^= val;
#elif defined(__GNUC__)
#ifdef VM_ARM_V7
register volatile uint32 res;
register volatile uint32 tmp;
dmb();
__asm__ __volatile__(
"1: ldrex %[tmp], [%[var]] \n\t"
"eor %[tmp], %[tmp], %[val] \n\t"
"strex %[res], %[tmp], [%[var]] \n\t"
"teq %[res], #0 \n\t"
"bne 1b"
: [res] "=&r" (res), [tmp] "=&r" (tmp)
: [var] "r" (&var->value), [val] "r" (val)
: "cc"
);
dmb();
#else // VM_ARM_V7
/* Checked against the Intel manual and GCC --walken */
__asm__ __volatile__(
"lock; xorl %1, %0"
: "+m" (var->value)
: "ri" (val)
: "cc"
);
AtomicEpilogue();
#endif // VM_ARM_V7
#elif defined _MSC_VER
#if defined(__x86_64__)
_InterlockedXor((long *)&var->value, (long)val);
#else
__asm mov eax, val
__asm mov ebx, var
__asm lock xor [ebx]Atomic_uint32.value, eax
#endif
#else
#error No compiler defined for Atomic_Xor
#endif
}
#define Atomic_Xor32 Atomic_Xor
#if defined(__x86_64__)
/*
*-----------------------------------------------------------------------------
*
* Atomic_Xor64 --
*
* Atomic read, bitwise XOR with a value, write.
*
* Results:
* None
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
static INLINE void
Atomic_Xor64(Atomic_uint64 *var, // IN
uint64 val) // IN
{
#if defined(__GNUC__)
/* Checked against the AMD manual and GCC --hpreg */
__asm__ __volatile__(
"lock; xorq %1, %0"
: "+m" (var->value)
: "ri" (val)
: "cc"
);
AtomicEpilogue();
#elif defined _MSC_VER
_InterlockedXor64((__int64 *)&var->value, (__int64)val);
#else
#error No compiler defined for Atomic_Xor64
#endif
}
#endif
/*
*-----------------------------------------------------------------------------
*
* Atomic_Add --
*
* Atomic read, add a value, write.
*
* Results:
* None
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
static INLINE void
Atomic_Add(Atomic_uint32 *var, // IN
uint32 val) // IN
{
#ifdef FAKE_ATOMIC
var->value += val;
#elif defined(__GNUC__)
#ifdef VM_ARM_V7
register volatile uint32 res;
register volatile uint32 tmp;
dmb();
__asm__ __volatile__(
"1: ldrex %[tmp], [%[var]] \n\t"
"add %[tmp], %[tmp], %[val] \n\t"
"strex %[res], %[tmp], [%[var]] \n\t"
"teq %[res], #0 \n\t"
"bne 1b"
: [res] "=&r" (res), [tmp] "=&r" (tmp)
: [var] "r" (&var->value), [val] "r" (val)
: "cc"
);
dmb();
#else // VM_ARM_V7
/* Checked against the Intel manual and GCC --walken */
__asm__ __volatile__(
"lock; addl %1, %0"
: "+m" (var->value)
: "ri" (val)
: "cc"
);
AtomicEpilogue();
#endif // VM_ARM_V7
#elif defined _MSC_VER
#if _MSC_VER >= 1310
_InterlockedExchangeAdd((long *)&var->value, (long)val);
#else
__asm mov eax, val
__asm mov ebx, var
__asm lock add [ebx]Atomic_uint32.value, eax
#endif
#else
#error No compiler defined for Atomic_Add
#endif
}
#define Atomic_Add32 Atomic_Add
#if defined(__x86_64__)
/*
*-----------------------------------------------------------------------------
*
* Atomic_Add64 --
*
* Atomic read, add a value, write.
*
* Results:
* None
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
static INLINE void
Atomic_Add64(Atomic_uint64 *var, // IN
uint64 val) // IN
{
#if defined(__GNUC__)
/* Checked against the AMD manual and GCC --hpreg */
__asm__ __volatile__(
"lock; addq %1, %0"
: "+m" (var->value)
: "ri" (val)
: "cc"
);
AtomicEpilogue();
#elif defined _MSC_VER
_InterlockedExchangeAdd64((__int64 *)&var->value, (__int64)val);
#else
#error No compiler defined for Atomic_Add64
#endif
}
#endif
/*
*-----------------------------------------------------------------------------
*
* Atomic_Sub --
*
* Atomic read, subtract a value, write.
*
* Results:
* None
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
static INLINE void
Atomic_Sub(Atomic_uint32 *var, // IN
uint32 val) // IN
{
#ifdef FAKE_ATOMIC
var->value -= val;
#elif defined(__GNUC__)
#ifdef VM_ARM_V7
register volatile uint32 res;
register volatile uint32 tmp;
dmb();
__asm__ __volatile__(
"1: ldrex %[tmp], [%[var]] \n\t"
"sub %[tmp], %[tmp], %[val] \n\t"
"strex %[res], %[tmp], [%[var]] \n\t"
"teq %[res], #0 \n\t"
"bne 1b"
: [res] "=&r" (res), [tmp] "=&r" (tmp)
: [var] "r" (&var->value), [val] "r" (val)
: "cc"
);
dmb();
#else // VM_ARM_V7
/* Checked against the Intel manual and GCC --walken */
__asm__ __volatile__(
"lock; subl %1, %0"
: "+m" (var->value)
: "ri" (val)
: "cc"
);
AtomicEpilogue();
#endif // VM_ARM_V7
#elif defined _MSC_VER
#if _MSC_VER >= 1310
_InterlockedExchangeAdd((long *)&var->value, (long)-val);
#else
__asm mov eax, val
__asm mov ebx, var
__asm lock sub [ebx]Atomic_uint32.value, eax
#endif
#else
#error No compiler defined for Atomic_Sub
#endif
}
#define Atomic_Sub32 Atomic_Sub
#if defined(__x86_64__)
/*
*-----------------------------------------------------------------------------
*
* Atomic_Sub64 --
*
* Atomic read, subtract a value, write.
*
* Results:
* None
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
static INLINE void
Atomic_Sub64(Atomic_uint64 *var, // IN
uint64 val) // IN
{
#ifdef __GNUC__
/* Checked against the AMD manual and GCC --hpreg */
__asm__ __volatile__(
"lock; subq %1, %0"
: "+m" (var->value)
: "ri" (val)
: "cc"
);
AtomicEpilogue();
#elif defined _MSC_VER
_InterlockedExchangeAdd64((__int64 *)&var->value, (__int64)-val);
#else
#error No compiler defined for Atomic_Sub64
#endif
}
#endif
/*
*-----------------------------------------------------------------------------
*
* Atomic_Inc --
*
* Atomic read, increment, write.
*
* Results:
* None
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
static INLINE void
Atomic_Inc(Atomic_uint32 *var) // IN
{
#ifdef __GNUC__
#if defined(VM_ARM_V7) || defined(FAKE_ATOMIC)
Atomic_Add(var, 1);
#else // VM_ARM_V7
/* Checked against the Intel manual and GCC --walken */
__asm__ __volatile__(
"lock; incl %0"
: "+m" (var->value)
:
: "cc"
);
AtomicEpilogue();
#endif // VM_ARM_V7
#elif defined _MSC_VER
#if _MSC_VER >= 1310
_InterlockedIncrement((long *)&var->value);
#else
__asm mov ebx, var
__asm lock inc [ebx]Atomic_uint32.value
#endif
#else
#error No compiler defined for Atomic_Inc
#endif
}
#define Atomic_Inc32 Atomic_Inc
/*
*-----------------------------------------------------------------------------
*
* Atomic_Dec --
*
* Atomic read, decrement, write.
*
* Results:
* None
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
static INLINE void
Atomic_Dec(Atomic_uint32 *var) // IN
{
#ifdef __GNUC__
#if defined(VM_ARM_V7) || defined(FAKE_ATOMIC)
Atomic_Sub(var, 1);
#else // VM_ARM_V7
/* Checked against the Intel manual and GCC --walken */
__asm__ __volatile__(
"lock; decl %0"
: "+m" (var->value)
:
: "cc"
);
AtomicEpilogue();
#endif // VM_ARM_V7
#elif defined _MSC_VER
#if _MSC_VER >= 1310
_InterlockedDecrement((long *)&var->value);
#else
__asm mov ebx, var
__asm lock dec [ebx]Atomic_uint32.value
#endif
#else
#error No compiler defined for Atomic_Dec
#endif
}
#define Atomic_Dec32 Atomic_Dec
/*
* Note that the technique below can be used to implement ReadX(), where X is
* an arbitrary mathematical function.
*/
/*
*-----------------------------------------------------------------------------
*
* Atomic_FetchAndOr --
*
* Atomic read (returned), bitwise OR with a value, write.
*
* Results:
* The value of the variable before the operation.
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
static INLINE uint32
Atomic_FetchAndOr(Atomic_uint32 *var, // IN
uint32 val) // IN
{
uint32 res;
do {
res = Atomic_Read(var);
} while (res != Atomic_ReadIfEqualWrite(var, res, res | val));
return res;
}
/*
*-----------------------------------------------------------------------------
*
* Atomic_FetchAndAnd --
*
 *      Atomic read (returned), bitwise AND with a value, write.
*
* Results:
* The value of the variable before the operation.
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
static INLINE uint32
Atomic_FetchAndAnd(Atomic_uint32 *var, // IN
uint32 val) // IN
{
uint32 res;
do {
res = Atomic_Read(var);
} while (res != Atomic_ReadIfEqualWrite(var, res, res & val));
return res;
}
#define Atomic_ReadOr32 Atomic_FetchAndOr
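/*
 * As the note above says, the same loop implements ReadX() for an arbitrary
 * function X. A sketch (Atomic_ReadMax32 is a hypothetical name, not part
 * of this header) that atomically computes max(*var, val) and returns the
 * old value:
 *
 *    static INLINE uint32
 *    Atomic_ReadMax32(Atomic_uint32 *var, // IN
 *                     uint32 val)         // IN
 *    {
 *       uint32 res;
 *
 *       do {
 *          res = Atomic_Read(var);
 *       } while (res < val &&
 *                res != Atomic_ReadIfEqualWrite(var, res, val));
 *
 *       return res;
 *    }
 */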
#if defined(__x86_64__)
/*
*-----------------------------------------------------------------------------
*
* Atomic_ReadOr64 --
*
* Atomic read (returned), bitwise OR with a value, write.
*
* Results:
* The value of the variable before the operation.
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
static INLINE uint64
Atomic_ReadOr64(Atomic_uint64 *var, // IN
uint64 val) // IN
{
uint64 res;
do {
res = var->value;
} while (res != Atomic_ReadIfEqualWrite64(var, res, res | val));
return res;
}
/*
*-----------------------------------------------------------------------------
*
* Atomic_ReadAnd64 --
*
* Atomic read (returned), bitwise AND with a value, write.
*
* Results:
* The value of the variable before the operation.
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
static INLINE uint64
Atomic_ReadAnd64(Atomic_uint64 *var, // IN
uint64 val) // IN
{
uint64 res;
do {
res = var->value;
} while (res != Atomic_ReadIfEqualWrite64(var, res, res & val));
return res;
}
#endif // __x86_64__
/*
*-----------------------------------------------------------------------------
*
* Atomic_FetchAndAddUnfenced --
*
* Atomic read (returned), add a value, write.
*
* If you have to implement FetchAndAdd() on an architecture other than
* x86 or x86-64, you might want to consider doing something similar to
* Atomic_FetchAndOr().
*
* The "Unfenced" version of Atomic_FetchAndInc never executes
* "lfence" after the interlocked operation.
*
* Results:
* The value of the variable before the operation.
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
static INLINE uint32
Atomic_FetchAndAddUnfenced(Atomic_uint32 *var, // IN
uint32 val) // IN
{
#ifdef FAKE_ATOMIC
uint32 res = var->value;
var->value = res + val;
return res;
#elif defined(__GNUC__)
#ifdef VM_ARM_V7
register volatile uint32 res;
register volatile uint32 retVal;
register volatile uint32 tmp;
dmb();
__asm__ __volatile__(
"1: ldrex %[retVal], [%[var]] \n\t"
"add %[tmp], %[val], %[retVal] \n\t"
"strex %[res], %[tmp], [%[var]] \n\t"
"teq %[res], #0 \n\t"
"bne 1b"
: [tmp] "=&r" (tmp), [res] "=&r" (res), [retVal] "=&r" (retVal)
: [var] "r" (&var->value), [val] "r" (val)
: "cc"
);
dmb();
return retVal;
#else // VM_ARM_V7
/* Checked against the Intel manual and GCC --walken */
__asm__ __volatile__(
"lock; xaddl %0, %1"
: "=r" (val),
"+m" (var->value)
: "0" (val)
: "cc"
);
return val;
#endif // VM_ARM_V7
#elif defined _MSC_VER
#if _MSC_VER >= 1310
return _InterlockedExchangeAdd((long *)&var->value, (long)val);
#else
#pragma warning(push)
#pragma warning(disable : 4035) // disable no-return warning
{
__asm mov eax, val
__asm mov ebx, var
__asm lock xadd [ebx]Atomic_uint32.value, eax
}
#pragma warning(pop)
#endif
#else
#error No compiler defined for Atomic_FetchAndAdd
#endif
}
#define Atomic_ReadAdd32 Atomic_FetchAndAdd
/*
*-----------------------------------------------------------------------------
*
* Atomic_FetchAndAdd --
*
* Atomic read (returned), add a value, write.
*
* If you have to implement FetchAndAdd() on an architecture other than
* x86 or x86-64, you might want to consider doing something similar to
* Atomic_FetchAndOr().
*
* Unlike "Unfenced" version, this one may execute the "lfence" after
* interlocked operation.
*
* Results:
* The value of the variable before the operation.
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
static INLINE uint32
Atomic_FetchAndAdd(Atomic_uint32 *var, // IN
uint32 val) // IN
{
#if defined(__GNUC__) && !defined(VM_ARM_V7) && !defined(FAKE_ATOMIC)
val = Atomic_FetchAndAddUnfenced(var, val);
AtomicEpilogue();
return val;
#else
return Atomic_FetchAndAddUnfenced(var, val);
#endif
}
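/*
 * Usage sketch (illustrative only; statCount and StatBump are hypothetical):
 * the unfenced variant fits cases where only the atomicity of the update
 * matters, not its ordering against surrounding accesses, e.g. a statistics
 * counter.
 *
 *    static Atomic_uint32 statCount;
 *
 *    static INLINE void
 *    StatBump(void)
 *    {
 *       // No ordering requirement, so skip the (conditional) lfence.
 *       Atomic_FetchAndAddUnfenced(&statCount, 1);
 *    }
 */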
#if defined(__x86_64__)
/*
*-----------------------------------------------------------------------------
*
* Atomic_ReadAdd64 --
*
* Atomic read (returned), add a value, write.
*
* Results:
* The value of the variable before the operation.
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
static INLINE uint64
Atomic_ReadAdd64(Atomic_uint64 *var, // IN
uint64 val) // IN
{
#if defined(__GNUC__)
/* Checked against the AMD manual and GCC --hpreg */
__asm__ __volatile__(
"lock; xaddq %0, %1"
: "=r" (val),
"+m" (var->value)
: "0" (val)
: "cc"
);
AtomicEpilogue();
return val;
#elif defined _MSC_VER
return _InterlockedExchangeAdd64((__int64 *)&var->value, (__int64)val);
#else
#error No compiler defined for Atomic_ReadAdd64
#endif
}
/*
*-----------------------------------------------------------------------------
*
* Atomic_ReadSub64 --
*
* Atomic read (returned), sub a value, write.
*
* Results:
* The value of the variable before the operation.
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
static INLINE uint64
Atomic_ReadSub64(Atomic_uint64 *var, // IN
uint64 val) // IN
{
// Implement subtraction as addition of the negated value; unsigned
// wraparound makes this exact.
return Atomic_ReadAdd64(var, -val);
}
#endif
/*
*-----------------------------------------------------------------------------
*
* Atomic_FetchAndInc --
*
* Atomic read (returned), increment, write.
*
* Results:
* The value of the variable before the operation.
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
static INLINE uint32
Atomic_FetchAndInc(Atomic_uint32 *var) // IN
{
return Atomic_FetchAndAdd(var, 1);
}
#define Atomic_ReadInc32 Atomic_FetchAndInc
#if defined(__x86_64__)
/*
*-----------------------------------------------------------------------------
*
* Atomic_ReadInc64 --
*
* Atomic read (returned), increment, write.
*
* Results:
* The value of the variable before the operation.
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
static INLINE uint64
Atomic_ReadInc64(Atomic_uint64 *var) // IN
{
return Atomic_ReadAdd64(var, 1);
}
#endif
/*
*-----------------------------------------------------------------------------
*
* Atomic_FetchAndDec --
*
* Atomic read (returned), decrement, write.
*
* Results:
* The value of the variable before the operation.
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
static INLINE uint32
Atomic_FetchAndDec(Atomic_uint32 *var) // IN
{
return Atomic_FetchAndAdd(var, (uint32)-1);
}
#define Atomic_ReadDec32 Atomic_FetchAndDec
#if defined(__x86_64__)
/*
*-----------------------------------------------------------------------------
*
* Atomic_ReadDec64 --
*
* Atomic read (returned), decrement, write.
*
* Results:
* The value of the variable before the operation.
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
static INLINE uint64
Atomic_ReadDec64(Atomic_uint64 *var) // IN
{
return Atomic_ReadAdd64(var, CONST64U(-1));
}
#endif
#ifdef VMKERNEL
/*
*-----------------------------------------------------------------------------
*
* CMPXCHG1B --
*
* Compare and exchange a single byte.
*
* Results:
* The value read from ptr.
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
static INLINE uint8
CMPXCHG1B(volatile uint8 *ptr, // IN
uint8 oldVal, // IN
uint8 newVal) // IN
{
uint8 val;
__asm__ __volatile__("lock; cmpxchgb %b2, %1"
: "=a" (val),
"+m" (*ptr)
: "r" (newVal),
"0" (oldVal)
: "cc");
return val;
}
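/*
 * Usage sketch (illustrative only; TryStart and the state values are
 * hypothetical): advancing a one-byte state machine, failing gracefully if
 * another CPU got there first.
 *
 *    #define STATE_IDLE    0
 *    #define STATE_RUNNING 1
 *
 *    static INLINE Bool
 *    TryStart(volatile uint8 *state)
 *    {
 *       return CMPXCHG1B(state, STATE_IDLE, STATE_RUNNING) == STATE_IDLE;
 *    }
 */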
#endif
/*
* Usage of this helper struct is strictly reserved to the following
* function. --hpreg
*/
typedef struct {
uint32 lowValue;
uint32 highValue;
} S_uint64;
/*
*-----------------------------------------------------------------------------
*
* Atomic_CMPXCHG64 --
*
* Compare exchange: Read variable, if equal to oldVal, write newVal
*
* XXX: Ensure that if this function is to be inlined by gcc, it is
* compiled with -fno-strict-aliasing. Otherwise it will break.
* Unfortunately we know that gcc 2.95.3 (used to build the FreeBSD 3.2
* Tools) does not honor -fno-strict-aliasing. As a workaround, we avoid
* inlining the function entirely for versions of gcc under 3.0.
*
* Results:
* TRUE if equal, FALSE if not equal
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
#if defined(__GNUC__) && __GNUC__ < 3
static Bool
#else
static INLINE Bool
#endif
Atomic_CMPXCHG64(Atomic_uint64 *var, // IN/OUT
uint64 const *oldVal, // IN
uint64 const *newVal) // IN
{
#ifdef FAKE_ATOMIC
uint64 readVal = var->value;
if (*oldVal == readVal) {
var->value = *newVal;
}
return (*oldVal == readVal);
#elif defined(__GNUC__)
#if defined(VM_ARM_V7)
return (Atomic_ReadIfEqualWrite64(var, *oldVal, *newVal) == *oldVal);
#else // VM_ARM_V7
Bool equal;
/* Checked against the Intel manual and GCC --walken */
#if defined(__x86_64__)
uint64 dummy;
__asm__ __volatile__(
"lock; cmpxchgq %3, %0" "\n\t"
"sete %1"
: "+m" (*var),
"=qm" (equal),
"=a" (dummy)
: "r" (*newVal),
"2" (*oldVal)
: "cc"
);
#else /* 32-bit version for non-ARM */
int dummy1, dummy2;
# if defined __PIC__
/*
* Rules for __asm__ statements in __PIC__ code
* --------------------------------------------
*
* The compiler uses %ebx for __PIC__ code, so an __asm__ statement cannot
* clobber %ebx. The __asm__ statement can temporarily modify %ebx, but _for
* each parameter that is used while %ebx is temporarily modified_:
*
* 1) The constraint cannot be "m", because the memory location the compiler
* chooses could then be relative to %ebx.
*
* 2) The constraint cannot be a register class which contains %ebx (such as
* "r" or "q"), because the register the compiler chooses could then be
* %ebx. (This happens when compiling the Fusion UI with gcc 4.2.1, Apple
* build 5577.)
*
* 3) Using register classes even for other values is problematic, as gcc
* can decide e.g. %ecx == %edi == 0 (as compile-time constants) and
* ends up using one register for two things. Which breaks xchg's ability
* to temporarily put the PIC pointer somewhere else. PR772455
*
* For that reason alone, the __asm__ statement should keep the regions
* where it temporarily modifies %ebx as small as possible, and should
* prefer specific register assignments.
*/
# if __GNUC__ < 3 // Part of #188541 - for RHL 6.2 etc.
__asm__ __volatile__(
"xchg %%ebx, %6" "\n\t"
"mov 4(%%ebx), %%ecx" "\n\t"
"mov (%%ebx), %%ebx" "\n\t"
"lock; cmpxchg8b (%3)" "\n\t"
"xchg %%ebx, %6" "\n\t"
"sete %0"
: "=a" (equal),
"=d" (dummy2),
"=D" (dummy1)
: /*
* See the "Rules for __asm__ statements in __PIC__ code" above: %3
* must use a register class which does not contain %ebx.
*/
"S" (var),
"0" (((S_uint64 const *)oldVal)->lowValue),
"1" (((S_uint64 const *)oldVal)->highValue),
"D" (newVal)
: "ecx", "cc", "memory"
);
# else
__asm__ __volatile__(
"xchgl %%ebx, %6" "\n\t"
"lock; cmpxchg8b (%3)" "\n\t"
"xchgl %%ebx, %6" "\n\t"
"sete %0"
: "=qm" (equal),
"=a" (dummy1),
"=d" (dummy2)
: /*
* See the "Rules for __asm__ statements in __PIC__ code" above: %3
* must use a register class which does not contain %ebx.
* "a"/"c"/"d" are already used, so we are left with either "S" or "D".
*
* Note that this assembly uses ALL GP registers (with %esp reserved for
* stack, %ebp reserved for frame, %ebx reserved for PIC).
*/
"S" (var),
"1" (((S_uint64 const *)oldVal)->lowValue),
"2" (((S_uint64 const *)oldVal)->highValue),
"D" (((S_uint64 const *)newVal)->lowValue),
"c" (((S_uint64 const *)newVal)->highValue)
: "cc", "memory"
);
# endif
# else
__asm__ __volatile__(
"lock; cmpxchg8b %0" "\n\t"
"sete %1"
: "+m" (*var),
"=qm" (equal),
"=a" (dummy1),
"=d" (dummy2)
: "2" (((S_uint64 const *)oldVal)->lowValue),
"3" (((S_uint64 const *)oldVal)->highValue),
"b" (((S_uint64 const *)newVal)->lowValue),
"c" (((S_uint64 const *)newVal)->highValue)
: "cc"
);
# endif
#endif
AtomicEpilogue();
return equal;
#endif //VM_ARM_V7
#elif defined _MSC_VER
#if defined(__x86_64__)
return (__int64)*oldVal == _InterlockedCompareExchange64((__int64 *)&var->value,
(__int64)*newVal,
(__int64)*oldVal);
#else
#pragma warning(push)
#pragma warning(disable : 4035) // disable no-return warning
{
__asm mov esi, var
__asm mov edx, oldVal
__asm mov ecx, newVal
__asm mov eax, [edx]S_uint64.lowValue
__asm mov edx, [edx]S_uint64.highValue
__asm mov ebx, [ecx]S_uint64.lowValue
__asm mov ecx, [ecx]S_uint64.highValue
__asm lock cmpxchg8b [esi]
__asm sete al
__asm movzx eax, al
// eax is the return value, this is documented to work - edward
}
#pragma warning(pop)
#endif
#else
#error No compiler defined for Atomic_CMPXCHG64
#endif // !GNUC
}
/*
*-----------------------------------------------------------------------------
*
* Atomic_CMPXCHG32 --
*
* Compare exchange: Read variable, if equal to oldVal, write newVal
*
* Results:
* TRUE if equal, FALSE if not equal
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
static INLINE Bool
Atomic_CMPXCHG32(Atomic_uint32 *var, // IN/OUT
uint32 oldVal, // IN
uint32 newVal) // IN
{
#ifdef FAKE_ATOMIC
uint32 readVal = var->value;
if (oldVal == readVal) {
var->value = newVal;
}
return (oldVal == readVal);
#elif defined(__GNUC__)
#ifdef VM_ARM_V7
return (Atomic_ReadIfEqualWrite(var, oldVal, newVal) == oldVal);
#else // VM_ARM_V7
Bool equal;
uint32 dummy;
__asm__ __volatile__(
"lock; cmpxchgl %3, %0" "\n\t"
"sete %1"
: "+m" (*var),
"=qm" (equal),
"=a" (dummy)
: "r" (newVal),
"2" (oldVal)
: "cc"
);
AtomicEpilogue();
return equal;
#endif // VM_ARM_V7
#else // defined(__GNUC__)
return (Atomic_ReadIfEqualWrite(var, oldVal, newVal) == oldVal);
#endif // !defined(__GNUC__)
}
/*
*-----------------------------------------------------------------------------
*
* Atomic_Read64 --
*
* Read and return.
*
* Results:
* The value of the atomic variable.
*
* Side effects:
* None.
*
*-----------------------------------------------------------------------------
*/
static INLINE uint64
Atomic_Read64(Atomic_uint64 const *var) // IN
{
#ifdef FAKE_ATOMIC
return var->value;
#elif defined(__GNUC__) && defined(__x86_64__)
uint64 value;
#ifdef VMM
ASSERT((uintptr_t)var % 8 == 0);
#endif
/*
* Use asm to ensure we emit a single load.
*/
__asm__ __volatile__(
"movq %1, %0"
: "=r" (value)
: "m" (var->value)
);
return value;
#elif defined(__GNUC__) && defined(__i386__)
uint64 value;
/*
* Since cmpxchg8b will replace the contents of EDX:EAX with the
* value in memory if there is no match, we need only execute the
* instruction once in order to atomically read 64 bits from
 * memory. The only constraint is that ECX:EBX must have the same
 * value as EDX:EAX, so that memory is left unchanged if the
 * comparison happens to succeed. We
* intentionally don't tell gcc that we are using ebx and ecx as we
* don't modify them and do not care what value they store.
*/
__asm__ __volatile__(
"mov %%ebx, %%eax" "\n\t"
"mov %%ecx, %%edx" "\n\t"
"lock; cmpxchg8b %1"
: "=&A" (value)
: "m" (*var)
: "cc"
);
AtomicEpilogue();
return value;
#elif defined (_MSC_VER) && defined(__x86_64__)
/*
* Microsoft docs guarantee "Simple reads and writes to properly
* aligned 64-bit variables are atomic on 64-bit Windows."
* http://msdn.microsoft.com/en-us/library/ms684122%28VS.85%29.aspx
*
* XXX Verify that value is properly aligned. Bug 61315.
*/
return var->value;
#elif defined (_MSC_VER) && defined(__i386__)
# pragma warning(push)
# pragma warning(disable : 4035) // disable no-return warning
{
__asm mov ecx, var
__asm mov edx, ecx
__asm mov eax, ebx
__asm lock cmpxchg8b [ecx]
// edx:eax is the return value; this is documented to work. --mann
}
# pragma warning(pop)
#elif defined(__GNUC__) && defined (VM_ARM_V7)
uint64 value;
__asm__ __volatile__(
"ldrexd %[value], %H[value], [%[var]] \n\t"
: [value] "=&r" (value)
: [var] "r" (&var->value)
);
return value;
#endif
}
/*
*----------------------------------------------------------------------
*
* Atomic_ReadUnaligned64 --
*
* Atomically read a 64 bit integer, possibly misaligned.
* This function can be *very* expensive, costing over 50 kcycles
* on Nehalem.
*
* Note that "var" needs to be writable, even though it will not
* be modified.
*
* Results:
* The value of the atomic variable.
*
* Side effects:
* None
*
*----------------------------------------------------------------------
*/
#if defined(__x86_64__)
static INLINE uint64
Atomic_ReadUnaligned64(Atomic_uint64 const *var)
{
return Atomic_ReadIfEqualWrite64((Atomic_uint64*)var, 0, 0);
}
#endif
/*
*----------------------------------------------------------------------
*
* Atomic_FetchAndAdd64 --
*
* Atomically adds a 64-bit integer to another
*
* Results:
* Returns the old value just prior to the addition
*
* Side effects:
* None
*
*----------------------------------------------------------------------
*/
static INLINE uint64
Atomic_FetchAndAdd64(Atomic_uint64 *var, // IN/OUT
uint64 addend) // IN
{
uint64 oldVal;
uint64 newVal;
do {
oldVal = var->value;
newVal = oldVal + addend;
} while (!Atomic_CMPXCHG64(var, &oldVal, &newVal));
return oldVal;
}
/*
*----------------------------------------------------------------------
*
* Atomic_FetchAndInc64 --
*
* Atomically increments a 64-bit integer
*
* Results:
* Returns the old value just prior to incrementing
*
* Side effects:
* None
*
*----------------------------------------------------------------------
*/
static INLINE uint64
Atomic_FetchAndInc64(Atomic_uint64 *var) // IN/OUT
{
return Atomic_FetchAndAdd64(var, 1);
}
/*
*----------------------------------------------------------------------
*
* Atomic_FetchAndDec64 --
*
* Atomically decrements a 64-bit integer
*
* Results:
* Returns the old value just prior to decrementing
*
* Side effects:
* None
*
*----------------------------------------------------------------------
*/
static INLINE uint64
Atomic_FetchAndDec64(Atomic_uint64 *var) // IN/OUT
{
uint64 oldVal;
uint64 newVal;
do {
oldVal = var->value;
newVal = oldVal - 1;
} while (!Atomic_CMPXCHG64(var, &oldVal, &newVal));
return oldVal;
}
/*
*-----------------------------------------------------------------------------
*
* Atomic_Inc64 --
*
* Atomic read, increment, write.
*
* Results:
* None
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
static INLINE void
Atomic_Inc64(Atomic_uint64 *var) // IN
{
#if !defined(__x86_64__)
Atomic_FetchAndInc64(var);
#elif defined(__GNUC__)
/* Checked against the AMD manual and GCC --hpreg */
__asm__ __volatile__(
"lock; incq %0"
: "+m" (var->value)
:
: "cc"
);
AtomicEpilogue();
#elif defined _MSC_VER
_InterlockedIncrement64((__int64 *)&var->value);
#else
#error No compiler defined for Atomic_Inc64
#endif
}
/*
*-----------------------------------------------------------------------------
*
* Atomic_Dec64 --
*
* Atomic read, decrement, write.
*
* Results:
* None
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
static INLINE void
Atomic_Dec64(Atomic_uint64 *var) // IN
{
#if !defined(__x86_64__)
Atomic_FetchAndDec64(var);
#elif defined(__GNUC__)
/* Checked against the AMD manual and GCC --hpreg */
__asm__ __volatile__(
"lock; decq %0"
: "+m" (var->value)
:
: "cc"
);
AtomicEpilogue();
#elif defined _MSC_VER
_InterlockedDecrement64((__int64 *)&var->value);
#else
#error No compiler defined for Atomic_Dec64
#endif
}
/*
*-----------------------------------------------------------------------------
*
* Atomic_ReadWrite64 --
*
* Read followed by write
*
* Results:
* The value of the atomic variable before the write.
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
static INLINE uint64
Atomic_ReadWrite64(Atomic_uint64 *var, // IN
uint64 val) // IN
{
#if defined(__x86_64__)
#if defined(__GNUC__)
/* Checked against the AMD manual and GCC --hpreg */
__asm__ __volatile__(
"xchgq %0, %1"
: "=r" (val),
"+m" (var->value)
: "0" (val)
);
AtomicEpilogue();
return val;
#elif defined _MSC_VER
return _InterlockedExchange64((__int64 *)&var->value, (__int64)val);
#else
#error No compiler defined for Atomic_ReadWrite64
#endif
#else
uint64 oldVal;
do {
oldVal = var->value;
} while (!Atomic_CMPXCHG64(var, &oldVal, &val));
return oldVal;
#endif
}
/*
*-----------------------------------------------------------------------------
*
* Atomic_Write64 --
*
* Write
*
* Results:
* None.
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
static INLINE void
Atomic_Write64(Atomic_uint64 *var, // IN
uint64 val) // IN
{
#if defined(__x86_64__)
#if defined(__GNUC__)
#ifdef VMM
ASSERT((uintptr_t)var % 8 == 0);
#endif
/*
* There is no move instruction for 64-bit immediate to memory, so unless
* the immediate value fits in 32-bit (i.e. can be sign-extended), GCC
* breaks the assignment into two movl instructions. The code below forces
* GCC to load the immediate value into a register first.
*/
__asm__ __volatile__(
"movq %1, %0"
: "=m" (var->value)
: "r" (val)
);
#elif defined _MSC_VER
/*
* Microsoft docs guarantee "Simple reads and writes to properly aligned
* 64-bit variables are atomic on 64-bit Windows."
* http://msdn.microsoft.com/en-us/library/ms684122%28VS.85%29.aspx
*
* XXX Verify that value is properly aligned. Bug 61315.
*/
var->value = val;
#else
#error No compiler defined for Atomic_Write64
#endif
#else /* defined(__x86_64__) */
(void)Atomic_ReadWrite64(var, val);
#endif
}
/*
*-----------------------------------------------------------------------------
*
* Atomic_Or64 --
*
* Atomic read, bitwise OR with a 64-bit value, write.
*
* Results:
* None
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
static INLINE void
Atomic_Or64(Atomic_uint64 *var, // IN
uint64 val) // IN
{
#if defined(__x86_64__)
#if defined(__GNUC__)
/* Checked against the AMD manual and GCC --hpreg */
__asm__ __volatile__(
"lock; orq %1, %0"
: "+m" (var->value)
: "ri" (val)
: "cc"
);
AtomicEpilogue();
#elif defined _MSC_VER
_InterlockedOr64((__int64 *)&var->value, (__int64)val);
#else
#error No compiler defined for Atomic_Or64
#endif
#else // __x86_64__
uint64 oldVal;
uint64 newVal;
do {
oldVal = var->value;
newVal = oldVal | val;
} while (!Atomic_CMPXCHG64(var, &oldVal, &newVal));
#endif
}
/*
*-----------------------------------------------------------------------------
*
* Atomic_And64 --
*
* Atomic read, bitwise AND with a 64-bit value, write.
*
* Results:
* None
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
static INLINE void
Atomic_And64(Atomic_uint64 *var, // IN
uint64 val) // IN
{
#if defined(__x86_64__)
#if defined(__GNUC__)
/* Checked against the AMD manual and GCC --hpreg */
__asm__ __volatile__(
"lock; andq %1, %0"
: "+m" (var->value)
: "ri" (val)
: "cc"
);
AtomicEpilogue();
#elif defined _MSC_VER
_InterlockedAnd64((__int64 *)&var->value, (__int64)val);
#else
#error No compiler defined for Atomic_And64
#endif
#else // __x86_64__
uint64 oldVal;
uint64 newVal;
do {
oldVal = var->value;
newVal = oldVal & val;
} while (!Atomic_CMPXCHG64(var, &oldVal, &newVal));
#endif
}
/*
*-----------------------------------------------------------------------------
*
* Atomic_SetBit64 --
*
* Atomic read, set bit N, and write.
* Be careful: if bit is in a register (not in an immediate), then it
* can specify a bit offset above 63 or even a negative offset.
*
* Results:
* None
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
static INLINE void
Atomic_SetBit64(Atomic_uint64 *var, // IN/OUT
uint64 bit) // IN
{
#if defined(__x86_64__)
#if defined(__GNUC__)
__asm__ __volatile__(
"lock; bts %1, %0"
: "+m" (var->value)
: "ri" (bit)
: "cc"
);
AtomicEpilogue();
#elif defined _MSC_VER
uint64 oldVal;
uint64 newVal;
do {
oldVal = var->value;
newVal = oldVal | (CONST64U(1) << bit);
} while (!Atomic_CMPXCHG64(var, &oldVal, &newVal));
#else
#error No compiler defined for Atomic_SetBit64
#endif
#else // __x86_64__
uint64 oldVal;
uint64 newVal;
do {
oldVal = var->value;
newVal = oldVal | (CONST64U(1) << bit);
} while (!Atomic_CMPXCHG64(var, &oldVal, &newVal));
#endif
}
/*
*-----------------------------------------------------------------------------
*
* Atomic_ClearBit64 --
*
* Atomic read, clear bit N, and write.
* Be careful: if bit is in a register (not in an immediate), then it
* can specify a bit offset above 63 or even a negative offset.
*
* Results:
* None
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
static INLINE void
Atomic_ClearBit64(Atomic_uint64 *var, // IN/OUT
uint64 bit) // IN
{
#if defined(__x86_64__)
#if defined(__GNUC__)
__asm__ __volatile__(
"lock; btr %1, %0"
: "+m" (var->value)
: "ri" (bit)
: "cc"
);
AtomicEpilogue();
#elif defined _MSC_VER
uint64 oldVal;
uint64 newVal;
do {
oldVal = var->value;
newVal = oldVal & ~(CONST64U(1) << bit);
} while (!Atomic_CMPXCHG64(var, &oldVal, &newVal));
#else
#error No compiler defined for Atomic_ClearBit64
#endif
#else // __x86_64__
uint64 oldVal;
uint64 newVal;
do {
oldVal = var->value;
newVal = oldVal & ~(CONST64U(1) << bit);
} while (!Atomic_CMPXCHG64(var, &oldVal, &newVal));
#endif
}
/*
*-----------------------------------------------------------------------------
*
* Atomic_TestBit64 --
*
* Read a bit.
* Be careful: if bit is in a register (not in an immediate), then it
* can specify a bit offset above 63 or even a negative offset.
*
* Results:
* TRUE if the tested bit was set; else FALSE.
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
static INLINE Bool
Atomic_TestBit64(Atomic_uint64 *var, // IN
uint64 bit) // IN
{
#if defined(__x86_64__)
#if defined(__GNUC__)
Bool out = FALSE;
__asm__ __volatile__(
"bt %2, %1; setc %0"
: "=rm"(out)
: "m" (var->value),
"rJ" (bit)
: "cc"
);
return out;
#elif defined _MSC_VER
return (var->value & (CONST64U(1) << bit)) != 0;
#else
#error No compiler defined for Atomic_TestBit64
#endif
#else // __x86_64__
return (var->value & (CONST64U(1) << bit)) != 0;
#endif
}
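/*
 * Usage sketch (illustrative only; SetBit64Checked is a hypothetical
 * wrapper): since a bit offset held in a register is not masked by the
 * hardware, callers must guarantee 0 <= bit <= 63 themselves.
 *
 *    static INLINE void
 *    SetBit64Checked(Atomic_uint64 *var, // IN/OUT
 *                    uint64 bit)         // IN
 *    {
 *       ASSERT(bit <= 63);
 *       Atomic_SetBit64(var, bit);
 *    }
 */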
/*
* Template code for the Atomic_<name> type and its operators.
*
* The cast argument is an intermediate type cast to make some
* compilers stop complaining about casting uint32 <-> void *,
* even though we only do it in the 32-bit case so they are always
* the same size. So for val of type uint32, instead of
* (void *)val, we have (void *)(uintptr_t)val.
* The specific problem case is the Windows ddk compiler
* (as used by the SVGA driver). -- edward
*
* NOTE: See the comment in vm_assert.h for why we need UNUSED_TYPE in
* AtomicAssertOnCompile(), and why we need to be very careful doing so.
*/
#define MAKE_ATOMIC_TYPE(name, size, in, out, cast) \
typedef Atomic_uint ## size Atomic_ ## name; \
\
\
static INLINE void \
AtomicAssertOnCompile ## name(void) \
{ \
enum { AssertOnCompileMisused = 8 * sizeof (in) == size \
&& 8 * sizeof (out) == size \
&& 8 * sizeof (cast) == size \
? 1 : -1 }; \
UNUSED_TYPE(typedef char AssertOnCompileFailed[AssertOnCompileMisused]);\
} \
\
\
static INLINE out \
Atomic_Read ## name(Atomic_ ## name const *var) \
{ \
return (out)(cast)Atomic_Read ## size(var); \
} \
\
\
static INLINE void \
Atomic_Write ## name(Atomic_ ## name *var, \
in val) \
{ \
Atomic_Write ## size(var, (uint ## size)(cast)val); \
} \
\
\
static INLINE out \
Atomic_ReadWrite ## name(Atomic_ ## name *var, \
in val) \
{ \
return (out)(cast)Atomic_ReadWrite ## size(var, \
(uint ## size)(cast)val); \
} \
\
\
static INLINE out \
Atomic_ReadIfEqualWrite ## name(Atomic_ ## name *var, \
in oldVal, \
in newVal) \
{ \
return (out)(cast)Atomic_ReadIfEqualWrite ## size(var, \
(uint ## size)(cast)oldVal, (uint ## size)(cast)newVal); \
} \
\
\
static INLINE void \
Atomic_And ## name(Atomic_ ## name *var, \
in val) \
{ \
Atomic_And ## size(var, (uint ## size)(cast)val); \
} \
\
\
static INLINE void \
Atomic_Or ## name(Atomic_ ## name *var, \
in val) \
{ \
Atomic_Or ## size(var, (uint ## size)(cast)val); \
} \
\
\
static INLINE void \
Atomic_Xor ## name(Atomic_ ## name *var, \
in val) \
{ \
Atomic_Xor ## size(var, (uint ## size)(cast)val); \
} \
\
\
static INLINE void \
Atomic_Add ## name(Atomic_ ## name *var, \
in val) \
{ \
Atomic_Add ## size(var, (uint ## size)(cast)val); \
} \
\
\
static INLINE void \
Atomic_Sub ## name(Atomic_ ## name *var, \
in val) \
{ \
Atomic_Sub ## size(var, (uint ## size)(cast)val); \
} \
\
\
static INLINE void \
Atomic_Inc ## name(Atomic_ ## name *var) \
{ \
Atomic_Inc ## size(var); \
} \
\
\
static INLINE void \
Atomic_Dec ## name(Atomic_ ## name *var) \
{ \
Atomic_Dec ## size(var); \
} \
\
\
static INLINE out \
Atomic_ReadOr ## name(Atomic_ ## name *var, \
in val) \
{ \
return (out)(cast)Atomic_ReadOr ## size(var, (uint ## size)(cast)val); \
} \
\
\
static INLINE out \
Atomic_ReadAdd ## name(Atomic_ ## name *var, \
in val) \
{ \
return (out)(cast)Atomic_ReadAdd ## size(var, (uint ## size)(cast)val); \
} \
\
\
static INLINE out \
Atomic_ReadInc ## name(Atomic_ ## name *var) \
{ \
return (out)(cast)Atomic_ReadInc ## size(var); \
} \
\
\
static INLINE out \
Atomic_ReadDec ## name(Atomic_ ## name *var) \
{ \
return (out)(cast)Atomic_ReadDec ## size(var); \
}
/*
* Since we use a macro to generate these definitions, it is hard to look for
* them. So DO NOT REMOVE THIS COMMENT and keep it up-to-date. --hpreg
*
* Atomic_Ptr
* Atomic_ReadPtr --
* Atomic_WritePtr --
* Atomic_ReadWritePtr --
* Atomic_ReadIfEqualWritePtr --
* Atomic_AndPtr --
* Atomic_OrPtr --
* Atomic_XorPtr --
* Atomic_AddPtr --
* Atomic_SubPtr --
* Atomic_IncPtr --
* Atomic_DecPtr --
* Atomic_ReadOrPtr --
* Atomic_ReadAddPtr --
* Atomic_ReadIncPtr --
* Atomic_ReadDecPtr --
*
* Atomic_Int
* Atomic_ReadInt --
* Atomic_WriteInt --
* Atomic_ReadWriteInt --
* Atomic_ReadIfEqualWriteInt --
* Atomic_AndInt --
* Atomic_OrInt --
* Atomic_XorInt --
* Atomic_AddInt --
* Atomic_SubInt --
* Atomic_IncInt --
* Atomic_DecInt --
* Atomic_ReadOrInt --
* Atomic_ReadAddInt --
* Atomic_ReadIncInt --
* Atomic_ReadDecInt --
*/
#if defined(__x86_64__)
MAKE_ATOMIC_TYPE(Ptr, 64, void const *, void *, uintptr_t)
#else
MAKE_ATOMIC_TYPE(Ptr, 32, void const *, void *, uintptr_t)
#endif
MAKE_ATOMIC_TYPE(Int, 32, int, int, int)
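/*
 * Usage sketch (illustrative only; GetThing, NewThing, FreeThing and
 * thingPtr are hypothetical): one-time lock-free publication of a lazily
 * created object through the generated Ptr operations.
 *
 *    static Atomic_Ptr thingPtr;
 *
 *    static void *
 *    GetThing(void)
 *    {
 *       void *thing = Atomic_ReadPtr(&thingPtr);
 *
 *       if (thing == NULL) {
 *          void *fresh = NewThing();
 *
 *          // Publish fresh only if nobody beat us to it.
 *          thing = Atomic_ReadIfEqualWritePtr(&thingPtr, NULL, fresh);
 *          if (thing == NULL) {
 *             thing = fresh;       // We won the race.
 *          } else {
 *             FreeThing(fresh);    // We lost; discard our copy.
 *          }
 *       }
 *
 *       return thing;
 *    }
 */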
/* Prevent the compiler from re-ordering memory references. */
#ifdef __GNUC__
#define ATOMIC_COMPILER_BARRIER() __asm__ __volatile__ ("": : :"memory")
#elif defined(_MSC_VER)
#define ATOMIC_COMPILER_BARRIER() _ReadWriteBarrier()
#else
#error No compiler defined for ATOMIC_COMPILER_BARRIER
#endif
/*
*-----------------------------------------------------------------------------
*
* Atomic_MFence --
*
* Implements mfence in terms of a lock xor. The reason for implementing
 *      our own mfence is that not all of our supported CPUs have an assembly
* mfence (P3, Athlon). We put it here to avoid duplicating code which is
* also why it is prefixed with "Atomic_".
*
* Results:
* None.
*
* Side effects:
* Cause loads and stores prior to this to be globally
* visible.
*
*-----------------------------------------------------------------------------
*/
static INLINE void
Atomic_MFence(void)
{
Atomic_uint32 fence; // Dummy; only the bus-locked access to it matters.
ATOMIC_COMPILER_BARRIER();
Atomic_Xor(&fence, 0x1);
ATOMIC_COMPILER_BARRIER();
}
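/*
 * Usage sketch (illustrative only; flagA and flagB are hypothetical):
 * Atomic_MFence supplies the store-load ordering that plain x86 loads and
 * stores do not, e.g. in a Dekker-style handshake between two CPUs.
 *
 *    Atomic_Write(&flagA, 1);
 *    Atomic_MFence();                  // Our store is visible before...
 *    if (Atomic_Read(&flagB) == 0) {   // ...we sample the other flag.
 *       // The peer had not yet announced itself.
 *    }
 */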
#ifdef ATOMIC_COMPILER_BARRIER
#undef ATOMIC_COMPILER_BARRIER
#endif
#endif // ifndef _ATOMIC_H_
vmmemctl-only/shared/compat_spinlock.h 0000444 0000000 0000000 00000003377 12275350061 017174 0 ustar root root /*********************************************************
* Copyright (C) 2005 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef __COMPAT_SPINLOCK_H__
# define __COMPAT_SPINLOCK_H__
#include <linux/spinlock.h>
/*
 * Preempt support was added during the 2.5.x development cycle, and later
 * it was backported to 2.4.x. In the 2.4.x backport these definitions
 * live in linux/spinlock.h, which is why we put them here (in 2.6.x they
 * are defined in linux/preempt.h, which is included by linux/spinlock.h).
*/
#ifdef CONFIG_PREEMPT
#define compat_preempt_disable() preempt_disable()
#define compat_preempt_enable() preempt_enable()
#else
#define compat_preempt_disable() do { } while (0)
#define compat_preempt_enable() do { } while (0)
#endif
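/*
 * Usage sketch (illustrative only; GetPerCpuState is a hypothetical per-CPU
 * accessor): the wrappers let the same code build on preemptible and
 * non-preemptible kernels.
 *
 *    compat_preempt_disable();
 *    GetPerCpuState()->counter++;   // Safe: we cannot migrate CPUs here.
 *    compat_preempt_enable();
 */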
/* Some older kernels - 2.6.10 and earlier - lack DEFINE_SPINLOCK */
#ifndef DEFINE_SPINLOCK
#define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED
#endif
/* Same goes for DEFINE_RWLOCK */
#ifndef DEFINE_RWLOCK
#define DEFINE_RWLOCK(x) rwlock_t x = RW_LOCK_UNLOCKED
#endif
#endif /* __COMPAT_SPINLOCK_H__ */
vmmemctl-only/shared/autoconf/ 0000755 0000000 0000000 00000000000 12275351017 015446 5 ustar root root vmmemctl-only/shared/autoconf/getsb1.c 0000444 0000000 0000000 00000003076 12275350061 017001 0 ustar root root /*********************************************************
* Copyright (C) 2006 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#include "compat_version.h"
#include "compat_autoconf.h"
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19)
#include <linux/fs.h>
/*
* Around 2.6.18, a pointer to a vfsmount was added to get_sb. Red Hat
* backported this behavior into a 2.6.17 kernel.
*
* This test will fail on a kernel with such a patch.
*/
static struct super_block * LinuxDriverGetSb(struct file_system_type *fs_type,
int flags,
const char *dev_name,
void *rawData)
{
return 0;
}
struct file_system_type fs_type = {
.get_sb = LinuxDriverGetSb
};
#else
#error "This test intentionally fails on 2.6.19 or newer kernels."
#endif
vmmemctl-only/shared/autoconf/inode1.c 0000444 0000000 0000000 00000002703 12275350061 016767 0 ustar root root /*********************************************************
* Copyright (C) 2006 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#include "compat_version.h"
#include "compat_autoconf.h"
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
#include <linux/fs.h>
#include <linux/stddef.h> /* NULL */
/*
* After 2.6.18, inodes were "slimmed". This involved removing the union
* that encapsulates inode private data (and using i_private instead), as well
* as removing i_blksize. Red Hat backported this behavior into a 2.6.17
* kernel.
*
* This test will fail on a kernel with such a patch.
*/
void test(void)
{
struct inode inode;
inode.u.generic_ip = NULL;
}
#else
#error "This test intentionally fails on 2.6.20 and newer kernels."
#endif
vmmemctl-only/shared/autoconf/cachecreate.c 0000444 0000000 0000000 00000003210 12275350061 020031 0 ustar root root /*********************************************************
* Copyright (C) 2006 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#include "compat_version.h"
#include "compat_autoconf.h"
/*
* All kernels before 2.6.22 take 6 arguments. All kernels since
* 2.6.23-rc1 take 5 arguments. Only kernels between 2.6.22 and
 * 2.6.23-rc1 are questionable - we could ignore them if we wanted, since
 * nobody cares about them even now. But unfortunately Red Hat is
 * re-releasing 2.6.X-rc kernels under the 2.6.(X-1) name - 2.6.23-rc1
 * shipped as 2.6.22-5055-something - so we have to do autodetection
 * for them.
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 22)
/* Success... */
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23)
#error "This test intentionally fails on 2.6.23 and newer kernels."
#else
#include <linux/slab.h>
struct kmem_cache *kmemtest(void) {
return kmem_cache_create("test", 12, 0, 0, NULL, NULL);
}
#endif
vmmemctl-only/shared/autoconf/netcreate_num_params.c 0000444 0000000 0000000 00000003160 12275350061 022002 0 ustar root root /*********************************************************
* Copyright (C) 2010 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
 * During the 2.6.33 merge window the net_proto_ops->create() method was
 * changed - a new 'kern' argument, signalling whether the socket is being
 * created by the kernel or by a userspace application, was added to it.
 * Unfortunately, some
* distributions, such as RHEL 6, have backported the change to earlier
* kernels, so we can't rely solely on kernel version to determine number of
* arguments.
*/
#include "compat_version.h"
#include "compat_autoconf.h"
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32)
# error This compile test intentionally fails.
#else
#include <linux/net.h>
static int TestCreate(struct net *net,
struct socket *sock, int protocol,
int kern)
{
return 0;
}
struct net_proto_family testFamily = {
.create = TestCreate,
};
#endif
vmmemctl-only/shared/autoconf/cachector1.c 0000444 0000000 0000000 00000003075 12275350061 017627 0 ustar root root /*********************************************************
* Copyright (C) 2008 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#include "compat_version.h"
#include "compat_autoconf.h"
/*
* Between 2.6.27-rc1 and 2.6.27-rc2 the ctor prototype was changed from
* ctor(cache, ptr) to ctor(ptr). Unfortunately there is no typedef for
* the ctor, so we have to redeclare kmem_cache_create to find out the
* ctor prototype. If the prototype matches, then this is an old kernel.
*/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
#error "This test intentionally fails on 2.6.28 and newer kernels."
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 26)
#include <linux/slab.h>
struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
unsigned long,
void (*)(struct kmem_cache *, void *));
#endif
vmmemctl-only/shared/autoconf/netif_num_params.c 0000444 0000000 0000000 00000003352 12275350061 021140 0 ustar root root /*********************************************************
* Copyright (C) 2009 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* Detect whether netif_rx_complete (and netif_rx_schedule) take a single
* napi_struct argument. The foundation was laid with the introduction of
* the Generic Receive Offload infrastructure, but the unneeded net_device
* argument was not dropped until a few commits later, so we can't simply
* test for the presence of NETIF_F_GRO.
*
* The test succeeds if netif_rx_complete takes dev & napi arguments, or if
* it takes a dev argument only (kernels before 2.6.24). The test fails if
* netif_rx_complete takes only a single napi argument.
*/
#include "compat_version.h"
#include "compat_autoconf.h"
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)
# error This compile test intentionally fails.
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
#include <linux/netdevice.h>
#ifdef NETIF_F_GRO
void test_netif_rx_complete(struct net_device *dev, struct napi_struct *napi)
{
netif_rx_complete(dev, napi);
}
#endif
#endif
vmmemctl-only/shared/autoconf/statfs1.c 0000444 0000000 0000000 00000002671 12275350061 017201 0 ustar root root /*********************************************************
* Copyright (C) 2006 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#include "compat_version.h"
#include "compat_autoconf.h"
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19)
#include <linux/fs.h>
/*
* Around 2.6.18, the super_block pointer in statfs was changed to a dentry
* pointer. Red Hat backported this behavior into a 2.6.17 kernel.
*
* This test will fail on a kernel with such a patch.
*/
static int LinuxDriverStatFs(struct super_block *sb,
struct kstatfs *stat)
{
return 0;
}
struct super_operations super_ops = {
.statfs = LinuxDriverStatFs
};
#else
#error "This test intentionally fails on 2.6.19 and newer kernels."
#endif
vmmemctl-only/shared/autoconf/file_operations_fsync.c 0000444 0000000 0000000 00000002746 12275350061 022203 0 ustar root root /*********************************************************
* Copyright (C) 2011 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* Linux v3.1 added two parameters (a byte range) to fsync for fine-grained
* locking control. But SLES11 SP2 has backported the change to its 3.0
* kernel, so we can't rely solely on the kernel version to determine the
* number of arguments.
*/
#include "compat_version.h"
#include "compat_autoconf.h"
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0)
# error This compile test intentionally fails.
#else
#include <linux/fs.h>
#include <linux/types.h> /* loff_t */
static int TestFsync(struct file *file,
loff_t start, loff_t end,
int datasync)
{
return 0;
}
struct file_operations testFO = {
.fsync = TestFsync,
};
#endif
vmmemctl-only/shared/autoconf/skblin.c 0000444 0000000 0000000 00000002572 12275350061 017076 0 ustar root root /*********************************************************
* Copyright (C) 2006 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* Detect whether skb_linearize takes one or two arguments.
*/
#include "compat_version.h"
#include "compat_autoconf.h"
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 17)
/*
* Since 2.6.18 all kernels have the single-argument skb_linearize. For
* older kernels we use autodetection. Not using autodetection on newer
* kernels saves us from compile failures on some post-2.6.18 kernels
* whose skbuff.h is not self-contained.
*/
#include <linux/skbuff.h>
int test_skb_linearize(struct sk_buff *skb)
{
return skb_linearize(skb);
}
#endif
vmmemctl-only/shared/autoconf/cachector.c 0000444 0000000 0000000 00000003270 12275350061 017543 0 ustar root root /*********************************************************
* Copyright (C) 2006 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#include "compat_version.h"
#include "compat_autoconf.h"
/*
* Between 2.6.23 and 2.6.24-rc1 the ctor prototype was changed from
* ctor(ptr, cache, flags) to ctor(cache, ptr). Unfortunately there
* is no typedef for the ctor, so we have to redeclare kmem_cache_create
* to find out the ctor prototype. This assumes that kmem_cache_create
* takes 5 arguments and not 6 - that change occurred between
* 2.6.22 and 2.6.23-rc1. If the prototype matches, then this is an old
* kernel.
*/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
#error "This test intentionally fails on 2.6.24 and newer kernels."
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23)
#include <linux/slab.h>
struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
unsigned long,
void (*)(void *, struct kmem_cache *, unsigned long));
#endif
vmmemctl-only/shared/autoconf/geninclude.c 0000444 0000000 0000000 00000002321 12275350061 017721 0 ustar root root /*********************************************************
* Copyright (C) 2003 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#include "compat_version.h"
#include "compat_autoconf.h"
#ifdef CONFIG_X86_VOYAGER
APATH/mach-voyager
#endif
#ifdef CONFIG_X86_VISWS
APATH/mach-visws
#endif
#ifdef CONFIG_X86_NUMAQ
APATH/mach-numaq
#endif
#ifdef CONFIG_X86_BIGSMP
APATH/mach-bigsmp
#endif
#ifdef CONFIG_X86_SUMMIT
APATH/mach-summit
#endif
#ifdef CONFIG_X86_GENERICARCH
APATH/mach-generic
#endif
APATH/mach-default
vmmemctl-only/shared/autoconf/filldir1.c 0000444 0000000 0000000 00000003260 12275350061 017315 0 ustar root root /*********************************************************
* Copyright (C) 2006 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#include "compat_version.h"
#include "compat_autoconf.h"
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
#include <linux/fs.h>
#include <linux/types.h> /* loff_t */
#include <linux/stddef.h> /* NULL */
/*
* After 2.6.18, filldir and statfs were changed to send 64-bit inode
* numbers to user space. Red Hat backported this behavior into a 2.6.17
* kernel.
*
* This test will fail on a kernel with such a patch.
*/
static int LinuxDriverFilldir(void *buf,
const char *name,
int namelen,
loff_t offset,
ino_t ino,
unsigned int d_type)
{
return 0;
}
void test(void)
{
vfs_readdir(NULL, LinuxDriverFilldir, NULL);
}
#else
#error "This test intentionally fails on 2.6.20 and newer kernels."
#endif
vmmemctl-only/shared/kernelStubs.h 0000444 0000000 0000000 00000011340 12275350045 016277 0 ustar root root /*********************************************************
* Copyright (C) 2006 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* kernelStubs.h
*
* KernelStubs implements some userspace library functions in terms
* of kernel functions, to allow userspace library code to be used in
* a kernel.
*/
#ifndef __KERNELSTUBS_H__
#define __KERNELSTUBS_H__
#ifdef linux
# ifndef __KERNEL__
# error "__KERNEL__ is not defined"
# endif
# include "driver-config.h" // Must be included before any other header files
# include "vm_basic_types.h"
# include <linux/kernel.h>
# include <linux/string.h>
#elif defined(_WIN32)
# include "vm_basic_types.h"
# include <ntddk.h> /* kernel memory APIs */
# include <stdio.h> /* for _vsnprintf, vsprintf */
# include <stdarg.h> /* for va_start stuff */
# include <stdlib.h> /* for min macro. */
# include "vm_assert.h" /* Our assert macros */
#elif defined(__FreeBSD__)
# include "vm_basic_types.h"
# ifndef _KERNEL
# error "_KERNEL is not defined"
# endif
# include <sys/types.h>
# include <sys/malloc.h>
# include <sys/param.h>
# include <sys/kernel.h>
# include <machine/stdarg.h>
# include <sys/libkern.h>
#elif defined(__APPLE__)
# include "vm_basic_types.h"
# ifndef KERNEL
# error "KERNEL is not defined"
# endif
# include <stdarg.h>
# include <string.h>
#elif defined(sun)
# include "vm_basic_types.h"
# include <sys/types.h>
# include <sys/varargs.h>
#endif
/*
* Function Prototypes
*/
#if defined(linux) || defined(__APPLE__) || defined (sun)
# ifdef linux /* if (linux) { */
char *strdup(const char *source);
# endif
/* Shared between Linux and Apple kernel stubs. */
void *malloc(size_t size);
void free(void *mem);
void *calloc(size_t num, size_t len);
void *realloc(void *ptr, size_t newSize);
#elif defined(_WIN32) /* } else if (_WIN32) { */
#if (_WIN32_WINNT == 0x0400)
/* The following declarations are missing on NT4. */
typedef unsigned int UINT_PTR;
typedef unsigned int SIZE_T;
/* No tagged free available on the NT4 kernel! */
#define KRNL_STUBS_FREE(P,T) ExFreePool((P))
#else /* _WIN32_WINNT */
#define KRNL_STUBS_FREE(P,T) ExFreePoolWithTag((P),(T))
/* Useful kernel function on Win2K and later: documented but not declared! */
NTKERNELAPI VOID ExFreePoolWithTag(IN PVOID P, IN ULONG Tag);
#endif /* _WIN32_WINNT */
#elif defined(__FreeBSD__) /* } else if (FreeBSD) { */
/* Kernel memory on FreeBSD is tagged for statistics and sanity checking. */
MALLOC_DECLARE(M_VMWARE_TEMP);
/*
* On FreeBSD, the general memory allocator for both userland and the kernel is named
* malloc, but the kernel malloc() takes more arguments. The following alias & macros
* work around this, to provide the standard malloc() API for userspace code that is
* being used in the kernel.
*/
# undef malloc
static INLINE void *
__compat_malloc(unsigned long size, struct malloc_type *type, int flags) {
return malloc(size, type, flags);
}
# define malloc(size) __compat_malloc(size, M_VMWARE_TEMP, M_NOWAIT)
# define calloc(count, size) __compat_malloc((count) * (size), \
M_VMWARE_TEMP, M_NOWAIT|M_ZERO)
# define realloc(buf, size) realloc(buf, size, M_VMWARE_TEMP, M_NOWAIT)
# define free(buf) free(buf, M_VMWARE_TEMP)
# define strchr(s,c) index(s,c)
# define strrchr(s,c) rindex(s,c)
#endif /* } */
/*
* Stub functions we provide.
*/
void Panic(const char *fmt, ...);
char *Str_Strcpy(char *buf, const char *src, size_t maxSize);
int Str_Vsnprintf(char *str, size_t size, const char *format,
va_list arguments);
char *Str_Vasprintf(size_t *length, const char *format,
va_list arguments);
char *Str_Asprintf(size_t *length, const char *Format, ...);
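/*
* Illustrative use of the string stubs (a sketch: 'nPages' is a made-up
* variable, and it is assumed that the buffer returned by Str_Asprintf
* is released with the free() stub declared above):
*
*    size_t len;
*    char *msg = Str_Asprintf(&len, "balloon holds %u pages", nPages);
*    if (msg != NULL) {
*       Debug("%s\n", msg);
*       free(msg);
*    }
*/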
/*
* Functions the driver must implement for the stubs.
*/
EXTERN void Debug(const char *fmt, ...);
#endif /* __KERNELSTUBS_H__ */
vmmemctl-only/shared/compat_autoconf.h 0000444 0000000 0000000 00000002641 12275350061 017161 0 ustar root root /*********************************************************
* Copyright (C) 2009 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef __COMPAT_AUTOCONF_H__
# define __COMPAT_AUTOCONF_H__
#define INCLUDE_ALLOW_VMMON
#define INCLUDE_ALLOW_MODULE
#define INCLUDE_ALLOW_VMCORE
#define INCLUDE_ALLOW_DISTRIBUTE
#define INCLUDE_ALLOW_VMKDRIVERS
#include "includeCheck.h"
#ifndef LINUX_VERSION_CODE
# error "Include compat_version.h before compat_autoconf.h"
#endif
/* autoconf.h moved from linux/autoconf.h to generated/autoconf.h in 2.6.33-rc1. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)
# include <linux/autoconf.h>
#else
# include <generated/autoconf.h>
#endif
#endif /* __COMPAT_AUTOCONF_H__ */
vmmemctl-only/shared/x86cpuid.h 0000644 0000000 0000000 00000203361 12275350062 015457 0 ustar root root /*********************************************************
* Copyright (C) 1998-2012 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef _X86CPUID_H_
#define _X86CPUID_H_
/* http://www.sandpile.org/ia32/cpuid.htm */
#define INCLUDE_ALLOW_USERLEVEL
#define INCLUDE_ALLOW_VMX
#define INCLUDE_ALLOW_VMKERNEL
#define INCLUDE_ALLOW_MODULE
#define INCLUDE_ALLOW_DISTRIBUTE
#define INCLUDE_ALLOW_VMK_MODULE
#define INCLUDE_ALLOW_VMCORE
#define INCLUDE_ALLOW_VMMON
#include "includeCheck.h"
#include "vm_basic_types.h"
#include "community_source.h"
#include "x86vendor.h"
#include "vm_assert.h"
/*
* The Linux kernel's ptrace.h stupidly defines the bare
* EAX/EBX/ECX/EDX, which wreaks havoc with our preprocessor tricks.
*/
#undef EAX
#undef EBX
#undef ECX
#undef EDX
typedef struct CPUIDRegs {
uint32 eax, ebx, ecx, edx;
} CPUIDRegs;
typedef union CPUIDRegsUnion {
uint32 array[4];
CPUIDRegs regs;
} CPUIDRegsUnion;
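/*
* Illustrative note: the union allows register access either by name or
* by CpuidReg index (defined further below), so for a CPUIDRegsUnion u,
* u.regs.ecx and u.array[CPUID_REG_ECX] name the same storage
* (CPUID_REG_ECX == 2, matching the eax, ebx, ecx, edx layout).
*/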
/*
* Results of calling cpuid(eax, ecx) on all host logical CPUs.
*/
#ifdef _MSC_VER
#pragma warning (disable :4200) // non-std extension: zero-sized array in struct
#endif
typedef
#include "vmware_pack_begin.h"
struct CPUIDReply {
/*
* Unique host logical CPU identifier. It does not change across queries, so
* we use it to correlate the replies of multiple queries.
*/
uint64 tag; // OUT
CPUIDRegs regs; // OUT
}
#include "vmware_pack_end.h"
CPUIDReply;
typedef
#include "vmware_pack_begin.h"
struct CPUIDQuery {
uint32 eax; // IN
uint32 ecx; // IN
uint32 numLogicalCPUs; // IN/OUT
CPUIDReply logicalCPUs[0]; // OUT
}
#include "vmware_pack_end.h"
CPUIDQuery;
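/*
* Illustrative sizing of a query (a sketch; the real callers live outside
* this header): logicalCPUs[] is a zero-length trailing array, so a query
* for 'n' logical CPUs is allocated as the header plus n replies:
*
*    CPUIDQuery *q = malloc(sizeof *q + n * sizeof q->logicalCPUs[0]);
*    q->eax = 1;
*    q->ecx = 0;
*    q->numLogicalCPUs = n;
*/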
/*
* CPUID levels the monitor caches and ones that are not cached, but
* have fields defined below (short name and actual value).
*
* The first parameter defines whether the level has its default masks
* generated from the values in this file. Any level which is marked
* as FALSE here *must* have all monitor support types set to NA. A
* static assert in lib/cpuidcompat/cpuidcompat.c will check this.
*/
#define CPUID_CACHED_LEVELS \
CPUIDLEVEL(TRUE, 0, 0) \
CPUIDLEVEL(TRUE, 1, 1) \
CPUIDLEVEL(FALSE, 5, 5) \
CPUIDLEVEL(TRUE, 7, 7) \
CPUIDLEVEL(FALSE, A, 0xA) \
CPUIDLEVEL(TRUE, D, 0xD) \
CPUIDLEVEL(FALSE,400, 0x40000000) \
CPUIDLEVEL(FALSE,401, 0x40000001) \
CPUIDLEVEL(FALSE,402, 0x40000002) \
CPUIDLEVEL(FALSE,403, 0x40000003) \
CPUIDLEVEL(FALSE,404, 0x40000004) \
CPUIDLEVEL(FALSE,405, 0x40000005) \
CPUIDLEVEL(FALSE,406, 0x40000006) \
CPUIDLEVEL(FALSE,410, 0x40000010) \
CPUIDLEVEL(FALSE, 80, 0x80000000) \
CPUIDLEVEL(TRUE, 81, 0x80000001) \
CPUIDLEVEL(FALSE, 87, 0x80000007) \
CPUIDLEVEL(FALSE, 88, 0x80000008) \
CPUIDLEVEL(TRUE, 8A, 0x8000000A)
#define CPUID_UNCACHED_LEVELS \
CPUIDLEVEL(FALSE, 4, 4) \
CPUIDLEVEL(FALSE, 6, 6) \
CPUIDLEVEL(FALSE, B, 0xB) \
CPUIDLEVEL(FALSE, 85, 0x80000005) \
CPUIDLEVEL(FALSE, 86, 0x80000006) \
CPUIDLEVEL(FALSE, 819, 0x80000019) \
CPUIDLEVEL(FALSE, 81A, 0x8000001A) \
CPUIDLEVEL(FALSE, 81B, 0x8000001B) \
CPUIDLEVEL(FALSE, 81C, 0x8000001C) \
CPUIDLEVEL(FALSE, 81D, 0x8000001D) \
CPUIDLEVEL(FALSE, 81E, 0x8000001E)
#define CPUID_ALL_LEVELS \
CPUID_CACHED_LEVELS \
CPUID_UNCACHED_LEVELS
/* Define cached CPUID levels in the form: CPUID_LEVEL_<ShortName> */
typedef enum {
#define CPUIDLEVEL(t, s, v) CPUID_LEVEL_##s,
CPUID_CACHED_LEVELS
#undef CPUIDLEVEL
CPUID_NUM_CACHED_LEVELS
} CpuidCachedLevel;
/* Enum to translate between shorthand name and actual CPUID level value. */
enum {
#define CPUIDLEVEL(t, s, v) CPUID_LEVEL_VAL_##s = v,
CPUID_ALL_LEVELS
#undef CPUIDLEVEL
};
/* Named feature leaves */
#define CPUID_FEATURE_INFORMATION 0x01
#define CPUID_PROCESSOR_TOPOLOGY 4
#define CPUID_MWAIT_FEATURES 5
#define CPUID_XSAVE_FEATURES 0xd
#define CPUID_HYPERVISOR_LEVEL_0 0x40000000
#define CPUID_SVM_FEATURES 0x8000000a
/*
* CPUID result registers
*/
#define CPUID_REGS \
CPUIDREG(EAX, eax) \
CPUIDREG(EBX, ebx) \
CPUIDREG(ECX, ecx) \
CPUIDREG(EDX, edx)
typedef enum {
#define CPUIDREG(uc, lc) CPUID_REG_##uc,
CPUID_REGS
#undef CPUIDREG
CPUID_NUM_REGS
} CpuidReg;
#define CPUID_INTEL_VENDOR_STRING "GenuntelineI"
#define CPUID_AMD_VENDOR_STRING "AuthcAMDenti"
#define CPUID_CYRIX_VENDOR_STRING "CyriteadxIns"
#define CPUID_VIA_VENDOR_STRING "CentaulsaurH"
#define CPUID_HYPERV_HYPERVISOR_VENDOR_STRING "Microsoft Hv"
#define CPUID_KVM_HYPERVISOR_VENDOR_STRING "KVMKVMKVM\0\0\0"
#define CPUID_VMWARE_HYPERVISOR_VENDOR_STRING "VMwareVMware"
#define CPUID_XEN_HYPERVISOR_VENDOR_STRING "XenVMMXenVMM"
#define CPUID_INTEL_VENDOR_STRING_FIXED "GenuineIntel"
#define CPUID_AMD_VENDOR_STRING_FIXED "AuthenticAMD"
#define CPUID_CYRIX_VENDOR_STRING_FIXED "CyrixInstead"
#define CPUID_VIA_VENDOR_STRING_FIXED "CentaurHauls"
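/*
* Note on the scrambled-looking strings above: CPUID returns the vendor
* string in EBX, EDX, ECX order, but CPUID_IsRawVendor() (further below)
* compares EBX, ECX, EDX against consecutive 4-byte chunks. The
* non-_FIXED strings are therefore stored in EBX+ECX+EDX order; e.g.
* "GenuineIntel" (EBX="Genu", EDX="ineI", ECX="ntel") is stored as
* "Genu"+"ntel"+"ineI" == "GenuntelineI". The _FIXED variants are the
* human-readable forms.
*/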
/*
* FIELD can be defined to process the CPUID information provided
* in the following CPUID_FIELD_DATA macro. The first parameter is
* the CPUID level of the feature (it must be defined in
* CPUID_ALL_LEVELS, above). The second parameter is the CPUID result
* register in which the field is returned (defined in CPUID_REGS).
* The third parameter is the vendor(s) this feature applies to. "COMMON"
* means all vendors apply. UNKNOWN may not be used here. The fourth
* and fifth parameters are the bit position of the field and the
* width, respectively. The sixth is the text name of the field.
*
* The seventh parameter specifies the monitor support
* characteristics for this field. The value must be a valid
* CpuidFieldSupported value (omitting CPUID_FIELD_SUPPORT_ for
* convenience). The meaning of those values are described below.
*
* The eighth parameter describes whether the feature is capable of
* being used by usermode code (TRUE), or just CPL0 kernel code
* (FALSE).
*
* FLAG is defined identically to FIELD, but its accessors are more
* appropriate for 1-bit flags, and compile-time asserts enforce that
* the size is 1 bit wide.
*/
/*
* CpuidFieldSupported is made up of the following values:
*
* NO: A feature/field that IS NOT SUPPORTED by the monitor. Even
* if the host supports this feature, we will never expose it to
* the guest.
*
* YES: A feature/field that IS SUPPORTED by the monitor. If the
* host supports this feature, we will expose it to the guest. If
* not, then we will not set the feature.
*
* ANY: A feature/field that IS ALWAYS SUPPORTED by the monitor.
* Even if the host does not support the feature, the monitor can
* expose the feature to the guest.
*
* NA: Only legal for levels not masked/tested by default (see
* above for this definition). Such fields must always be marked
* as NA.
*
* These distinctions, when combined with the feature's CPL3
* properties can be translated into a common CPUID mask string as
* follows:
*
* NO + CPL3 --> "R" (Reserved). We don't support the feature,
* but we can't properly hide this from applications when using
* direct execution or HV with apps that do try/catch/fail, so we
* must still perform compatibility checks.
*
* NO + !CPL3 --> "0" (Masked). We can hide this from the guest.
*
* YES --> "H" (Host). We support the feature, so show it to the
* guest if the host has the feature.
*
* ANY/NA --> "X" (Ignore). By default, don't perform checks for
* this feature bit. Per-GOS masks may choose to set this bit in
* the guest. (e.g. the APIC feature bit is always set to 1.)
*
* See lib/cpuidcompat/cpuidcompat.c for any possible overrides to
* these defaults.
*/
typedef enum {
CPUID_FIELD_SUPPORTED_NO,
CPUID_FIELD_SUPPORTED_YES,
CPUID_FIELD_SUPPORTED_ANY,
CPUID_FIELD_SUPPORTED_NA,
CPUID_NUM_FIELD_SUPPORTEDS
} CpuidFieldSupported;
/* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, CPL3 */
#define CPUID_FIELD_DATA_LEVEL_0 \
FIELD( 0, 0, EAX, 0, 32, NUMLEVELS, ANY, FALSE) \
FIELD( 0, 0, EBX, 0, 32, VENDOR1, YES, TRUE) \
FIELD( 0, 0, ECX, 0, 32, VENDOR3, YES, TRUE) \
FIELD( 0, 0, EDX, 0, 32, VENDOR2, YES, TRUE)
/* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, CPL3 */
#define CPUID_FIELD_DATA_LEVEL_1 \
FIELD( 1, 0, EAX, 0, 4, STEPPING, ANY, FALSE) \
FIELD( 1, 0, EAX, 4, 4, MODEL, ANY, FALSE) \
FIELD( 1, 0, EAX, 8, 4, FAMILY, YES, FALSE) \
FIELD( 1, 0, EAX, 12, 2, TYPE, ANY, FALSE) \
FIELD( 1, 0, EAX, 16, 4, EXTENDED_MODEL, ANY, FALSE) \
FIELD( 1, 0, EAX, 20, 8, EXTENDED_FAMILY, YES, FALSE) \
FIELD( 1, 0, EBX, 0, 8, BRAND_ID, ANY, FALSE) \
FIELD( 1, 0, EBX, 8, 8, CLFL_SIZE, ANY, FALSE) \
FIELD( 1, 0, EBX, 16, 8, LCPU_COUNT, ANY, FALSE) \
FIELD( 1, 0, EBX, 24, 8, APICID, ANY, FALSE) \
FLAG( 1, 0, ECX, 0, 1, SSE3, YES, TRUE) \
FLAG( 1, 0, ECX, 1, 1, PCLMULQDQ, YES, TRUE) \
FLAG( 1, 0, ECX, 2, 1, DTES64, NO, FALSE) \
FLAG( 1, 0, ECX, 3, 1, MWAIT, YES, FALSE) \
FLAG( 1, 0, ECX, 4, 1, DSCPL, NO, FALSE) \
FLAG( 1, 0, ECX, 5, 1, VMX, YES, FALSE) \
FLAG( 1, 0, ECX, 6, 1, SMX, NO, FALSE) \
FLAG( 1, 0, ECX, 7, 1, EIST, NO, FALSE) \
FLAG( 1, 0, ECX, 8, 1, TM2, NO, FALSE) \
FLAG( 1, 0, ECX, 9, 1, SSSE3, YES, TRUE) \
FLAG( 1, 0, ECX, 10, 1, CNXTID, NO, FALSE) \
FLAG( 1, 0, ECX, 11, 1, NDA11, NO, FALSE) \
FLAG( 1, 0, ECX, 12, 1, FMA, YES, TRUE) \
FLAG( 1, 0, ECX, 13, 1, CMPXCHG16B, YES, TRUE) \
FLAG( 1, 0, ECX, 14, 1, xTPR, NO, FALSE) \
FLAG( 1, 0, ECX, 15, 1, PDCM, NO, FALSE) \
FLAG( 1, 0, ECX, 17, 1, PCID, YES, FALSE) \
FLAG( 1, 0, ECX, 18, 1, DCA, NO, FALSE) \
FLAG( 1, 0, ECX, 19, 1, SSE41, YES, TRUE) \
FLAG( 1, 0, ECX, 20, 1, SSE42, YES, TRUE) \
FLAG( 1, 0, ECX, 21, 1, x2APIC, ANY, FALSE) \
FLAG( 1, 0, ECX, 22, 1, MOVBE, YES, TRUE) \
FLAG( 1, 0, ECX, 23, 1, POPCNT, YES, TRUE) \
FLAG( 1, 0, ECX, 24, 1, TSC_DEADLINE, NO, FALSE) \
FLAG( 1, 0, ECX, 25, 1, AES, YES, TRUE) \
FLAG( 1, 0, ECX, 26, 1, XSAVE, YES, FALSE) \
FLAG( 1, 0, ECX, 27, 1, OSXSAVE, ANY, FALSE) \
FLAG( 1, 0, ECX, 28, 1, AVX, YES, FALSE) \
FLAG( 1, 0, ECX, 29, 1, F16C, YES, TRUE) \
FLAG( 1, 0, ECX, 30, 1, RDRAND, YES, TRUE) \
FLAG( 1, 0, ECX, 31, 1, HYPERVISOR, ANY, TRUE) \
FLAG( 1, 0, EDX, 0, 1, FPU, YES, TRUE) \
FLAG( 1, 0, EDX, 1, 1, VME, YES, FALSE) \
FLAG( 1, 0, EDX, 2, 1, DE, YES, FALSE) \
FLAG( 1, 0, EDX, 3, 1, PSE, YES, FALSE) \
FLAG( 1, 0, EDX, 4, 1, TSC, YES, TRUE) \
FLAG( 1, 0, EDX, 5, 1, MSR, YES, FALSE) \
FLAG( 1, 0, EDX, 6, 1, PAE, YES, FALSE) \
FLAG( 1, 0, EDX, 7, 1, MCE, YES, FALSE) \
FLAG( 1, 0, EDX, 8, 1, CX8, YES, TRUE) \
FLAG( 1, 0, EDX, 9, 1, APIC, ANY, FALSE) \
FLAG( 1, 0, EDX, 11, 1, SEP, YES, TRUE) \
FLAG( 1, 0, EDX, 12, 1, MTRR, YES, FALSE) \
FLAG( 1, 0, EDX, 13, 1, PGE, YES, FALSE) \
FLAG( 1, 0, EDX, 14, 1, MCA, YES, FALSE) \
FLAG( 1, 0, EDX, 15, 1, CMOV, YES, TRUE) \
FLAG( 1, 0, EDX, 16, 1, PAT, YES, FALSE) \
FLAG( 1, 0, EDX, 17, 1, PSE36, YES, FALSE) \
FLAG( 1, 0, EDX, 18, 1, PSN, YES, FALSE) \
FLAG( 1, 0, EDX, 19, 1, CLFSH, YES, TRUE) \
FLAG( 1, 0, EDX, 21, 1, DS, YES, FALSE) \
FLAG( 1, 0, EDX, 22, 1, ACPI, ANY, FALSE) \
FLAG( 1, 0, EDX, 23, 1, MMX, YES, TRUE) \
FLAG( 1, 0, EDX, 24, 1, FXSR, YES, TRUE) \
FLAG( 1, 0, EDX, 25, 1, SSE, YES, TRUE) \
FLAG( 1, 0, EDX, 26, 1, SSE2, YES, TRUE) \
FLAG( 1, 0, EDX, 27, 1, SS, YES, FALSE) \
FLAG( 1, 0, EDX, 28, 1, HTT, ANY, FALSE) \
FLAG( 1, 0, EDX, 29, 1, TM, NO, FALSE) \
FLAG( 1, 0, EDX, 30, 1, IA64, NO, FALSE) \
FLAG( 1, 0, EDX, 31, 1, PBE, NO, FALSE)
/* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, CPL3 */
#define CPUID_FIELD_DATA_LEVEL_4 \
FIELD( 4, 0, EAX, 0, 5, LEAF4_CACHE_TYPE, NA, FALSE) \
FIELD( 4, 0, EAX, 5, 3, LEAF4_CACHE_LEVEL, NA, FALSE) \
FLAG( 4, 0, EAX, 8, 1, LEAF4_CACHE_SELF_INIT, NA, FALSE) \
FLAG( 4, 0, EAX, 9, 1, LEAF4_CACHE_FULLY_ASSOC, NA, FALSE) \
FIELD( 4, 0, EAX, 14, 12, LEAF4_CACHE_NUMHT_SHARING, NA, FALSE) \
FIELD( 4, 0, EAX, 26, 6, LEAF4_CORE_COUNT, NA, FALSE) \
FIELD( 4, 0, EBX, 0, 12, LEAF4_CACHE_LINE, NA, FALSE) \
FIELD( 4, 0, EBX, 12, 10, LEAF4_CACHE_PART, NA, FALSE) \
FIELD( 4, 0, EBX, 22, 10, LEAF4_CACHE_WAYS, NA, FALSE) \
FIELD( 4, 0, ECX, 0, 32, LEAF4_CACHE_SETS, NA, FALSE) \
FLAG( 4, 0, EDX, 0, 1, LEAF4_CACHE_WBINVD_NOT_GUARANTEED, NA, FALSE) \
FLAG( 4, 0, EDX, 1, 1, LEAF4_CACHE_IS_INCLUSIVE, NA, FALSE) \
FLAG( 4, 0, EDX, 2, 1, LEAF4_CACHE_COMPLEX_INDEXING, NA, FALSE)
/* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, CPL3 */
#define CPUID_FIELD_DATA_LEVEL_5 \
FIELD( 5, 0, EAX, 0, 16, MWAIT_MIN_SIZE, NA, FALSE) \
FIELD( 5, 0, EBX, 0, 16, MWAIT_MAX_SIZE, NA, FALSE) \
FLAG( 5, 0, ECX, 0, 1, MWAIT_EXTENSIONS, NA, FALSE) \
FLAG( 5, 0, ECX, 1, 1, MWAIT_INTR_BREAK, NA, FALSE) \
FIELD( 5, 0, EDX, 0, 4, MWAIT_C0_SUBSTATE, NA, FALSE) \
FIELD( 5, 0, EDX, 4, 4, MWAIT_C1_SUBSTATE, NA, FALSE) \
FIELD( 5, 0, EDX, 8, 4, MWAIT_C2_SUBSTATE, NA, FALSE) \
FIELD( 5, 0, EDX, 12, 4, MWAIT_C3_SUBSTATE, NA, FALSE) \
FIELD( 5, 0, EDX, 16, 4, MWAIT_C4_SUBSTATE, NA, FALSE)
/* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, CPL3 */
#define CPUID_FIELD_DATA_LEVEL_6 \
FLAG( 6, 0, EAX, 0, 1, THERMAL_SENSOR, NA, FALSE) \
FLAG( 6, 0, EAX, 1, 1, TURBO_MODE, NA, FALSE) \
FLAG( 6, 0, EAX, 2, 1, APIC_INVARIANT, NA, FALSE) \
FIELD( 6, 0, EBX, 0, 4, NUM_INTR_THRESHOLDS, NA, FALSE) \
FLAG( 6, 0, ECX, 0, 1, HW_COORD_FEEDBACK, NA, FALSE) \
FLAG( 6, 0, ECX, 3, 1, ENERGY_PERF_BIAS, NA, FALSE)
#define CPUID_7_EBX_13
/* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, CPL3 */
#define CPUID_FIELD_DATA_LEVEL_7 \
FLAG( 7, 0, EBX, 0, 1, FSGSBASE, YES, FALSE) \
FLAG( 7, 0, EBX, 3, 1, BMI1, YES, TRUE) \
FLAG( 7, 0, EBX, 4, 1, HLE, NO, TRUE) \
FLAG( 7, 0, EBX, 5, 1, AVX2, NO, TRUE) \
FLAG( 7, 0, EBX, 7, 1, SMEP, YES, FALSE) \
FLAG( 7, 0, EBX, 8, 1, BMI2, NO, TRUE) \
FLAG( 7, 0, EBX, 9, 1, ENFSTRG, YES, FALSE) \
FLAG( 7, 0, EBX, 10, 1, INVPCID, NO, FALSE) \
FLAG( 7, 0, EBX, 11, 1, RTM, NO, TRUE) \
CPUID_7_EBX_13
/* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, CPL3 */
#define CPUID_FIELD_DATA_LEVEL_A \
FIELD( A, 0, EAX, 0, 8, PMC_VERSION, NA, FALSE) \
FIELD( A, 0, EAX, 8, 8, PMC_NUM_GEN, NA, FALSE) \
FIELD( A, 0, EAX, 16, 8, PMC_WIDTH_GEN, NA, FALSE) \
FIELD( A, 0, EAX, 24, 8, PMC_EBX_LENGTH, NA, FALSE) \
FLAG( A, 0, EBX, 0, 1, PMC_CORE_CYCLES, NA, FALSE) \
FLAG( A, 0, EBX, 1, 1, PMC_INSTR_RETIRED, NA, FALSE) \
FLAG( A, 0, EBX, 2, 1, PMC_REF_CYCLES, NA, FALSE) \
FLAG( A, 0, EBX, 3, 1, PMC_LAST_LVL_CREF, NA, FALSE) \
FLAG( A, 0, EBX, 4, 1, PMC_LAST_LVL_CMISS, NA, FALSE) \
FLAG( A, 0, EBX, 5, 1, PMC_BR_INST_RETIRED, NA, FALSE) \
FLAG( A, 0, EBX, 6, 1, PMC_BR_MISS_RETIRED, NA, FALSE) \
FIELD( A, 0, EDX, 0, 5, PMC_NUM_FIXED, NA, FALSE) \
FIELD( A, 0, EDX, 5, 8, PMC_WIDTH_FIXED, NA, FALSE)
/* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, CPL3 */
#define CPUID_FIELD_DATA_LEVEL_B \
FIELD( B, 0, EAX, 0, 5, TOPOLOGY_MASK_WIDTH, NA, FALSE) \
FIELD( B, 0, EBX, 0, 16, TOPOLOGY_CPUS_SHARING_LEVEL, NA, FALSE) \
FIELD( B, 0, ECX, 0, 8, TOPOLOGY_LEVEL_NUMBER, NA, FALSE) \
FIELD( B, 0, ECX, 8, 8, TOPOLOGY_LEVEL_TYPE, NA, FALSE) \
FIELD( B, 0, EDX, 0, 32, TOPOLOGY_X2APIC_ID, NA, FALSE)
/* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, CPL3 */
#define CPUID_FIELD_DATA_LEVEL_D \
FLAG( D, 0, EAX, 0, 1, XCR0_MASTER_LEGACY_FP, YES, FALSE) \
FLAG( D, 0, EAX, 1, 1, XCR0_MASTER_SSE, YES, FALSE) \
FLAG( D, 0, EAX, 2, 1, XCR0_MASTER_YMM_H, YES, FALSE) \
FIELD( D, 0, EAX, 3, 29, XCR0_MASTER_LOWER, NO, FALSE) \
FIELD( D, 0, EBX, 0, 32, XSAVE_ENABLED_SIZE, ANY, FALSE) \
FIELD( D, 0, ECX, 0, 32, XSAVE_MAX_SIZE, YES, FALSE) \
FIELD( D, 0, EDX, 0, 29, XCR0_MASTER_UPPER, NO, FALSE) \
FLAG( D, 0, EDX, 30, 1, XCR0_MASTER_LWP, NO, FALSE) \
FLAG( D, 0, EDX, 31, 1, XCR0_MASTER_EXTENDED_XSAVE, NO, FALSE) \
FLAG( D, 1, EAX, 0, 1, XSAVEOPT, NO, FALSE) \
FIELD( D, 2, EAX, 0, 32, XSAVE_YMM_SIZE, YES, FALSE) \
FIELD( D, 2, EBX, 0, 32, XSAVE_YMM_OFFSET, YES, FALSE) \
FIELD( D, 2, ECX, 0, 32, XSAVE_YMM_RSVD1, YES, FALSE) \
FIELD( D, 2, EDX, 0, 32, XSAVE_YMM_RSVD2, YES, FALSE) \
FIELD( D, 62, EAX, 0, 32, XSAVE_LWP_SIZE, NO, FALSE) \
FIELD( D, 62, EBX, 0, 32, XSAVE_LWP_OFFSET, NO, FALSE) \
FIELD( D, 62, ECX, 0, 32, XSAVE_LWP_RSVD1, NO, FALSE) \
FIELD( D, 62, EDX, 0, 32, XSAVE_LWP_RSVD2, NO, FALSE)
/* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, CPL3 */
#define CPUID_FIELD_DATA_LEVEL_400 \
FIELD(400, 0, EAX, 0, 32, NUM_HYP_LEVELS, NA, FALSE) \
FIELD(400, 0, EBX, 0, 32, HYPERVISOR1, NA, FALSE) \
FIELD(400, 0, ECX, 0, 32, HYPERVISOR2, NA, FALSE) \
FIELD(400, 0, EDX, 0, 32, HYPERVISOR3, NA, FALSE)
/* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, CPL3 */
#define CPUID_FIELD_DATA_LEVEL_410 \
FIELD(410, 0, EAX, 0, 32, TSC_HZ, NA, FALSE) \
FIELD(410, 0, EBX, 0, 32, ACPIBUS_HZ, NA, FALSE)
/* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, CPL3 */
#define CPUID_FIELD_DATA_LEVEL_80 \
FIELD( 80, 0, EAX, 0, 32, NUM_EXT_LEVELS, NA, FALSE) \
FIELD( 80, 0, EBX, 0, 32, LEAF80_VENDOR1, NA, FALSE) \
FIELD( 80, 0, ECX, 0, 32, LEAF80_VENDOR3, NA, FALSE) \
FIELD( 80, 0, EDX, 0, 32, LEAF80_VENDOR2, NA, FALSE)
#define CPUID_81_ECX_17
/* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, CPL3 */
#define CPUID_FIELD_DATA_LEVEL_81 \
FIELD( 81, 0, EAX, 0, 32, UNKNOWN81EAX, ANY, FALSE) \
FIELD( 81, 0, EAX, 0, 4, LEAF81_STEPPING, ANY, FALSE) \
FIELD( 81, 0, EAX, 4, 4, LEAF81_MODEL, ANY, FALSE) \
FIELD( 81, 0, EAX, 8, 4, LEAF81_FAMILY, ANY, FALSE) \
FIELD( 81, 0, EAX, 12, 2, LEAF81_TYPE, ANY, FALSE) \
FIELD( 81, 0, EAX, 16, 4, LEAF81_EXTENDED_MODEL, ANY, FALSE) \
FIELD( 81, 0, EAX, 20, 8, LEAF81_EXTENDED_FAMILY, ANY, FALSE) \
FIELD( 81, 0, EBX, 0, 32, UNKNOWN81EBX, ANY, FALSE) \
FIELD( 81, 0, EBX, 0, 16, LEAF81_BRAND_ID, ANY, FALSE) \
FIELD( 81, 0, EBX, 16, 16, UNDEF, ANY, FALSE) \
FLAG( 81, 0, ECX, 0, 1, LAHF64, YES, TRUE) \
FLAG( 81, 0, ECX, 1, 1, CMPLEGACY, ANY, FALSE) \
FLAG( 81, 0, ECX, 2, 1, SVM, YES, FALSE) \
FLAG( 81, 0, ECX, 3, 1, EXTAPICSPC, YES, FALSE) \
FLAG( 81, 0, ECX, 4, 1, CR8AVAIL, YES, FALSE) \
FLAG( 81, 0, ECX, 5, 1, ABM, YES, TRUE) \
FLAG( 81, 0, ECX, 6, 1, SSE4A, YES, TRUE) \
FLAG( 81, 0, ECX, 7, 1, MISALIGNED_SSE, YES, TRUE) \
FLAG( 81, 0, ECX, 8, 1, 3DNPREFETCH, YES, TRUE) \
FLAG( 81, 0, ECX, 9, 1, OSVW, ANY, FALSE) \
FLAG( 81, 0, ECX, 10, 1, IBS, NO, FALSE) \
FLAG( 81, 0, ECX, 11, 1, XOP, YES, TRUE) \
FLAG( 81, 0, ECX, 12, 1, SKINIT, NO, FALSE) \
FLAG( 81, 0, ECX, 13, 1, WATCHDOG, NO, FALSE) \
FLAG( 81, 0, ECX, 15, 1, LWP, NO, FALSE) \
FLAG( 81, 0, ECX, 16, 1, FMA4, YES, TRUE) \
CPUID_81_ECX_17 \
FLAG( 81, 0, ECX, 19, 1, NODEID_MSR, NO, FALSE) \
FLAG( 81, 0, ECX, 21, 1, TBM, YES, TRUE) \
FLAG( 81, 0, ECX, 22, 1, TOPOLOGY, NO, FALSE) \
FLAG( 81, 0, ECX, 23, 1, PERFCORE, ANY, TRUE) \
FLAG( 81, 0, EDX, 0, 1, LEAF81_FPU, YES, TRUE) \
FLAG( 81, 0, EDX, 1, 1, LEAF81_VME, YES, FALSE) \
FLAG( 81, 0, EDX, 2, 1, LEAF81_DE, YES, FALSE) \
FLAG( 81, 0, EDX, 3, 1, LEAF81_PSE, YES, FALSE) \
FLAG( 81, 0, EDX, 4, 1, LEAF81_TSC, YES, TRUE) \
FLAG( 81, 0, EDX, 5, 1, LEAF81_MSR, YES, FALSE) \
FLAG( 81, 0, EDX, 6, 1, LEAF81_PAE, YES, FALSE) \
FLAG( 81, 0, EDX, 7, 1, LEAF81_MCE, YES, FALSE) \
FLAG( 81, 0, EDX, 8, 1, LEAF81_CX8, YES, TRUE) \
FLAG( 81, 0, EDX, 9, 1, LEAF81_APIC, ANY, FALSE) \
FLAG( 81, 0, EDX, 11, 1, SYSC, ANY, TRUE) \
FLAG( 81, 0, EDX, 12, 1, LEAF81_MTRR, YES, FALSE) \
FLAG( 81, 0, EDX, 13, 1, LEAF81_PGE, YES, FALSE) \
FLAG( 81, 0, EDX, 14, 1, LEAF81_MCA, YES, FALSE) \
FLAG( 81, 0, EDX, 15, 1, LEAF81_CMOV, YES, TRUE) \
FLAG( 81, 0, EDX, 16, 1, LEAF81_PAT, YES, FALSE) \
FLAG( 81, 0, EDX, 17, 1, LEAF81_PSE36, YES, FALSE) \
FLAG( 81, 0, EDX, 20, 1, NX, YES, FALSE) \
FLAG( 81, 0, EDX, 22, 1, MMXEXT, YES, TRUE) \
FLAG( 81, 0, EDX, 23, 1, LEAF81_MMX, YES, TRUE) \
FLAG( 81, 0, EDX, 24, 1, LEAF81_FXSR, YES, TRUE) \
FLAG( 81, 0, EDX, 25, 1, FFXSR, YES, FALSE) \
FLAG( 81, 0, EDX, 26, 1, PDPE1GB, YES, FALSE) \
FLAG( 81, 0, EDX, 27, 1, RDTSCP, YES, TRUE) \
FLAG( 81, 0, EDX, 29, 1, LM, YES, FALSE) \
FLAG( 81, 0, EDX, 30, 1, 3DNOWPLUS, YES, TRUE) \
FLAG( 81, 0, EDX, 31, 1, 3DNOW, YES, TRUE)
#define CPUID_8A_EDX_11 \
FLAG( 8A, 0, EDX, 11, 1, SVMEDX_RSVD1, NO, FALSE)
#define CPUID_8A_EDX_13_31 \
FIELD( 8A, 0, EDX, 13, 19, SVMEDX_RSVD2, NO, FALSE)
/* LEVEL, REG, POS, SIZE, NAME, MON SUPP, CPL3 */
#define CPUID_FIELD_DATA_LEVEL_8x \
FIELD( 85, 0, EAX, 0, 8, ITLB_ENTRIES_2M4M_PGS, NA, FALSE) \
FIELD( 85, 0, EAX, 8, 8, ITLB_ASSOC_2M4M_PGS, NA, FALSE) \
FIELD( 85, 0, EAX, 16, 8, DTLB_ENTRIES_2M4M_PGS, NA, FALSE) \
FIELD( 85, 0, EAX, 24, 8, DTLB_ASSOC_2M4M_PGS, NA, FALSE) \
FIELD( 85, 0, EBX, 0, 8, ITLB_ENTRIES_4K_PGS, NA, FALSE) \
FIELD( 85, 0, EBX, 8, 8, ITLB_ASSOC_4K_PGS, NA, FALSE) \
FIELD( 85, 0, EBX, 16, 8, DTLB_ENTRIES_4K_PGS, NA, FALSE) \
FIELD( 85, 0, EBX, 24, 8, DTLB_ASSOC_4K_PGS, NA, FALSE) \
FIELD( 85, 0, ECX, 0, 8, L1_DCACHE_LINE_SIZE, NA, FALSE) \
FIELD( 85, 0, ECX, 8, 8, L1_DCACHE_LINES_PER_TAG, NA, FALSE) \
FIELD( 85, 0, ECX, 16, 8, L1_DCACHE_ASSOC, NA, FALSE) \
FIELD( 85, 0, ECX, 24, 8, L1_DCACHE_SIZE, NA, FALSE) \
FIELD( 85, 0, EDX, 0, 8, L1_ICACHE_LINE_SIZE, NA, FALSE) \
FIELD( 85, 0, EDX, 8, 8, L1_ICACHE_LINES_PER_TAG, NA, FALSE) \
FIELD( 85, 0, EDX, 16, 8, L1_ICACHE_ASSOC, NA, FALSE) \
FIELD( 85, 0, EDX, 24, 8, L1_ICACHE_SIZE, NA, FALSE) \
FIELD( 86, 0, EAX, 0, 12, L2_ITLB_ENTRIES_2M4M_PGS, NA, FALSE) \
FIELD( 86, 0, EAX, 12, 4, L2_ITLB_ASSOC_2M4M_PGS, NA, FALSE) \
FIELD( 86, 0, EAX, 16, 12, L2_DTLB_ENTRIES_2M4M_PGS, NA, FALSE) \
FIELD( 86, 0, EAX, 28, 4, L2_DTLB_ASSOC_2M4M_PGS, NA, FALSE) \
FIELD( 86, 0, EBX, 0, 12, L2_ITLB_ENTRIES_4K_PGS, NA, FALSE) \
FIELD( 86, 0, EBX, 12, 4, L2_ITLB_ASSOC_4K_PGS, NA, FALSE) \
FIELD( 86, 0, EBX, 16, 12, L2_DTLB_ENTRIES_4K_PGS, NA, FALSE) \
FIELD( 86, 0, EBX, 28, 4, L2_DTLB_ASSOC_4K_PGS, NA, FALSE) \
FIELD( 86, 0, ECX, 0, 8, L2CACHE_LINE, NA, FALSE) \
FIELD( 86, 0, ECX, 8, 4, L2CACHE_LINE_PER_TAG, NA, FALSE) \
FIELD( 86, 0, ECX, 12, 4, L2CACHE_WAYS, NA, FALSE) \
FIELD( 86, 0, ECX, 16, 16, L2CACHE_SIZE, NA, FALSE) \
FIELD( 86, 0, EDX, 0, 8, L3CACHE_LINE, NA, FALSE) \
FIELD( 86, 0, EDX, 8, 4, L3CACHE_LINE_PER_TAG, NA, FALSE) \
FIELD( 86, 0, EDX, 12, 4, L3CACHE_WAYS, NA, FALSE) \
FIELD( 86, 0, EDX, 18, 14, L3CACHE_SIZE, NA, FALSE) \
FLAG( 87, 0, EDX, 0, 1, TS, NA, FALSE) \
FLAG( 87, 0, EDX, 1, 1, FID, NA, FALSE) \
FLAG( 87, 0, EDX, 2, 1, VID, NA, FALSE) \
FLAG( 87, 0, EDX, 3, 1, TTP, NA, FALSE) \
FLAG( 87, 0, EDX, 4, 1, LEAF87_TM, NA, FALSE) \
FLAG( 87, 0, EDX, 5, 1, STC, NA, FALSE) \
FLAG( 87, 0, EDX, 6, 1, 100MHZSTEPS, NA, FALSE) \
FLAG( 87, 0, EDX, 7, 1, HWPSTATE, NA, FALSE) \
FLAG( 87, 0, EDX, 8, 1, TSC_INVARIANT, NA, FALSE) \
FLAG( 87, 0, EDX, 9, 1, CORE_PERF_BOOST, NA, FALSE) \
FIELD( 88, 0, EAX, 0, 8, PHYS_BITS, NA, FALSE) \
FIELD( 88, 0, EAX, 8, 8, VIRT_BITS, NA, FALSE) \
FIELD( 88, 0, EAX, 16, 8, GUEST_PHYS_ADDR_SZ, NA, FALSE) \
FIELD( 88, 0, ECX, 0, 8, LEAF88_CORE_COUNT, NA, FALSE) \
FIELD( 88, 0, ECX, 12, 4, APICID_COREID_SIZE, NA, FALSE) \
FIELD( 8A, 0, EAX, 0, 8, SVM_REVISION, YES, FALSE) \
FLAG( 8A, 0, EAX, 8, 1, SVM_HYPERVISOR, NO, FALSE) \
FIELD( 8A, 0, EAX, 9, 23, SVMEAX_RSVD, NO, FALSE) \
FIELD( 8A, 0, EBX, 0, 32, SVM_NUM_ASIDS, YES, FALSE) \
FIELD( 8A, 0, ECX, 0, 32, SVMECX_RSVD, NO, FALSE) \
FLAG( 8A, 0, EDX, 0, 1, SVM_NPT, YES, FALSE) \
FLAG( 8A, 0, EDX, 1, 1, SVM_LBR, NO, FALSE) \
FLAG( 8A, 0, EDX, 2, 1, SVM_LOCK, ANY, FALSE) \
FLAG( 8A, 0, EDX, 3, 1, SVM_NRIP, YES, FALSE) \
FLAG( 8A, 0, EDX, 4, 1, SVM_TSC_RATE_MSR, NO, FALSE) \
FLAG( 8A, 0, EDX, 5, 1, SVM_VMCB_CLEAN, YES, FALSE) \
FLAG( 8A, 0, EDX, 6, 1, SVM_FLUSH_BY_ASID, YES, FALSE) \
FLAG( 8A, 0, EDX, 7, 1, SVM_DECODE_ASSISTS, YES, FALSE) \
FIELD( 8A, 0, EDX, 8, 2, SVMEDX_RSVD0, NO, FALSE) \
FLAG( 8A, 0, EDX, 10, 1, SVM_PAUSE_FILTER, NO, FALSE) \
CPUID_8A_EDX_11 \
FLAG( 8A, 0, EDX, 12, 1, SVM_PAUSE_THRESHOLD, NO, FALSE) \
CPUID_8A_EDX_13_31
/* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, CPL3 */
#define CPUID_FIELD_DATA_LEVEL_81x \
FIELD(819, 0, EAX, 0, 12, L1_ITLB_ENTRIES_1G_PGS, NA, FALSE) \
FIELD(819, 0, EAX, 12, 4, L1_ITLB_ASSOC_1G_PGS, NA, FALSE) \
FIELD(819, 0, EAX, 16, 12, L1_DTLB_ENTRIES_1G_PGS, NA, FALSE) \
FIELD(819, 0, EAX, 28, 4, L1_DTLB_ASSOC_1G_PGS, NA, FALSE) \
FIELD(819, 0, EBX, 0, 12, L2_ITLB_ENTRIES_1G_PGS, NA, FALSE) \
FIELD(819, 0, EBX, 12, 4, L2_ITLB_ASSOC_1G_PGS, NA, FALSE) \
FIELD(819, 0, EBX, 16, 12, L2_DTLB_ENTRIES_1G_PGS, NA, FALSE) \
FIELD(819, 0, EBX, 28, 4, L2_DTLB_ASSOC_1G_PGS, NA, FALSE) \
FLAG( 81A, 0, EAX, 0, 1, FP128, NA, FALSE) \
FLAG( 81A, 0, EAX, 1, 1, MOVU, NA, FALSE) \
FLAG( 81B, 0, EAX, 0, 1, IBS_FFV, NA, FALSE) \
FLAG( 81B, 0, EAX, 1, 1, IBS_FETCHSAM, NA, FALSE) \
FLAG( 81B, 0, EAX, 2, 1, IBS_OPSAM, NA, FALSE) \
FLAG( 81B, 0, EAX, 3, 1, RW_OPCOUNT, NA, FALSE) \
FLAG( 81B, 0, EAX, 4, 1, OPCOUNT, NA, FALSE) \
FLAG( 81B, 0, EAX, 5, 1, BRANCH_TARGET_ADDR, NA, FALSE) \
FLAG( 81B, 0, EAX, 6, 1, OPCOUNT_EXT, NA, FALSE) \
FLAG( 81B, 0, EAX, 7, 1, RIP_INVALID_CHECK, NA, FALSE) \
FLAG( 81C, 0, EAX, 0, 1, LWP_AVAIL, NA, FALSE) \
FLAG( 81C, 0, EAX, 1, 1, LWP_VAL_AVAIL, NA, FALSE) \
FLAG( 81C, 0, EAX, 2, 1, LWP_IRE_AVAIL, NA, FALSE) \
FLAG( 81C, 0, EAX, 3, 1, LWP_BRE_AVAIL, NA, FALSE) \
FLAG( 81C, 0, EAX, 4, 1, LWP_DME_AVAIL, NA, FALSE) \
FLAG( 81C, 0, EAX, 5, 1, LWP_CNH_AVAIL, NA, FALSE) \
FLAG( 81C, 0, EAX, 6, 1, LWP_RNH_AVAIL, NA, FALSE) \
FLAG( 81C, 0, EAX, 31, 1, LWP_INT_AVAIL, NA, FALSE) \
FIELD(81C, 0, EBX, 0, 8, LWP_CB_SIZE, NA, FALSE) \
FIELD(81C, 0, EBX, 8, 8, LWP_EVENT_SIZE, NA, FALSE) \
FIELD(81C, 0, EBX, 16, 8, LWP_MAX_EVENTS, NA, FALSE) \
FIELD(81C, 0, EBX, 24, 8, LWP_EVENT_OFFSET, NA, FALSE) \
FIELD(81C, 0, ECX, 0, 4, LWP_LATENCY_MAX, NA, FALSE) \
FLAG( 81C, 0, ECX, 5, 1, LWP_DATA_ADDR_VALID, NA, FALSE) \
FIELD(81C, 0, ECX, 6, 3, LWP_LATENCY_ROUND, NA, FALSE) \
FIELD(81C, 0, ECX, 9, 7, LWP_VERSION, NA, FALSE) \
FIELD(81C, 0, ECX, 16, 8, LWP_MIN_BUF_SIZE, NA, FALSE) \
FLAG( 81C, 0, ECX, 28, 1, LWP_BRANCH_PRED, NA, FALSE) \
FLAG( 81C, 0, ECX, 29, 1, LWP_IP_FILTERING, NA, FALSE) \
FLAG( 81C, 0, ECX, 30, 1, LWP_CACHE_LEVEL, NA, FALSE) \
FLAG( 81C, 0, ECX, 31, 1, LWP_CACHE_LATENCY, NA, FALSE) \
FLAG( 81C, 0, EDX, 0, 1, LWP_SUPPORTED, NA, FALSE) \
FLAG( 81C, 0, EDX, 1, 1, LWP_VAL_SUPPORTED, NA, FALSE) \
FLAG( 81C, 0, EDX, 2, 1, LWP_IRE_SUPPORTED, NA, FALSE) \
FLAG( 81C, 0, EDX, 3, 1, LWP_BRE_SUPPORTED, NA, FALSE) \
FLAG( 81C, 0, EDX, 4, 1, LWP_DME_SUPPORTED, NA, FALSE) \
FLAG( 81C, 0, EDX, 5, 1, LWP_CNH_SUPPORTED, NA, FALSE) \
FLAG( 81C, 0, EDX, 6, 1, LWP_RNH_SUPPORTED, NA, FALSE) \
FLAG( 81C, 0, EDX, 31, 1, LWP_INT_SUPPORTED, NA, FALSE) \
FIELD(81D, 0, EAX, 0, 5, LEAF81D_CACHE_TYPE, NA, FALSE) \
FIELD(81D, 0, EAX, 5, 3, LEAF81D_CACHE_LEVEL, NA, FALSE) \
FLAG( 81D, 0, EAX, 8, 1, LEAF81D_CACHE_SELF_INIT, NA, FALSE) \
FLAG( 81D, 0, EAX, 9, 1, LEAF81D_CACHE_FULLY_ASSOC, NA, FALSE) \
FIELD(81D, 0, EAX, 14, 12, LEAF81D_NUM_SHARING_CACHE, NA, FALSE) \
FIELD(81D, 0, EBX, 0, 12, LEAF81D_CACHE_LINE_SIZE, NA, FALSE) \
FIELD(81D, 0, EBX, 12, 10, LEAF81D_CACHE_PHYS_PARTITIONS, NA, FALSE) \
FIELD(81D, 0, EBX, 22, 10, LEAF81D_CACHE_WAYS, NA, FALSE) \
FIELD(81D, 0, ECX, 0, 32, LEAF81D_CACHE_NUM_SETS, NA, FALSE) \
FLAG( 81D, 0, EDX, 0, 1, LEAF81D_CACHE_WBINVD, NA, FALSE) \
FLAG( 81D, 0, EDX, 1, 1, LEAF81D_CACHE_INCLUSIVE, NA, FALSE) \
FIELD(81E, 0, EAX, 0, 32, EXTENDED_APICID, NA, FALSE) \
FIELD(81E, 0, EBX, 0, 8, COMPUTE_UNIT_ID, NA, FALSE) \
FIELD(81E, 0, EBX, 8, 2, CORES_PER_COMPUTE_UNIT, NA, FALSE) \
FIELD(81E, 0, ECX, 0, 8, NODEID_VAL, NA, FALSE) \
FIELD(81E, 0, ECX, 8, 3, NODES_PER_PKG, NA, FALSE)
#define INTEL_CPUID_FIELD_DATA
#define AMD_CPUID_FIELD_DATA
#define CPUID_FIELD_DATA \
CPUID_FIELD_DATA_LEVEL_0 \
CPUID_FIELD_DATA_LEVEL_1 \
CPUID_FIELD_DATA_LEVEL_4 \
CPUID_FIELD_DATA_LEVEL_5 \
CPUID_FIELD_DATA_LEVEL_6 \
CPUID_FIELD_DATA_LEVEL_7 \
CPUID_FIELD_DATA_LEVEL_A \
CPUID_FIELD_DATA_LEVEL_B \
CPUID_FIELD_DATA_LEVEL_D \
CPUID_FIELD_DATA_LEVEL_400 \
CPUID_FIELD_DATA_LEVEL_410 \
CPUID_FIELD_DATA_LEVEL_80 \
CPUID_FIELD_DATA_LEVEL_81 \
CPUID_FIELD_DATA_LEVEL_8x \
CPUID_FIELD_DATA_LEVEL_81x \
INTEL_CPUID_FIELD_DATA \
AMD_CPUID_FIELD_DATA
/*
* Define all field and flag values as an enum. The result is a full
* set of values taken from the table above in the form:
*
* CPUID_FEATURE_<vendor>_ID<level><reg>_<name> == mask for feature
* CPUID_<vendor>_ID<level><reg>_<name>_MASK == mask for field
* CPUID_<vendor>_ID<level><reg>_<name>_SHIFT == offset of field
*
* e.g. - CPUID_FEATURE_COMMON_ID1EDX_FPU = 0x1
* - CPUID_COMMON_ID88EAX_VIRT_BITS_MASK = 0xff00
* - CPUID_COMMON_ID88EAX_VIRT_BITS_SHIFT = 8
*
* Note: The FEATURE/MASK definitions must use some gymnastics to get
* around a warning when shifting left by 32.
*/
#define VMW_BIT_MASK(shift) (((1 << (shift - 1)) << 1) - 1)
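/*
* For example, VMW_BIT_MASK(8) == 0xff, and VMW_BIT_MASK(32) evaluates to
* 0xffffffff on the compilers this header targets, without ever shifting
* left by 32 (which C leaves undefined).
*/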
#define FIELD(lvl, ecxIn, reg, bitpos, size, name, s, c3) \
CPUID_ID##lvl##reg##_##name##_SHIFT = bitpos, \
CPUID_ID##lvl##reg##_##name##_MASK = VMW_BIT_MASK(size) << bitpos, \
CPUID_FEATURE_ID##lvl##reg##_##name = CPUID_ID##lvl##reg##_##name##_MASK, \
CPUID_INTERNAL_SHIFT_##name = bitpos, \
CPUID_INTERNAL_MASK_##name = VMW_BIT_MASK(size) << bitpos, \
CPUID_INTERNAL_REG_##name = CPUID_REG_##reg, \
CPUID_INTERNAL_EAXIN_##name = CPUID_LEVEL_VAL_##lvl, \
CPUID_INTERNAL_ECXIN_##name = ecxIn,
#define FLAG FIELD
enum {
/* Define data for every CPUID field we have */
CPUID_FIELD_DATA
};
#undef VMW_BIT_MASK
#undef FIELD
#undef FLAG
/* Level D subleaf 1 eax XSAVEOPT */
#define CPUID_COMMON_IDDsub1EAX_XSAVEOPT 1
/*
* Legal CPUID config file mask characters. For a description of the
* cpuid masking system, please see:
*
* http://vmweb.vmware.com/~mts/cgi-bin/view.cgi/Apps/CpuMigrationChecks
*/
#define CPUID_MASK_HIDE_CHR '0'
#define CPUID_MASK_HIDE_STR "0"
#define CPUID_MASK_FORCE_CHR '1'
#define CPUID_MASK_FORCE_STR "1"
#define CPUID_MASK_PASS_CHR '-'
#define CPUID_MASK_PASS_STR "-"
#define CPUID_MASK_TRUE_CHR 'T'
#define CPUID_MASK_TRUE_STR "T"
#define CPUID_MASK_FALSE_CHR 'F'
#define CPUID_MASK_FALSE_STR "F"
#define CPUID_MASK_IGNORE_CHR 'X'
#define CPUID_MASK_IGNORE_STR "X"
#define CPUID_MASK_HOST_CHR 'H'
#define CPUID_MASK_HOST_STR "H"
#define CPUID_MASK_RSVD_CHR 'R'
#define CPUID_MASK_RSVD_STR "R"
#define CPUID_MASK_INSTALL_CHR 'I'
#define CPUID_MASK_INSTALL_STR "I"
/*
* When LM is disabled, we overlay the following masks onto the
* guest's default masks. Any level that is not defined below should
* be treated as all "-"s
*/
#define CPT_ID1ECX_LM_DISABLED "----:----:----:----:--0-:----:----:----"
#define CPT_ID81EDX_LM_DISABLED "--0-:----:----:----:----:----:----:----"
#define CPT_ID81ECX_LM_DISABLED "----:----:----:----:----:----:----:---0"
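/*
* Each mask is 32 characters, most-significant bit first, in groups of
* four. For example, the '0' in CPT_ID1ECX_LM_DISABLED is bit 13
* (CMPXCHG16B), the '0' in CPT_ID81EDX_LM_DISABLED is bit 29 (LM), and
* the '0' in CPT_ID81ECX_LM_DISABLED is bit 0 (LAHF64).
*/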
#define CPT_GET_LM_DISABLED_MASK(lvl, reg) \
((lvl == 1 && reg == CPUID_REG_ECX) ? CPT_ID1ECX_LM_DISABLED : \
(lvl == 0x80000001 && reg == CPUID_REG_ECX) ? CPT_ID81ECX_LM_DISABLED : \
(lvl == 0x80000001 && reg == CPUID_REG_EDX) ? CPT_ID81EDX_LM_DISABLED : \
NULL)
/*
* CPUID_MASK --
* CPUID_SHIFT --
* CPUID_ISSET --
* CPUID_GET --
* CPUID_SET --
* CPUID_CLEAR --
* CPUID_SETTO --
*
* Accessor macros for all CPUID consts/fields/flags. Level and reg are not
* required, but are used to force compile-time asserts which help verify that
* the flag is being used on the right CPUID input and result register.
*
* Note: ASSERT_ON_COMPILE is duplicated rather than factored into its own
* macro, because token concatenation does not work as expected if an input is
* #defined (e.g. APIC) when macros are nested. Also, compound statements
* within parentheses are a GCC extension, so we must use runtime asserts
* with other compilers.
*/
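/*
* Illustrative usage (a sketch; 'regs' stands for a CPUIDRegs filled in
* from a CPUID query with %eax=1):
*
*    if (CPUID_ISSET(1, ECX, SSE3, regs.ecx)) {
*       // SSE3 is available
*    }
*    family = CPUID_GET(1, EAX, FAMILY, regs.eax);
*    CPUID_CLEAR(1, ECX, VMX, &regs.ecx); // hide VMX from a guest
*/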
#if defined(__GNUC__) && !defined(__clang__)
#define CPUID_MASK(eaxIn, reg, flag) \
({ \
ASSERT_ON_COMPILE(eaxIn == CPUID_INTERNAL_EAXIN_##flag && \
CPUID_REG_##reg == (CpuidReg)CPUID_INTERNAL_REG_##flag); \
CPUID_INTERNAL_MASK_##flag; \
})
#define CPUID_SHIFT(eaxIn, reg, flag) \
({ \
ASSERT_ON_COMPILE(eaxIn == CPUID_INTERNAL_EAXIN_##flag && \
CPUID_REG_##reg == (CpuidReg)CPUID_INTERNAL_REG_##flag); \
CPUID_INTERNAL_SHIFT_##flag; \
})
#define CPUID_ISSET(eaxIn, reg, flag, data) \
({ \
ASSERT_ON_COMPILE(eaxIn == CPUID_INTERNAL_EAXIN_##flag && \
CPUID_REG_##reg == (CpuidReg)CPUID_INTERNAL_REG_##flag); \
(((data) & CPUID_INTERNAL_MASK_##flag) != 0); \
})
#define CPUID_GET(eaxIn, reg, field, data) \
({ \
ASSERT_ON_COMPILE(eaxIn == CPUID_INTERNAL_EAXIN_##field && \
CPUID_REG_##reg == (CpuidReg)CPUID_INTERNAL_REG_##field); \
(((uint32)(data) & CPUID_INTERNAL_MASK_##field) >> \
CPUID_INTERNAL_SHIFT_##field); \
})
#else
/*
* CPUIDCheck --
*
* Return val after verifying parameters.
*/
static INLINE uint32
CPUIDCheck(uint32 eaxIn, uint32 eaxInCheck,
CpuidReg reg, CpuidReg regCheck, uint32 val)
{
ASSERT(eaxIn == eaxInCheck && reg == regCheck);
return val;
}
#define CPUID_MASK(eaxIn, reg, flag) \
CPUIDCheck((uint32)eaxIn, CPUID_INTERNAL_EAXIN_##flag, \
CPUID_REG_##reg, (CpuidReg)CPUID_INTERNAL_REG_##flag, \
CPUID_INTERNAL_MASK_##flag)
#define CPUID_SHIFT(eaxIn, reg, flag) \
CPUIDCheck((uint32)eaxIn, CPUID_INTERNAL_EAXIN_##flag, \
CPUID_REG_##reg, (CpuidReg)CPUID_INTERNAL_REG_##flag, \
CPUID_INTERNAL_SHIFT_##flag)
#define CPUID_ISSET(eaxIn, reg, flag, data) \
(CPUIDCheck((uint32)eaxIn, CPUID_INTERNAL_EAXIN_##flag, \
CPUID_REG_##reg, (CpuidReg)CPUID_INTERNAL_REG_##flag, \
CPUID_INTERNAL_MASK_##flag & (data)) != 0)
#define CPUID_GET(eaxIn, reg, field, data) \
CPUIDCheck((uint32)eaxIn, CPUID_INTERNAL_EAXIN_##field, \
CPUID_REG_##reg, (CpuidReg)CPUID_INTERNAL_REG_##field, \
((uint32)(data) & CPUID_INTERNAL_MASK_##field) >> \
CPUID_INTERNAL_SHIFT_##field)
#endif
#define CPUID_SET(eaxIn, reg, flag, dataPtr) \
do { \
ASSERT_ON_COMPILE( \
(uint32)eaxIn == (uint32)CPUID_INTERNAL_EAXIN_##flag && \
CPUID_REG_##reg == (CpuidReg)CPUID_INTERNAL_REG_##flag); \
*(dataPtr) |= CPUID_INTERNAL_MASK_##flag; \
} while (0)
#define CPUID_CLEAR(eaxIn, reg, flag, dataPtr) \
do { \
ASSERT_ON_COMPILE( \
(uint32)eaxIn == (uint32)CPUID_INTERNAL_EAXIN_##flag && \
CPUID_REG_##reg == (CpuidReg)CPUID_INTERNAL_REG_##flag); \
*(dataPtr) &= ~CPUID_INTERNAL_MASK_##flag; \
} while (0)
#define CPUID_SETTO(eaxIn, reg, field, dataPtr, val) \
do { \
uint32 _v = val; \
uint32 *_d = dataPtr; \
ASSERT_ON_COMPILE( \
(uint32)eaxIn == (uint32)CPUID_INTERNAL_EAXIN_##field && \
CPUID_REG_##reg == (CpuidReg)CPUID_INTERNAL_REG_##field); \
*_d = (*_d & ~CPUID_INTERNAL_MASK_##field) | \
(_v << CPUID_INTERNAL_SHIFT_##field); \
ASSERT(_v == (*_d & CPUID_INTERNAL_MASK_##field) >> \
CPUID_INTERNAL_SHIFT_##field); \
} while (0)
#define CPUID_SETTO_SAFE(eaxIn, reg, field, dataPtr, val) \
do { \
uint32 _v = val & \
(CPUID_INTERNAL_MASK_##field >> CPUID_INTERNAL_SHIFT_##field); \
uint32 *_d = dataPtr; \
ASSERT_ON_COMPILE( \
(uint32)eaxIn == (uint32)CPUID_INTERNAL_EAXIN_##field && \
CPUID_REG_##reg == (CpuidReg)CPUID_INTERNAL_REG_##field); \
*_d = (*_d & ~CPUID_INTERNAL_MASK_##field) | \
(_v << CPUID_INTERNAL_SHIFT_##field); \
} while (0)
/*
* Definitions of various fields' values and more complicated
* macros/functions for reading cpuid fields.
*/
#define CPUID_FAMILY_EXTENDED 15
/* Effective Intel CPU Families */
#define CPUID_FAMILY_486 4
#define CPUID_FAMILY_P5 5
#define CPUID_FAMILY_P6 6
#define CPUID_FAMILY_P4 15
/* Effective AMD CPU Families */
#define CPUID_FAMILY_5x86 4
#define CPUID_FAMILY_K5 5
#define CPUID_FAMILY_K6 5
#define CPUID_FAMILY_K7 6
#define CPUID_FAMILY_K8 15
#define CPUID_FAMILY_K8L 16
#define CPUID_FAMILY_K8MOBILE 17
#define CPUID_FAMILY_LLANO 18
#define CPUID_FAMILY_BOBCAT 20
#define CPUID_FAMILY_BULLDOZER 21
/* Effective VIA CPU Families */
#define CPUID_FAMILY_C7 6
/* Intel model information */
#define CPUID_MODEL_PPRO 1
#define CPUID_MODEL_PII_03 3
#define CPUID_MODEL_PII_05 5
#define CPUID_MODEL_CELERON_06 6
#define CPUID_MODEL_PM_09 9
#define CPUID_MODEL_PM_0D 13
#define CPUID_MODEL_PM_0E 14 // Yonah / Sossaman
#define CPUID_MODEL_CORE_0F 15 // Conroe / Merom
#define CPUID_MODEL_CORE_17 0x17 // Penryn
#define CPUID_MODEL_NEHALEM_1A 0x1a // Nehalem / Gainestown
#define CPUID_MODEL_ATOM_1C 0x1c // Silverthorne / Diamondville
#define CPUID_MODEL_CORE_1D 0x1d // Dunnington
#define CPUID_MODEL_NEHALEM_1E 0x1e // Lynnfield
#define CPUID_MODEL_NEHALEM_1F 0x1f // Havendale
#define CPUID_MODEL_NEHALEM_25 0x25 // Westmere / Clarkdale
#define CPUID_MODEL_SANDYBRIDGE_2A 0x2a // Sandybridge (desktop/mobile)
#define CPUID_MODEL_SANDYBRIDGE_2D 0x2d // Sandybridge-EP
#define CPUID_MODEL_NEHALEM_2C 0x2c // Westmere-EP
#define CPUID_MODEL_NEHALEM_2E 0x2e // Nehalem-EX
#define CPUID_MODEL_NEHALEM_2F 0x2f // Westmere-EX
#define CPUID_MODEL_SANDYBRIDGE_3A 0x3a // Ivy Bridge
#define CPUID_MODEL_HASWELL_3C 0x3c // Haswell DT
#define CPUID_MODEL_HASWELL_45 0x45 // Haswell Ultrathin
#define CPUID_MODEL_PIII_07 7
#define CPUID_MODEL_PIII_08 8
#define CPUID_MODEL_PIII_0A 10
/* AMD model information */
#define CPUID_MODEL_BARCELONA_02 0x02 // Barcelona (Opteron & Phenom)
#define CPUID_MODEL_SHANGHAI_04 0x04 // Shanghai RB
#define CPUID_MODEL_SHANGHAI_05 0x05 // Shanghai BL
#define CPUID_MODEL_SHANGHAI_06 0x06 // Shanghai DA
#define CPUID_MODEL_ISTANBUL_MAGNY_08 0x08 // Istanbul (6 core) & Magny-cours (12) HY
#define CPUID_MODEL_ISTANBUL_MAGNY_09 0x09 // HY - G34 package
#define CPUID_MODEL_PHAROAH_HOUND_0A 0x0A // Pharaoh Hound
#define CPUID_MODEL_OPTERON_REVF_41 0x41 // family == CPUID_FAMILY_K8
/* VIA model information */
#define CPUID_MODEL_NANO 15 // Isaiah
/*
*----------------------------------------------------------------------
*
* CPUID_IsVendor{AMD,Intel,VIA} --
*
* Determines if the vendor string in cpuid id0 is from {AMD,Intel,VIA}.
*
* Results:
* True iff vendor string is CPUID_{AMD,INTEL,VIA}_VENDOR_STRING
*
* Side effects:
* None.
*
*----------------------------------------------------------------------
*/
static INLINE Bool
CPUID_IsRawVendor(CPUIDRegs *id0, const char* vendor)
{
// hard to get strcmp() in some environments, so do it in the raw
return (id0->ebx == *(const uint32 *) (vendor + 0) &&
id0->ecx == *(const uint32 *) (vendor + 4) &&
id0->edx == *(const uint32 *) (vendor + 8));
}
static INLINE Bool
CPUID_IsVendorAMD(CPUIDRegs *id0)
{
return CPUID_IsRawVendor(id0, CPUID_AMD_VENDOR_STRING);
}
static INLINE Bool
CPUID_IsVendorIntel(CPUIDRegs *id0)
{
return CPUID_IsRawVendor(id0, CPUID_INTEL_VENDOR_STRING);
}
static INLINE Bool
CPUID_IsVendorVIA(CPUIDRegs *id0)
{
return CPUID_IsRawVendor(id0, CPUID_VIA_VENDOR_STRING);
}
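/*
 * Illustrative usage (a sketch, not part of this header): assuming "id0"
 * has been filled from CPUID leaf 0 by some platform-specific helper:
 *
 *    if (CPUID_IsVendorIntel(&id0)) {
 *       // Intel-specific handling...
 *    }
 */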
static INLINE uint32
CPUID_EFFECTIVE_FAMILY(uint32 v) /* %eax from CPUID with %eax=1. */
{
uint32 f = CPUID_GET(1, EAX, FAMILY, v);
return f != CPUID_FAMILY_EXTENDED ? f : f +
CPUID_GET(1, EAX, EXTENDED_FAMILY, v);
}
/* Normally only used when FAMILY==CPUID_FAMILY_EXTENDED, but Intel is
* now using the extended model field for FAMILY==CPUID_FAMILY_P6 to
* refer to the newer Core2 CPUs
*/
static INLINE uint32
CPUID_EFFECTIVE_MODEL(uint32 v) /* %eax from CPUID with %eax=1. */
{
uint32 m = CPUID_GET(1, EAX, MODEL, v);
uint32 em = CPUID_GET(1, EAX, EXTENDED_MODEL, v);
return m + (em << 4);
}
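/*
 * Worked example, using a hypothetical version value: for %eax =
 * 0x00100F42 from leaf 1, FAMILY = 0xF and EXTENDED_FAMILY = 0x01, so
 * CPUID_EFFECTIVE_FAMILY returns 15 + 1 = 16 (CPUID_FAMILY_K8L);
 * MODEL = 0x4 and EXTENDED_MODEL = 0x0, so CPUID_EFFECTIVE_MODEL
 * returns 4 + (0 << 4) = 4.
 */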
/*
* Notice that CPUID families for Intel and AMD overlap. The following macros
* should only be used AFTER the manufacturer has been established (through
* the use of CPUID standard function 0).
*/
static INLINE Bool
CPUID_FAMILY_IS_486(uint32 eax)
{
return CPUID_EFFECTIVE_FAMILY(eax) == CPUID_FAMILY_486;
}
static INLINE Bool
CPUID_FAMILY_IS_P5(uint32 eax)
{
return CPUID_EFFECTIVE_FAMILY(eax) == CPUID_FAMILY_P5;
}
static INLINE Bool
CPUID_FAMILY_IS_P6(uint32 eax)
{
return CPUID_EFFECTIVE_FAMILY(eax) == CPUID_FAMILY_P6;
}
static INLINE Bool
CPUID_FAMILY_IS_PENTIUM4(uint32 eax)
{
return CPUID_EFFECTIVE_FAMILY(eax) == CPUID_FAMILY_P4;
}
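/*
 * Example of the vendor-first pattern that the overlap described above
 * requires (a sketch; "id0" and "id1" are assumed to hold leaves 0 and 1):
 *
 *    if (CPUID_IsVendorAMD(&id0) && CPUID_FAMILY_IS_K8(id1.eax)) {
 *       // Effective family 15 means K8 here, not Pentium 4.
 *    }
 */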
/*
* Intel Pentium M processors are Yonah/Sossaman or an older P-M
*/
static INLINE Bool
CPUID_UARCH_IS_PENTIUM_M(uint32 v) // IN: %eax from CPUID with %eax=1.
{
/* Assumes the CPU manufacturer is Intel. */
return CPUID_FAMILY_IS_P6(v) &&
(CPUID_EFFECTIVE_MODEL(v) == CPUID_MODEL_PM_09 ||
CPUID_EFFECTIVE_MODEL(v) == CPUID_MODEL_PM_0D ||
CPUID_EFFECTIVE_MODEL(v) == CPUID_MODEL_PM_0E);
}
/*
* Intel Core processors are Merom, Conroe, Woodcrest, Clovertown,
* Penryn, Dunnington, Kentsfield, Yorkfield, Harpertown, etc.
*/
static INLINE Bool
CPUID_UARCH_IS_CORE(uint32 v) // IN: %eax from CPUID with %eax=1.
{
uint32 model = CPUID_EFFECTIVE_MODEL(v);
/* Assumes the CPU manufacturer is Intel. */
return CPUID_FAMILY_IS_P6(v) &&
model >= CPUID_MODEL_CORE_0F &&
(model < CPUID_MODEL_NEHALEM_1A ||
model == CPUID_MODEL_CORE_1D);
}
/*
* Intel Nehalem processors are: Nehalem, Gainestown, Lynnfield, Clarkdale.
*/
static INLINE Bool
CPUID_UARCH_IS_NEHALEM(uint32 v) // IN: %eax from CPUID with %eax=1.
{
/* Assumes the CPU manufacturer is Intel. */
uint32 effectiveModel = CPUID_EFFECTIVE_MODEL(v);
return CPUID_FAMILY_IS_P6(v) &&
(effectiveModel == CPUID_MODEL_NEHALEM_1A ||
effectiveModel == CPUID_MODEL_NEHALEM_1E ||
effectiveModel == CPUID_MODEL_NEHALEM_1F ||
effectiveModel == CPUID_MODEL_NEHALEM_25 ||
effectiveModel == CPUID_MODEL_NEHALEM_2C ||
effectiveModel == CPUID_MODEL_NEHALEM_2E ||
effectiveModel == CPUID_MODEL_NEHALEM_2F);
}
static INLINE Bool
CPUID_UARCH_IS_SANDYBRIDGE(uint32 v) // IN: %eax from CPUID with %eax=1.
{
/* Assumes the CPU manufacturer is Intel. */
uint32 effectiveModel = CPUID_EFFECTIVE_MODEL(v);
return CPUID_FAMILY_IS_P6(v) &&
(effectiveModel == CPUID_MODEL_SANDYBRIDGE_2A ||
effectiveModel == CPUID_MODEL_SANDYBRIDGE_2D ||
effectiveModel == CPUID_MODEL_SANDYBRIDGE_3A);
}
static INLINE Bool
CPUID_MODEL_IS_CENTERTON(uint32 v) // IN: %eax from CPUID with %eax=1.
{
/* Assumes the CPU manufacturer is Intel. */
return CPUID_FAMILY_IS_P6(v) &&
CPUID_EFFECTIVE_MODEL(v) == CPUID_MODEL_ATOM_1C;
}
static INLINE Bool
CPUID_MODEL_IS_WESTMERE(uint32 v) // IN: %eax from CPUID with %eax=1.
{
/* Assumes the CPU manufacturer is Intel. */
uint32 effectiveModel = CPUID_EFFECTIVE_MODEL(v);
return CPUID_FAMILY_IS_P6(v) &&
(effectiveModel == CPUID_MODEL_NEHALEM_25 || // Clarkdale
effectiveModel == CPUID_MODEL_NEHALEM_2C || // Westmere-EP
effectiveModel == CPUID_MODEL_NEHALEM_2F); // Westmere-EX
}
static INLINE Bool
CPUID_MODEL_IS_SANDYBRIDGE(uint32 v) // IN: %eax from CPUID with %eax=1.
{
/* Assumes the CPU manufacturer is Intel. */
uint32 effectiveModel = CPUID_EFFECTIVE_MODEL(v);
return CPUID_FAMILY_IS_P6(v) &&
(effectiveModel == CPUID_MODEL_SANDYBRIDGE_2A ||
effectiveModel == CPUID_MODEL_SANDYBRIDGE_2D);
}
static INLINE Bool
CPUID_MODEL_IS_IVYBRIDGE(uint32 v) // IN: %eax from CPUID with %eax=1.
{
/* Assumes the CPU manufacturer is Intel. */
uint32 effectiveModel = CPUID_EFFECTIVE_MODEL(v);
return CPUID_FAMILY_IS_P6(v) && (
effectiveModel == CPUID_MODEL_SANDYBRIDGE_3A);
}
static INLINE Bool
CPUID_FAMILY_IS_K7(uint32 eax)
{
return CPUID_EFFECTIVE_FAMILY(eax) == CPUID_FAMILY_K7;
}
static INLINE Bool
CPUID_FAMILY_IS_K8(uint32 eax)
{
return CPUID_EFFECTIVE_FAMILY(eax) == CPUID_FAMILY_K8;
}
static INLINE Bool
CPUID_FAMILY_IS_K8EXT(uint32 eax)
{
/*
* We check for this pattern often enough that it's
* worth a separate function, for syntactic sugar.
*/
return CPUID_FAMILY_IS_K8(eax) &&
CPUID_GET(1, EAX, EXTENDED_MODEL, eax) != 0;
}
static INLINE Bool
CPUID_FAMILY_IS_K8L(uint32 eax)
{
return CPUID_EFFECTIVE_FAMILY(eax) == CPUID_FAMILY_K8L ||
CPUID_EFFECTIVE_FAMILY(eax) == CPUID_FAMILY_LLANO;
}
static INLINE Bool
CPUID_FAMILY_IS_LLANO(uint32 eax)
{
return CPUID_EFFECTIVE_FAMILY(eax) == CPUID_FAMILY_LLANO;
}
static INLINE Bool
CPUID_FAMILY_IS_K8MOBILE(uint32 eax)
{
/* Essentially a K8 (not K8L) part, but with mobile features. */
return CPUID_EFFECTIVE_FAMILY(eax) == CPUID_FAMILY_K8MOBILE;
}
static INLINE Bool
CPUID_FAMILY_IS_K8STAR(uint32 eax)
{
/*
* Read function name as "K8*", as in wildcard.
* Matches K8 or K8L or K8MOBILE
*/
return CPUID_FAMILY_IS_K8(eax) || CPUID_FAMILY_IS_K8L(eax) ||
CPUID_FAMILY_IS_K8MOBILE(eax);
}
static INLINE Bool
CPUID_FAMILY_IS_BOBCAT(uint32 eax)
{
return CPUID_EFFECTIVE_FAMILY(eax) == CPUID_FAMILY_BOBCAT;
}
static INLINE Bool
CPUID_FAMILY_IS_BULLDOZER(uint32 eax)
{
return CPUID_EFFECTIVE_FAMILY(eax) == CPUID_FAMILY_BULLDOZER;
}
/*
* AMD Barcelona (of either Opteron or Phenom kind).
*/
static INLINE Bool
CPUID_MODEL_IS_BARCELONA(uint32 v) // IN: %eax from CPUID with %eax=1.
{
/* Assumes the CPU manufacturer is AMD. */
return CPUID_EFFECTIVE_FAMILY(v) == CPUID_FAMILY_K8L &&
CPUID_EFFECTIVE_MODEL(v) == CPUID_MODEL_BARCELONA_02;
}
static INLINE Bool
CPUID_MODEL_IS_SHANGHAI(uint32 v) // IN: %eax from CPUID with %eax=1.
{
/* Assumes the CPU manufacturer is AMD. */
return CPUID_EFFECTIVE_FAMILY(v) == CPUID_FAMILY_K8L &&
(CPUID_MODEL_SHANGHAI_04 <= CPUID_EFFECTIVE_MODEL(v) &&
CPUID_EFFECTIVE_MODEL(v) <= CPUID_MODEL_SHANGHAI_06);
}
static INLINE Bool
CPUID_MODEL_IS_ISTANBUL_MAGNY(uint32 v) // IN: %eax from CPUID with %eax=1.
{
/* Assumes the CPU manufacturer is AMD. */
return CPUID_EFFECTIVE_FAMILY(v) == CPUID_FAMILY_K8L &&
(CPUID_MODEL_ISTANBUL_MAGNY_08 <= CPUID_EFFECTIVE_MODEL(v) &&
CPUID_EFFECTIVE_MODEL(v) <= CPUID_MODEL_ISTANBUL_MAGNY_09);
}
static INLINE Bool
CPUID_MODEL_IS_PHAROAH_HOUND(uint32 v) // IN: %eax from CPUID with %eax=1.
{
/* Assumes the CPU manufacturer is AMD. */
return CPUID_EFFECTIVE_FAMILY(v) == CPUID_FAMILY_K8L &&
CPUID_EFFECTIVE_MODEL(v) == CPUID_MODEL_PHAROAH_HOUND_0A;
}
static INLINE Bool
CPUID_MODEL_IS_BULLDOZER(uint32 eax)
{
return CPUID_EFFECTIVE_FAMILY(eax) == CPUID_FAMILY_BULLDOZER;
}
#define CPUID_TYPE_PRIMARY 0
#define CPUID_TYPE_OVERDRIVE 1
#define CPUID_TYPE_SECONDARY 2
#define CPUID_INTEL_ID4EAX_LEAF4_CACHE_TYPE_NULL 0
#define CPUID_INTEL_ID4EAX_LEAF4_CACHE_TYPE_DATA 1
#define CPUID_INTEL_ID4EAX_LEAF4_CACHE_TYPE_INST 2
#define CPUID_INTEL_ID4EAX_LEAF4_CACHE_TYPE_UNIF 3
#define CPUID_LEAF4_CACHE_TYPE_NULL 0
#define CPUID_LEAF4_CACHE_TYPE_DATA 1
#define CPUID_LEAF4_CACHE_TYPE_INST 2
#define CPUID_LEAF4_CACHE_TYPE_UNIF 3
#define CPUID_LEAF4_CACHE_INDEXING_DIRECT 0
#define CPUID_LEAF4_CACHE_INDEXING_COMPLEX 1
#define CPUID_INTEL_ID4EAX_LEAF4_CACHE_SELF_INIT 0x00000100
#define CPUID_INTEL_ID4EAX_LEAF4_CACHE_FULLY_ASSOC 0x00000200
#define CPUID_LEAF4_CACHE_SELF_INIT 0x00000100
#define CPUID_LEAF4_CACHE_FULLY_ASSOC 0x00000200
#define CPUID_INTEL_IDBECX_LEVEL_TYPE_INVALID 0
#define CPUID_INTEL_IDBECX_LEVEL_TYPE_SMT 1
#define CPUID_INTEL_IDBECX_LEVEL_TYPE_CORE 2
#define CPUID_TOPOLOGY_LEVEL_TYPE_INVALID 0
#define CPUID_TOPOLOGY_LEVEL_TYPE_SMT 1
#define CPUID_TOPOLOGY_LEVEL_TYPE_CORE 2
/*
* For certain AMD processors, an lfence instruction is necessary at various
* places to ensure ordering.
*/
static INLINE Bool
CPUID_VendorRequiresFence(CpuidVendor vendor)
{
return vendor == CPUID_VENDOR_AMD;
}
static INLINE Bool
CPUID_VersionRequiresFence(uint32 version)
{
return CPUID_EFFECTIVE_FAMILY(version) == CPUID_FAMILY_K8 &&
CPUID_EFFECTIVE_MODEL(version) < 0x40;
}
static INLINE Bool
CPUID_ID0RequiresFence(CPUIDRegs *id0)
{
if (id0->eax == 0) {
return FALSE;
}
return CPUID_IsVendorAMD(id0);
}
static INLINE Bool
CPUID_ID1RequiresFence(CPUIDRegs *id1)
{
return CPUID_VersionRequiresFence(id1->eax);
}
static INLINE Bool
CPUID_RequiresFence(CpuidVendor vendor, // IN
uint32 version) // IN: %eax from CPUID with %eax=1.
{
return CPUID_VendorRequiresFence(vendor) &&
CPUID_VersionRequiresFence(version);
}
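/*
 * Hedged sketch of a typical caller, once vendor and version have been
 * read from leaves 0 and 1 (the inline asm is illustrative only):
 *
 *    if (CPUID_RequiresFence(vendor, id1.eax)) {
 *       asm volatile("lfence" ::: "memory");
 *    }
 */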
/*
* The following low-level functions compute the number of
* cores per cpu. They should be used cautiously because
* they do not necessarily work on all types of CPUs.
* High-level functions that are correct for all CPUs are
* available elsewhere: see lib/cpuidInfo/cpuidInfo.c.
*/
static INLINE uint32
CPUID_IntelCoresPerPackage(uint32 v) /* %eax from CPUID with %eax=4 and %ecx=0. */
{
// Note: This is not guaranteed to work on older Intel CPUs.
return 1 + CPUID_GET(4, EAX, LEAF4_CORE_COUNT, v);
}
static INLINE uint32
CPUID_AMDCoresPerPackage(uint32 v) /* %ecx from CPUID with %eax=0x80000008. */
{
// Note: This is not guaranteed to work on older AMD CPUs.
return 1 + CPUID_GET(0x80000008, ECX, LEAF88_CORE_COUNT, v);
}
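/*
 * Illustrative only: "eax4" and "ecx88" are assumed to hold %eax from
 * CPUID(4, 0) and %ecx from CPUID(0x80000008), respectively:
 *
 *    uint32 cores = isIntel ? CPUID_IntelCoresPerPackage(eax4)
 *                           : CPUID_AMDCoresPerPackage(ecx88);
 */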
/*
* Hypervisor CPUID space is 0x400000XX.
*/
static INLINE Bool
CPUID_IsHypervisorLevel(uint32 level)
{
return (level & 0xffffff00) == 0x40000000;
}
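/*
 * Example (a sketch): a caller filtering incoming leaf numbers before
 * giving them special handling:
 *
 *    if (CPUID_IsHypervisorLevel(eaxIn)) {
 *       // Handle 0x400000XX leaves specially...
 *    }
 */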
/*
*----------------------------------------------------------------------
*
* CPUID_LevelUsesEcx --
*
* Returns TRUE for leaves that support input ECX != 0 (subleaves).
*
*----------------------------------------------------------------------
*/
static INLINE Bool
CPUID_LevelUsesEcx(uint32 level)
{
return level == 4 || level == 7 || level == 0xb || level == 0xd ||
level == 0x8000001d;
}
/*
*----------------------------------------------------------------------
*
* CPUID_IsValid*Subleaf --
*
* Functions to determine the last subleaf for the level specified
*
*----------------------------------------------------------------------
*/
static INLINE Bool
CPUID_IsValidBSubleaf(uint32 ebx) // IN: %ebx = cpuid.b.sublevel.ebx
{
return ebx != 0;
}
static INLINE Bool
CPUID_IsValid4Subleaf(uint32 eax) // IN: %eax = cpuid.4.sublevel.eax
{
return eax != 0;
}
static INLINE Bool
CPUID_IsValid7Subleaf(uint32 eax, uint32 subleaf) // IN: %eax = cpuid.7.0.eax
{
/*
* cpuid.7.0.eax is the max ecx (subleaf) index
*/
return subleaf <= eax;
}
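/*
 * Hedged sketch of subleaf enumeration for leaf 4 ("ReadCpuid" is a
 * hypothetical helper returning the registers for a leaf/subleaf pair):
 *
 *    uint32 subleaf;
 *    for (subleaf = 0; ; subleaf++) {
 *       CPUIDRegs r = ReadCpuid(4, subleaf);
 *       if (!CPUID_IsValid4Subleaf(r.eax)) {
 *          break;
 *       }
 *       // Use r...
 *    }
 */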
/*
*----------------------------------------------------------------------
*
* CPUID_IsValidDSubleaf --
*
* It is the caller's responsibility to determine if the processor
* supports XSAVE and therefore has D sub-leaves.
*
*----------------------------------------------------------------------
*/
static INLINE Bool
CPUID_IsValidDSubleaf(uint32 subleaf) // IN: subleaf to check
{
return subleaf <= 63;
}
/*
*----------------------------------------------------------------------
*
* CPUID_SupportsMsrPlatformInfo --
*
* Uses vendor and cpuid.1.0.eax to determine if the processor
* supports MSR_PLATFORM_INFO.
*
*----------------------------------------------------------------------
*/
static INLINE Bool
CPUID_SupportsMsrPlatformInfo(CpuidVendor vendor, uint32 version)
{
return vendor == CPUID_VENDOR_INTEL &&
(CPUID_UARCH_IS_NEHALEM(version) ||
CPUID_UARCH_IS_SANDYBRIDGE(version));
}
#endif
vmmemctl-only/shared/vm_basic_types.h 0000444 0000000 0000000 00000063115 12275350062 017013 0 ustar root root /*********************************************************
* Copyright (C) 1998-2009 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
*
* vm_basic_types.h --
*
* basic data types.
*/
#ifndef _VM_BASIC_TYPES_H_
#define _VM_BASIC_TYPES_H_
#define INCLUDE_ALLOW_USERLEVEL
#define INCLUDE_ALLOW_MODULE
#define INCLUDE_ALLOW_VMMON
#define INCLUDE_ALLOW_VMKERNEL
#define INCLUDE_ALLOW_VMKDRIVERS
#define INCLUDE_ALLOW_VMK_MODULE
#define INCLUDE_ALLOW_DISTRIBUTE
#define INCLUDE_ALLOW_VMCORE
#define INCLUDE_ALLOW_VMIROM
#include "includeCheck.h"
/* Under STRICT ANSI (e.g. the Xserver build), X defines Bool differently. */
#if !defined(_XTYPEDEF_BOOL) && \
(!defined(__STRICT_ANSI__) || defined(__FreeBSD__) || defined(__MINGW32__))
#define _XTYPEDEF_BOOL
typedef char Bool;
#endif
#ifndef FALSE
#define FALSE 0
#endif
#ifndef TRUE
#define TRUE 1
#endif
#define IsBool(x) (((x) & ~1) == 0)
#define IsBool2(x, y) ((((x) | (y)) & ~1) == 0)
/*
* Macros __i386__ and __ia64 are intrinsically defined by GCC
*/
#if defined _MSC_VER && defined _M_X64
# define __x86_64__
#elif defined _MSC_VER && defined _M_IX86
# define __i386__
#endif
#ifdef __i386__
#define VM_I386
#endif
#ifdef __x86_64__
#define VM_X86_64
#define VM_I386
#define vm_x86_64 (1)
#else
#define vm_x86_64 (0)
#endif
#ifdef _MSC_VER
#pragma warning (3 :4505) // unreferenced local function
#pragma warning (disable :4018) // signed/unsigned mismatch
#pragma warning (disable :4761) // integral size mismatch in argument; conversion supplied
#pragma warning (disable :4305) // truncation from 'const int' to 'short'
#pragma warning (disable :4244) // conversion from 'unsigned short' to 'unsigned char'
#pragma warning (disable :4267) // truncation of 'size_t'
#pragma warning (disable :4146) // unary minus operator applied to unsigned type, result still unsigned
#pragma warning (disable :4142) // benign redefinition of type
#endif
#if defined(__APPLE__) || defined(HAVE_STDINT_H)
/*
* TODO: This is a C99 standard header. We should be able to test for
* #if __STDC_VERSION__ >= 199901L, but that breaks the Netware build
* (which doesn't have stdint.h).
*/
#include <stdint.h>
typedef uint64_t uint64;
typedef int64_t int64;
typedef uint32_t uint32;
typedef int32_t int32;
typedef uint16_t uint16;
typedef int16_t int16;
typedef uint8_t uint8;
typedef int8_t int8;
/*
* Note: C does not specify whether char is signed or unsigned, and
* both gcc and msvc implement processor-specific signedness. As a
* result, there are three distinct types:
* typeof(char) != typeof(signed char) != typeof(unsigned char)
*
* Be careful here, because gcc (4.0.1 and others) likes to warn about
* conversions between signed char * and char *.
*/
#else /* !HAVE_STDINT_H */
#ifdef _MSC_VER
typedef unsigned __int64 uint64;
typedef signed __int64 int64;
#elif __GNUC__
/* The Xserver source compiles with -ansi -pedantic */
# if !defined(__STRICT_ANSI__) || defined(__FreeBSD__)
# if defined(VM_X86_64)
typedef unsigned long uint64;
typedef long int64;
# else
typedef unsigned long long uint64;
typedef long long int64;
# endif
# endif
#else
# error - Need compiler define for int64/uint64
#endif /* _MSC_VER */
typedef unsigned int uint32;
typedef unsigned short uint16;
typedef unsigned char uint8;
typedef int int32;
typedef short int16;
typedef signed char int8;
#endif /* HAVE_STDINT_H */
/*
* FreeBSD (for the tools build) unconditionally defines these in
* sys/inttypes.h so don't redefine them if this file has already
* been included. [greg]
*
* This applies to Solaris as well.
*/
/*
* Before trying to do the includes based on OS defines, see if we can use
* feature-based defines to get as much functionality as possible
*/
#ifdef HAVE_INTTYPES_H
#include <inttypes.h>
#endif
#ifdef HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#ifdef HAVE_SYS_INTTYPES_H
#include <sys/inttypes.h>
#endif
#ifdef HAVE_STDLIB_H
#include <stdlib.h>
#endif
#ifdef __FreeBSD__
#include <sys/param.h> /* For __FreeBSD_version */
#endif
#if !defined(USING_AUTOCONF)
# if defined(__FreeBSD__) || defined(sun)
# ifdef KLD_MODULE
# include <sys/types.h>
# else
# if __FreeBSD_version >= 500043
# if !defined(VMKERNEL)
# include <inttypes.h>
# endif
# include <sys/types.h>
# else
# include <sys/inttypes.h>
# endif
# endif
# elif defined __APPLE__
# if KERNEL
# include <sys/unistd.h>
# include <sys/types.h> /* mostly for size_t */
# include <stdint.h>
# else
# include <unistd.h>
# include <inttypes.h>
# include <stdlib.h>
# include <stdint.h>
# endif
# elif defined __ANDROID__
# include <stdint.h>
# else
# if !defined(__intptr_t_defined) && !defined(intptr_t)
# ifdef VM_I386
# define __intptr_t_defined
# ifdef VM_X86_64
typedef int64 intptr_t;
# else
typedef int32 intptr_t;
# endif
# elif defined(__arm__)
typedef int32 intptr_t;
# endif
# endif
# ifndef _STDINT_H
# ifdef VM_I386
# ifdef VM_X86_64
typedef uint64 uintptr_t;
# else
typedef uint32 uintptr_t;
# endif
# elif defined(__arm__)
typedef uint32 uintptr_t;
# endif
# endif
# endif
#endif
/*
* Time
* XXX These should be cleaned up. -- edward
*/
typedef int64 VmTimeType; /* Time in microseconds */
typedef int64 VmTimeRealClock; /* Real clock kept in microseconds */
typedef int64 VmTimeVirtualClock; /* Virtual Clock kept in CPU cycles */
/*
* Printf format specifiers for size_t and 64-bit number.
* Use them like this:
* printf("%"FMT64"d\n", big);
*
* FMTH is for handles/fds.
*/
#ifdef _MSC_VER
#define FMT64 "I64"
#ifdef VM_X86_64
#define FMTSZ "I64"
#define FMTPD "I64"
#define FMTH "I64"
#else
#define FMTSZ "I"
#define FMTPD "I"
#define FMTH "I"
#endif
#elif defined __APPLE__
/* Mac OS hosts use the same formatters for 32- and 64-bit. */
#define FMT64 "ll"
#if KERNEL
#define FMTSZ "l"
#else
#define FMTSZ "z"
#endif
#define FMTPD "l"
#define FMTH ""
#elif __GNUC__
#define FMTH ""
#if defined(N_PLAT_NLM) || defined(sun) || \
(defined(__FreeBSD__) && (__FreeBSD__ + 0) && ((__FreeBSD__ + 0) < 5))
/*
* Why (__FreeBSD__ + 0)? See bug 141008.
* Yes, we really need to test both (__FreeBSD__ + 0) and
* ((__FreeBSD__ + 0) < 5). No, we can't remove "+ 0" from
* ((__FreeBSD__ + 0) < 5).
*/
#ifdef VM_X86_64
#define FMTSZ "l"
#define FMTPD "l"
#else
#define FMTSZ ""
#define FMTPD ""
#endif
#elif defined(__linux__) \
|| (defined(_POSIX_C_SOURCE) && _POSIX_C_SOURCE >= 200112L) \
|| (defined(_POSIX_VERSION) && _POSIX_VERSION >= 200112L) \
|| (defined(_POSIX2_VERSION) && _POSIX2_VERSION >= 200112L)
/* BSD, Linux */
#define FMTSZ "z"
#if defined(VM_X86_64)
#define FMTPD "l"
#else
#define FMTPD ""
#endif
#else
/* Systems with a pre-C99 libc */
#define FMTSZ "Z"
#ifdef VM_X86_64
#define FMTPD "l"
#else
#define FMTPD ""
#endif
#endif
#ifdef VM_X86_64
#define FMT64 "l"
#elif defined(sun) || defined(__FreeBSD__)
#define FMT64 "ll"
#else
#define FMT64 "L"
#endif
#else
#error - Need compiler define for FMT64 and FMTSZ
#endif
/*
* Suffix for 64-bit constants. Use it like this:
* CONST64(0x7fffffffffffffff) for signed or
* CONST64U(0x7fffffffffffffff) for unsigned.
*
* 2004.08.30(thutt):
* The vmcore/asm64/gen* programs are compiled as 32-bit
* applications, but must handle 64 bit constants. If the
* 64-bit-constant defining macros are already defined, the
* definition will not be overwritten.
*/
#if !defined(CONST64) || !defined(CONST64U)
#ifdef _MSC_VER
#define CONST64(c) c##I64
#define CONST64U(c) c##uI64
#elif defined __APPLE__
#define CONST64(c) c##LL
#define CONST64U(c) c##uLL
#elif __GNUC__
#ifdef VM_X86_64
#define CONST64(c) c##L
#define CONST64U(c) c##uL
#else
#define CONST64(c) c##LL
#define CONST64U(c) c##uLL
#endif
#else
#error - Need compiler define for CONST64
#endif
#endif
/*
* Use CONST3264/CONST3264U if you want a constant to be
* treated as a 32-bit number on 32-bit compiles and
* a 64-bit number on 64-bit compiles. Useful in the case
* of shifts, like (CONST3264U(1) << x), where x could be
* more than 31 on a 64-bit compile.
*/
#ifdef VM_X86_64
#define CONST3264(a) CONST64(a)
#define CONST3264U(a) CONST64U(a)
#else
#define CONST3264(a) (a)
#define CONST3264U(a) (a)
#endif
#define MIN_INT8 ((int8)0x80)
#define MAX_INT8 ((int8)0x7f)
#define MIN_UINT8 ((uint8)0)
#define MAX_UINT8 ((uint8)0xff)
#define MIN_INT16 ((int16)0x8000)
#define MAX_INT16 ((int16)0x7fff)
#define MIN_UINT16 ((uint16)0)
#define MAX_UINT16 ((uint16)0xffff)
#define MIN_INT32 ((int32)0x80000000)
#define MAX_INT32 ((int32)0x7fffffff)
#define MIN_UINT32 ((uint32)0)
#define MAX_UINT32 ((uint32)0xffffffff)
#define MIN_INT64 (CONST64(0x8000000000000000))
#define MAX_INT64 (CONST64(0x7fffffffffffffff))
#define MIN_UINT64 (CONST64U(0))
#define MAX_UINT64 (CONST64U(0xffffffffffffffff))
typedef uint8 *TCA; /* Pointer into TC (usually). */
/*
* Type big enough to hold an integer between 0..100
*/
typedef uint8 Percent;
#define AsPercent(v) ((Percent)(v))
typedef uintptr_t VA;
typedef uintptr_t VPN;
typedef uint64 PA;
typedef uint32 PPN;
typedef uint64 PhysMemOff;
typedef uint64 PhysMemSize;
/* The Xserver source compiles with -ansi -pedantic */
#ifndef __STRICT_ANSI__
typedef uint64 BA;
#endif
typedef uint32 BPN;
typedef uint32 PageNum;
typedef unsigned MemHandle;
typedef unsigned int IoHandle;
typedef int32 World_ID;
/* !! do not alter the definition of INVALID_WORLD_ID without ensuring
* that the values defined in both bora/public/vm_basic_types.h and
* lib/vprobe/vm_basic_types.h are the same. Additionally,
* VMK_INVALID_WORLD_ID in vmkapi_world.h must be defined with
* the same value.
*/
#define INVALID_WORLD_ID ((World_ID)0)
typedef World_ID User_CartelID;
#define INVALID_CARTEL_ID INVALID_WORLD_ID
typedef User_CartelID User_SessionID;
#define INVALID_SESSION_ID INVALID_CARTEL_ID
typedef User_CartelID User_CartelGroupID;
#define INVALID_CARTELGROUP_ID INVALID_CARTEL_ID
typedef uint32 Worldlet_ID;
#define INVALID_WORLDLET_ID ((Worldlet_ID)-1)
/* The Xserver source compiles with -ansi -pedantic */
#ifndef __STRICT_ANSI__
typedef uint64 MA;
typedef uint32 MPN;
#endif
/*
* This type should be used for variables that contain sector
* position/quantity.
*/
typedef uint64 SectorType;
/*
* Linear address
*/
typedef uintptr_t LA;
typedef uintptr_t LPN;
#define LA_2_LPN(_la) ((_la) >> PAGE_SHIFT)
#define LPN_2_LA(_lpn) ((_lpn) << PAGE_SHIFT)
#define LAST_LPN ((((LA) 1) << (8 * sizeof(LA) - PAGE_SHIFT)) - 1)
#define LAST_LPN32 ((((LA32)1) << (8 * sizeof(LA32) - PAGE_SHIFT)) - 1)
#define LAST_LPN64 ((((LA64)1) << (8 * sizeof(LA64) - PAGE_SHIFT)) - 1)
/* Valid bits in a LPN. */
#define LPN_MASK LAST_LPN
#define LPN_MASK32 LAST_LPN32
#define LPN_MASK64 LAST_LPN64
/*
* On 64-bit platforms, address and page number types default
* to 64 bits. When we need to represent a 32-bit address, we use
* the types defined below.
*
* On 32-bit platforms, the following types are the same as the
* default types.
*/
typedef uint32 VA32;
typedef uint32 VPN32;
typedef uint32 LA32;
typedef uint32 LPN32;
typedef uint32 PA32;
typedef uint32 PPN32;
typedef uint32 MA32;
typedef uint32 MPN32;
/*
* On 64-bit platforms, the following types are the same as the
* default types.
*/
typedef uint64 VA64;
typedef uint64 VPN64;
typedef uint64 LA64;
typedef uint64 LPN64;
typedef uint64 PA64;
typedef uint64 PPN64;
typedef uint64 MA64;
typedef uint64 MPN64;
/*
* VA typedefs for user world apps.
*/
typedef VA32 UserVA32;
typedef VA64 UserVA64;
typedef UserVA64 UserVAConst; /* Userspace ptr to data that we may only read. */
typedef UserVA32 UserVA32Const; /* Userspace ptr to data that we may only read. */
typedef UserVA64 UserVA64Const; /* Used by 64-bit syscalls until conversion is finished. */
#ifdef VMKERNEL
typedef UserVA64 UserVA;
#else
typedef void * UserVA;
#endif
/*
* Maximal possible PPN value (including error values) that PhysMem can
* handle. Must be at least as large as MAX_PPN, which is the maximum PPN
* for any region other than buserror.
*/
#define PHYSMEM_MAX_PPN ((PPN)0xffffffff)
#define MAX_PPN ((PPN)0x3fffffff) /* Maximal observable PPN value. */
#define INVALID_PPN ((PPN)0xffffffff)
#define APIC_INVALID_PPN ((PPN)0xfffffffe)
#define INVALID_BPN ((BPN)0x3fffffff)
#define RESERVED_MPN ((MPN) 0)
#define INVALID_MPN ((MPN)-1)
#define MEMREF_MPN ((MPN)-2)
#define RELEASED_MPN ((MPN)-3)
/* 0xfffffffc to account for special MPNs defined above. */
#define MAX_MPN ((MPN)0xfffffffc) /* 44 bits of address space. */
#define INVALID_LPN ((LPN)-1)
#define INVALID_VPN ((VPN)-1)
#define INVALID_LPN64 ((LPN64)-1)
#define INVALID_PAGENUM ((PageNum)-1)
#define INVALID_MPN64 ((MPN64)-1)
/*
* Format modifier for printing VA, LA, and VPN.
* Use them like this: Log("%#"FMTLA"x\n", laddr)
*/
#if defined(VMM) || defined(FROBOS64) || vm_x86_64 || defined __APPLE__
# define FMTLA "l"
# define FMTVA "l"
# define FMTVPN "l"
#else
# define FMTLA ""
# define FMTVA ""
# define FMTVPN ""
#endif
#ifndef EXTERN
#define EXTERN extern
#endif
#define CONST const
#ifndef INLINE
# ifdef _MSC_VER
# define INLINE __inline
# else
# define INLINE inline
# endif
#endif
/*
* Annotation for data that may be exported into a DLL and used by other
* apps that load that DLL and import the data.
*/
#if defined(_WIN32) && defined(VMX86_IMPORT_DLLDATA)
# define VMX86_EXTERN_DATA extern __declspec(dllimport)
#else // !_WIN32
# define VMX86_EXTERN_DATA extern
#endif
#if defined(_WIN32) && !defined(VMX86_NO_THREADS)
#define THREADSPECIFIC __declspec(thread)
#else
#define THREADSPECIFIC
#endif
/*
* Due to the wonderful "registry redirection" feature introduced in
* 64-bit Windows, if you access any key under HKLM\Software in 64-bit
* code, you need to open/create/delete that key with
* VMW_KEY_WOW64_32KEY if you want a consistent view with 32-bit code.
*/
#ifdef _WIN32
#ifdef _WIN64
#define VMW_KEY_WOW64_32KEY KEY_WOW64_32KEY
#else
#define VMW_KEY_WOW64_32KEY 0x0
#endif
#endif
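/*
 * Example (a sketch; the key path is illustrative): opening a key so that
 * 32-bit and 64-bit code see the same view:
 *
 *    RegOpenKeyExA(HKEY_LOCAL_MACHINE, "Software\\VMware, Inc.", 0,
 *                  KEY_READ | VMW_KEY_WOW64_32KEY, &hKey);
 */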
/*
* At present, we effectively require a compiler that is at least
* gcc-3.3 (circa 2003). Enforce this here; various things below
* this line depend upon it.
*
* In practice, most things presently compile with gcc-4.1 or gcc-4.4.
* The various linux kernel modules may use older (gcc-3.3) compilers.
*/
#if defined __GNUC__ && (__GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 3))
#error "gcc version is to old to compile assembly, need gcc-3.3 or better"
#endif
/*
* Consider the following reasons functions are inlined:
*
* 1) inlined for performance reasons
* 2) inlined because it's a single-use function
*
* Functions which meet only condition 2 should be marked with this
* inline macro; it is not critical that they be inlined (though doing
* so saves some code space and runtime), so when other callers are
* added, the inline-ness should be removed.
*/
#if defined __GNUC__
/*
* Starting at version 3.3, gcc does not always inline functions marked
* 'inline' (it depends on their size and other factors). To force gcc
* to inline a function, one must use the __always_inline__ attribute.
* This attribute should be used sparingly and with care. It is usually
* preferable to let gcc make its own inlining decisions
*/
# define INLINE_ALWAYS INLINE __attribute__((__always_inline__))
#else
# define INLINE_ALWAYS INLINE
#endif
#define INLINE_SINGLE_CALLER INLINE_ALWAYS
/*
* Used when a hard guarantee of no inlining is needed. Very few
* instances need this since the absence of INLINE is a good hint
* that gcc will not do inlining.
*/
#if defined(__GNUC__) && (defined(VMM) || defined (VMKERNEL) || defined (VMKBOOT))
#define ABSOLUTELY_NOINLINE __attribute__((__noinline__))
#endif
/*
* Used when a function has no effects except the return value and the
* return value depends only on the parameters and/or global variables.
* Such a function can be subject to common subexpression elimination
* and loop optimization just as an arithmetic operator would be.
*/
#if defined(__GNUC__) && (defined(VMM) || defined (VMKERNEL))
#define SIDE_EFFECT_FREE __attribute__((__pure__))
#else
#define SIDE_EFFECT_FREE
#endif
/*
* Attributes placed on function declarations to tell the compiler
* that the function never returns.
*/
#ifdef _MSC_VER
#define NORETURN __declspec(noreturn)
#elif defined __GNUC__
#define NORETURN __attribute__((__noreturn__))
#else
#define NORETURN
#endif
/*
* Static profiling hints for functions.
* A function can be either hot, cold, or neither.
* It is an error to specify both hot and cold for the same function.
* Note that there is no annotation for "neither."
*/
#if defined __GNUC__ && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))
#define HOT __attribute__((hot))
#define COLD __attribute__((cold))
#else
#define HOT
#define COLD
#endif
/*
* Branch prediction hints:
* LIKELY(exp) - Expression exp is likely TRUE.
* UNLIKELY(exp) - Expression exp is likely FALSE.
* Usage example:
* if (LIKELY(excCode == EXC_NONE)) {
* or
* if (UNLIKELY(REAL_MODE(vc))) {
*
* We know how to predict branches on gcc3 and later (hopefully),
* all others we don't so we do nothing.
*/
#if defined __GNUC__
/*
* gcc3 uses __builtin_expect() to inform the compiler of an expected value.
* We use this to inform the static branch predictor. The '!!' in LIKELY
* will convert any !=0 to a 1.
*/
#define LIKELY(_exp) __builtin_expect(!!(_exp), 1)
#define UNLIKELY(_exp) __builtin_expect((_exp), 0)
#else
#define LIKELY(_exp) (_exp)
#define UNLIKELY(_exp) (_exp)
#endif
/*
* GCC's argument checking for printf-like functions
* This is conditional until we have replaced all `"%x", void *'
* with `"0x%08x", (uint32) void *'. Note that %p prints different things
* on different platforms. Argument checking is enabled for the
* vmkernel, which has already been cleansed.
*
* fmtPos is the position of the format string argument, beginning at 1
* varPos is the position of the variable argument, beginning at 1
*/
#if defined(__GNUC__)
# define PRINTF_DECL(fmtPos, varPos) __attribute__((__format__(__printf__, fmtPos, varPos)))
#else
# define PRINTF_DECL(fmtPos, varPos)
#endif
#if defined(__GNUC__)
# define SCANF_DECL(fmtPos, varPos) __attribute__((__format__(__scanf__, fmtPos, varPos)))
#else
# define SCANF_DECL(fmtPos, varPos)
#endif
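/*
 * Example: a Log-style function whose format string is argument 1 and
 * whose variable arguments start at argument 2:
 *
 *    void Log(const char *fmt, ...) PRINTF_DECL(1, 2);
 */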
/*
* UNUSED_PARAM should surround the parameter name and type declaration,
* e.g. "int MyFunction(int var1, UNUSED_PARAM(int var2))"
*
*/
#ifndef UNUSED_PARAM
# if defined(__GNUC__)
# define UNUSED_PARAM(_parm) _parm __attribute__((__unused__))
# else
# define UNUSED_PARAM(_parm) _parm
# endif
#endif
#ifndef UNUSED_TYPE
// XXX _Pragma would be better, but it doesn't always work right now.
# define UNUSED_TYPE(_parm) UNUSED_PARAM(_parm)
#endif
#ifndef UNUSED_VARIABLE
// XXX is there a better way?
# define UNUSED_VARIABLE(_var) (void)_var
#endif
/*
* gcc can warn us if we're ignoring returns
*/
#if defined(__GNUC__)
# define MUST_CHECK_RETURN __attribute__((warn_unused_result))
#else
# define MUST_CHECK_RETURN
#endif
/*
* ALIGNED specifies minimum alignment in "n" bytes.
*/
#ifdef __GNUC__
#define ALIGNED(n) __attribute__((__aligned__(n)))
#else
#define ALIGNED(n)
#endif
/*
* Once upon a time, this was used to silence compiler warnings that
* get generated when the compiler thinks that a function returns
* when it is marked noreturn. Don't do it. Use NOT_REACHED().
*/
#define INFINITE_LOOP() do { } while (1)
/*
* On FreeBSD (for the tools build), size_t is typedef'd if _BSD_SIZE_T_
* is defined. Use the same logic here so we don't define it twice. [greg]
*/
#ifdef __FreeBSD__
# ifdef _BSD_SIZE_T_
# undef _BSD_SIZE_T_
# ifdef VM_I386
# ifdef VM_X86_64
typedef uint64 size_t;
# else
typedef uint32 size_t;
# endif
# endif /* VM_I386 */
# endif
# ifdef _BSD_SSIZE_T_
# undef _BSD_SSIZE_T_
# ifdef VM_I386
# ifdef VM_X86_64
typedef int64 ssize_t;
# else
typedef int32 ssize_t;
# endif
# endif /* VM_I386 */
# endif
#else
# if !defined(_SIZE_T) && !defined(_SIZE_T_DEFINED)
# ifdef VM_I386
# define _SIZE_T
# ifdef VM_X86_64
typedef uint64 size_t;
# else
typedef uint32 size_t;
# endif
# elif defined(__arm__)
# define _SIZE_T
typedef uint32 size_t;
# endif
# endif
# if !defined(FROBOS) && !defined(_SSIZE_T) && !defined(_SSIZE_T_) && \
!defined(ssize_t) && !defined(__ssize_t_defined) && \
!defined(_SSIZE_T_DECLARED) && !defined(_SSIZE_T_DEFINED) && \
!defined(_SSIZE_T_DEFINED_)
# ifdef VM_I386
# define _SSIZE_T
# define __ssize_t_defined
# define _SSIZE_T_DECLARED
# ifdef VM_X86_64
typedef int64 ssize_t;
# else
typedef int32 ssize_t;
# endif
# elif defined(__arm__)
# define _SSIZE_T
# define __ssize_t_defined
# define _SSIZE_T_DECLARED
typedef int32 ssize_t;
# endif
# endif
#endif
/*
* Format modifier for printing pid_t. On sun the pid_t is a ulong, but on
* Linux it's an int.
* Use this like this: printf("The pid is %"FMTPID".\n", pid);
*/
#ifdef sun
# ifdef VM_X86_64
# define FMTPID "d"
# else
# define FMTPID "lu"
# endif
#else
# define FMTPID "d"
#endif
/*
* Format modifier for printing uid_t. On Solaris 10 and earlier, uid_t
* is a ulong, but on other platforms it's an unsigned int.
* Use this like this: printf("The uid is %"FMTUID".\n", uid);
*/
#if defined(sun) && !defined(SOL11)
# ifdef VM_X86_64
# define FMTUID "u"
# else
# define FMTUID "lu"
# endif
#else
# define FMTUID "u"
#endif
/*
* Format modifier for printing mode_t. On sun the mode_t is a ulong, but on
* Linux it's an int.
* Use this like this: printf("The mode is %"FMTMODE".\n", mode);
*/
#ifdef sun
# ifdef VM_X86_64
# define FMTMODE "o"
# else
# define FMTMODE "lo"
# endif
#else
# define FMTMODE "o"
#endif
/*
* Format modifier for printing time_t. Most platforms define a time_t to be
* a long int, but on FreeBSD (as of 5.0, it seems), the time_t is a signed
* size quantity. Refer to the definition of FMTSZ to see why we need silly
* preprocessor arithmetic.
* Use this like this: printf("The mode is %"FMTTIME".\n", time);
*/
#if defined(__FreeBSD__) && (__FreeBSD__ + 0) && ((__FreeBSD__ + 0) >= 5)
# define FMTTIME FMTSZ"d"
#else
# if defined(_MSC_VER)
# ifndef _SAFETIME_H_
# if (_MSC_VER < 1400) || defined(_USE_32BIT_TIME_T)
# define FMTTIME "ld"
# else
# define FMTTIME FMT64"d"
# endif
# else
# ifndef FMTTIME
# error "safetime.h did not define FMTTIME"
# endif
# endif
# else
# define FMTTIME "ld"
# endif
#endif
#ifdef __APPLE__
/*
* Format specifier for all these annoying types such as {S,U}Int32
* which are 'long' in 32-bit builds
* and 'int' in 64-bit builds.
*/
# ifdef __LP64__
# define FMTLI ""
# else
# define FMTLI "l"
# endif
/*
* Format specifier for all these annoying types such as NS[U]Integer
* which are 'int' in 32-bit builds
* and 'long' in 64-bit builds.
*/
# ifdef __LP64__
# define FMTIL "l"
# else
# define FMTIL ""
# endif
#endif
/*
* Define MXSemaHandle here so both vmmon and vmx see this definition.
*/
#ifdef _WIN32
typedef uintptr_t MXSemaHandle;
#else
typedef int MXSemaHandle;
#endif
/*
* Define type for poll device handles.
*/
typedef int64 PollDevHandle;
/*
* Define the utf16_t type.
*/
#if defined(_WIN32) && defined(_NATIVE_WCHAR_T_DEFINED)
typedef wchar_t utf16_t;
#else
typedef uint16 utf16_t;
#endif
/*
* Define for point and rectangle types. Defined here so they
* can be used by other externally facing headers in bora/public.
*/
typedef struct VMPoint {
int x, y;
} VMPoint;
#if defined _WIN32 && defined USERLEVEL
struct tagRECT;
typedef struct tagRECT VMRect;
#else
typedef struct VMRect {
int left;
int top;
int right;
int bottom;
} VMRect;
#endif
/*
* ranked locks "everywhere"
*/
typedef uint32 MX_Rank;
#endif /* _VM_BASIC_TYPES_H_ */
vmmemctl-only/shared/compat_fs.h 0000444 0000000 0000000 00000024277 12275350061 015764 0 ustar root root /*********************************************************
* Copyright (C) 2006 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef __COMPAT_FS_H__
# define __COMPAT_FS_H__
#include <linux/fs.h>
/*
* 2.6.5+ kernels define FS_BINARY_MOUNTDATA. Since it didn't exist and
* wasn't used before then, it's safe to define it to zero.
*/
#ifndef FS_BINARY_MOUNTDATA
#define FS_BINARY_MOUNTDATA 0
#endif
/*
* MAX_LFS_FILESIZE wasn't defined until 2.5.4.
*/
#ifndef MAX_LFS_FILESIZE
# include <linux/pagemap.h>
# if BITS_PER_LONG == 32
# define MAX_LFS_FILESIZE (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG - 1)) - 1)
# elif BITS_PER_LONG == 64
# define MAX_LFS_FILESIZE 0x7fffffffffffffffUL
# endif
#endif
/*
* sendfile as a VFS op was born in 2.5.30. Unfortunately, it also changed
* signatures, first in 2.5.47, then again in 2.5.70, then again in 2.6.8.
* Luckily, the 2.6.8+ signature is the same as the 2.5.47 signature. And
* as of 2.6.23-rc1 sendfile is gone, replaced by splice_read...
*
* Let's not support sendfile from 2.5.30 to 2.5.47, because the 2.5.30
* signature is much different and file_send_actor isn't externed.
*/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23)
#define VMW_SENDFILE_NONE
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 8)
#define VMW_SENDFILE_NEW
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 70)
#define VMW_SENDFILE_OLD
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 47)
#define VMW_SENDFILE_NEW
#else
#define VMW_SENDFILE_NONE
#endif
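/*
 * A minimal sketch of how a filesystem might key its file_operations off
 * these defines (the MyFsSendfile* names are hypothetical):
 *
 *    #if defined(VMW_SENDFILE_NEW)
 *       .sendfile = MyFsSendfileNew,
 *    #elif defined(VMW_SENDFILE_OLD)
 *       .sendfile = MyFsSendfileOld,
 *    #endif
 */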
/*
* splice_read has been there since 2.6.17, but let's avoid 2.6.17-rcX
* kernels... After all, nobody really used the splice system call until
* 2.6.23, which uses it to implement sendfile.
*/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18)
#define VMW_SPLICE_READ 1
#endif
/*
* Filesystems wishing to use generic page cache read/write routines are
* supposed to implement aio_read and aio_write (calling into
* generic_file_aio_read() and generic_file_aio_write() if necessary).
*
* The VFS exports do_sync_read() and do_sync_write() as the "new"
* generic_file_read() and generic_file_write(), but filesystems need not
* actually implement read and write- the VFS will automatically call
* do_sync_write() and do_sync_read() when applications invoke the standard
* read() and write() system calls.
*
* In 2.6.19, generic_file_read() and generic_file_write() were removed,
* necessitating this change. AIO dates as far back as 2.5.42, but the API has
* changed over time, so for simplicity, we'll only enable it from 2.6.19
* onward.
*/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
# define VMW_USE_AIO
#endif
/*
* The alloc_inode and destroy_inode VFS ops didn't exist prior to 2.4.21.
* Without these functions, file systems can't embed inodes.
*/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 21)
# define VMW_EMBED_INODE
#endif
/*
* iget() was removed from the VFS as of 2.6.25-rc1. The replacement for iget()
* is iget_locked() which was added in 2.5.17.
*/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 17)
# define VMW_USE_IGET_LOCKED
#endif
/*
* parent_ino was born in 2.5.5. For older kernels, let's use the 2.5.5
* implementation. It uses the dcache lock, which is OK because per-dentry
* locking appeared only after 2.5.5.
*/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 5)
#define compat_parent_ino(dentry) parent_ino(dentry)
#else
#define compat_parent_ino(dentry) \
({ \
ino_t res; \
spin_lock(&dcache_lock); \
res = dentry->d_parent->d_inode->i_ino; \
spin_unlock(&dcache_lock); \
res; \
})
#endif
/*
* putname changed to __putname in 2.6.6.
*/
#define compat___getname() __getname()
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 6)
#define compat___putname(name) putname(name)
#else
#define compat___putname(name) __putname(name)
#endif
/*
* inc_nlink, drop_nlink, and clear_nlink were added in 2.6.19.
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19)
#define compat_inc_nlink(inode) ((inode)->i_nlink++)
#define compat_drop_nlink(inode) ((inode)->i_nlink--)
#define compat_clear_nlink(inode) ((inode)->i_nlink = 0)
#else
#define compat_inc_nlink(inode) inc_nlink(inode)
#define compat_drop_nlink(inode) drop_nlink(inode)
#define compat_clear_nlink(inode) clear_nlink(inode)
#endif
/*
* i_size_write and i_size_read were introduced in 2.6.0-test1
* (though we'll look for them as of 2.6.1). They employ slightly different
* locking in order to guarantee atomicity, depending on the length of a long,
* whether the kernel is SMP, or whether the kernel is preemptible. Prior to
* i_size_write and i_size_read, there was no such locking, so that's the
* behavior we'll emulate.
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 1)
#define compat_i_size_read(inode) ((inode)->i_size)
#define compat_i_size_write(inode, size) ((inode)->i_size = size)
#else
#define compat_i_size_read(inode) i_size_read(inode)
#define compat_i_size_write(inode, size) i_size_write(inode, size)
#endif
/*
* filemap_fdatawrite was introduced in 2.5.12. Prior to that, modules used
* filemap_fdatasync instead. In 2.4.18, both filemap_fdatawrite and
* filemap_fdatawait began returning status codes. Prior to that, they were
* void functions, so we'll just have them return 0.
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 18)
#define compat_filemap_fdatawrite(mapping) \
({ \
int result = 0; \
filemap_fdatasync(mapping); \
result; \
})
#define compat_filemap_fdatawait(mapping) \
({ \
int result = 0; \
filemap_fdatawait(mapping); \
result; \
})
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 12)
#define compat_filemap_fdatawrite(mapping) filemap_fdatasync(mapping)
#define compat_filemap_fdatawait(mapping) filemap_fdatawait(mapping)
#else
#define compat_filemap_fdatawrite(mapping) filemap_fdatawrite(mapping)
#define compat_filemap_fdatawait(mapping) filemap_fdatawait(mapping)
#endif
/*
* filemap_write_and_wait was introduced in 2.6.6 and exported for module use
* in 2.6.16. It's really just a simple wrapper around filemap_fdatawrite
* and filemap_fdatawait, which initiates a flush of all dirty pages, then
* waits for the pages to flush. The implementation here is a simplified form
* of the one found in 2.6.20-rc3.
*
* Unfortunately, it just isn't possible to implement this prior to 2.4.5, when
* neither filemap_fdatawait nor filemap_fdatasync were exported for module
* use. So we'll define it out and hope for the best.
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 5)
#define compat_filemap_write_and_wait(mapping)
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 16)
#define compat_filemap_write_and_wait(mapping) \
({ \
int result = 0; \
if (mapping->nrpages) { \
result = compat_filemap_fdatawrite(mapping); \
if (result != -EIO) { \
int result2 = compat_filemap_fdatawait(mapping); \
if (!result) { \
result = result2; \
} \
} \
} \
result; \
})
#else
#define compat_filemap_write_and_wait(mapping) filemap_write_and_wait(mapping)
#endif
/*
* invalidate_remote_inode was introduced in 2.6.0-test5. Prior to that,
* filesystems wishing to invalidate pages belonging to an inode called
* invalidate_inode_pages.
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
#define compat_invalidate_remote_inode(inode) invalidate_inode_pages(inode)
#else
#define compat_invalidate_remote_inode(inode) invalidate_remote_inode(inode)
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)
#define VMW_FSYNC_OLD
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0)
typedef umode_t compat_umode_t;
#else
typedef int compat_umode_t;
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0)
#define d_make_root(inode) ({ \
struct dentry * ____res = d_alloc_root(inode); \
if (!____res) { \
iput(inode); \
} \
____res; \
})
#endif
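/*
 * Typical use in a fill_super implementation (a sketch): on failure,
 * d_make_root has already dropped the inode reference, so the caller only
 * reports the error:
 *
 *    sb->s_root = d_make_root(rootInode);
 *    if (!sb->s_root) {
 *       return -ENOMEM;
 *    }
 */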
#endif /* __COMPAT_FS_H__ */
vmmemctl-only/shared/compat_scsi.h 0000444 0000000 0000000 00000003024 12275350061 016300 0 ustar root root /*********************************************************
* Copyright (C) 2002 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef __COMPAT_SCSI_H__
# define __COMPAT_SCSI_H__
/* The scsi_bufflen() API appeared somewhere in time --hpreg */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 23)
# define scsi_bufflen(cmd) ((cmd)->request_bufflen)
# define scsi_sg_count(cmd) ((cmd)->use_sg)
# define scsi_sglist(cmd) ((struct scatterlist *)(cmd)->request_buffer)
# define scsi_set_resid(cmd, _resid) ((cmd)->resid = _resid)
#endif
/*
* Using scsi_sglist to access the request buffer looks strange,
* so instead we define this macro. What happened is that later kernels
* put all SCSI data in sglists, since that simplifies passing buffers.
*/
#define scsi_request_buffer(cmd) scsi_sglist(cmd)
#endif /* __COMPAT_SCSI_H__ */
vmmemctl-only/shared/compat_string.h 0000444 0000000 0000000 00000003563 12275350061 016655 0 ustar root root /*********************************************************
* Copyright (C) 2007 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef __COMPAT_STRING_H__
# define __COMPAT_STRING_H__
#include <linux/string.h>
/*
* kstrdup was born in 2.6.13. This implementation is almost identical to the
* one found there, including the check for kmalloc failure.
*/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 13)
#define compat_kstrdup(s, gfp) kstrdup(s, gfp)
#else
#define compat_kstrdup(s, gfp) \
({ \
size_t len; \
char *buf; \
len = strlen(s) + 1; \
buf = kmalloc(len, gfp); \
if (buf) { \
memcpy(buf, s, len); \
} \
buf; \
})
#endif
#endif /* __COMPAT_STRING_H__ */
vmmemctl-only/shared/guest_msg_def.h 0000444 0000000 0000000 00000005642 12275350062 016620 0 ustar root root /*********************************************************
* Copyright (C) 1998 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* guest_msg_def.h --
*
* Second layer of the internal communication channel between guest
* applications and vmware
*
*/
#ifndef _GUEST_MSG_DEF_H_
#define _GUEST_MSG_DEF_H_
#define INCLUDE_ALLOW_MODULE
#define INCLUDE_ALLOW_USERLEVEL
#include "includeCheck.h"
/* Basic request types */
typedef enum {
MESSAGE_TYPE_OPEN,
MESSAGE_TYPE_SENDSIZE,
MESSAGE_TYPE_SENDPAYLOAD,
MESSAGE_TYPE_RECVSIZE,
MESSAGE_TYPE_RECVPAYLOAD,
MESSAGE_TYPE_RECVSTATUS,
MESSAGE_TYPE_CLOSE,
} MessageType;
/* Reply statuses */
/* The basic request succeeded */
#define MESSAGE_STATUS_SUCCESS 0x0001
/* vmware has a message available for its party */
#define MESSAGE_STATUS_DORECV 0x0002
/* The channel has been closed */
#define MESSAGE_STATUS_CLOSED 0x0004
/* vmware removed the message before the party fetched it */
#define MESSAGE_STATUS_UNSENT 0x0008
/* A checkpoint occurred */
#define MESSAGE_STATUS_CPT 0x0010
/* An underlying device is powering off */
#define MESSAGE_STATUS_POWEROFF 0x0020
/* vmware has detected a timeout on the channel */
#define MESSAGE_STATUS_TIMEOUT 0x0040
/* vmware supports high-bandwidth for sending and receiving the payload */
#define MESSAGE_STATUS_HB 0x0080
/*
* This mask defines the status bits that the guest is allowed to set;
* we use this to mask out all other bits when receiving the status
* from the guest. Otherwise, the guest can manipulate VMX state by
* setting status bits that are only supposed to be changed by the
* VMX. See bug 45385.
*/
#define MESSAGE_STATUS_GUEST_MASK MESSAGE_STATUS_SUCCESS
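/*
 * Example: sanitizing a status word received from the guest:
 *
 *    status &= MESSAGE_STATUS_GUEST_MASK;
 */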
/*
* Max number of channels.
* Unfortunately this has to be public because the monitor part
* of the backdoor needs it for its trivial-case optimization. [greg]
*/
#define GUESTMSG_MAX_CHANNEL 8
/* Flags to open a channel. --hpreg */
#define GUESTMSG_FLAG_COOKIE 0x80000000
#define GUESTMSG_FLAG_ALL GUESTMSG_FLAG_COOKIE
/*
* Maximum size of incoming message. This is to prevent denial of host service
* attacks from guest applications.
*/
#define GUESTMSG_MAX_IN_SIZE (64 * 1024)
#endif /* _GUEST_MSG_DEF_H_ */
vmmemctl-only/shared/compat_ethtool.h 0000444 0000000 0000000 00000003662 12275350061 017025 0 ustar root root /*********************************************************
* Copyright (C) 2007 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef _COMPAT_ETHTOOL_H
#define _COMPAT_ETHTOOL_H
/*
* ethtool is a userspace utility for getting and setting ethernet device
* settings. Kernel support for it was first published in 2.4.0-test11, but
* only in 2.4.15 were the ethtool_value struct and the ETHTOOL_GLINK ioctl
* added to ethtool.h (together, because the ETHTOOL_GLINK ioctl expects a
* single value response).
*
* Likewise, ioctls for getting and setting TSO were published in 2.4.22.
*/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 0)
# include <linux/ethtool.h>
# ifndef ETHTOOL_GLINK
# define ETHTOOL_GLINK 0x0a
typedef struct {
__u32 cmd;
__u32 data;
} compat_ethtool_value;
# else
typedef struct ethtool_value compat_ethtool_value;
# endif
# ifndef ETHTOOL_GTSO
# define ETHTOOL_GTSO 0x1E
# define ETHTOOL_STSO 0x1F
# endif
#endif
#if COMPAT_LINUX_VERSION_CHECK_LT(3, 3, 0)
# define compat_ethtool_rxfh_indir_default(i, num_queues) (i % num_queues)
#else
# define compat_ethtool_rxfh_indir_default(i, num_queues) ethtool_rxfh_indir_default(i, num_queues)
#endif
#endif /* _COMPAT_ETHTOOL_H */
vmmemctl-only/shared/vm_basic_math.h 0000444 0000000 0000000 00000004176 12275350062 016602 0 ustar root root /*********************************************************
* Copyright (C) 2008 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* vm_basic_math.h --
*
* Standard mathematical macros for VMware source code.
*/
#ifndef _VM_BASIC_MATH_H_
#define _VM_BASIC_MATH_H_
#define INCLUDE_ALLOW_USERLEVEL
#define INCLUDE_ALLOW_MODULE
#define INCLUDE_ALLOW_VMMON
#define INCLUDE_ALLOW_VMKERNEL
#define INCLUDE_ALLOW_VMKDRIVERS
#define INCLUDE_ALLOW_VMK_MODULE
#define INCLUDE_ALLOW_DISTRIBUTE
#define INCLUDE_ALLOW_VMCORE
#define INCLUDE_ALLOW_VMIROM
#include "includeCheck.h"
#include "vm_basic_types.h" // For INLINE.
#include "vm_basic_asm.h" // For Div64...
static INLINE uint32
RatioOf(uint32 numer1, uint32 numer2, uint32 denom)
{
uint64 numer = (uint64)numer1 * numer2;
/* Calculate "(numer1 * numer2) / denom" avoiding round-off errors. */
#if defined(VMM) || !(defined(__i386__) || defined(__x86_64__))
return numer / denom;
#else
uint32 ratio;
uint32 unused;
Div643232(numer, denom, &ratio, &unused);
return ratio;
#endif
}
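/*
 * Example: expressing "used" as a percentage of "total" without losing
 * the intermediate product to 32-bit overflow:
 *
 *    uint32 pct = RatioOf(used, 100, total);
 */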
static INLINE uint32
ExponentialAvg(uint32 avg, uint32 value, uint32 gainNumer, uint32 gainDenom)
{
uint32 term1 = gainNumer * avg;
uint32 term2 = (gainDenom - gainNumer) * value;
return (term1 + term2) / gainDenom;
}
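/*
 * Example: an exponentially weighted moving average that keeps 3/4 of
 * the old average and takes 1/4 of the new sample:
 *
 *    avg = ExponentialAvg(avg, sample, 3, 4);
 */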
static INLINE Bool
IsPowerOfTwo(uint32 x)
{
/* Does not check for zero. Callers depend on this. */
return !(x & (x - 1));
}
#endif // ifndef _VM_BASIC_MATH_H_
vmmemctl-only/shared/circList.h 0000444 0000000 0000000 00000023650 12275350060 015556 0 ustar root root /*********************************************************
* Copyright (C) 1998 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* circList.h --
*
* macros, prototypes and struct definitions for double-linked
* circular lists.
*/
#ifndef _CIRCLIST_H_
#define _CIRCLIST_H_
#define INCLUDE_ALLOW_USERLEVEL
#define INCLUDE_ALLOW_VMMON
#define INCLUDE_ALLOW_VMCORE
#define INCLUDE_ALLOW_MODULE
#define INCLUDE_ALLOW_VMKERNEL
#include "includeCheck.h"
#include "vmware.h"
typedef struct ListItem {
struct ListItem *prev;
struct ListItem *next;
} ListItem;
/* A list with no elements is a null pointer. */
#define LIST_ITEM_DEF(name) \
ListItem * name = NULL
#define LIST_EMPTY(l) ((l) == NULL)
/* initialize list item */
#define INIT_LIST_ITEM(p) \
do { \
(p)->prev = (p)->next = (p); \
} while (0)
/* check if initialized */
#define IS_LIST_ITEM_INITIALIZED(li) \
(((li) == (li)->prev) && ((li) == (li)->next))
/* return first element in the list */
#define LIST_FIRST(l) (l)
#define LIST_FIRST_CHK(l) (l)
/* return last element in the list */
#define LIST_LAST(l) ((l)->prev)
#define LIST_LAST_CHK(l) (LIST_EMPTY(l) ? NULL : LIST_LAST(l))
/*
* LIST_CONTAINER - get the struct for this entry (like list_entry)
* @ptr: the &struct ListItem pointer.
* @type: the type of the struct this is embedded in.
* @member: the name of the list struct within the struct.
*/
#define LIST_CONTAINER(ptr, type, member) \
VMW_CONTAINER_OF(ptr, type, member)
/*
* delete item from the list
*/
#define LIST_DEL DelListItem
/*
* link two lists together
*/
#define LIST_SPLICE SpliceLists
/*
* Split a list into two lists
*/
#define LIST_SPLIT SplitLists
/*
* Add item to front of stack. List pointer points to new head.
*/
#define LIST_PUSH PushListItem
/*
* Add item at back of queue. List pointer only changes if list was empty.
*/
#define LIST_QUEUE QueueListItem
/*
* Get the list size.
*/
#define LIST_SIZE GetListSize
/*
* LIST_SCAN_FROM scans the list from "from" up until "until".
* The loop variable p should not be destroyed in the process.
* "from" is an element in the list where to start scanning.
* "until" is the element where search should stop.
* member is the field to use for the search - either "next" or "prev".
*/
#define LIST_SCAN_FROM(p, from, until, member) \
for (p = (from); (p) != NULL; \
(p) = (((p)->member == (until)) ? NULL : (p)->member))
/* scan the entire list (non-destructively) */
#define LIST_SCAN(p, l) \
LIST_SCAN_FROM(p, LIST_FIRST(l), LIST_FIRST(l), next)
/* scan a list backward from last element to first (non-destructively) */
#define LIST_SCAN_BACK(p, l) \
LIST_SCAN_FROM(p, LIST_LAST_CHK(l), LIST_LAST(l), prev)
/* scan the entire list where loop element may be destroyed */
#define LIST_SCAN_SAFE(p, pn, l) \
if (!LIST_EMPTY(l)) \
for (p = (l), (pn) = NextListItem(p, l); (p) != NULL; \
(p) = (pn), (pn) = NextListItem(p, l))
/* scan the entire list backwards where loop element may be destroyed */
#define LIST_SCAN_BACK_SAFE(p, pn, l) \
if (!LIST_EMPTY(l)) \
for (p = LIST_LAST(l), (pn) = PrevListItem(p, l); (p) != NULL; \
(p) = (pn), (pn) = PrevListItem(p, l))
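/*
 * An illustrative sketch of the scan macros (not part of the original
 * header). "Node" is a hypothetical element type that embeds a
 * ListItem; LIST_SCAN_SAFE reads the successor before the loop body
 * runs, which is what makes deletion inside the loop safe.
 */
#if 0
typedef struct Node {
   int value;
   ListItem links;
} Node;

static INLINE void
ScanExample(ListItem *list)
{
   ListItem *li, *liNext;

   /* Read-only traversal. */
   LIST_SCAN(li, list) {
      Node *n = LIST_CONTAINER(li, Node, links);
      (void)n;
   }

   /* Destructive traversal: every element is unlinked. */
   LIST_SCAN_SAFE(li, liNext, list) {
      LIST_DEL(li, &list);
   }
}
#endif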
/* function definitions */
/*
*----------------------------------------------------------------------
*
* NextListItem --
*
* Returns the next member of a doubly linked list, or NULL if last.
* Assumes: p is a member of the list headed by head.
*
* Result:
* If head or p is NULL, return NULL. Otherwise,
* the next list member (or NULL if last).
*
* Side effects:
* None.
*
*----------------------------------------------------------------------
*/
static INLINE ListItem *
NextListItem(ListItem *p, // IN
ListItem *head) // IN
{
if (head == NULL || p == NULL) {
return NULL;
}
/* both p and head are non-null */
p = p->next;
return p == head ? NULL : p;
}
/*
*----------------------------------------------------------------------
*
* PrevListItem --
*
* Returns the previous member of a doubly linked list, or NULL if first.
* Assumes: p is a member of the list headed by head.
*
* Result:
* If head or p is NULL, return NULL. Otherwise,
* the previous list member (or NULL if first).
*
* Side effects:
* None.
*
*----------------------------------------------------------------------
*/
static INLINE ListItem *
PrevListItem(ListItem *p, // IN
ListItem *head) // IN
{
if (head == NULL || p == NULL) {
return NULL;
}
/* both p and head are non-null */
return p == head ? NULL : p->prev;
}
/*
*----------------------------------------------------------------------
*
* DelListItem --
*
* Deletes a member of a doubly linked list, possibly modifies the
* list header itself.
* Assumes neither p nor headp is null and p is a member of *headp.
*
* Result:
* None
*
* Side effects:
* Modifies *headp.
*
*----------------------------------------------------------------------
*/
static INLINE void
DelListItem(ListItem *p, // IN
ListItem **headp) // IN/OUT
{
ListItem *next;
ASSERT(p);
ASSERT(headp);
next = p->next;
if (p == next) {
*headp = NULL;
} else {
next->prev = p->prev;
p->prev->next = next;
if (*headp == p) {
*headp = next;
}
}
}
/*
*----------------------------------------------------------------------
*
* QueueListItem --
*
* Adds a new member to the back of a doubly linked list (queue)
* Assumes neither p nor headp is null and p is not a member of *headp.
*
* Result:
* None
*
* Side effects:
* Modifies *headp.
*
*----------------------------------------------------------------------
*/
static INLINE void
QueueListItem(ListItem *p, // IN
ListItem **headp) // IN/OUT
{
ListItem *head;
head = *headp;
if (LIST_EMPTY(head)) {
INIT_LIST_ITEM(p);
*headp = p;
} else {
p->prev = head->prev;
p->next = head;
p->prev->next = p;
head->prev = p;
}
}
/*
*----------------------------------------------------------------------
*
* PushListItem --
*
* Adds a new member to the front of a doubly linked list (stack)
* Assumes neither p nor headp is null and p is not a member of *headp.
*
* Result:
* None
*
* Side effects:
* Modifies *headp.
*
*----------------------------------------------------------------------
*/
static INLINE void
PushListItem(ListItem *p, // IN
ListItem **headp) // IN/OUT
{
QueueListItem(p, headp);
*headp = p;
}
/*
*----------------------------------------------------------------------
*
* SpliceLists --
*
* Make a single list {l1 l2} from {l1} and {l2} and return it.
* It is okay for one or both lists to be NULL.
* No checking is done. It is assumed that l1 and l2 are two
* distinct lists.
*
* Result:
* A list { l1 l2 }.
*
* Side effects:
* Modifies l1 and l2 list pointers.
*
*----------------------------------------------------------------------
*/
static INLINE ListItem *
SpliceLists(ListItem *l1, // IN
ListItem *l2) // IN
{
ListItem *l1Last, *l2Last;
if (LIST_EMPTY(l1)) {
return l2;
}
if (LIST_EMPTY(l2)) {
return l1;
}
l1Last = l1->prev; /* last elem of l1 */
l2Last = l2->prev; /* last elem of l2 */
/*
* l1 -> ... -> l1Last l2 -> ... l2Last
*/
l1Last->next = l2;
l2->prev = l1Last;
l1->prev = l2Last;
l2Last->next = l1;
return l1;
}
/*
*----------------------------------------------------------------------
*
* SplitLists --
*
* Make a list l = {l1 l2} into two separate lists {l1} and {l2}, where:
* l = { ... x -> p -> ... } split into:
* l1 = { ... -> x }
* l2 = { p -> ... }
* Assumes neither p nor l is null and p is a member of l.
* If p is the first element of l, then l1 will be NULL.
*
* Result:
* None.
*
* Side effects:
* Sets *l1p and *l2p to the resulting two lists.
* Modifies l's pointers.
*
*----------------------------------------------------------------------
*/
static INLINE void
SplitLists(ListItem *p, // IN
ListItem *l, // IN
ListItem **l1p, // OUT
ListItem **l2p) // OUT
{
ListItem *last;
if (p == LIST_FIRST(l)) { /* first element */
*l1p = NULL;
*l2p = l;
return;
}
last = l->prev;
*l1p = l;
p->prev->next = l;
l->prev = p->prev;
*l2p = p;
p->prev = last;
last->next = p;
}
/*
*----------------------------------------------------------------------
*
* GetListSize --
*
* Return the number of items in the list.
*
* Result:
* The number of items in the list.
*
* Side effects:
* None.
*
*----------------------------------------------------------------------
*/
static INLINE int
GetListSize(ListItem *head) // IN
{
ListItem *li;
int ret = 0;
LIST_SCAN(li, head) {
ret++;
}
return ret;
}
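/*
 * An illustrative sketch of the mutation helpers above (not part of
 * the original header). The elements live in caller-owned storage;
 * the head is just a ListItem pointer that is NULL when empty.
 */
#if 0
static INLINE void
CircListExample(void)
{
   ListItem a, b, c;
   LIST_ITEM_DEF(l1); /* ListItem *l1 = NULL */
   LIST_ITEM_DEF(l2);

   LIST_QUEUE(&a, &l1); /* l1: a */
   LIST_QUEUE(&b, &l1); /* l1: a b (head unchanged) */
   LIST_PUSH(&c, &l2); /* l2: c (head now points at c) */

   l1 = LIST_SPLICE(l1, l2); /* l1: a b c */
   ASSERT(LIST_SIZE(l1) == 3);

   LIST_SPLIT(&b, l1, &l1, &l2); /* l1: a l2: b c */
   LIST_DEL(&a, &l1); /* l1 is empty again */
   ASSERT(LIST_EMPTY(l1));
}
#endif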
#endif /* _CIRCLIST_H_ */
vmmemctl-only/shared/compat_timer.h 0000444 0000000 0000000 00000006551 12275350061 016467 0 ustar root root /*********************************************************
* Copyright (C) 2002 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef __COMPAT_TIMER_H__
# define __COMPAT_TIMER_H__
/*
* The del_timer_sync() API appeared in 2.3.43
* It became reliable in 2.4.0-test3
*
* --hpreg
*/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 0)
# define compat_del_timer_sync(timer) del_timer_sync(timer)
#else
# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 43)
/* 2.3.43 removed asm/softirq.h's reference to bh_base. */
# include <linux/interrupt.h>
# endif
# include <asm/softirq.h>
static inline int
compat_del_timer_sync(struct timer_list *timer) // IN
{
int wasPending;
start_bh_atomic();
wasPending = del_timer(timer);
end_bh_atomic();
return wasPending;
}
#endif
/*
* The msleep_interruptible() API appeared in 2.6.9.
* It is based on the msleep() API, which appeared in 2.4.29.
*/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 9)
# include <linux/delay.h>
# define compat_msleep_interruptible(msecs) msleep_interruptible(msecs)
# define compat_msleep(msecs) msleep(msecs)
#else
# include <linux/sched.h>
/*
* msecs_to_jiffies appeared in 2.6.7. For earlier kernels,
* fall back to slow-case code (we don't use this operation
* enough to need the performance).
*/
# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 7)
# define msecs_to_jiffies(msecs) (((msecs) * HZ + 999) / 1000)
# endif
/*
* set_current_state appeared in 2.2.18.
*/
# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 18)
# define set_current_state(a) do { current->state = (a); } while(0)
# endif
static inline void
compat_msleep_interruptible(unsigned long msecs) // IN
{
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(msecs_to_jiffies(msecs) + 1);
}
static inline void
compat_msleep(unsigned long msecs) // IN
{
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(msecs_to_jiffies(msecs) + 1);
}
#endif
/*
* init_timer_deferrable() has been available since 2.6.22.
*/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
# define compat_init_timer_deferrable(timer) init_timer_deferrable(timer)
#else
# define compat_init_timer_deferrable(timer) init_timer(timer)
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)
static inline void compat_setup_timer(struct timer_list * timer,
void (*function)(unsigned long),
unsigned long data)
{
timer->function = function;
timer->data = data;
init_timer(timer);
}
#else
# define compat_setup_timer(timer, function, data) \
setup_timer(timer, function, data)
#endif
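/*
 * An illustrative sketch of the wrappers above (not part of the
 * original header). "myTimer" and "MyTimerCb" are hypothetical names;
 * the point is that setup and teardown go through the compat_ layer so
 * the same code builds on both old and new kernels.
 */
#if 0
static struct timer_list myTimer;

static void
MyTimerCb(unsigned long data) // IN
{
   /* Runs in timer (bottom-half) context. */
}

static void
TimerExample(void)
{
   compat_setup_timer(&myTimer, MyTimerCb, 0);
   mod_timer(&myTimer, jiffies + msecs_to_jiffies(100));
   /* ... */
   compat_del_timer_sync(&myTimer); /* waits out a running callback */
}
#endif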
#endif /* __COMPAT_TIMER_H__ */
vmmemctl-only/shared/compat_pci_mapping.h 0000444 0000000 0000000 00000004741 12275350061 017634 0 ustar root root /*********************************************************
* Copyright (C) 2008 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef __COMPAT_PCI_MAPPING_H__
#define __COMPAT_PCI_MAPPING_H__
#include <asm/types.h>
#include <asm/io.h>
#include <linux/pci.h>
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,41)
typedef u32 dma_addr_t;
static __inline__ int
get_order(unsigned long size)
{
int order;
size = (size - 1) >> (PAGE_SHIFT - 1);
order = -1;
do {
size >>= 1;
order++;
} while (size);
return order;
}
static inline void *
compat_pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle)
{
void *ptr = (void *)__get_free_pages(GFP_ATOMIC, get_order(size));
if (ptr) {
memset(ptr, 0, size);
*dma_handle = virt_to_phys(ptr);
}
return ptr;
}
static inline void
compat_pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr,
dma_addr_t dma_handle)
{
free_pages((unsigned long)vaddr, get_order(size));
}
static inline dma_addr_t
compat_pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
{
return virt_to_phys(ptr);
}
static inline void
compat_pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
size_t size, int direction)
{
}
#else
#define compat_pci_alloc_consistent(hwdev, size, dma_handle) \
pci_alloc_consistent(hwdev, size, dma_handle)
#define compat_pci_free_consistent(hwdev, size, vaddr, dma_handle) \
pci_free_consistent(hwdev, size, vaddr, dma_handle)
#define compat_pci_map_single(hwdev, ptr, size, direction) \
pci_map_single(hwdev, ptr, size, direction)
#define compat_pci_unmap_single(hwdev, dma_addr, size, direction) \
pci_unmap_single(hwdev, dma_addr, size, direction)
#endif
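/*
 * An illustrative sketch of the wrappers above (not part of the
 * original header). On pre-2.3.41 kernels the "consistent" allocation
 * degrades to plain page allocation plus virt_to_phys(); "dev" is a
 * pci_dev the caller obtained elsewhere.
 */
#if 0
static int
PciMappingExample(struct pci_dev *dev) // IN
{
   dma_addr_t dma;
   void *buf = compat_pci_alloc_consistent(dev, 4096, &dma);

   if (buf == NULL) {
      return -1;
   }
   /* Program the device with "dma"; touch the buffer through "buf". */
   compat_pci_free_consistent(dev, 4096, buf, dma);
   return 0;
}
#endif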
#endif
vmmemctl-only/shared/compat_sock.h 0000444 0000000 0000000 00000006002 12275350061 016275 0 ustar root root /*********************************************************
* Copyright (C) 2003 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef __COMPAT_SOCK_H__
# define __COMPAT_SOCK_H__
#include <linux/stddef.h> /* for NULL */
#include <net/sock.h>
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)
static inline wait_queue_head_t *sk_sleep(struct sock *sk)
{
return sk->sk_sleep;
}
#endif
/*
* Prior to 2.6.24, there was no sock network namespace member. In 2.6.26, it
* was hidden behind accessor functions so that its behavior could vary
* depending on the value of CONFIG_NET_NS.
*/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 26)
# define compat_sock_net(sk) sock_net(sk)
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
# define compat_sock_net(sk) sk->sk_net
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 16)
#ifndef CONFIG_FILTER
# define sk_filter(sk, skb, needlock) 0
#endif
/* Taken from 2.6.16's sock.h and modified for macro. */
# define compat_sk_receive_skb(sk, skb, nested) \
({ \
int rc = NET_RX_SUCCESS; \
\
if (sk_filter(sk, skb, 0)) { \
kfree_skb(skb); \
} else { \
skb->dev = NULL; \
bh_lock_sock(sk); \
if (!sock_owned_by_user(sk)) { \
rc = (sk)->sk_backlog_rcv(sk, skb); \
} else { \
sk_add_backlog(sk, skb); \
} \
bh_unlock_sock(sk); \
} \
\
sock_put(sk); \
rc; \
})
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
# define compat_sk_receive_skb(sk, skb, nested) sk_receive_skb(sk, skb)
#else
# define compat_sk_receive_skb(sk, skb, nested) sk_receive_skb(sk, skb, nested)
#endif
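/*
 * An illustrative sketch of the wrappers above (not part of the
 * original header): look up the socket's wait queue and hand it a
 * received skb, independent of kernel version. Note that both the old
 * and new receive paths drop a reference on the sock.
 */
#if 0
static int
SockExample(struct sock *sk,       // IN
            struct sk_buff *skb)   // IN
{
   wait_queue_head_t *wq = sk_sleep(sk); /* compat inline before 2.6.35 */

   (void)wq;
   return compat_sk_receive_skb(sk, skb, 0);
}
#endif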
#endif /* __COMPAT_SOCK_H__ */
vmmemctl-only/shared/vmciKernelAPI.h 0000444 0000000 0000000 00000002410 12275350062 016424 0 ustar root root /*********************************************************
* Copyright (C) 2010 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* vmciKernelAPI.h --
*
* Kernel API (current) exported from the VMCI host and guest drivers.
*/
#ifndef __VMCI_KERNELAPI_H__
#define __VMCI_KERNELAPI_H__
#define INCLUDE_ALLOW_MODULE
#define INCLUDE_ALLOW_VMK_MODULE
#define INCLUDE_ALLOW_VMKERNEL
#include "includeCheck.h"
/* With this file you always get the latest version. */
#include "vmciKernelAPI1.h"
#include "vmciKernelAPI2.h"
#endif /* !__VMCI_KERNELAPI_H__ */
vmmemctl-only/shared/compat_sched.h 0000444 0000000 0000000 00000024236 12275350061 016435 0 ustar root root /*********************************************************
* Copyright (C) 2002 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef __COMPAT_SCHED_H__
# define __COMPAT_SCHED_H__
#include <linux/sched.h>
/* CLONE_KERNEL available in 2.5.35 and higher. */
#ifndef CLONE_KERNEL
#define CLONE_KERNEL (CLONE_FILES | CLONE_FS | CLONE_SIGHAND)
#endif
/* TASK_COMM_LEN became available in 2.6.11. */
#ifndef TASK_COMM_LEN
#define TASK_COMM_LEN 16
#endif
/* The capable() API appeared in 2.1.92 --hpreg */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 1, 92)
# define capable(_capability) suser()
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 0)
# define need_resched() need_resched
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 3)
# define need_resched() (current->need_resched)
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 3)
# define cond_resched() (need_resched() ? schedule() : (void) 0)
#endif
/* Oh well. We need yield... Happy us! */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 20)
# ifdef __x86_64__
# define compat_yield() there_is_nothing_like_yield()
# else
# include <linux/unistd.h>
# include <linux/kernel.h>
/*
* Used by the _syscallX macros. Note that this is a global variable,
* so do not rely on its contents too much. As sched_yield() is the
* only function we call through it, and we never check its return
* value, we have no problem...
*/
extern int errno;
/*
* compat_yield() provides access to the sched_yield() system call
* through the _syscall0 macro, for kernels that predate a native
* yield() implementation.
*/
# define __NR_compat_yield __NR_sched_yield
static inline _syscall0(int, compat_yield);
# endif
#else
# define compat_yield() yield()
#endif
/*
* Since 2.5.34 there are two methods to enumerate tasks:
* for_each_process(p) { ... } which enumerates only tasks and
* do_each_thread(g,t) { ... } while_each_thread(g,t) which enumerates
* also threads even if they share same pid.
*/
#ifndef for_each_process
# define for_each_process(p) for_each_task(p)
#endif
#ifndef do_each_thread
# define do_each_thread(g, t) for_each_task(g) { t = g; do
# define while_each_thread(g, t) while (0) }
#endif
/*
* The lock for the signal mask is a moving target...
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 40) && defined(CLONE_PID)
/* 2.4.x without NPTL patches or early 2.5.x */
#define compat_sigmask_lock sigmask_lock
#define compat_dequeue_signal_current(siginfo_ptr) \
dequeue_signal(&current->blocked, (siginfo_ptr))
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 60) && !defined(INIT_SIGHAND)
/* RedHat's 2.4.x with first version of NPTL support, or 2.5.40 to 2.5.59 */
#define compat_sigmask_lock sig->siglock
#define compat_dequeue_signal_current(siginfo_ptr) \
dequeue_signal(&current->blocked, (siginfo_ptr))
#else
/* RedHat's 2.4.x with second version of NPTL support, or 2.5.60+. */
#define compat_sigmask_lock sighand->siglock
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
#define compat_dequeue_signal_current(siginfo_ptr) \
dequeue_signal(&current->blocked, (siginfo_ptr))
#else
#define compat_dequeue_signal_current(siginfo_ptr) \
dequeue_signal(current, &current->blocked, (siginfo_ptr))
#endif
#endif
/*
* recalc_sigpending() took a task argument in the past
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 29) && defined(CLONE_PID)
/* 2.4.x without NPTL patches or early 2.5.x */
#define compat_recalc_sigpending() recalc_sigpending(current)
#else
/* RedHat's 2.4.x with NPTL support, or 2.5.29+ */
#define compat_recalc_sigpending() recalc_sigpending()
#endif
/*
* reparent_to_init() was introduced in 2.4.8. In 2.5.38 (or possibly
* earlier, but later than 2.5.31) a call to it was added into
* daemonize(), so compat_daemonize no longer needs to call it.
*
* In 2.4.x kernels reparent_to_init() forgets to do correct refcounting
* on current->user. It is better to count one too many than one too few...
*/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 8) && LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 38)
#define compat_reparent_to_init() do { \
reparent_to_init(); \
atomic_inc(&current->user->__count); \
} while (0)
#else
#define compat_reparent_to_init() do {} while (0)
#endif
/*
* daemonize() appeared in 2.2.18, except that 2.2.17-4-RH7.0 also has it.
* Fortunately 2.2.17-4-RH7.0 uses versioned symbols, so we can check
* its existence with defined().
*/
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 18)) && !defined(daemonize)
static inline void daemonize(void) {
struct fs_struct *fs;
exit_mm(current);
current->session = 1;
current->pgrp = 1;
exit_fs(current);
fs = init_task.fs;
current->fs = fs;
atomic_inc(&fs->count);
}
#endif
/*
* flush_signals() has acquired sighand->siglock since 2.5.61... Verify RH's kernels!
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 61)
#define compat_flush_signals(task) do { \
spin_lock_irq(&task->compat_sigmask_lock); \
flush_signals(task); \
spin_unlock_irq(&task->compat_sigmask_lock); \
} while (0)
#else
#define compat_flush_signals(task) flush_signals(task)
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 61)
#define compat_allow_signal(signr) do { \
spin_lock_irq(&current->compat_sigmask_lock); \
sigdelset(&current->blocked, signr); \
compat_recalc_sigpending(); \
spin_unlock_irq(&current->compat_sigmask_lock); \
} while (0)
#else
#define compat_allow_signal(signr) allow_signal(signr)
#endif
/*
* daemonize() has been able to set the process name since 2.5.61.
* Prior to 2.5.61, daemonize() didn't block signals on our behalf.
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 61)
#define compat_daemonize(x...) \
({ \
/* Beware! No snprintf here, so verify arguments! */ \
sprintf(current->comm, x); \
\
/* Block all signals. */ \
spin_lock_irq(&current->compat_sigmask_lock); \
sigfillset(&current->blocked); \
compat_recalc_sigpending(); \
spin_unlock_irq(&current->compat_sigmask_lock); \
compat_flush_signals(current); \
\
daemonize(); \
compat_reparent_to_init(); \
})
#else
#define compat_daemonize(x...) daemonize(x)
#endif
/*
* Try to freeze a process. For kernels 2.6.11 or newer, we know how to choose
* the interface. The problem is that the oldest interface, introduced in
* 2.5.18, was backported to 2.4.x kernels. So if we're older than 2.6.11,
* we'll decide what to do based on whether or not swsusp was configured
* for the kernel. For kernels 2.6.20 and newer, we'll also need to include
* freezer.h since the try_to_freeze definition was pulled out of sched.h.
*/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
#include <linux/freezer.h>
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 13) || defined(VMW_TL10S64_WORKAROUND)
#define compat_try_to_freeze() try_to_freeze()
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11)
#define compat_try_to_freeze() try_to_freeze(PF_FREEZE)
#elif defined(CONFIG_SOFTWARE_SUSPEND) || defined(CONFIG_SOFTWARE_SUSPEND2)
#include "compat_mm.h"
#include <linux/errno.h>
#include <linux/suspend.h>
static inline int compat_try_to_freeze(void) {
if (current->flags & PF_FREEZE) {
refrigerator(PF_FREEZE);
return 1;
} else {
return 0;
}
}
#else
static inline int compat_try_to_freeze(void) { return 0; }
#endif
/*
* As of 2.6.23-rc1, kernel threads are no longer freezable by
* default. Instead, kernel threads that need to be frozen must opt-in
* by calling set_freezable() as soon as the thread is created.
*/
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 22)
#define compat_set_freezable() do { set_freezable(); } while (0)
#else
#define compat_set_freezable() do {} while (0)
#endif
/*
* Around 2.6.27 the kernel stopped sending signals to kernel
* threads being frozen; instead, threads have to check
* freezing() or use wait_event_freezable(). Unfortunately
* wait_event_freezable() completely hides from the calling code
* the fact that the thread was frozen, and sometimes we do
* want to know that.
*/
#ifdef PF_FREEZER_NOSIG
#define compat_wait_check_freezing() freezing(current)
#else
#define compat_wait_check_freezing() (0)
#endif
/*
* Since 2.6.27-rc2 kill_proc() is gone... The replacement (GPL-only!)
* API has been available since 2.6.19. Use it from 2.6.27-rc1 up.
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
typedef int compat_pid;
#define compat_find_get_pid(pid) (pid)
#define compat_put_pid(pid) do { } while (0)
#define compat_kill_pid(pid, sig, flag) kill_proc(pid, sig, flag)
#else
typedef struct pid * compat_pid;
#define compat_find_get_pid(pid) find_get_pid(pid)
#define compat_put_pid(pid) put_pid(pid)
#define compat_kill_pid(pid, sig, flag) kill_pid(pid, sig, flag)
#endif
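/*
 * An illustrative sketch of the freezer and pid wrappers above (not
 * part of the original header). MyThreadShouldStop() is a hypothetical
 * stand-in for the caller's own stop condition.
 */
#if 0
extern int MyThreadShouldStop(void);

static int
SchedExample(int pidArg) // IN
{
   compat_pid pid;

   compat_set_freezable(); /* opt in to freezing on 2.6.23+ */
   while (!MyThreadShouldStop()) {
      compat_try_to_freeze();
      compat_yield();
   }

   /* Signal another process across the kill_proc() removal. */
   pid = compat_find_get_pid(pidArg);
   compat_kill_pid(pid, SIGTERM, 1);
   compat_put_pid(pid);
   return 0;
}
#endif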
#endif /* __COMPAT_SCHED_H__ */
vmmemctl-only/shared/compat_namei.h 0000444 0000000 0000000 00000003416 12275350061 016435 0 ustar root root /*********************************************************
* Copyright (C) 2006 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef __COMPAT_NAMEI_H__
# define __COMPAT_NAMEI_H__
#include <linux/namei.h>
/*
* In 2.6.25-rc2, dentry and mount objects were removed from the nameidata
* struct. They were both replaced with a struct path.
*/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
#define compat_vmw_nd_to_dentry(nd) (nd).path.dentry
#else
#define compat_vmw_nd_to_dentry(nd) (nd).dentry
#endif
/* In 2.6.25-rc2, path_release(&nd) was replaced with path_put(&nd.path). */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
#define compat_path_release(nd) path_put(&(nd)->path)
#else
#define compat_path_release(nd) path_release(nd)
#endif
/* path_lookup was removed in 2.6.39 merge window VFS merge */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 38)
#define compat_path_lookup(name, flags, nd) kern_path(name, flags, &((nd)->path))
#else
#define compat_path_lookup(name, flags, nd) path_lookup(name, flags, nd)
#endif
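/*
 * An illustrative sketch of the wrappers above (not part of the
 * original header): resolve a path to a dentry across the 2.6.25 and
 * 2.6.39 VFS changes, keeping a reference past the release.
 */
#if 0
static struct dentry *
NameiExample(const char *name) // IN
{
   struct nameidata nd;
   struct dentry *dentry = NULL;

   if (compat_path_lookup(name, 0, &nd) == 0) {
      dentry = dget(compat_vmw_nd_to_dentry(nd));
      compat_path_release(&nd);
   }
   return dentry;
}
#endif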
#endif /* __COMPAT_NAMEI_H__ */
vmmemctl-only/shared/compat_pagemap.h 0000444 0000000 0000000 00000002535 12275350061 016757 0 ustar root root /*********************************************************
* Copyright (C) 2009 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef __COMPAT_PAGEMAP_H__
# define __COMPAT_PAGEMAP_H__
#include <linux/pagemap.h>
/*
* AOP_FLAG_NOFS was defined in the same changeset in which
* grab_cache_page_write_begin() was introduced.
*/
#ifdef AOP_FLAG_NOFS
#define compat_grab_cache_page_write_begin(mapping, index, flags) \
grab_cache_page_write_begin((mapping), (index), (flags))
#else
#define compat_grab_cache_page_write_begin(mapping, index, flags) \
__grab_cache_page((mapping), (index))
#endif
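/*
 * An illustrative sketch (not part of the original header): the
 * wrapper hides the __grab_cache_page() rename; the "flags" argument
 * is simply dropped on kernels that predate AOP_FLAG_NOFS.
 */
#if 0
static struct page *
PagemapExample(struct address_space *mapping, // IN
               pgoff_t index)                 // IN
{
   return compat_grab_cache_page_write_begin(mapping, index, 0);
}
#endif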
#endif /* __COMPAT_PAGEMAP_H__ */
vmmemctl-only/shared/x86cpuid_asm.h 0000444 0000000 0000000 00000022267 12275350062 016321 0 ustar root root /*********************************************************
* Copyright (C) 2003-2009 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* x86cpuid_asm.h
*
* CPUID-related assembly functions.
*/
#ifndef _X86CPUID_ASM_H_
#define _X86CPUID_ASM_H_
#define INCLUDE_ALLOW_USERLEVEL
#define INCLUDE_ALLOW_MODULE
#define INCLUDE_ALLOW_VMMON
#define INCLUDE_ALLOW_VMK_MODULE
#define INCLUDE_ALLOW_VMKERNEL
#define INCLUDE_ALLOW_DISTRIBUTE
#define INCLUDE_ALLOW_VMCORE
#include "includeCheck.h"
#include "vm_basic_asm.h"
#include "x86cpuid.h"
/*
* x86-64 Windows doesn't support inline asm, so we have to use these
* intrinsic functions defined in the compiler. Not all of these are well
* documented. The compiler dll (c1.dll) contains an array of the names
* of all the intrinsics minus the leading underscore. Searching around
* in the ntddk.h file can also be helpful.
*
* The declarations for the intrinsic functions were taken from the DDK.
* Our declarations must match the ddk's otherwise the 64-bit c++ compiler
* will complain about second linkage of the intrinsic functions.
* We define the intrinsic using the basic types corresponding to the
* Windows typedefs. This avoids having to include windows header files
* to get to the windows types.
*/
#ifdef _MSC_VER
#ifdef __cplusplus
extern "C" {
#endif
#ifdef VM_X86_64
/*
* Intrinsic functions only supported by x86-64 Windows as of 2k3sp1.
*/
void __cpuid(unsigned int*, unsigned int);
#pragma intrinsic(__cpuid)
#endif /* VM_X86_64 */
#ifdef __cplusplus
}
#endif
#endif /* _MSC_VER */
#ifdef __GNUC__ // {
/*
* Checked against the Intel manual and GCC --hpreg
*
* Need __volatile__ and "memory" since CPUID has a synchronizing effect.
* The CPUID may also change at runtime (APIC flag, etc).
*
*/
/*
* %ebx is reserved on i386 PIC. Apple's gcc-5493 (gcc 4.0) compiling
* for x86_64 incorrectly errors out saying %ebx is reserved. This is
* Apple bug 7304232.
*/
#if vm_x86_64 ? (defined __APPLE_CC__ && __APPLE_CC__ == 5493) : defined __PIC__
#if vm_x86_64
/*
* Note that this generates movq %rbx,%rbx; cpuid; xchgq %rbx,%rbx ...
* Unfortunately Apple's assembler does not have .ifnes, and I cannot
* figure out how to do that with .if. If we ever enable this code
* on other 64bit systems, both movq & xchgq should be surrounded by
* .ifnes \"%%rbx\", \"%q1\" & .endif
*/
#define VM_CPUID_BLOCK "movq %%rbx, %q1\n\t" \
"cpuid\n\t" \
"xchgq %%rbx, %q1\n\t"
#define VM_EBX_OUT(reg) "=&r"(reg)
#else
#define VM_CPUID_BLOCK "movl %%ebx, %1\n\t" \
"cpuid\n\t" \
"xchgl %%ebx, %1\n\t"
#define VM_EBX_OUT(reg) "=&rm"(reg)
#endif
#else
#define VM_CPUID_BLOCK "cpuid"
#define VM_EBX_OUT(reg) "=b"(reg)
#endif
static INLINE void
__GET_CPUID(int eax, // IN
CPUIDRegs *regs) // OUT
{
__asm__ __volatile__(
VM_CPUID_BLOCK
: "=a" (regs->eax), VM_EBX_OUT(regs->ebx), "=c" (regs->ecx), "=d" (regs->edx)
: "a" (eax)
: "memory"
);
}
static INLINE void
__GET_CPUID2(int eax, // IN
int ecx, // IN
CPUIDRegs *regs) // OUT
{
__asm__ __volatile__(
VM_CPUID_BLOCK
: "=a" (regs->eax), VM_EBX_OUT(regs->ebx), "=c" (regs->ecx), "=d" (regs->edx)
: "a" (eax), "c" (ecx)
: "memory"
);
}
static INLINE uint32
__GET_EAX_FROM_CPUID(int eax) // IN
{
uint32 ebx;
__asm__ __volatile__(
VM_CPUID_BLOCK
: "=a" (eax), VM_EBX_OUT(ebx)
: "a" (eax)
: "memory", "%ecx", "%edx"
);
return eax;
}
static INLINE uint32
__GET_EBX_FROM_CPUID(int eax) // IN
{
uint32 ebx;
__asm__ __volatile__(
VM_CPUID_BLOCK
: "=a" (eax), VM_EBX_OUT(ebx)
: "a" (eax)
: "memory", "%ecx", "%edx"
);
return ebx;
}
static INLINE uint32
__GET_ECX_FROM_CPUID(int eax) // IN
{
uint32 ecx;
uint32 ebx;
__asm__ __volatile__(
VM_CPUID_BLOCK
: "=a" (eax), VM_EBX_OUT(ebx), "=c" (ecx)
: "a" (eax)
: "memory", "%edx"
);
return ecx;
}
static INLINE uint32
__GET_EDX_FROM_CPUID(int eax) // IN
{
uint32 edx;
uint32 ebx;
__asm__ __volatile__(
VM_CPUID_BLOCK
: "=a" (eax), VM_EBX_OUT(ebx), "=d" (edx)
: "a" (eax)
: "memory", "%ecx"
);
return edx;
}
static INLINE uint32
__GET_EAX_FROM_CPUID4(int ecx) // IN
{
uint32 eax;
uint32 ebx;
__asm__ __volatile__(
VM_CPUID_BLOCK
: "=a" (eax), VM_EBX_OUT(ebx), "=c" (ecx)
: "a" (4), "c" (ecx)
: "memory", "%edx"
);
return eax;
}
#undef VM_CPUID_BLOCK
#undef VM_EBX_OUT
#elif defined(_MSC_VER) // } {
static INLINE void
__GET_CPUID(int input, CPUIDRegs *regs)
{
#ifdef VM_X86_64
__cpuid((unsigned int *)regs, input);
#else
__asm push esi
__asm push ebx
__asm push ecx
__asm push edx
__asm mov eax, input
__asm mov esi, regs
__asm _emit 0x0f __asm _emit 0xa2
__asm mov 0x0[esi], eax
__asm mov 0x4[esi], ebx
__asm mov 0x8[esi], ecx
__asm mov 0xC[esi], edx
__asm pop edx
__asm pop ecx
__asm pop ebx
__asm pop esi
#endif
}
#ifdef VM_X86_64
/*
* No inline assembly in Win64. Implemented in bora/lib/misc in
* cpuidMasm64.asm.
*/
extern void
__GET_CPUID2(int inputEax, int inputEcx, CPUIDRegs *regs);
#else // VM_X86_64
static INLINE void
__GET_CPUID2(int inputEax, int inputEcx, CPUIDRegs *regs)
{
__asm push esi
__asm push ebx
__asm push ecx
__asm push edx
__asm mov eax, inputEax
__asm mov ecx, inputEcx
__asm mov esi, regs
__asm _emit 0x0f __asm _emit 0xa2
__asm mov 0x0[esi], eax
__asm mov 0x4[esi], ebx
__asm mov 0x8[esi], ecx
__asm mov 0xC[esi], edx
__asm pop edx
__asm pop ecx
__asm pop ebx
__asm pop esi
}
#endif
static INLINE uint32
__GET_EAX_FROM_CPUID(int input)
{
#ifdef VM_X86_64
CPUIDRegs regs;
__cpuid((unsigned int *)&regs, input);
return regs.eax;
#else
uint32 output;
//NOT_TESTED();
__asm push ebx
__asm push ecx
__asm push edx
__asm mov eax, input
__asm _emit 0x0f __asm _emit 0xa2
__asm mov output, eax
__asm pop edx
__asm pop ecx
__asm pop ebx
return output;
#endif
}
static INLINE uint32
__GET_EBX_FROM_CPUID(int input)
{
#ifdef VM_X86_64
CPUIDRegs regs;
__cpuid((unsigned int *)&regs, input);
return regs.ebx;
#else
uint32 output;
//NOT_TESTED();
__asm push ebx
__asm push ecx
__asm push edx
__asm mov eax, input
__asm _emit 0x0f __asm _emit 0xa2
__asm mov output, ebx
__asm pop edx
__asm pop ecx
__asm pop ebx
return output;
#endif
}
static INLINE uint32
__GET_ECX_FROM_CPUID(int input)
{
#ifdef VM_X86_64
CPUIDRegs regs;
__cpuid((unsigned int *)&regs, input);
return regs.ecx;
#else
uint32 output;
//NOT_TESTED();
__asm push ebx
__asm push ecx
__asm push edx
__asm mov eax, input
__asm _emit 0x0f __asm _emit 0xa2
__asm mov output, ecx
__asm pop edx
__asm pop ecx
__asm pop ebx
return output;
#endif
}
static INLINE uint32
__GET_EDX_FROM_CPUID(int input)
{
#ifdef VM_X86_64
CPUIDRegs regs;
__cpuid((unsigned int *)&regs, input);
return regs.edx;
#else
uint32 output;
//NOT_TESTED();
__asm push ebx
__asm push ecx
__asm push edx
__asm mov eax, input
__asm _emit 0x0f __asm _emit 0xa2
__asm mov output, edx
__asm pop edx
__asm pop ecx
__asm pop ebx
return output;
#endif
}
#ifdef VM_X86_64
/*
* No inline assembly in Win64. Implemented in bora/lib/misc in
* cpuidMasm64.asm.
*/
extern uint32
__GET_EAX_FROM_CPUID4(int inputEcx);
#else // VM_X86_64
static INLINE uint32
__GET_EAX_FROM_CPUID4(int inputEcx)
{
uint32 output;
//NOT_TESTED();
__asm push ebx
__asm push ecx
__asm push edx
__asm mov eax, 4
__asm mov ecx, inputEcx
__asm _emit 0x0f __asm _emit 0xa2
__asm mov output, eax
__asm pop edx
__asm pop ecx
__asm pop ebx
return output;
}
#endif // VM_X86_64
#else // }
#error
#endif
#define CPUID_FOR_SIDE_EFFECTS() ((void)__GET_EAX_FROM_CPUID(0))
static INLINE void
__GET_CPUID4(int inputEcx, CPUIDRegs *regs)
{
__GET_CPUID2(4, inputEcx, regs);
}
/* The first parameter is used as an rvalue and then as an lvalue. */
#define GET_CPUID(_ax, _bx, _cx, _dx) { \
CPUIDRegs regs; \
__GET_CPUID(_ax, &regs); \
_ax = regs.eax; \
_bx = regs.ebx; \
_cx = regs.ecx; \
_dx = regs.edx; \
}
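/*
 * An illustrative sketch (not part of the original header): leaf 0
 * returns the maximum standard leaf in EAX and the 12-byte vendor
 * string split across EBX, EDX, ECX, in that order.
 */
#if 0
static INLINE void
CpuidVendorExample(char vendor[13]) // OUT
{
   CPUIDRegs regs;

   __GET_CPUID(0, &regs);
   memcpy(vendor + 0, &regs.ebx, 4);
   memcpy(vendor + 4, &regs.edx, 4);
   memcpy(vendor + 8, &regs.ecx, 4);
   vendor[12] = '\0'; /* e.g. "GenuineIntel" or "AuthenticAMD" */
}
#endif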
#endif
vmmemctl-only/shared/vmci_iocontrols.h 0000444 0000000 0000000 00000062057 12275350062 017221 0 ustar root root /*********************************************************
* Copyright (C) 2007 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* vmci_iocontrols.h
*
* The VMCI driver io controls.
*/
#ifndef _VMCI_IOCONTROLS_H_
#define _VMCI_IOCONTROLS_H_
#define INCLUDE_ALLOW_USERLEVEL
#define INCLUDE_ALLOW_VMCORE
#define INCLUDE_ALLOW_MODULE
#define INCLUDE_ALLOW_VMKERNEL
#include "includeCheck.h"
#include "vm_assert.h"
#include "vmci_defs.h"
#if defined(_WIN32) && defined(WINNT_DDK)
/* We need to expose the API through an IOCTL on Windows. Use latest API. */
#include "vmciKernelAPI.h"
#endif // _WIN32 && WINNT_DDK
/*
*-----------------------------------------------------------------------------
*
* VMCIVA64ToPtr --
*
* Convert a VA64 to a pointer.
*
* Results:
* Virtual address.
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
static INLINE void *
VMCIVA64ToPtr(VA64 va64) // IN
{
#ifdef VM_X86_64
ASSERT_ON_COMPILE(sizeof (void *) == 8);
#else
ASSERT_ON_COMPILE(sizeof (void *) == 4);
// Check that nothing of value will be lost.
ASSERT(!(va64 >> 32));
#endif
return (void *)(uintptr_t)va64;
}
/*
*-----------------------------------------------------------------------------
*
* VMCIPtrToVA64 --
*
* Convert a pointer to a VA64.
*
* Results:
* Virtual address.
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
static INLINE VA64
VMCIPtrToVA64(void const *ptr) // IN
{
ASSERT_ON_COMPILE(sizeof ptr <= sizeof (VA64));
return (VA64)(uintptr_t)ptr;
}
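/*
 * An illustrative sketch (not part of the original header): user-level
 * code round-trips pointers through VA64 when filling in the ioctl
 * structures below, e.g. the "addr" field of VMCIDatagramSendRecvInfo.
 */
#if 0
static INLINE void
VA64Example(void *buf) // IN
{
   VA64 va = VMCIPtrToVA64(buf);

   ASSERT(VMCIVA64ToPtr(va) == buf);
}
#endif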
/*
* Driver version.
*
* Increment major version when you make an incompatible change.
* Compatibility goes both ways (old driver with new executable
* as well as new driver with old executable).
*/
#define VMCI_VERSION_SHIFT_WIDTH 16 /* Never change this. */
#define VMCI_MAKE_VERSION(_major, _minor) ((_major) << \
VMCI_VERSION_SHIFT_WIDTH | \
(uint16) (_minor))
#define VMCI_VERSION_MAJOR(v) ((uint32) (v) >> VMCI_VERSION_SHIFT_WIDTH)
#define VMCI_VERSION_MINOR(v) ((uint16) (v))
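/*
 * For example, VMCI_MAKE_VERSION(11, 0) == 11 << 16, so
 * VMCI_VERSION_MAJOR(VMCI_MAKE_VERSION(11, 0)) == 11 and
 * VMCI_VERSION_MINOR(VMCI_MAKE_VERSION(11, 0)) == 0.
 */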
/*
* VMCI_VERSION is always the current version. Subsequently listed
* versions are ways of detecting previous versions of the connecting
* application (i.e., VMX).
*
* VMCI_VERSION_NOVMVM: This version removed support for VM to VM
* communication.
*
* VMCI_VERSION_NOTIFY: This version introduced doorbell notification
* support.
*
* VMCI_VERSION_HOSTQP: This version introduced host end point support
* for hosted products.
*
* VMCI_VERSION_PREHOSTQP: This is the version prior to the adoption of
* support for host end-points.
*
* VMCI_VERSION_PREVERS2: This fictional version number is intended to
* represent the version of a VMX which doesn't call into the driver
* with ioctl VERSION2 and thus doesn't establish its version with the
* driver.
*/
#define VMCI_VERSION VMCI_VERSION_NOVMVM
#define VMCI_VERSION_NOVMVM VMCI_MAKE_VERSION(11, 0)
#define VMCI_VERSION_NOTIFY VMCI_MAKE_VERSION(10, 0)
#define VMCI_VERSION_HOSTQP VMCI_MAKE_VERSION(9, 0)
#define VMCI_VERSION_PREHOSTQP VMCI_MAKE_VERSION(8, 0)
#define VMCI_VERSION_PREVERS2 VMCI_MAKE_VERSION(1, 0)
/*
* VMCISockets driver version. The version is platform-dependent and is
* embedded in vsock_version.h for each platform. It can be obtained via
* VMCISock_Version() (which uses IOCTL_VMCI_SOCKETS_VERSION). The
* following is simply for constructing an unsigned integer value from the
* comma-separated version in the header. This must match the macros defined
* in vmci_sockets.h. An example of using this is:
* uint16 parts[4] = { VSOCK_DRIVER_VERSION_COMMAS };
* uint32 version = VMCI_SOCKETS_MAKE_VERSION(parts);
*/
#define VMCI_SOCKETS_MAKE_VERSION(_p) \
((((_p)[0] & 0xFF) << 24) | (((_p)[1] & 0xFF) << 16) | ((_p)[2]))
#if defined(__linux__) || defined(SOLARIS) || defined(VMKERNEL)
/*
* Linux defines _IO* macros, but the core kernel code ignores the encoded
* ioctl value. It is up to individual drivers to decode the value (for
* example to look at the size of a structure to determine which version
* of a specific command should be used) or not (which is what we
* currently do, so right now the ioctl value for a given command is the
* command itself).
*
* Hence, we just define the IOCTL_VMCI_foo values directly, with no
* intermediate IOCTLCMD_ representation.
*/
# define IOCTLCMD(_cmd) IOCTL_VMCI_ ## _cmd
#elif defined (__APPLE__)
#include <sys/ioccom.h>
#define IOCTLCMD(_cmd) IOCTL_VMCI_ ## _cmd
#define IOCTLCMD_I(_cmd, _type) \
IOCTL_VMCI_MACOS_ ## _cmd = _IOW('V', IOCTL_VMCI_ ## _cmd, _type)
#define IOCTLCMD_O(_cmd, _type) \
IOCTL_VMCI_MACOS_ ## _cmd = _IOR('V', IOCTL_VMCI_ ## _cmd, _type)
#define IOCTLCMD_IO(_cmd, _type) \
IOCTL_VMCI_MACOS_ ## _cmd = _IOWR('V', IOCTL_VMCI_ ## _cmd, _type)
#else // if defined(__linux__)
/*
* On platforms other than Linux, IOCTLCMD_foo values are just numbers, and
* we build the IOCTL_VMCI_foo values around these using a platform-specific
* format for encoding arguments and sizes.
*/
# define IOCTLCMD(_cmd) IOCTLCMD_VMCI_ ## _cmd
#endif
enum IOCTLCmd_VMCI {
/*
* We need to bracket the range of values used for ioctls, because x86_64
* Linux forces us to explicitly register ioctl handlers by value for
* handling 32 bit ioctl syscalls. Hence FIRST and LAST. Pick something
* for FIRST that doesn't collide with vmmon (2001+).
*/
#if defined(__linux__)
IOCTLCMD(FIRST) = 1951,
#else
/* Start at 0. */
IOCTLCMD(FIRST),
#endif
IOCTLCMD(VERSION) = IOCTLCMD(FIRST),
/* BEGIN VMCI */
IOCTLCMD(INIT_CONTEXT),
/*
* The following two were used for process and datagram process creation.
* They are not used anymore and reserved for future use.
* They will fail if issued.
*/
IOCTLCMD(RESERVED1),
IOCTLCMD(RESERVED2),
/*
* The following used to be for shared memory. It is now unused and is
* reserved for future use. It will fail if issued.
*/
IOCTLCMD(RESERVED3),
/*
* The following three also used to be for shared memory. An
* old WS6 user-mode client might try to use them with the new
* driver, but since we ensure that only contexts created by VMX'en
* of the appropriate version (VMCI_VERSION_NOTIFY or
* VMCI_VERSION_NEWQP) or higher use these ioctls, everything is
* fine.
*/
IOCTLCMD(QUEUEPAIR_SETVA),
IOCTLCMD(NOTIFY_RESOURCE),
IOCTLCMD(NOTIFICATIONS_RECEIVE),
IOCTLCMD(VERSION2),
IOCTLCMD(QUEUEPAIR_ALLOC),
IOCTLCMD(QUEUEPAIR_SETPAGEFILE),
IOCTLCMD(QUEUEPAIR_DETACH),
IOCTLCMD(DATAGRAM_SEND),
IOCTLCMD(DATAGRAM_RECEIVE),
IOCTLCMD(DATAGRAM_REQUEST_MAP),
IOCTLCMD(DATAGRAM_REMOVE_MAP),
IOCTLCMD(CTX_ADD_NOTIFICATION),
IOCTLCMD(CTX_REMOVE_NOTIFICATION),
IOCTLCMD(CTX_GET_CPT_STATE),
IOCTLCMD(CTX_SET_CPT_STATE),
IOCTLCMD(GET_CONTEXT_ID),
/* END VMCI */
/*
* BEGIN VMCI SOCKETS
*
* We mark the end of the vmci commands and the start of the vmci sockets
* commands since they are used in separate modules on Linux.
*/
IOCTLCMD(LAST),
IOCTLCMD(SOCKETS_FIRST) = IOCTLCMD(LAST),
/*
* This used to be for accept() on Windows and Mac OS, which is now
* redundant (since we now use real handles). It is used instead for
* getting the version. This value is now public, so it cannot change.
*/
IOCTLCMD(SOCKETS_VERSION) = IOCTLCMD(SOCKETS_FIRST),
IOCTLCMD(SOCKETS_BIND),
/*
* This used to be for close() on Windows and Mac OS, but is no longer
* used for the same reason as accept() above. It is used instead for
* sending private symbols to the Mac OS driver.
*/
IOCTLCMD(SOCKETS_SET_SYMBOLS),
IOCTLCMD(SOCKETS_CONNECT),
/*
* The next two values are public (vmci_sockets.h) and cannot be changed.
* That means the number of values above these cannot be changed either
* unless the base index (specified below) is updated accordingly.
*/
IOCTLCMD(SOCKETS_GET_AF_VALUE),
IOCTLCMD(SOCKETS_GET_LOCAL_CID),
IOCTLCMD(SOCKETS_GET_SOCK_NAME),
IOCTLCMD(SOCKETS_GET_SOCK_OPT),
IOCTLCMD(SOCKETS_GET_VM_BY_NAME),
IOCTLCMD(SOCKETS_IOCTL),
IOCTLCMD(SOCKETS_LISTEN),
IOCTLCMD(SOCKETS_RECV),
IOCTLCMD(SOCKETS_RECV_FROM),
IOCTLCMD(SOCKETS_SELECT),
IOCTLCMD(SOCKETS_SEND),
IOCTLCMD(SOCKETS_SEND_TO),
IOCTLCMD(SOCKETS_SET_SOCK_OPT),
IOCTLCMD(SOCKETS_SHUTDOWN),
IOCTLCMD(SOCKETS_SOCKET),
IOCTLCMD(SOCKETS_UUID_2_CID), /* 1991 on Linux. */
/* END VMCI SOCKETS */
/*
* We reserve a range of 3 ioctls for VMCI Sockets to grow. We cannot
* reserve many ioctls here since we are close to overlapping with vmmon
* ioctls. Define a meta-ioctl if running out of this binary space.
*/
// Must be last.
IOCTLCMD(SOCKETS_LAST) = IOCTLCMD(SOCKETS_UUID_2_CID) + 3, /* 1994 on Linux. */
/*
* The VSockets ioctls occupy the block above. We define a new range of
* VMCI ioctls to maintain binary compatibility between the user land and
* the kernel driver. Careful, vmmon ioctls start from 2001, so this means
* we can add only 4 new VMCI ioctls. Define a meta-ioctl if running out of
* this binary space.
*/
IOCTLCMD(FIRST2),
IOCTLCMD(SET_NOTIFY) = IOCTLCMD(FIRST2), /* 1995 on Linux. */
IOCTLCMD(LAST2),
};
#if defined (__APPLE__)
/*
* The size of this must match the size of VSockIoctlPrivSyms in
* modules/vsock/common/vsockIoctl.h.
*/
#include "vmware_pack_begin.h"
struct IOCTLCmd_VMCIMacOS_PrivSyms {
char data[344];
}
#include "vmware_pack_end.h"
;
enum IOCTLCmd_VMCIMacOS {
IOCTLCMD_I(SOCKETS_SET_SYMBOLS, struct IOCTLCmd_VMCIMacOS_PrivSyms),
IOCTLCMD_O(SOCKETS_VERSION, unsigned int),
IOCTLCMD_O(SOCKETS_GET_AF_VALUE, int),
IOCTLCMD_O(SOCKETS_GET_LOCAL_CID, unsigned int),
};
#endif // __APPLE__
#if defined _WIN32
/*
* Windows VMCI ioctl definitions.
*/
/* PUBLIC: For VMCISockets user-mode clients that use CreateFile(). */
#define VMCI_INTERFACE_VSOCK_PUBLIC_NAME TEXT("\\\\.\\VMCI")
/* PUBLIC: For VMCISockets user-mode clients that use NtCreateFile(). */
#define VMCI_INTERFACE_VSOCK_PUBLIC_NAME_NT L"\\??\\VMCI"
/* PUBLIC: For the VMX, which uses CreateFile(). */
#define VMCI_INTERFACE_VMX_PUBLIC_NAME TEXT("\\\\.\\VMCIDev\\VMX")
/* PRIVATE NAMES */
#define VMCI_DEVICE_VMCI_LINK_PATH L"\\DosDevices\\VMCIDev"
#define VMCI_DEVICE_VSOCK_LINK_PATH L"\\DosDevices\\vmci"
#define VMCI_DEVICE_HOST_NAME_PATH L"\\Device\\VMCIHostDev"
#define VMCI_DEVICE_GUEST_NAME_PATH L"\\Device\\VMCIGuestDev"
/* PRIVATE NAMES */
/* These values cannot be changed since some of the ioctl values are public. */
#define FILE_DEVICE_VMCI 0x8103
#define VMCI_IOCTL_BASE_INDEX 0x801
#define VMCIIOCTL_BUFFERED(name) \
CTL_CODE(FILE_DEVICE_VMCI, \
VMCI_IOCTL_BASE_INDEX + IOCTLCMD_VMCI_ ## name, \
METHOD_BUFFERED, \
FILE_ANY_ACCESS)
#define VMCIIOCTL_NEITHER(name) \
CTL_CODE(FILE_DEVICE_VMCI, \
VMCI_IOCTL_BASE_INDEX + IOCTLCMD_VMCI_ ## name, \
METHOD_NEITHER, \
FILE_ANY_ACCESS)
enum IOCTLCmd_VMCIWin32 {
IOCTLCMD(DEVICE_GET) = IOCTLCMD(LAST2) + 1,
IOCTLCMD(SOCKETS_SERVICE_GET),
IOCTLCMD(SOCKETS_STOP),
};
#define IOCTL_VMCI_VERSION VMCIIOCTL_BUFFERED(VERSION)
/* BEGIN VMCI */
#define IOCTL_VMCI_INIT_CONTEXT \
VMCIIOCTL_BUFFERED(INIT_CONTEXT)
#define IOCTL_VMCI_HYPERCALL \
VMCIIOCTL_BUFFERED(HYPERCALL)
#define IOCTL_VMCI_CREATE_DATAGRAM_HANDLE \
VMCIIOCTL_BUFFERED(CREATE_DATAGRAM_HANDLE)
#define IOCTL_VMCI_DESTROY_DATAGRAM_HANDLE \
VMCIIOCTL_BUFFERED(DESTROY_DATAGRAM_HANDLE)
#define IOCTL_VMCI_NOTIFY_RESOURCE \
VMCIIOCTL_BUFFERED(NOTIFY_RESOURCE)
#define IOCTL_VMCI_NOTIFICATIONS_RECEIVE \
VMCIIOCTL_BUFFERED(NOTIFICATIONS_RECEIVE)
#define IOCTL_VMCI_VERSION2 \
VMCIIOCTL_BUFFERED(VERSION2)
#define IOCTL_VMCI_QUEUEPAIR_ALLOC \
VMCIIOCTL_BUFFERED(QUEUEPAIR_ALLOC)
#define IOCTL_VMCI_QUEUEPAIR_SETVA \
VMCIIOCTL_BUFFERED(QUEUEPAIR_SETVA)
#define IOCTL_VMCI_QUEUEPAIR_SETPAGEFILE \
VMCIIOCTL_BUFFERED(QUEUEPAIR_SETPAGEFILE)
#define IOCTL_VMCI_QUEUEPAIR_DETACH \
VMCIIOCTL_BUFFERED(QUEUEPAIR_DETACH)
#define IOCTL_VMCI_DATAGRAM_SEND \
VMCIIOCTL_BUFFERED(DATAGRAM_SEND)
#define IOCTL_VMCI_DATAGRAM_RECEIVE \
VMCIIOCTL_NEITHER(DATAGRAM_RECEIVE)
#define IOCTL_VMCI_DATAGRAM_REQUEST_MAP \
VMCIIOCTL_BUFFERED(DATAGRAM_REQUEST_MAP)
#define IOCTL_VMCI_DATAGRAM_REMOVE_MAP \
VMCIIOCTL_BUFFERED(DATAGRAM_REMOVE_MAP)
#define IOCTL_VMCI_CTX_ADD_NOTIFICATION \
VMCIIOCTL_BUFFERED(CTX_ADD_NOTIFICATION)
#define IOCTL_VMCI_CTX_REMOVE_NOTIFICATION \
VMCIIOCTL_BUFFERED(CTX_REMOVE_NOTIFICATION)
#define IOCTL_VMCI_CTX_GET_CPT_STATE \
VMCIIOCTL_BUFFERED(CTX_GET_CPT_STATE)
#define IOCTL_VMCI_CTX_SET_CPT_STATE \
VMCIIOCTL_BUFFERED(CTX_SET_CPT_STATE)
#define IOCTL_VMCI_GET_CONTEXT_ID \
VMCIIOCTL_BUFFERED(GET_CONTEXT_ID)
#define IOCTL_VMCI_DEVICE_GET \
VMCIIOCTL_BUFFERED(DEVICE_GET)
/* END VMCI */
/* BEGIN VMCI SOCKETS */
#define IOCTL_VMCI_SOCKETS_VERSION \
VMCIIOCTL_BUFFERED(SOCKETS_VERSION)
#define IOCTL_VMCI_SOCKETS_BIND \
VMCIIOCTL_BUFFERED(SOCKETS_BIND)
#define IOCTL_VMCI_SOCKETS_CONNECT \
VMCIIOCTL_BUFFERED(SOCKETS_CONNECT)
#define IOCTL_VMCI_SOCKETS_GET_AF_VALUE \
VMCIIOCTL_BUFFERED(SOCKETS_GET_AF_VALUE)
#define IOCTL_VMCI_SOCKETS_GET_LOCAL_CID \
VMCIIOCTL_BUFFERED(SOCKETS_GET_LOCAL_CID)
#define IOCTL_VMCI_SOCKETS_GET_SOCK_NAME \
VMCIIOCTL_BUFFERED(SOCKETS_GET_SOCK_NAME)
#define IOCTL_VMCI_SOCKETS_GET_SOCK_OPT \
VMCIIOCTL_BUFFERED(SOCKETS_GET_SOCK_OPT)
#define IOCTL_VMCI_SOCKETS_GET_VM_BY_NAME \
VMCIIOCTL_BUFFERED(SOCKETS_GET_VM_BY_NAME)
#define IOCTL_VMCI_SOCKETS_IOCTL \
VMCIIOCTL_BUFFERED(SOCKETS_IOCTL)
#define IOCTL_VMCI_SOCKETS_LISTEN \
VMCIIOCTL_BUFFERED(SOCKETS_LISTEN)
#define IOCTL_VMCI_SOCKETS_RECV_FROM \
VMCIIOCTL_BUFFERED(SOCKETS_RECV_FROM)
#define IOCTL_VMCI_SOCKETS_SELECT \
VMCIIOCTL_BUFFERED(SOCKETS_SELECT)
#define IOCTL_VMCI_SOCKETS_SEND_TO \
VMCIIOCTL_BUFFERED(SOCKETS_SEND_TO)
#define IOCTL_VMCI_SOCKETS_SET_SOCK_OPT \
VMCIIOCTL_BUFFERED(SOCKETS_SET_SOCK_OPT)
#define IOCTL_VMCI_SOCKETS_SHUTDOWN \
VMCIIOCTL_BUFFERED(SOCKETS_SHUTDOWN)
#define IOCTL_VMCI_SOCKETS_SERVICE_GET \
VMCIIOCTL_BUFFERED(SOCKETS_SERVICE_GET)
#define IOCTL_VMCI_SOCKETS_STOP \
VMCIIOCTL_NEITHER(SOCKETS_STOP)
/* END VMCI SOCKETS */
#endif // _WIN32
/*
* VMCI driver initialization. This block can also be used to
* pass initial group membership etc.
*/
typedef struct VMCIInitBlock {
VMCIId cid;
VMCIPrivilegeFlags flags;
#ifdef _WIN32
uint64 event; /* Handle for signalling VMCI calls on Windows. */
#endif // _WIN32
} VMCIInitBlock;
typedef struct VMCISharedMemInfo {
VMCIHandle handle;
uint32 size;
uint32 result;
VA64 va; /* Currently only used in the guest. */
char pageFileName[VMCI_PATH_MAX];
} VMCISharedMemInfo;
typedef struct VMCIQueuePairAllocInfo_VMToVM {
VMCIHandle handle;
VMCIId peer;
uint32 flags;
uint64 produceSize;
uint64 consumeSize;
#if !defined(VMX86_SERVER) && !defined(VMKERNEL)
VA64 producePageFile; /* User VA. */
VA64 consumePageFile; /* User VA. */
uint64 producePageFileSize; /* Size of the file name array. */
uint64 consumePageFileSize; /* Size of the file name array. */
#else
PPN * PPNs;
uint64 numPPNs;
#endif
int32 result;
uint32 _pad;
} VMCIQueuePairAllocInfo_VMToVM;
typedef struct VMCIQueuePairAllocInfo {
VMCIHandle handle;
VMCIId peer;
uint32 flags;
uint64 produceSize;
uint64 consumeSize;
#if !defined(VMX86_SERVER) && !defined(VMKERNEL)
VA64 ppnVA; /* Start VA of queue pair PPNs. */
#else
PPN * PPNs;
#endif
uint64 numPPNs;
int32 result;
uint32 version;
} VMCIQueuePairAllocInfo;
typedef struct VMCIQueuePairSetVAInfo {
VMCIHandle handle;
VA64 va; /* Start VA of queue pair PPNs. */
uint64 numPPNs;
uint32 version;
int32 result;
} VMCIQueuePairSetVAInfo;
/*
* For backwards compatibility, here is a version of the
* VMCIQueuePairPageFileInfo before host end-point support was added.
* Note that the current version of that structure requires VMX to
* pass down the VA of the mapped file. Before host support was added
* there was nothing of the sort. So, when the driver sees the ioctl
* with a parameter that is the sizeof
* VMCIQueuePairPageFileInfo_NoHostQP then it can infer that the version
* of VMX running can't attach to host end points because it doesn't
* provide the VA of the mapped files.
*
* The Linux driver doesn't get an indication of the size of the
* structure passed down from user space. So, to fix a long-standing
* but unfiled bug, the _pad field has been renamed to version.
* Existing versions of VMX always initialize the PageFileInfo
* structure so that _pad, er, version is set to 0.
*
* A version value of 1 indicates that the size of the structure has
* been increased to include two UVA's: produceUVA and consumeUVA.
* These UVA's are of the mmap()'d queue contents backing files.
*
* In addition, if VMX gets an error when sending down the
* VMCIQueuePairPageFileInfo structure, it will try again with the
* _NoHostQP version of the structure to see if an older VMCI kernel
* module is running.
*/
typedef struct VMCIQueuePairPageFileInfo_NoHostQP {
VMCIHandle handle;
VA64 producePageFile; /* User VA. */
VA64 consumePageFile; /* User VA. */
uint64 producePageFileSize; /* Size of the file name array. */
uint64 consumePageFileSize; /* Size of the file name array. */
int32 result;
uint32 version; /* Was _pad. Must be 0. */
} VMCIQueuePairPageFileInfo_NoHostQP;
typedef struct VMCIQueuePairPageFileInfo {
VMCIHandle handle;
#if !defined(VMX86_SERVER) && !defined(VMKERNEL)
VA64 producePageFile; /* User VA. */
VA64 consumePageFile; /* User VA. */
uint64 producePageFileSize; /* Size of the file name array. */
uint64 consumePageFileSize; /* Size of the file name array. */
#endif
int32 result;
uint32 version; /* Was _pad. */
VA64 produceVA; /* User VA of the mapped file. */
VA64 consumeVA; /* User VA of the mapped file. */
} VMCIQueuePairPageFileInfo;
typedef struct VMCIQueuePairDetachInfo {
VMCIHandle handle;
int32 result;
uint32 _pad;
} VMCIQueuePairDetachInfo;
typedef struct VMCIDatagramSendRecvInfo {
VA64 addr;
uint32 len;
int32 result;
} VMCIDatagramSendRecvInfo;
/* Used to add/remove well-known datagram mappings. */
typedef struct VMCIDatagramMapInfo {
VMCIId wellKnownID;
int result;
} VMCIDatagramMapInfo;
/* Used to add/remove remote context notifications. */
typedef struct VMCINotifyAddRemoveInfo {
VMCIId remoteCID;
int result;
} VMCINotifyAddRemoveInfo;
/* Used to set/get current context's checkpoint state. */
typedef struct VMCICptBufInfo {
VA64 cptBuf;
uint32 cptType;
uint32 bufSize;
int32 result;
uint32 _pad;
} VMCICptBufInfo;
/* Used to pass notify flag's address to the host driver. */
typedef struct VMCISetNotifyInfo {
VA64 notifyUVA;
int32 result;
uint32 _pad;
} VMCISetNotifyInfo;
#define VMCI_NOTIFY_RESOURCE_QUEUE_PAIR 0
#define VMCI_NOTIFY_RESOURCE_DOOR_BELL 1
#define VMCI_NOTIFY_RESOURCE_ACTION_NOTIFY 0
#define VMCI_NOTIFY_RESOURCE_ACTION_CREATE 1
#define VMCI_NOTIFY_RESOURCE_ACTION_DESTROY 2
/*
* Used to create and destroy doorbells, and generate a notification
* for a doorbell or queue pair.
*/
typedef struct VMCINotifyResourceInfo {
VMCIHandle handle;
uint16 resource;
uint16 action;
int32 result;
} VMCINotifyResourceInfo;
/*
* Used to receive pending notifications for doorbells and queue
* pairs.
*/
typedef struct VMCINotificationReceiveInfo {
VA64 dbHandleBufUVA;
uint64 dbHandleBufSize;
VA64 qpHandleBufUVA;
uint64 qpHandleBufSize;
int32 result;
uint32 _pad;
} VMCINotificationReceiveInfo;
#if defined(_WIN32) && defined(WINNT_DDK)
/*
* Used on Windows to expose the API calls that are no longer exported. This
* is kernel-mode only, and both sides will have the same bitness, so we can
* use pointers directly.
*/
/* Version 1. */
typedef struct VMCIDeviceGetInfoVer1 {
VMCI_DeviceReleaseFct *deviceRelease;
VMCIDatagram_CreateHndFct *dgramCreateHnd;
VMCIDatagram_CreateHndPrivFct *dgramCreateHndPriv;
VMCIDatagram_DestroyHndFct *dgramDestroyHnd;
VMCIDatagram_SendFct *dgramSend;
VMCI_GetContextIDFct *getContextId;
VMCI_VersionFct *version;
VMCIEvent_SubscribeFct *eventSubscribe;
VMCIEvent_UnsubscribeFct *eventUnsubscribe;
VMCIQPair_AllocFct *qpairAlloc;
VMCIQPair_DetachFct *qpairDetach;
VMCIQPair_GetProduceIndexesFct *qpairGetProduceIndexes;
VMCIQPair_GetConsumeIndexesFct *qpairGetConsumeIndexes;
VMCIQPair_ProduceFreeSpaceFct *qpairProduceFreeSpace;
VMCIQPair_ProduceBufReadyFct *qpairProduceBufReady;
VMCIQPair_ConsumeFreeSpaceFct *qpairConsumeFreeSpace;
VMCIQPair_ConsumeBufReadyFct *qpairConsumeBufReady;
VMCIQPair_EnqueueFct *qpairEnqueue;
VMCIQPair_DequeueFct *qpairDequeue;
VMCIQPair_PeekFct *qpairPeek;
VMCIQPair_EnqueueVFct *qpairEnqueueV;
VMCIQPair_DequeueVFct *qpairDequeueV;
VMCIQPair_PeekVFct *qpairPeekV;
VMCI_ContextID2HostVmIDFct *contextID2HostVmID;
VMCI_IsContextOwnerFct *isContextOwner;
VMCIContext_GetPrivFlagsFct *contextGetPrivFlags;
} VMCIDeviceGetInfoVer1;
/* Version 2. */
typedef struct VMCIDeviceGetInfoVer2 {
VMCIDoorbell_CreateFct *doorbellCreate;
VMCIDoorbell_DestroyFct *doorbellDestroy;
VMCIDoorbell_NotifyFct *doorbellNotify;
} VMCIDeviceGetInfoVer2;
typedef struct VMCIDeviceGetInfoHdr {
/* Requested API version on input, supported version on output. */
uint32 apiVersion;
VMCI_DeviceShutdownFn *deviceShutdownCB;
void *userData;
void *deviceRegistration;
} VMCIDeviceGetInfoHdr;
/* Combination of all versions. */
typedef struct VMCIDeviceGetInfo {
VMCIDeviceGetInfoHdr hdr;
VMCIDeviceGetInfoVer1 ver1;
VMCIDeviceGetInfoVer2 ver2;
} VMCIDeviceGetInfo;
#endif // _WIN32 && WINNT_DDK
#ifdef __APPLE__
/*
* Mac OS ioctl definitions.
*
* Mac OS defines _IO* macros, and the core kernel code uses the size encoded
* in the ioctl value to copy the memory back and forth (depending on the
* direction encoded in the ioctl value) between the user and kernel address
* spaces.
* See iocontrolsMacOS.h for details on how this is done. We use sockets only
* for vmci.
*/
#include <sys/ioccom.h>
enum VMCrossTalkSockOpt {
VMCI_SO_VERSION = 0,
VMCI_SO_CONTEXT = IOCTL_VMCI_INIT_CONTEXT,
VMCI_SO_NOTIFY_RESOURCE = IOCTL_VMCI_NOTIFY_RESOURCE,
VMCI_SO_NOTIFICATIONS_RECEIVE = IOCTL_VMCI_NOTIFICATIONS_RECEIVE,
VMCI_SO_VERSION2 = IOCTL_VMCI_VERSION2,
VMCI_SO_QUEUEPAIR_ALLOC = IOCTL_VMCI_QUEUEPAIR_ALLOC,
VMCI_SO_QUEUEPAIR_SETVA = IOCTL_VMCI_QUEUEPAIR_SETVA,
VMCI_SO_QUEUEPAIR_SETPAGEFILE = IOCTL_VMCI_QUEUEPAIR_SETPAGEFILE,
VMCI_SO_QUEUEPAIR_DETACH = IOCTL_VMCI_QUEUEPAIR_DETACH,
VMCI_SO_DATAGRAM_SEND = IOCTL_VMCI_DATAGRAM_SEND,
VMCI_SO_DATAGRAM_RECEIVE = IOCTL_VMCI_DATAGRAM_RECEIVE,
VMCI_SO_DATAGRAM_REQUEST_MAP = IOCTL_VMCI_DATAGRAM_REQUEST_MAP,
VMCI_SO_DATAGRAM_REMOVE_MAP = IOCTL_VMCI_DATAGRAM_REMOVE_MAP,
VMCI_SO_CTX_ADD_NOTIFICATION = IOCTL_VMCI_CTX_ADD_NOTIFICATION,
VMCI_SO_CTX_REMOVE_NOTIFICATION = IOCTL_VMCI_CTX_REMOVE_NOTIFICATION,
VMCI_SO_CTX_GET_CPT_STATE = IOCTL_VMCI_CTX_GET_CPT_STATE,
VMCI_SO_CTX_SET_CPT_STATE = IOCTL_VMCI_CTX_SET_CPT_STATE,
VMCI_SO_GET_CONTEXT_ID = IOCTL_VMCI_GET_CONTEXT_ID,
VMCI_SO_USERFD,
};
#define VMCI_MACOS_HOST_DEVICE "com.vmware.kext.vmci"
#endif
/* Clean up helper macros */
#undef IOCTLCMD
#endif // ifndef _VMCI_IOCONTROLS_H_
vmmemctl-only/shared/compat_slab.h 0000444 0000000 0000000 00000006653 12275350061 016273 0 ustar root root /*********************************************************
* Copyright (C) 2005 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef __COMPAT_SLAB_H__
# define __COMPAT_SLAB_H__
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 2, 0)
# include <linux/slab.h>
#else
# include <linux/malloc.h>
#endif
/*
* Before 2.6.20, kmem_cache_t was the accepted way to refer to a kmem_cache
* structure. Prior to 2.6.15, this structure was called kmem_cache_s, and
* afterwards it was renamed to kmem_cache. Here we keep things simple and use
* the accepted typedef until it became deprecated, at which point we switch
* over to the kmem_cache name.
*/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
# define compat_kmem_cache struct kmem_cache
#else
# define compat_kmem_cache kmem_cache_t
#endif
/*
 * Up to 2.6.22 kmem_cache_create has six arguments - name, size, alignment,
 * flags, constructor, and destructor. Then for some time the kernel asserted
 * that the destructor was NULL, and since 2.6.23-pre1 kmem_cache_create takes
 * only five arguments - the destructor is gone.
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 22) || defined(VMW_KMEMCR_HAS_DTOR)
#define compat_kmem_cache_create(name, size, align, flags, ctor) \
kmem_cache_create(name, size, align, flags, ctor, NULL)
#else
#define compat_kmem_cache_create(name, size, align, flags, ctor) \
kmem_cache_create(name, size, align, flags, ctor)
#endif
/*
 * Up to 2.6.23 the kmem_cache constructor has three arguments - a pointer to
 * the block to prepare (aka "this"), the cache it came from, and some unused
 * flags. After 2.6.23 the flags were removed, and the order of the "this" and
 * cache parameters was swapped. Since 2.6.27-rc2 everything is different
 * again, and the ctor has only one argument.
*
* HAS_3_ARGS has precedence over HAS_2_ARGS if both are defined.
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 23) && !defined(VMW_KMEMCR_CTOR_HAS_3_ARGS)
# define VMW_KMEMCR_CTOR_HAS_3_ARGS
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26) && !defined(VMW_KMEMCR_CTOR_HAS_2_ARGS)
# define VMW_KMEMCR_CTOR_HAS_2_ARGS
#endif
#if defined(VMW_KMEMCR_CTOR_HAS_3_ARGS)
typedef void compat_kmem_cache_ctor(void *, compat_kmem_cache *, unsigned long);
#define COMPAT_KMEM_CACHE_CTOR_ARGS(arg) void *arg, \
compat_kmem_cache *cache, \
unsigned long flags
#elif defined(VMW_KMEMCR_CTOR_HAS_2_ARGS)
typedef void compat_kmem_cache_ctor(compat_kmem_cache *, void *);
#define COMPAT_KMEM_CACHE_CTOR_ARGS(arg) compat_kmem_cache *cache, \
void *arg
#else
typedef void compat_kmem_cache_ctor(void *);
#define COMPAT_KMEM_CACHE_CTOR_ARGS(arg) void *arg
#endif
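/*
 * Usage sketch (illustrative, not from the original module): with the macros
 * above, a constructor and cache can be declared once and built on any of
 * the supported kernels. MyObj, MyObjCtor and myCache are hypothetical names.
 *
 * static void
 * MyObjCtor(COMPAT_KMEM_CACHE_CTOR_ARGS(buf))
 * {
 *    memset(buf, 0, sizeof (MyObj));
 * }
 *
 * myCache = compat_kmem_cache_create("myobj", sizeof (MyObj), 0, 0,
 *                                    MyObjCtor);
 */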
#endif /* __COMPAT_SLAB_H__ */
vmmemctl-only/shared/compat_interrupt.h 0000444 0000000 0000000 00000003573 12275350061 017404 0 ustar root root /*********************************************************
* Copyright (C) 2003 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef __COMPAT_INTERRUPT_H__
# define __COMPAT_INTERRUPT_H__
#include <linux/interrupt.h>
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 69)
/*
* We cannot just define irqreturn_t, as some 2.4.x kernels have
* typedef void irqreturn_t; for "increasing" backward compatibility.
*/
typedef void compat_irqreturn_t;
#define COMPAT_IRQ_NONE
#define COMPAT_IRQ_HANDLED
#define COMPAT_IRQ_RETVAL(x)
#else
typedef irqreturn_t compat_irqreturn_t;
#define COMPAT_IRQ_NONE IRQ_NONE
#define COMPAT_IRQ_HANDLED IRQ_HANDLED
#define COMPAT_IRQ_RETVAL(x) IRQ_RETVAL(x)
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
#define COMPAT_IRQF_DISABLED SA_INTERRUPT
#define COMPAT_IRQF_SHARED SA_SHIRQ
#else
#define COMPAT_IRQF_DISABLED IRQF_DISABLED
#define COMPAT_IRQF_SHARED IRQF_SHARED
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
#define COMPAT_IRQ_HANDLER_ARGS(irq, devp) (int irq, void *devp, struct pt_regs *regs)
#else
#define COMPAT_IRQ_HANDLER_ARGS(irq, devp) (int irq, void *devp)
#endif
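/*
 * Usage sketch (illustrative): a handler declared with the macros above
 * compiles on both sides of the 2.6.20 prototype change. MyIrqHandler and
 * handled are hypothetical names.
 *
 * static compat_irqreturn_t
 * MyIrqHandler COMPAT_IRQ_HANDLER_ARGS(irq, devp)
 * {
 *    ...
 *    return COMPAT_IRQ_RETVAL(handled);
 * }
 */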
#endif /* __COMPAT_INTERRUPT_H__ */
vmmemctl-only/shared/compat_page-flags.h 0000444 0000000 0000000 00000005037 12275350061 017353 0 ustar root root /*********************************************************
* Copyright (C) 2007 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef __COMPAT_PAGE_FLAGS_H__
# define __COMPAT_PAGE_FLAGS_H__
/* No page-flags.h prior to 2.5.12. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 12)
# include <linux/page-flags.h>
#endif
/*
* The pgoff_t type was introduced in 2.5.20, but we'll look for it by
* definition since it's more convenient. Note that we want to avoid a
* situation where, in the future, a #define is changed to a typedef,
* so if pgoff_t is not defined in some future kernel, we won't define it.
*/
#if !defined(pgoff_t) && LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19)
#define pgoff_t unsigned long
#endif
/*
* set_page_writeback() was introduced in 2.6.6. Prior to that, callers were
* using the SetPageWriteback() macro directly, so that's what we'll use.
* Prior to 2.5.12, the writeback bit didn't exist, so we don't need to do
* anything.
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 12)
#define compat_set_page_writeback(page)
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 6)
#define compat_set_page_writeback(page) SetPageWriteback(page)
#else
#define compat_set_page_writeback(page) set_page_writeback(page)
#endif
/*
* end_page_writeback() was introduced in 2.5.12. Prior to that, it looks like
* there was no page writeback bit, and everything the function accomplished
* was done by unlock_page(), so we'll define it out.
*
* Note that we could just #define end_page_writeback to nothing and avoid
* needing the compat_ prefix, but this is more complete with respect to
* compat_set_page_writeback.
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 12)
#define compat_end_page_writeback(page)
#else
#define compat_end_page_writeback(page) end_page_writeback(page)
#endif
#endif /* __COMPAT_PAGE_FLAGS_H__ */
vmmemctl-only/shared/vmware_pack_begin.h 0000444 0000000 0000000 00000002444 12275350062 017445 0 ustar root root /*********************************************************
* Copyright (C) 2002 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* vmware_pack_begin.h --
*
* Begin of structure packing. See vmware_pack_init.h for details.
*
* Note that we do not use the following construct in this include file,
* because we want to emit the code every time the file is included --hpreg
*
* #ifndef foo
* # define foo
* ...
* #endif
*
*/
#include "vmware_pack_init.h"
#ifdef _MSC_VER
# pragma pack(push, 1)
#elif __GNUC__
#else
# error Compiler packing...
#endif
vmmemctl-only/shared/compat_page.h 0000444 0000000 0000000 00000004663 12275350061 016265 0 ustar root root /*********************************************************
* Copyright (C) 2002 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef __COMPAT_PAGE_H__
# define __COMPAT_PAGE_H__
#include <linux/mm.h>
#include <asm/page.h>
/* The pfn_to_page() API appeared in 2.5.14 and became a function during 2.6.x */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) && !defined(pfn_to_page)
# define pfn_to_page(_pfn) (mem_map + (_pfn))
# define page_to_pfn(_page) ((_page) - mem_map)
#endif
/* The virt_to_page() API appeared in 2.4.0 --hpreg */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0) && !defined(virt_to_page)
# define virt_to_page(_kvAddr) pfn_to_page(MAP_NR(_kvAddr))
#endif
/*
* The get_order() API appeared at some point in 2.3.x, and was then backported
* in 2.2.17-21mdk and in the stock 2.2.18. Because we can only detect its
* definition through makefile tricks, we provide our own for now --hpreg
*/
static inline int
compat_get_order(unsigned long size) // IN
{
int order;
size = (size - 1) >> (PAGE_SHIFT - 1);
order = -1;
do {
size >>= 1;
order++;
} while (size);
return order;
}
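/*
 * Worked example (assuming PAGE_SIZE == 4096): compat_get_order(1) == 0,
 * compat_get_order(4096) == 0, compat_get_order(4097) == 1 and
 * compat_get_order(8 * 4096) == 3, i.e. the smallest order such that
 * (PAGE_SIZE << order) >= size.
 */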
/*
* BUG() was added to <asm/page.h> in 2.2.18, and was moved to <asm/bug.h>
* in 2.5.58.
*
* XXX: Technically, this belongs in some sort of "compat_asm_page.h" file, but
* since our compatibility wrappers don't distinguish between <asm/xxx.h> and
* <linux/xxx.h>, putting it here is reasonable.
*/
#ifndef BUG
#define BUG() do { \
printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
__asm__ __volatile__(".byte 0x0f,0x0b"); \
} while (0)
#endif
#endif /* __COMPAT_PAGE_H__ */
vmmemctl-only/shared/compat_version.h 0000444 0000000 0000000 00000007363 12275350061 017036 0 ustar root root /*********************************************************
* Copyright (C) 1998 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef __COMPAT_VERSION_H__
# define __COMPAT_VERSION_H__
#define INCLUDE_ALLOW_VMMON
#define INCLUDE_ALLOW_MODULE
#define INCLUDE_ALLOW_VMCORE
#define INCLUDE_ALLOW_DISTRIBUTE
#define INCLUDE_ALLOW_VMKDRIVERS
#include "includeCheck.h"
#ifndef __linux__
# error "linux-version.h"
#endif
#include <linux/version.h>
#ifndef KERNEL_VERSION
# error KERNEL_VERSION macro is not defined, environment is busted
#endif
/*
* Distinguish relevant classes of Linux kernels.
*
* The convention is that version X defines all
* the KERNEL_Y symbols where Y <= X.
*
* XXX Do not add more definitions here. This way of doing things does not
* scale, and we are going to phase it out soon --hpreg
*/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 1, 0)
# define KERNEL_2_1
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 2, 0)
# define KERNEL_2_2
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 3, 1)
# define KERNEL_2_3_1
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 3, 15)
/* new networking */
# define KERNEL_2_3_15
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 3, 25)
/* new procfs */
# define KERNEL_2_3_25
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 3, 29)
/* even newer procfs */
# define KERNEL_2_3_29
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 3, 43)
/* softnet changes */
# define KERNEL_2_3_43
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 3, 47)
/* more softnet changes */
# define KERNEL_2_3_47
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 3, 99)
/* name in netdevice struct is array and not pointer */
# define KERNEL_2_3_99
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 0)
/* New 'owner' member at the beginning of struct file_operations */
# define KERNEL_2_4_0
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 8)
/* New netif_rx_ni() --hpreg */
# define KERNEL_2_4_8
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 2)
/* New kdev_t, major()/minor() API --hpreg */
# define KERNEL_2_5_2
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 5)
/* New sk_alloc(), pte_offset_map()/pte_unmap() --hpreg */
# define KERNEL_2_5_5
#endif
/* Linux kernel 3.0 can be called 2.6.40, and 3.1 can be 2.6.41...
 * Use COMPAT_LINUX_VERSION_CHECK_LT iff you need to compare the running
 * kernel to versions 3.0 and above.
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)
/* Straightforward comparison if the kernel version is 3.0.0 or beyond */
# define COMPAT_LINUX_VERSION_CHECK_LT(a, b, c) LINUX_VERSION_CODE < KERNEL_VERSION (a, b, c)
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 40)
/* Use the b of the check to map the 3.x version onto its 2.6.(b + 40)
 * equivalent for comparison */
# define COMPAT_LINUX_VERSION_CHECK_LT(a, b, c) LINUX_VERSION_CODE < KERNEL_VERSION (2, 6, (b + 40))
#else
/* Any such kernel is older than every 3.x version */
# define COMPAT_LINUX_VERSION_CHECK_LT(a, b, c) 1
#endif
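/*
 * Example (illustrative): COMPAT_LINUX_VERSION_CHECK_LT(3, 2, 0) is true on
 * a 3.1.0 kernel, and also on a distribution kernel that calls itself
 * 2.6.41 (the 2.6.x alias for 3.1).
 */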
#endif /* __COMPAT_VERSION_H__ */
vmmemctl-only/shared/compat_netdevice.h 0000444 0000000 0000000 00000023744 12275350061 017320 0 ustar root root /*********************************************************
* Copyright (C) 2002 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef __COMPAT_NETDEVICE_H__
# define __COMPAT_NETDEVICE_H__
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
/*
* The enet_statistics structure moved from linux/if_ether.h to
* linux/netdevice.h and is renamed net_device_stats in 2.1.25 --hpreg
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 1, 25)
# include <linux/if_ether.h>
# define net_device_stats enet_statistics
#endif
/* The netif_rx_ni() API appeared in 2.4.8 --hpreg */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 8)
# define netif_rx_ni netif_rx
#endif
/* The device struct was renamed net_device in 2.3.14 --hpreg */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 14)
# define net_device device
#endif
/*
 * SET_MODULE_OWNER appeared sometime during 2.3.x. It was setting
 * dev->owner = THIS_MODULE until 2.5.70, where netdevice refcounting
 * was completely changed. SET_MODULE_OWNER was a nop for the whole
 * 2.6.x series, and finally disappeared in 2.6.24.
 *
 * MOD_xxx_USE_COUNT wrappers are here, as they must be mutually
 * exclusive with the SET_MODULE_OWNER call.
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0)
# define COMPAT_SET_MODULE_OWNER(dev) do {} while (0)
# define COMPAT_NETDEV_MOD_INC_USE_COUNT MOD_INC_USE_COUNT
# define COMPAT_NETDEV_MOD_DEC_USE_COUNT MOD_DEC_USE_COUNT
#else
# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
# define COMPAT_SET_MODULE_OWNER(dev) SET_MODULE_OWNER(dev)
# else
# define COMPAT_SET_MODULE_OWNER(dev) do {} while (0)
# endif
# define COMPAT_NETDEV_MOD_INC_USE_COUNT do {} while (0)
# define COMPAT_NETDEV_MOD_DEC_USE_COUNT do {} while (0)
#endif
/*
* SET_NETDEV_DEV appeared sometime during 2.5.x, and later was
* crossported to various 2.4.x kernels (as dummy macro).
*/
#ifdef SET_NETDEV_DEV
# define COMPAT_SET_NETDEV_DEV(dev, pdev) SET_NETDEV_DEV(dev, pdev)
#else
# define COMPAT_SET_NETDEV_DEV(dev, pdev) do {} while (0)
#endif
/*
 * Build the alloc_etherdev API on top of init_etherdev. For 2.0.x kernels
 * we must provide a dummy init method, otherwise register_netdev does
 * nothing.
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 3)
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 0)
int
vmware_dummy_init(struct net_device *dev)
{
return 0;
}
#endif
static inline struct net_device*
compat_alloc_etherdev(int priv_size)
{
struct net_device* dev;
int size = sizeof *dev + priv_size;
/*
* The name is dynamically allocated before 2.4.0, but
* is an embedded array in later kernels.
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0)
size += sizeof("ethXXXXXXX");
#endif
dev = kmalloc(size, GFP_KERNEL);
if (dev) {
memset(dev, 0, size);
if (priv_size) {
dev->priv = dev + 1;
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0)
dev->name = (char *)(dev + 1) + priv_size;
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 0)
dev->init = vmware_dummy_init;
#endif
if (init_etherdev(dev, 0) != dev) {
kfree(dev);
dev = NULL;
}
}
return dev;
}
#else
#define compat_alloc_etherdev(sz) alloc_etherdev(sz)
#endif
/*
 * alloc_netdev and free_netdev have been available since 2.4.23. Their use
 * is mandatory since 2.6.24.
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 23)
static inline struct net_device *
compat_alloc_netdev(int priv_size,
const char *mask,
void (*setup)(struct net_device *))
{
struct net_device *dev;
int netdev_size = sizeof *dev;
int alloc_size;
# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0)
netdev_size += IFNAMSIZ;
# endif
alloc_size = netdev_size + priv_size;
dev = kmalloc(alloc_size, GFP_KERNEL);
if (dev) {
memset(dev, 0, alloc_size);
dev->priv = (char*)dev + netdev_size;
setup(dev);
# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0)
dev->name = (char*)(dev + 1);
# endif
strcpy(dev->name, mask);
}
return dev;
}
# define compat_free_netdev(dev) kfree(dev)
#else
# define compat_alloc_netdev(size, mask, setup) alloc_netdev(size, mask, setup)
# define compat_free_netdev(dev) free_netdev(dev)
#endif
/* netdev_priv() appeared in 2.6.3 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 3)
# define compat_netdev_priv(netdev) (netdev)->priv
#else
# define compat_netdev_priv(netdev) netdev_priv(netdev)
#endif
/*
 * In the 3.1 merge window, feature macros were removed from mainline,
 * so let's add back the ones we care about.
*/
#if !defined(HAVE_NET_DEVICE_OPS) && \
LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)
# define HAVE_NET_DEVICE_OPS 1
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 9)
# define COMPAT_NETDEV_TX_OK NETDEV_TX_OK
# define COMPAT_NETDEV_TX_BUSY NETDEV_TX_BUSY
#else
# define COMPAT_NETDEV_TX_OK 0
# define COMPAT_NETDEV_TX_BUSY 1
#endif
/* unregister_netdevice_notifier was not safe prior to 2.6.17 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17) && \
!defined(ATOMIC_NOTIFIER_INIT)
/* pre 2.6.17 and not patched */
static inline int compat_unregister_netdevice_notifier(struct notifier_block *nb) {
int err;
rtnl_lock();
err = unregister_netdevice_notifier(nb);
rtnl_unlock();
return err;
}
#else
/* post 2.6.17 or patched */
#define compat_unregister_netdevice_notifier(_nb) \
unregister_netdevice_notifier(_nb);
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) || defined(__VMKLNX__)
# define compat_netif_napi_add(dev, napi, poll, quota) \
netif_napi_add(dev, napi, poll, quota)
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30) || \
defined VMW_NETIF_SINGLE_NAPI_PARM
# define compat_napi_complete(dev, napi) napi_complete(napi)
# define compat_napi_schedule(dev, napi) napi_schedule(napi)
# else
# define compat_napi_complete(dev, napi) netif_rx_complete(dev, napi)
# define compat_napi_schedule(dev, napi) netif_rx_schedule(dev, napi)
# endif
# define compat_napi_enable(dev, napi) napi_enable(napi)
# define compat_napi_disable(dev, napi) napi_disable(napi)
#else
# define compat_napi_complete(dev, napi) netif_rx_complete(dev)
# define compat_napi_schedule(dev, napi) netif_rx_schedule(dev)
# define compat_napi_enable(dev, napi) netif_poll_enable(dev)
# define compat_napi_disable(dev, napi) netif_poll_disable(dev)
/* RedHat ported GRO to 2.6.18 bringing new napi_struct with it */
# if defined NETIF_F_GRO
# define compat_netif_napi_add(netdev, napi, pollcb, quota) \
do { \
(netdev)->poll = (pollcb); \
(netdev)->weight = (quota);\
(napi)->dev = (netdev); \
} while (0)
# else
struct napi_struct {
int dummy;
};
# define compat_netif_napi_add(dev, napi, pollcb, quota) \
do { \
(dev)->poll = (pollcb); \
(dev)->weight = (quota);\
} while (0)
# endif
#endif
#ifdef NETIF_F_TSO6
# define COMPAT_NETIF_F_TSO (NETIF_F_TSO6 | NETIF_F_TSO)
#else
# define COMPAT_NETIF_F_TSO (NETIF_F_TSO)
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18)
# define compat_netif_tx_lock(dev) netif_tx_lock(dev)
# define compat_netif_tx_unlock(dev) netif_tx_unlock(dev)
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 16)
# define compat_netif_tx_lock(dev) spin_lock(&dev->xmit_lock)
# define compat_netif_tx_unlock(dev) spin_unlock(&dev->xmit_lock)
#else
/* Vendor backporting (SLES 10) has muddled the tx_lock situation. Pick whichever
* of the above works for you. */
# define compat_netif_tx_lock(dev) do {} while (0)
# define compat_netif_tx_unlock(dev) do {} while (0)
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37)
# define COMPAT_VLAN_GROUP_ARRAY_LEN VLAN_N_VID
# define compat_flush_scheduled_work(work) cancel_work_sync(work)
#else
# define COMPAT_VLAN_GROUP_ARRAY_LEN VLAN_GROUP_ARRAY_LEN
# define compat_flush_scheduled_work(work) flush_scheduled_work()
#endif
/*
* For kernel versions older than 2.6.29, where pci_msi_enabled is not
* available, check if
* 1. CONFIG_PCI_MSI is present
 * 2. the kernel version is newer than 2.6.25 (because multiqueue is not
 *    supported in kernels older than that)
 * 3. MSI can be enabled. If that fails, MSI is not available.
* When all the above are true, return non-zero so that multiple queues will be
* allowed in the driver.
*/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
# define compat_multiqueue_allowed(dev) pci_msi_enabled()
#else
# if defined CONFIG_PCI_MSI && LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
static inline int
compat_multiqueue_allowed(struct pci_dev *dev)
{
int ret;
if (!pci_enable_msi(dev))
ret = 1;
else
ret = 0;
pci_disable_msi(dev);
return ret;
}
# else
# define compat_multiqueue_allowed(dev) (0)
# endif
#endif
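/*
 * Usage sketch (illustrative): a driver would typically gate its queue
 * count on this check, e.g.
 *
 * numQueues = compat_multiqueue_allowed(pdev) ? numCpus : 1;
 *
 * where pdev, numQueues and numCpus are hypothetical names.
 */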
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37)
# define compat_vlan_get_protocol(skb) vlan_get_protocol(skb)
#else
# define compat_vlan_get_protocol(skb) (skb->protocol)
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0)
typedef netdev_features_t compat_netdev_features_t;
#else
typedef u32 compat_netdev_features_t;
#endif
#endif /* __COMPAT_NETDEVICE_H__ */
vmmemctl-only/shared/compat_workqueue.h 0000444 0000000 0000000 00000014361 12275350061 017374 0 ustar root root /*********************************************************
* Copyright (C) 2007 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef __COMPAT_WORKQUEUE_H__
# define __COMPAT_WORKQUEUE_H__
#include <linux/kernel.h>
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41)
# include <linux/workqueue.h>
#endif
/*
*
* Work queues and delayed work queues.
*
 * Prior to 2.5.41, the notion of work queues did not exist. Task queues are
 * used for work queues and timers are used for delayed work queues.
*
* After 2.6.20, normal work structs ("work_struct") and delayed work
* ("delayed_work") structs were separated so that the work_struct could be
* slimmed down. The interface was also changed such that the address of the
* work_struct itself is passed in as the argument to the work function. This
* requires that one embed the work struct in the larger struct containing the
* information necessary to complete the work and use container_of() to obtain
* the address of the containing structure.
*
* Users of these macros should embed a compat_work or compat_delayed_work in
* a larger structure, then specify the larger structure as the _data argument
* for the initialization functions, specify the work function to take
* a compat_work_arg or compat_delayed_work_arg, then use the appropriate
* _GET_DATA macro to obtain the reference to the structure passed in as _data.
* An example is below.
*
*
* typedef struct WorkData {
* int data;
* compat_work work;
* } WorkData;
*
*
* void
* WorkFunc(compat_work_arg data)
* {
* WorkData *workData = COMPAT_WORK_GET_DATA(data, WorkData, work);
*
* ...
* }
*
*
* {
* WorkData *workData = kmalloc(sizeof *workData, GFP_EXAMPLE);
* if (!workData) {
* return -ENOMEM;
* }
*
* COMPAT_INIT_WORK(&workData->work, WorkFunc, workData);
* compat_schedule_work(&workData->work);
* }
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 41) /* { */
typedef struct tq_struct compat_work;
typedef struct compat_delayed_work {
struct tq_struct work;
struct timer_list timer;
} compat_delayed_work;
typedef void * compat_work_arg;
typedef void * compat_delayed_work_arg;
/*
* Delayed work queues need to run at some point in the future in process
* context, but task queues don't support delaying the task one is scheduling.
* Timers allow us to delay the execution of our work queue until the future,
* but timer handlers run in bottom-half context. As such, we use both a timer
* and task queue and use the timer handler below to schedule the task in
* process context immediately. The timer lets us delay execution, and the
* task queue lets us run in process context.
*
* Note that this is similar to how delayed_work is implemented with work
* queues in later kernel versions.
*/
static inline void
__compat_delayed_work_timer(unsigned long arg)
{
compat_delayed_work *dwork = (compat_delayed_work *)arg;
if (dwork) {
schedule_task(&dwork->work);
}
}
# define COMPAT_INIT_WORK(_work, _func, _data) \
INIT_LIST_HEAD(&(_work)->list); \
(_work)->sync = 0; \
(_work)->routine = _func; \
(_work)->data = _data
# define COMPAT_INIT_DELAYED_WORK(_work, _func, _data) \
COMPAT_INIT_WORK(&(_work)->work, _func, _data); \
init_timer(&(_work)->timer); \
(_work)->timer.expires = 0; \
(_work)->timer.function = __compat_delayed_work_timer; \
(_work)->timer.data = (unsigned long)_work
# define compat_schedule_work(_work) \
schedule_task(_work)
# define compat_schedule_delayed_work(_work, _delay) \
(_work)->timer.expires = jiffies + _delay; \
add_timer(&(_work)->timer)
# define COMPAT_WORK_GET_DATA(_p, _type, _member) \
(_type *)(_p)
# define COMPAT_DELAYED_WORK_GET_DATA(_p, _type, _member) \
(_type *)(_p)
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20) \
&& !defined(__VMKLNX__) /* } { */
typedef struct work_struct compat_work;
typedef struct work_struct compat_delayed_work;
typedef void * compat_work_arg;
typedef void * compat_delayed_work_arg;
# define COMPAT_INIT_WORK(_work, _func, _data) \
INIT_WORK(_work, _func, _data)
# define COMPAT_INIT_DELAYED_WORK(_work, _func, _data) \
INIT_WORK(_work, _func, _data)
# define compat_schedule_work(_work) \
schedule_work(_work)
# define compat_schedule_delayed_work(_work, _delay) \
schedule_delayed_work(_work, _delay)
# define COMPAT_WORK_GET_DATA(_p, _type, _member) \
(_type *)(_p)
# define COMPAT_DELAYED_WORK_GET_DATA(_p, _type, _member) \
(_type *)(_p)
#else /* } Linux >= 2.6.20 { */
typedef struct work_struct compat_work;
typedef struct delayed_work compat_delayed_work;
typedef struct work_struct * compat_work_arg;
typedef struct work_struct * compat_delayed_work_arg;
# define COMPAT_INIT_WORK(_work, _func, _data) \
INIT_WORK(_work, _func)
# define COMPAT_INIT_DELAYED_WORK(_work, _func, _data) \
INIT_DELAYED_WORK(_work, _func)
# define compat_schedule_work(_work) \
schedule_work(_work)
# define compat_schedule_delayed_work(_work, _delay) \
schedule_delayed_work(_work, _delay)
# define COMPAT_WORK_GET_DATA(_p, _type, _member) \
container_of(_p, _type, _member)
# define COMPAT_DELAYED_WORK_GET_DATA(_p, _type, _member) \
container_of(_p, _type, _member.work)
#endif /* } */
#endif /* __COMPAT_WORKQUEUE_H__ */
vmmemctl-only/shared/compat_ioport.h 0000444 0000000 0000000 00000004041 12275350061 016653 0 ustar root root /*********************************************************
* Copyright (C) 2003 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef __COMPAT_IOPORT_H__
# define __COMPAT_IOPORT_H__
#include <linux/ioport.h>
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0)
static inline void *
compat_request_region(unsigned long start, unsigned long len, const char *name)
{
if (check_region(start, len)) {
return NULL;
}
request_region(start, len, name);
return (void*)1;
}
#else
#define compat_request_region(start, len, name) request_region(start, len, name)
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 7)
/* mmap io support starts with 2.3.7; fail the call for kernels prior to that */
static inline void *
compat_request_mem_region(unsigned long start, unsigned long len, const char *name)
{
return NULL;
}
static inline void
compat_release_mem_region(unsigned long start, unsigned long len)
{
return;
}
#else
#define compat_request_mem_region(start, len, name) request_mem_region(start, len, name)
#define compat_release_mem_region(start, len) release_mem_region(start, len)
#endif
/* these two macro defs are needed by compat_pci_request_region */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 15)
# define IORESOURCE_IO 0x00000100
# define IORESOURCE_MEM 0x00000200
#endif
#endif /* __COMPAT_IOPORT_H__ */
vmmemctl-only/shared/compat_skbuff.h 0000444 0000000 0000000 00000015575 12275350061 016635 0 ustar root root /*********************************************************
* Copyright (C) 2007 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef __COMPAT_SKBUFF_H__
# define __COMPAT_SKBUFF_H__
#include <linux/skbuff.h>
/*
 * When the transition from the mac/nh/h fields to the skb_* accessors was
 * made, SKB_WITH_OVERHEAD was also introduced.
*/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) || \
(LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 21) && defined(SKB_WITH_OVERHEAD))
#define compat_skb_mac_header(skb) skb_mac_header(skb)
#define compat_skb_network_header(skb) skb_network_header(skb)
#define compat_skb_network_offset(skb) skb_network_offset(skb)
#define compat_skb_transport_header(skb) skb_transport_header(skb)
#define compat_skb_transport_offset(skb) skb_transport_offset(skb)
#define compat_skb_network_header_len(skb) skb_network_header_len(skb)
#define compat_skb_tail_pointer(skb) skb_tail_pointer(skb)
#define compat_skb_end_pointer(skb) skb_end_pointer(skb)
#define compat_skb_ip_header(skb) ((struct iphdr *)skb_network_header(skb))
#define compat_skb_ipv6_header(skb) ((struct ipv6hdr *)skb_network_header(skb))
#define compat_skb_tcp_header(skb) ((struct tcphdr *)skb_transport_header(skb))
#define compat_skb_reset_mac_header(skb) skb_reset_mac_header(skb)
#define compat_skb_reset_network_header(skb) skb_reset_network_header(skb)
#define compat_skb_reset_transport_header(skb) skb_reset_transport_header(skb)
#define compat_skb_set_network_header(skb, off) skb_set_network_header(skb, off)
#define compat_skb_set_transport_header(skb, off) skb_set_transport_header(skb, off)
#else
#define compat_skb_mac_header(skb) (skb)->mac.raw
#define compat_skb_network_header(skb) (skb)->nh.raw
#define compat_skb_network_offset(skb) ((skb)->nh.raw - (skb)->data)
#define compat_skb_transport_header(skb) (skb)->h.raw
#define compat_skb_transport_offset(skb) ((skb)->h.raw - (skb)->data)
#define compat_skb_network_header_len(skb) ((skb)->h.raw - (skb)->nh.raw)
#define compat_skb_tail_pointer(skb) (skb)->tail
#define compat_skb_end_pointer(skb) (skb)->end
#define compat_skb_ip_header(skb) (skb)->nh.iph
#define compat_skb_ipv6_header(skb) (skb)->nh.ipv6h
#define compat_skb_tcp_header(skb) (skb)->h.th
#define compat_skb_reset_mac_header(skb) ((skb)->mac.raw = (skb)->data)
#define compat_skb_reset_network_header(skb) ((skb)->nh.raw = (skb)->data)
#define compat_skb_reset_transport_header(skb) ((skb)->h.raw = (skb)->data)
#define compat_skb_set_network_header(skb, off) ((skb)->nh.raw = (skb)->data + (off))
#define compat_skb_set_transport_header(skb, off) ((skb)->h.raw = (skb)->data + (off))
#endif
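/*
 * Usage sketch (illustrative): with the accessors above, header parsing is
 * version independent, e.g.
 *
 * struct iphdr *ip = compat_skb_ip_header(skb);
 * unsigned int ipHdrLen = compat_skb_network_header_len(skb);
 */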
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18) || defined(VMW_SKB_LINEARIZE_2618)
# define compat_skb_linearize(skb) skb_linearize((skb))
#else
# if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 0)
# define compat_skb_linearize(skb) __skb_linearize((skb), GFP_ATOMIC)
# elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 4)
# define compat_skb_linearize(skb) skb_linearize((skb), GFP_ATOMIC)
# else
static inline int
compat_skb_linearize(struct sk_buff *skb)
{
return 0;
}
# endif
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
#define compat_skb_csum_offset(skb) (skb)->csum_offset
#else
#define compat_skb_csum_offset(skb) (skb)->csum
#endif
/*
 * Note that compat_skb_csum_start() has different semantics from the
 * kernel's csum_start: the kernel's skb->csum_start is the offset between
 * the start of the checksummed area and the start of the complete skb
 * buffer, while our compat_skb_csum_start(skb) is the offset from the start
 * of the packet itself.
*/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
#define compat_skb_csum_start(skb) ((skb)->csum_start - skb_headroom(skb))
#else
#define compat_skb_csum_start(skb) compat_skb_transport_offset(skb)
#endif
#if defined(NETIF_F_GSO) /* 2.6.18 and upwards */
#define compat_skb_mss(skb) (skb_shinfo(skb)->gso_size)
#else
#define compat_skb_mss(skb) (skb_shinfo(skb)->tso_size)
#endif
/* used by both received pkts and outgoing ones */
#define VM_CHECKSUM_UNNECESSARY CHECKSUM_UNNECESSARY
/* csum status of received pkts */
#if defined(CHECKSUM_COMPLETE)
# define VM_RX_CHECKSUM_PARTIAL CHECKSUM_COMPLETE
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19) && defined(CHECKSUM_HW)
# define VM_RX_CHECKSUM_PARTIAL CHECKSUM_HW
#else
# define VM_RX_CHECKSUM_PARTIAL CHECKSUM_PARTIAL
#endif
/* csum status of outgoing pkts */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19) && defined(CHECKSUM_HW)
# define VM_TX_CHECKSUM_PARTIAL CHECKSUM_HW
#else
# define VM_TX_CHECKSUM_PARTIAL CHECKSUM_PARTIAL
#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,1,0))
# define compat_kfree_skb(skb, type) kfree_skb(skb, type)
# define compat_dev_kfree_skb(skb, type) dev_kfree_skb(skb, type)
# define compat_dev_kfree_skb_any(skb, type) dev_kfree_skb(skb, type)
# define compat_dev_kfree_skb_irq(skb, type) dev_kfree_skb(skb, type)
#else
# define compat_kfree_skb(skb, type) kfree_skb(skb)
# define compat_dev_kfree_skb(skb, type) dev_kfree_skb(skb)
# if (LINUX_VERSION_CODE < KERNEL_VERSION(2,3,43))
# define compat_dev_kfree_skb_any(skb, type) dev_kfree_skb(skb)
# define compat_dev_kfree_skb_irq(skb, type) dev_kfree_skb(skb)
# else
# define compat_dev_kfree_skb_any(skb, type) dev_kfree_skb_any(skb)
# define compat_dev_kfree_skb_irq(skb, type) dev_kfree_skb_irq(skb)
# endif
#endif
#ifndef NET_IP_ALIGN
# define COMPAT_NET_IP_ALIGN 2
#else
# define COMPAT_NET_IP_ALIGN NET_IP_ALIGN
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 4)
# define compat_skb_headlen(skb) skb_headlen(skb)
# define compat_pskb_may_pull(skb, len) pskb_may_pull(skb, len)
# define compat_skb_is_nonlinear(skb) skb_is_nonlinear(skb)
#else
# define compat_skb_headlen(skb) (skb)->len
# define compat_pskb_may_pull(skb, len) 1
# define compat_skb_is_nonlinear(skb) 0
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 12)
# define compat_skb_header_cloned(skb) skb_header_cloned(skb)
#else
# define compat_skb_header_cloned(skb) 0
#endif
#endif /* __COMPAT_SKBUFF_H__ */
vmmemctl-only/shared/backdoor_def.h 0000444 0000000 0000000 00000023444 12275350062 016407 0 ustar root root /*********************************************************
* Copyright (C) 1998 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* backdoor_def.h --
*
* This contains backdoor defines that can be included from
* an assembly language file.
*/
#ifndef _BACKDOOR_DEF_H_
#define _BACKDOOR_DEF_H_
#define INCLUDE_ALLOW_MODULE
#define INCLUDE_ALLOW_USERLEVEL
#define INCLUDE_ALLOW_VMCORE
#define INCLUDE_ALLOW_VMKERNEL
#include "includeCheck.h"
/*
* If you want to add a new low-level backdoor call for a guest userland
* application, please consider using the GuestRpc mechanism instead. --hpreg
*/
#define BDOOR_MAGIC 0x564D5868
/* Low-bandwidth backdoor port. --hpreg */
#define BDOOR_PORT 0x5658
#define BDOOR_CMD_GETMHZ 1
/*
* BDOOR_CMD_APMFUNCTION is used by:
*
* o The FrobOS code, which instead should either program the virtual chipset
* (like the new BIOS code does, matthias offered to implement that), or not
* use any VM-specific code (which requires that we correctly implement
* "power off on CLI HLT" for SMP VMs, boris offered to implement that)
*
* o The old BIOS code, which will soon be jettisoned
*
* --hpreg
*/
#define BDOOR_CMD_APMFUNCTION 2 /* CPL0 only. */
#define BDOOR_CMD_GETDISKGEO 3
#define BDOOR_CMD_GETPTRLOCATION 4
#define BDOOR_CMD_SETPTRLOCATION 5
#define BDOOR_CMD_GETSELLENGTH 6
#define BDOOR_CMD_GETNEXTPIECE 7
#define BDOOR_CMD_SETSELLENGTH 8
#define BDOOR_CMD_SETNEXTPIECE 9
#define BDOOR_CMD_GETVERSION 10
#define BDOOR_CMD_GETDEVICELISTELEMENT 11
#define BDOOR_CMD_TOGGLEDEVICE 12
#define BDOOR_CMD_GETGUIOPTIONS 13
#define BDOOR_CMD_SETGUIOPTIONS 14
#define BDOOR_CMD_GETSCREENSIZE 15
#define BDOOR_CMD_MONITOR_CONTROL 16 /* Disabled by default. */
#define BDOOR_CMD_GETHWVERSION 17
#define BDOOR_CMD_OSNOTFOUND 18 /* CPL0 only. */
#define BDOOR_CMD_GETUUID 19
#define BDOOR_CMD_GETMEMSIZE 20
#define BDOOR_CMD_HOSTCOPY 21 /* Devel only. */
//#define BDOOR_CMD_SERVICE_VM 22 /* Not in use. Never shipped. */
#define BDOOR_CMD_GETTIME 23 /* Deprecated -> GETTIMEFULL. */
#define BDOOR_CMD_STOPCATCHUP 24
#define BDOOR_CMD_PUTCHR 25 /* Disabled by default. */
#define BDOOR_CMD_ENABLE_MSG 26 /* Devel only. */
#define BDOOR_CMD_GOTO_TCL 27 /* Devel only. */
#define BDOOR_CMD_INITPCIOPROM 28 /* CPL 0 only. */
//#define BDOOR_CMD_INT13 29 /* Not in use. */
#define BDOOR_CMD_MESSAGE 30
#define BDOOR_CMD_SIDT 31
#define BDOOR_CMD_SGDT 32
#define BDOOR_CMD_SLDT_STR 33
#define BDOOR_CMD_ISACPIDISABLED 34
//#define BDOOR_CMD_TOE 35 /* Not in use. */
#define BDOOR_CMD_ISMOUSEABSOLUTE 36
#define BDOOR_CMD_PATCH_SMBIOS_STRUCTS 37 /* CPL 0 only. */
#define BDOOR_CMD_MAPMEM 38 /* Devel only */
#define BDOOR_CMD_ABSPOINTER_DATA 39
#define BDOOR_CMD_ABSPOINTER_STATUS 40
#define BDOOR_CMD_ABSPOINTER_COMMAND 41
//#define BDOOR_CMD_TIMER_SPONGE 42 /* Not in use. */
#define BDOOR_CMD_PATCH_ACPI_TABLES 43 /* CPL 0 only. */
//#define BDOOR_CMD_DEVEL_FAKEHARDWARE 44 /* Not in use. */
#define BDOOR_CMD_GETHZ 45
#define BDOOR_CMD_GETTIMEFULL 46
#define BDOOR_CMD_STATELOGGER 47 /* Disabled by default. */
#define BDOOR_CMD_CHECKFORCEBIOSSETUP 48 /* CPL 0 only. */
#define BDOOR_CMD_LAZYTIMEREMULATION 49 /* CPL 0 only. */
#define BDOOR_CMD_BIOSBBS 50 /* CPL 0 only. */
//#define BDOOR_CMD_VASSERT 51 /* Not in use. */
#define BDOOR_CMD_ISGOSDARWIN 52
#define BDOOR_CMD_DEBUGEVENT 53
#define BDOOR_CMD_OSNOTMACOSXSERVER 54 /* CPL 0 only. */
#define BDOOR_CMD_GETTIMEFULL_WITH_LAG 55
#define BDOOR_CMD_ACPI_HOTPLUG_DEVICE 56 /* Devel only. */
#define BDOOR_CMD_ACPI_HOTPLUG_MEMORY 57 /* Devel only. */
#define BDOOR_CMD_ACPI_HOTPLUG_CBRET 58 /* Devel only. */
//#define BDOOR_CMD_GET_HOST_VIDEO_MODES 59 /* Not in use. */
#define BDOOR_CMD_ACPI_HOTPLUG_CPU 60 /* Devel only. */
//#define BDOOR_CMD_USB_HOTPLUG_MOUSE 61 /* Not in use. Never shipped. */
#define BDOOR_CMD_XPMODE 62 /* CPL 0 only. */
#define BDOOR_CMD_NESTING_CONTROL 63
#define BDOOR_CMD_FIRMWARE_INIT 64 /* CPL 0 only. */
#define BDOOR_CMD_FIRMWARE_ACPI_SERVICES 65 /* CPL 0 only. */
# define BDOOR_CMD_FAS_GET_TABLE_SIZE 0
# define BDOOR_CMD_FAS_GET_TABLE_DATA 1
# define BDOOR_CMD_FAS_GET_PLATFORM_NAME 2
# define BDOOR_CMD_FAS_GET_PCIE_OSC_MASK 3
# define BDOOR_CMD_FAS_GET_APIC_ROUTING 4
# define BDOOR_CMD_FAS_GET_TABLE_SKIP 5
# define BDOOR_CMD_FAS_GET_SLEEP_ENABLES 6
#define BDOOR_CMD_SENDPSHAREHINTS 66
#define BDOOR_CMD_ENABLE_USB_MOUSE 67
#define BDOOR_CMD_GET_VCPU_INFO 68
# define BDOOR_CMD_VCPU_SLC64 0
# define BDOOR_CMD_VCPU_SYNC_VTSCS 1
# define BDOOR_CMD_VCPU_HV_REPLAY_OK 2
# define BDOOR_CMD_VCPU_RESERVED 31
#define BDOOR_CMD_EFI_SERIALCON_CONFIG 69 /* CPL 0 only. */
#define BDOOR_CMD_BUG328986 70 /* CPL 0 only. */
#define BDOOR_CMD_FIRMWARE_ERROR 71 /* CPL 0 only. */
# define BDOOR_CMD_FE_INSUFFICIENT_MEM 0
# define BDOOR_CMD_FE_EXCEPTION 1
#define BDOOR_CMD_VMK_INFO 72
#define BDOOR_CMD_EFI_BOOT_CONFIG 73 /* CPL 0 only. */
# define BDOOR_CMD_EBC_LEGACYBOOT_ENABLED 0
# define BDOOR_CMD_EBC_GET_ORDER 1
# define BDOOR_CMD_EBC_SHELL_ACTIVE 2
#define BDOOR_CMD_GET_HW_MODEL 74 /* CPL 0 only. */
#define BDOOR_CMD_GET_SVGA_CAPABILITIES 75 /* CPL 0 only. */
#define BDOOR_CMD_GET_FORCE_X2APIC 76 /* CPL 0 only */
#define BDOOR_CMD_SET_PCI_HOLE 77 /* CPL 0 only */
#define BDOOR_CMD_GET_PCI_HOLE 78 /* CPL 0 only */
#define BDOOR_CMD_GET_PCI_BAR 79 /* CPL 0 only */
#define BDOOR_CMD_SHOULD_GENERATE_SYSTEMID 80 /* CPL 0 only */
#define BDOOR_CMD_MAX 81
/*
* IMPORTANT NOTE: When modifying the behavior of an existing backdoor command,
* you must adhere to the semantics expected by the oldest Tools who use that
* command. Specifically, do not alter the way in which the command modifies
* the registers. Otherwise backwards compatibility will suffer.
*/
/* Processing mode for guest pshare hints (SENDPSHAREHINTS cmd) */
#define BDOOR_PSHARE_HINTS_ASYNC 0
#define BDOOR_PSHARE_HINTS_SYNC 1
#define BDOOR_PSHARE_HINTS_TYPE(ecx) (((ecx) >> 16) & 0x1)
/* Version of backdoor pshare hints protocol */
#define BDOOR_PSHARE_HINTS_VERSION 1
#define BDOOR_PSHARE_HINTS_VER(ecx) (((ecx) >> 17) & 0x7f)
/* Task applied to backdoor pshare hints */
#define BDOOR_PSHARE_HINTS_CMD_SHARE 0
#define BDOOR_PSHARE_HINTS_CMD_DROP 1
#define BDOOR_PSHARE_HINTS_CMD_MAX 2
#define BDOOR_PSHARE_HINTS_CMD(ecx) (((ecx) >> 24) & 0xff)
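/*
 * Putting the masks above together, the ECX layout for SENDPSHAREHINTS is:
 * bits 0-15 the backdoor command (BDOOR_CMD_SENDPSHAREHINTS), bit 16 the
 * sync/async processing mode, bits 17-23 the protocol version, and
 * bits 24-31 the hint task.
 */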
/* Nesting control operations */
#define NESTING_CONTROL_RESTRICT_BACKDOOR 0
#define NESTING_CONTROL_OPEN_BACKDOOR 1
#define NESTING_CONTROL_QUERY 2
#define NESTING_CONTROL_MAX 2
/* EFI Boot Order options, nibble-sized. */
#define EFI_BOOT_ORDER_TYPE_EFI 0x0
#define EFI_BOOT_ORDER_TYPE_LEGACY 0x1
#define EFI_BOOT_ORDER_TYPE_NONE 0xf
/* High-bandwidth backdoor port. --hpreg */
#define BDOORHB_PORT 0x5659
#define BDOORHB_CMD_MESSAGE 0
#define BDOORHB_CMD_VASSERT 1
#define BDOORHB_CMD_MAX 2
/*
* There is another backdoor which allows access to certain TSC-related
* values using otherwise illegal PMC indices when the pseudo_perfctr
* control flag is set.
*/
#define BDOOR_PMC_HW_TSC 0x10000
#define BDOOR_PMC_REAL_NS 0x10001
#define BDOOR_PMC_APPARENT_NS 0x10002
#define BDOOR_PMC_PSEUDO_TSC 0x10003
#define IS_BDOOR_PMC(index) (((index) | 3) == 0x10003)
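/* Note: IS_BDOOR_PMC(i) is true exactly for the four indices above,
 * 0x10000 through 0x10003, since (i | 3) == 0x10003 for just that range. */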
#define BDOOR_CMD(ecx) ((ecx) & 0xffff)
/* Sub commands for BDOOR_CMD_VMK_INFO */
#define BDOOR_CMD_VMK_INFO_ENTRY 1
#ifdef VMM
/*
*----------------------------------------------------------------------
*
* Backdoor_CmdRequiresFullyValidVCPU --
*
* A few backdoor commands require the full VCPU to be valid
* (including GDTR, IDTR, TR and LDTR). The rest get read/write
* access to GPRs and read access to Segment registers (selectors).
*
* Result:
 * True iff VECX contains a command that requires the full VCPU to
 * be valid.
*
*----------------------------------------------------------------------
*/
static INLINE Bool
Backdoor_CmdRequiresFullyValidVCPU(unsigned cmd)
{
return cmd == BDOOR_CMD_SIDT ||
cmd == BDOOR_CMD_SGDT ||
cmd == BDOOR_CMD_SLDT_STR;
}
#endif
#endif
vmmemctl-only/shared/compat_mutex.h 0000444 0000000 0000000 00000003475 12275350061 016513 0 ustar root root /*********************************************************
* Copyright (C) 2009 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef __COMPAT_MUTEX_H__
# define __COMPAT_MUTEX_H__
/* Blocking mutexes were introduced in 2.6.16. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 16)
#include "compat_semaphore.h"
typedef struct semaphore compat_mutex_t;
# define compat_define_mutex(_mx) DECLARE_MUTEX(_mx)
# define compat_mutex_init(_mx) init_MUTEX(_mx)
# define compat_mutex_lock(_mx) down(_mx)
# define compat_mutex_lock_interruptible(_mx) down_interruptible(_mx)
# define compat_mutex_unlock(_mx) up(_mx)
#else
#include <linux/mutex.h>
typedef struct mutex compat_mutex_t;
# define compat_define_mutex(_mx) DEFINE_MUTEX(_mx)
# define compat_mutex_init(_mx) mutex_init(_mx)
# define compat_mutex_lock(_mx) mutex_lock(_mx)
# define compat_mutex_lock_interruptible(_mx) mutex_lock_interruptible(_mx)
# define compat_mutex_unlock(_mx) mutex_unlock(_mx)
#endif
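/*
 * Usage sketch (illustrative; myMutex is a hypothetical name):
 *
 * static compat_define_mutex(myMutex);
 * ...
 * compat_mutex_lock(&myMutex);
 * ... critical section ...
 * compat_mutex_unlock(&myMutex);
 */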
#endif /* __COMPAT_MUTEX_H__ */
vmmemctl-only/shared/vmci_infrastructure.h 0000444 0000000 0000000 00000007766 12275350062 020114 0 ustar root root /*********************************************************
* Copyright (C) 2006 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* vmci_infrastructure.h --
*
* This file implements the VMCI infrastructure.
*/
#ifndef _VMCI_INFRASTRUCTURE_H_
#define _VMCI_INFRASTRUCTURE_H_
#define INCLUDE_ALLOW_USERLEVEL
#define INCLUDE_ALLOW_MODULE
#define INCLUDE_ALLOW_VMMON
#define INCLUDE_ALLOW_VMCORE
#define INCLUDE_ALLOW_VMKERNEL
#define INCLUDE_ALLOW_DISTRIBUTE
#include "includeCheck.h"
#include "vmware.h"
#include "vmci_defs.h"
typedef enum {
VMCIOBJ_VMX_VM = 10,
VMCIOBJ_CONTEXT,
VMCIOBJ_SOCKET,
VMCIOBJ_NOT_SET,
} VMCIObjType;
/* For storing VMCI structures in file handles. */
typedef struct VMCIObj {
void *ptr;
VMCIObjType type;
} VMCIObj;
/* Guestcalls currently support a maximum of 8 uint64 arguments. */
#define VMCI_GUESTCALL_MAX_ARGS_SIZE 64
/*
* Structure used for checkpointing the doorbell mappings. It is
* written to the checkpoint as is, so changing this structure will
* break checkpoint compatibility.
*/
typedef struct VMCIDoorbellCptState {
VMCIHandle handle;
uint64 bitmapIdx;
} VMCIDoorbellCptState;
/* Used to determine what checkpoint state to get and set. */
#define VMCI_NOTIFICATION_CPT_STATE 0x1
#define VMCI_WELLKNOWN_CPT_STATE 0x2
#define VMCI_DG_OUT_STATE 0x3
#define VMCI_DG_IN_STATE 0x4
#define VMCI_DG_IN_SIZE_STATE 0x5
#define VMCI_DOORBELL_CPT_STATE 0x6
/* Used to control the VMCI device in the vmkernel */
#define VMCI_DEV_RESET 0x01
#define VMCI_DEV_QP_RESET 0x02 // DEPRECATED
#define VMCI_DEV_QUIESCE 0x03
#define VMCI_DEV_UNQUIESCE 0x04
#define VMCI_DEV_QP_BREAK_SHARING 0x05 // DEPRECATED
#define VMCI_DEV_RESTORE_SYNC 0x06
#define VMCI_DEV_BMASTER_OFF 0x07
#define VMCI_DEV_BMASTER_ON 0x08
/*
*-------------------------------------------------------------------------
*
* VMCI_Hash --
*
* Hash function used by the Simple Datagram API. Based on the djb2
* hash function by Dan Bernstein.
*
* Result:
 * Returns the hash bucket index for the given handle.
*
* Side effects:
* None.
*
*-------------------------------------------------------------------------
*/
static INLINE int
VMCI_Hash(VMCIHandle handle, // IN
unsigned size) // IN
{
unsigned i;
int hash = 5381;
const uint64 handleValue = QWORD(handle.resource, handle.context);
for (i = 0; i < sizeof handle; i++) {
hash = ((hash << 5) + hash) + (uint8)(handleValue >> (i * 8));
}
return hash & (size - 1);
}
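/*
 * Illustrative usage (not from the original source): because VMCI_Hash masks
 * with (size - 1), "size" must be a power of two. The table below is
 * hypothetical.
 *
 *   #define DEMO_HT_SIZE 64                       // power of two
 *   VMCIObj *buckets[DEMO_HT_SIZE];
 *   int idx = VMCI_Hash(handle, DEMO_HT_SIZE);    // 0 <= idx < DEMO_HT_SIZE
 *   VMCIObj *obj = buckets[idx];                  // then match on "handle"
 */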
/*
*-------------------------------------------------------------------------
*
* VMCI_HashId --
*
* Hash function used by the Simple Datagram API. Hashes only a VMCI id
* (not the full VMCI handle). Based on the djb2 hash function by
* Dan Bernstein.
*
* Result:
* Returns the hash of the id, masked into the range [0, size).
*
* Side effects:
* None.
*
*-------------------------------------------------------------------------
*/
static INLINE int
VMCI_HashId(VMCIId id, // IN
unsigned size) // IN
{
unsigned i;
int hash = 5381;
for (i = 0; i < sizeof id; i++) {
hash = ((hash << 5) + hash) + (uint8)(id >> (i * 8));
}
return hash & (size - 1);
}
#endif // _VMCI_INFRASTRUCTURE_H_
vmmemctl-only/shared/includeCheck.h 0000444 0000000 0000000 00000010063 12275350062 016357 0 ustar root root /*********************************************************
* Copyright (C) 1998 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* includeCheck.h --
*
* Restrict include file use.
*
* In every .h file, define one or more of these
*
* INCLUDE_ALLOW_VMX
* INCLUDE_ALLOW_USERLEVEL
* INCLUDE_ALLOW_VMCORE
* INCLUDE_ALLOW_MODULE
* INCLUDE_ALLOW_VMKERNEL
* INCLUDE_ALLOW_DISTRIBUTE
* INCLUDE_ALLOW_VMK_MODULE
* INCLUDE_ALLOW_VMKDRIVERS
* INCLUDE_ALLOW_VMIROM
* INCLUDE_ALLOW_MKS
*
* Then include this file.
*
* Any file that has INCLUDE_ALLOW_DISTRIBUTE defined will potentially
* be distributed in source form along with GPLed code. Ensure
* that this is acceptable.
*/
/*
* Declare a VMCORE-only variable to help classify object
* files. The variable goes in the common block and does
* not create multiple definition link-time conflicts.
*/
#if defined VMCORE && defined VMX86_DEVEL && defined VMX86_DEBUG && \
defined linux && !defined MODULE && \
!defined COMPILED_WITH_VMCORE
#define COMPILED_WITH_VMCORE compiled_with_vmcore
#ifdef ASM
.comm compiled_with_vmcore, 0
#else
asm(".comm compiled_with_vmcore, 0");
#endif /* ASM */
#endif
#if defined VMCORE && \
!(defined VMX86_VMX || defined VMM || \
defined MONITOR_APP || defined VMMON)
#error "Makefile problem: VMCORE without VMX86_VMX or \
VMM or MONITOR_APP or VMMON."
#endif
#if defined VMCORE && !defined INCLUDE_ALLOW_VMCORE
#error "The surrounding include file is not allowed in vmcore."
#endif
#undef INCLUDE_ALLOW_VMCORE
#if defined VMX86_VMX && !defined VMCORE && \
!defined INCLUDE_ALLOW_VMX && !defined INCLUDE_ALLOW_USERLEVEL && \
!defined INCLUDE_ALLOW_MKS
#error "The surrounding include file is not allowed in the VMX."
#endif
#undef INCLUDE_ALLOW_VMX
#if defined USERLEVEL && !defined VMX86_VMX && !defined VMCORE && \
!defined INCLUDE_ALLOW_USERLEVEL && !defined INCLUDE_ALLOW_MKS
#error "The surrounding include file is not allowed at userlevel."
#endif
#undef INCLUDE_ALLOW_USERLEVEL
#if defined MODULE && !defined VMKERNEL_MODULE && \
!defined VMMON && !defined INCLUDE_ALLOW_MODULE
#error "The surrounding include file is not allowed in driver modules."
#endif
#undef INCLUDE_ALLOW_MODULE
#if defined VMMON && !defined INCLUDE_ALLOW_VMMON
#error "The surrounding include file is not allowed in vmmon."
#endif
#undef INCLUDE_ALLOW_VMMON
#if defined VMKERNEL && !defined INCLUDE_ALLOW_VMKERNEL
#error "The surrounding include file is not allowed in the vmkernel."
#endif
#undef INCLUDE_ALLOW_VMKERNEL
#if defined GPLED_CODE && !defined INCLUDE_ALLOW_DISTRIBUTE
#error "The surrounding include file is not allowed in GPL code."
#endif
#undef INCLUDE_ALLOW_DISTRIBUTE
#if defined VMKERNEL_MODULE && !defined VMKERNEL && \
!defined INCLUDE_ALLOW_VMK_MODULE && !defined INCLUDE_ALLOW_VMKDRIVERS
#error "The surrounding include file is not allowed in vmkernel modules."
#endif
#undef INCLUDE_ALLOW_VMK_MODULE
#undef INCLUDE_ALLOW_VMKDRIVERS
#if defined VMIROM && ! defined INCLUDE_ALLOW_VMIROM
#error "The surrounding include file is not allowed in vmirom."
#endif
#undef INCLUDE_ALLOW_VMIROM
#if defined INCLUDE_ALLOW_MKS && \
!(defined LOCALMKS || defined REMOTEMKS || \
defined SERVERMKS || defined CLIENTMKS)
#error "The surrounding include file is not allowed outside of the MKS."
#endif
#undef INCLUDE_ALLOW_MKS
vmmemctl-only/shared/vmci_call_defs.h 0000444 0000000 0000000 00000024063 12275350062 016735 0 ustar root root /*********************************************************
* Copyright (C) 2006-2007 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef _VMCI_CALL_DEFS_H_
#define _VMCI_CALL_DEFS_H_
#define INCLUDE_ALLOW_USERLEVEL
#define INCLUDE_ALLOW_MODULE
#define INCLUDE_ALLOW_VMMON
#define INCLUDE_ALLOW_VMCORE
#define INCLUDE_ALLOW_VMKMOD
#define INCLUDE_ALLOW_VMKERNEL
#define INCLUDE_ALLOW_DISTRIBUTE
#include "includeCheck.h"
#include "vm_basic_types.h"
#include "vmci_defs.h"
/*
* All structs here have a size that is an integral multiple of the size of
* their largest member, i.e. a struct with at least one 8-byte member has a
* size that is a multiple of 8, and a struct whose largest member is 4 bytes
* has a size that is a multiple of 4. This is because the Windows compiler
* (CL) enforces this rule; 32-bit gcc does not, e.g. 32-bit gcc can misalign
* an 8-byte member if it is preceded by a 4-byte member.
*/
/*
* Base struct for vmci datagrams.
*/
typedef struct VMCIDatagram {
VMCIHandle dst;
VMCIHandle src;
uint64 payloadSize;
} VMCIDatagram;
/*
* The second flag, VMCI_FLAG_WELLKNOWN_DG_HND, creates a well-known handle
* instead of a per-context handle. VMCI_FLAG_DG_DELAYED_CB defers datagram
* delivery, so that the datagram callback is invoked in a delayed context
* (not interrupt context).
*/
#define VMCI_FLAG_DG_NONE 0
#define VMCI_FLAG_WELLKNOWN_DG_HND 0x1
#define VMCI_FLAG_ANYCID_DG_HND 0x2
#define VMCI_FLAG_DG_DELAYED_CB 0x4
/* Event callback should fire in a delayed context (not interrupt context.) */
#define VMCI_FLAG_EVENT_NONE 0
#define VMCI_FLAG_EVENT_DELAYED_CB 0x1
/*
* Maximum supported size of a VMCI datagram for routable datagrams.
* Datagrams going to the hypervisor are allowed to be larger.
*/
#define VMCI_MAX_DG_SIZE (17 * 4096)
#define VMCI_MAX_DG_PAYLOAD_SIZE (VMCI_MAX_DG_SIZE - sizeof(VMCIDatagram))
#define VMCI_DG_PAYLOAD(_dg) ((void *)((char *)(_dg) + sizeof(VMCIDatagram)))
#define VMCI_DG_HEADERSIZE sizeof(VMCIDatagram)
#define VMCI_DG_SIZE(_dg) (VMCI_DG_HEADERSIZE + (size_t)(_dg)->payloadSize)
#define VMCI_DG_SIZE_ALIGNED(_dg) ((VMCI_DG_SIZE(_dg) + 7) & (size_t)CONST64U(0xfffffffffffffff8))
#define VMCI_MAX_DATAGRAM_QUEUE_SIZE (VMCI_MAX_DG_SIZE * 2)
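/*
 * Worked sizing example (illustrative, not from the original source):
 * assuming VMCIHandle is a pair of 32-bit ids, sizeof(VMCIDatagram) is 24,
 * so a datagram with a 100-byte payload has
 *
 *   VMCI_DG_SIZE(dg) == 24 + 100 == 124
 *   VMCI_DG_SIZE_ALIGNED(dg) == 128   // rounded up to a multiple of 8
 */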
/*
* We allow at least 1024 more event datagrams from the hypervisor past the
* normally allowed datagrams pending for a given context. We define this
* limit on event datagrams from the hypervisor to guard against DoS attack
* from a malicious VM which could repeatedly attach to and detach from a queue
* pair, causing events to be queued at the destination VM. However, the rate
* at which such events can be generated is small since it requires a VM exit
* and handling of queue pair attach/detach call at the hypervisor. Event
* datagrams may be queued up at the destination VM if it has interrupts
* disabled or if it is not draining events for some other reason. 1024
* datagrams is a grossly conservative estimate of the time for which
* interrupts may be disabled in the destination VM, but at the same time does
* not exacerbate the memory pressure problem on the host by much (size of each
* event datagram is small).
*/
#define VMCI_MAX_DATAGRAM_AND_EVENT_QUEUE_SIZE \
(VMCI_MAX_DATAGRAM_QUEUE_SIZE + \
1024 * (sizeof(VMCIDatagram) + sizeof(VMCIEventData_Max)))
/*
* Struct for sending VMCI_DATAGRAM_REQUEST_MAP and
* VMCI_DATAGRAM_REMOVE_MAP datagrams. Struct size is 32 bytes. All
* fields in struct are aligned to their natural alignment. These
* datagrams are obsoleted by the removal of VM to VM communication.
*/
typedef struct VMCIDatagramWellKnownMapMsg {
VMCIDatagram hdr;
VMCIId wellKnownID;
uint32 _pad;
} VMCIDatagramWellKnownMapMsg;
/*
* Struct used for querying, via VMCI_RESOURCES_QUERY, the availability of
* hypervisor resources.
* Struct size is 16 bytes. All fields in struct are aligned to their natural
* alignment.
*/
typedef struct VMCIResourcesQueryHdr {
VMCIDatagram hdr;
uint32 numResources;
uint32 _padding;
} VMCIResourcesQueryHdr;
/*
* Convenience struct for negotiating vectors. Must match layout of
* VMCIResourcesQueryHdr minus the VMCIDatagram header.
*/
typedef struct VMCIResourcesQueryMsg {
uint32 numResources;
uint32 _padding;
VMCI_Resource resources[1];
} VMCIResourcesQueryMsg;
/*
* The maximum number of resources that can be queried using
* VMCI_RESOURCE_QUERY is 31, as the result is encoded in the lower 31
* bits of a positive return value. Negative values are reserved for
* errors.
*/
#define VMCI_RESOURCE_QUERY_MAX_NUM 31
/* Maximum size for the VMCI_RESOURCE_QUERY request. */
#define VMCI_RESOURCE_QUERY_MAX_SIZE (sizeof(VMCIResourcesQueryHdr) + \
VMCI_RESOURCE_QUERY_MAX_NUM * sizeof(VMCI_Resource))
/*
* Struct used for setting the notification bitmap. All fields in
* struct are aligned to their natural alignment.
*/
typedef struct VMCINotifyBitmapSetMsg {
VMCIDatagram hdr;
PPN bitmapPPN;
uint32 _pad;
} VMCINotifyBitmapSetMsg;
/*
* Struct used for linking a doorbell handle with an index in the
* notify bitmap. All fields in struct are aligned to their natural
* alignment.
*/
typedef struct VMCIDoorbellLinkMsg {
VMCIDatagram hdr;
VMCIHandle handle;
uint64 notifyIdx;
} VMCIDoorbellLinkMsg;
/*
* Struct used for unlinking a doorbell handle from an index in the
* notify bitmap. All fields in struct are aligned to their natural
* alignment.
*/
typedef struct VMCIDoorbellUnlinkMsg {
VMCIDatagram hdr;
VMCIHandle handle;
} VMCIDoorbellUnlinkMsg;
/*
* Struct used for generating a notification on a doorbell handle. All
* fields in struct are aligned to their natural alignment.
*/
typedef struct VMCIDoorbellNotifyMsg {
VMCIDatagram hdr;
VMCIHandle handle;
} VMCIDoorbellNotifyMsg;
/*
* This struct is used to contain data for events. Size of this struct is a
* multiple of 8 bytes, and all fields are aligned to their natural alignment.
*/
typedef struct VMCI_EventData {
VMCI_Event event; /* 4 bytes. */
uint32 _pad;
/*
* Event payload is put here.
*/
} VMCI_EventData;
/* Callback invoked to deliver a received datagram; needed for correctly waiting on events. */
typedef int
(*VMCIDatagramRecvCB)(void *clientData, // IN: client data for handler
VMCIDatagram *msg); // IN:
/*
* We use the following inline function to access the payload data associated
* with an event data.
*/
static INLINE void *
VMCIEventDataPayload(VMCI_EventData *evData) // IN:
{
return (void *)((char *)evData + sizeof *evData);
}
/*
* Define the different VMCI_EVENT payload data types here. All structs must
* be a multiple of 8 bytes, and fields must be aligned to their natural
* alignment.
*/
typedef struct VMCIEventPayload_Context {
VMCIId contextID; /* 4 bytes. */
uint32 _pad;
} VMCIEventPayload_Context;
typedef struct VMCIEventPayload_QP {
VMCIHandle handle; /* QueuePair handle. */
VMCIId peerId; /* Context id of attaching/detaching VM. */
uint32 _pad;
} VMCIEventPayload_QP;
/*
* We define the following struct to get the size of the maximum event data
* the hypervisor may send to the guest. If adding a new event payload type
* above, add it to the following struct too (inside the union).
*/
typedef struct VMCIEventData_Max {
VMCI_EventData eventData;
union {
VMCIEventPayload_Context contextPayload;
VMCIEventPayload_QP qpPayload;
} evDataPayload;
} VMCIEventData_Max;
/*
* Struct used for VMCI_EVENT_SUBSCRIBE/UNSUBSCRIBE and VMCI_EVENT_HANDLER
* messages. Struct size is 32 bytes. All fields in struct are aligned to
* their natural alignment.
*/
typedef struct VMCIEventMsg {
VMCIDatagram hdr;
VMCI_EventData eventData; /* Has event type and payload. */
/*
* Payload gets put here.
*/
} VMCIEventMsg;
/*
* We use the following inline function to access the payload data associated
* with an event message.
*/
static INLINE void *
VMCIEventMsgPayload(VMCIEventMsg *eMsg) // IN:
{
return VMCIEventDataPayload(&eMsg->eventData);
}
/* Flags for VMCI QueuePair API. */
#define VMCI_QPFLAG_ATTACH_ONLY 0x1 /* Fail alloc if QP not created by peer. */
#define VMCI_QPFLAG_LOCAL 0x2 /* Only allow attaches from local context. */
#define VMCI_QPFLAG_NONBLOCK 0x4 /* Host won't block when guest is quiesced. */
#define VMCI_QPFLAG_PINNED 0x8 /* Keep all data pages pinned. This flag */
/* must be combined with NONBLOCK. */
/* For asymmetric queuepairs, update as new flags are added. */
#define VMCI_QP_ASYMM (VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED)
#define VMCI_QP_ASYMM_PEER (VMCI_QPFLAG_ATTACH_ONLY | VMCI_QP_ASYMM)
/* Update the following (bitwise OR flags) while adding new flags. */
#define VMCI_QP_ALL_FLAGS (VMCI_QPFLAG_ATTACH_ONLY | VMCI_QPFLAG_LOCAL | \
VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED)
/*
* Structs used for QueuePair alloc and detach messages. We align fields of
* these structs to 64bit boundaries.
*/
typedef struct VMCIQueuePairAllocMsg {
VMCIDatagram hdr;
VMCIHandle handle;
VMCIId peer; /* 32bit field. */
uint32 flags;
uint64 produceSize;
uint64 consumeSize;
uint64 numPPNs;
/* List of PPNs placed here. */
} VMCIQueuePairAllocMsg;
typedef struct VMCIQueuePairDetachMsg {
VMCIDatagram hdr;
VMCIHandle handle;
} VMCIQueuePairDetachMsg;
#endif
vmmemctl-only/shared/compat_log2.h 0000444 0000000 0000000 00000003672 12275350061 016213 0 ustar root root /*********************************************************
* Copyright (C) 2011 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef __COMPAT_LOG2_H__
# define __COMPAT_LOG2_H__
#ifndef LINUX_VERSION_CODE
# error "Include compat_version.h before compat_log2.h"
#endif
/* linux/log2.h was introduced in 2.6.20. */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 19)
# include <linux/log2.h>
#endif
/*
* is_power_of_2 was introduced in 2.6.21. This implementation is almost
* identical to the one found there.
*/
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 20)
#define compat_is_power_of_2(n) is_power_of_2(n)
#else
static inline __attribute__((const))
int compat_is_power_of_2(unsigned long n)
{
return (n != 0 && ((n & (n - 1)) == 0));
}
#endif
/*
* rounddown_pow_of_two was introduced in 2.6.24. This implementation is
* similar to the one in log2.h, but takes an unsigned int instead of an
* unsigned long to avoid more version-related checks for fls_long().
*/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
#define compat_rounddown_pow_of_two(n) rounddown_pow_of_two(n)
#else
static inline __attribute__((const))
unsigned int compat_rounddown_pow_of_two(unsigned int n)
{
return 1U << (fls(n) - 1);
}
#endif
#endif /* __COMPAT_LOG2_H__ */
vmmemctl-only/shared/driver-config.h 0000444 0000000 0000000 00000004314 12275350061 016535 0 ustar root root /*********************************************************
* Copyright (C) 1998 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* Sets the proper defines from the Linux header files
*
* This file must be included before the inclusion of any kernel header file,
* with the exception of linux/autoconf.h and linux/version.h --hpreg
*/
#ifndef __VMX_CONFIG_H__
#define __VMX_CONFIG_H__
#define INCLUDE_ALLOW_VMCORE
#define INCLUDE_ALLOW_VMMON
#define INCLUDE_ALLOW_MODULE
#define INCLUDE_ALLOW_DISTRIBUTE
#define INCLUDE_ALLOW_VMKDRIVERS
#include "includeCheck.h"
#include "compat_version.h"
#include "compat_autoconf.h"
/*
* We rely on Kernel Module support. Check here.
*/
#ifndef CONFIG_MODULES
# error "No Module support in this kernel. Please configure with CONFIG_MODULES"
#endif
/*
* 2.2 kernels still use __SMP__ (derived from CONFIG_SMP
* in the main Makefile), so we define it here as well.
*/
#ifdef CONFIG_SMP
# define __SMP__ 1
#endif
#if defined(CONFIG_MODVERSIONS) && defined(KERNEL_2_1)
# if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,60)
/*
* MODVERSIONS might be already defined when using kernel's Makefiles.
*/
# ifndef MODVERSIONS
# define MODVERSIONS
# endif
# include <linux/modversions.h>
# endif
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
/*
* Force the uintptr_t definition to come from linux/types.h instead of vm_basic_types.h.
*/
# include <linux/types.h>
# define _STDINT_H 1
#endif
#ifndef __KERNEL__
# define __KERNEL__
#endif
#endif
vmmemctl-only/shared/vm_basic_defs.h 0000444 0000000 0000000 00000040372 12275350062 016570 0 ustar root root /*********************************************************
* Copyright (C) 2003-2010 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* vm_basic_defs.h --
*
* Standard macros for VMware source code.
*/
#ifndef _VM_BASIC_DEFS_H_
#define _VM_BASIC_DEFS_H_
#define INCLUDE_ALLOW_USERLEVEL
#define INCLUDE_ALLOW_MODULE
#define INCLUDE_ALLOW_VMMON
#define INCLUDE_ALLOW_VMKERNEL
#define INCLUDE_ALLOW_VMKDRIVERS
#define INCLUDE_ALLOW_VMK_MODULE
#define INCLUDE_ALLOW_DISTRIBUTE
#define INCLUDE_ALLOW_VMCORE
#define INCLUDE_ALLOW_VMIROM
#include "includeCheck.h"
#include "vm_basic_types.h" // For INLINE.
/* Checks for FreeBSD, filtering out VMKERNEL. */
#define __IS_FREEBSD__ (!defined(VMKERNEL) && defined(__FreeBSD__))
#define __IS_FREEBSD_VER__(ver) (__IS_FREEBSD__ && __FreeBSD_version >= (ver))
#if defined _WIN32 && defined USERLEVEL
#include <stddef.h> /*
* We redefine offsetof macro from stddef; make
* sure that it's already defined before we do that.
*/
#include <windows.h> // for Sleep() and LOWORD() etc.
#undef GetFreeSpace // Unpollute preprocessor namespace.
#endif
/*
* Simple macros
*/
#if (defined __APPLE__ || defined __FreeBSD__) && \
(!defined KERNEL && !defined _KERNEL && !defined VMKERNEL && !defined __KERNEL__)
# include <stddef.h>
#else
// XXX the _WIN32 one matches that of VC++, to prevent redefinition warning
// XXX the other one matches that of gcc3.3.3/glibc2.2.4 to prevent redefinition warnings
#ifndef offsetof
#ifdef _WIN32
#define offsetof(s,m) (size_t)&(((s *)0)->m)
#else
#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
#endif
#endif
#endif // __APPLE__
#define VMW_CONTAINER_OF(ptr, type, member) \
((type *)((char *)(ptr) - offsetof(type, member)))
#ifndef ARRAYSIZE
#define ARRAYSIZE(a) (sizeof (a) / sizeof *(a))
#endif
#ifndef MIN
#define MIN(_a, _b) (((_a) < (_b)) ? (_a) : (_b))
#endif
/* The Solaris 9 cross-compiler complains about these not being used */
#ifndef sun
static INLINE int
Min(int a, int b)
{
return a < b ? a : b;
}
#endif
#ifndef MAX
#define MAX(_a, _b) (((_a) > (_b)) ? (_a) : (_b))
#endif
#ifndef sun
static INLINE int
Max(int a, int b)
{
return a > b ? a : b;
}
#endif
#define VMW_CLAMP(x, min, max) \
((x) < (min) ? (min) : ((x) > (max) ? (max) : (x)))
#define ROUNDUP(x,y) (((x) + (y) - 1) / (y) * (y))
#define ROUNDDOWN(x,y) ((x) / (y) * (y))
#define ROUNDUPBITS(x, bits) (((uintptr_t) (x) + MASK(bits)) & ~MASK(bits))
#define ROUNDDOWNBITS(x, bits) ((uintptr_t) (x) & ~MASK(bits))
#define CEILING(x, y) (((x) + (y) - 1) / (y))
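/*
 * Worked examples (illustrative): with x = 10 and y = 4,
 *
 *   ROUNDUP(10, 4) == 12     // next multiple of 4 at or above 10
 *   ROUNDDOWN(10, 4) == 8    // previous multiple of 4
 *   CEILING(10, 4) == 3      // number of 4-unit chunks needed to cover 10
 */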
#if defined __APPLE__
#include <machine/param.h>
#undef MASK
#endif
/*
* The MASK macro behaves badly when given negative numbers or numbers larger
* than the highest order bit number (e.g. 32 on a 32-bit machine) as an
* argument. The range 0..31 is safe.
*/
#define MASK(n) ((1 << (n)) - 1) /* make an n-bit mask */
#define MASK64(n) ((CONST64U(1) << (n)) - 1) /* make an n-bit mask */
/*
* MASKRANGE64 makes a bit vector starting at bit lo and ending at bit hi. No
* checking for lo < hi is done.
*/
#define MASKRANGE64(hi, lo) (MASK64((hi) - (lo) + 1) << (lo))
/* SIGNEXT64 sign extends a n-bit value to 64-bits. */
#define SIGNEXT64(val, n) (((int64)(val) << (64 - (n))) >> (64 - (n)))
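/*
 * Worked examples (illustrative):
 *
 *   MASK(4) == 0xf                   // low 4 bits set
 *   MASKRANGE64(15, 8) == 0xff00     // bits 8..15 set
 *   SIGNEXT64(0xfff, 12) == -1       // 12-bit -1 widened to 64 bits
 */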
#define DWORD_ALIGN(x) ((((x) + 3) >> 2) << 2)
#define QWORD_ALIGN(x) ((((x) + 7) >> 3) << 3)
#define IMPLIES(a,b) (!(a) || (b))
/*
* Not everybody (e.g., the monitor) has NULL
*/
#ifndef NULL
#ifdef __cplusplus
#define NULL 0
#else
#define NULL ((void *)0)
#endif
#endif
/*
* Token concatenation
*
* The C preprocessor doesn't prescan arguments when they are
* concatenated or stringified. So we need extra levels of
* indirection to convince the preprocessor to expand its
* arguments.
*/
#define CONC(x, y) x##y
#define XCONC(x, y) CONC(x, y)
#define XXCONC(x, y) XCONC(x, y)
#define MAKESTR(x) #x
#define XSTR(x) MAKESTR(x)
/*
* Wide versions of string constants.
*/
#ifndef WSTR
#define WSTR_(X) L ## X
#define WSTR(X) WSTR_(X)
#endif
/*
* Page operations
*
* It has been suggested that these definitions belong elsewhere
* (like x86types.h). However, I deem them common enough
* (since even regular user-level programs may want to do
* page-based memory manipulation) to be here.
* -- edward
*/
#ifndef PAGE_SHIFT // {
#if defined VM_I386
#define PAGE_SHIFT 12
#elif defined __APPLE__
#define PAGE_SHIFT 12
#elif defined __arm__
#define PAGE_SHIFT 12
#else
#error
#endif
#endif // }
#ifndef PAGE_SIZE
#define PAGE_SIZE (1<<PAGE_SHIFT)
#endif
#ifndef PAGE_MASK
#define PAGE_MASK (PAGE_SIZE - 1)
#endif
#ifndef PAGE_OFFSET
#define PAGE_OFFSET(_addr) ((uintptr_t)(_addr)&(PAGE_SIZE-1))
#endif
#ifndef VM_PAGE_BASE
#define VM_PAGE_BASE(_addr) ((_addr)&~(PAGE_SIZE-1))
#endif
#ifndef VM_PAGES_SPANNED
#define VM_PAGES_SPANNED(_addr, _size) \
((((_addr) & (PAGE_SIZE - 1)) + (_size) + (PAGE_SIZE - 1)) >> PAGE_SHIFT)
#endif
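/*
 * Worked example (illustrative, PAGE_SHIFT == 12): a 5000-byte buffer
 * starting at 0x1f00 ends at 0x3287, touching pages 0x1, 0x2, and 0x3, so
 * VM_PAGES_SPANNED(0x1f00, 5000) == 3.
 */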
#ifndef BYTES_2_PAGES
#define BYTES_2_PAGES(_nbytes) ((_nbytes) >> PAGE_SHIFT)
#endif
#ifndef PAGES_2_BYTES
#define PAGES_2_BYTES(_npages) (((uint64)(_npages)) << PAGE_SHIFT)
#endif
#ifndef MBYTES_2_PAGES
#define MBYTES_2_PAGES(_nbytes) ((_nbytes) << (20 - PAGE_SHIFT))
#endif
#ifndef PAGES_2_MBYTES
#define PAGES_2_MBYTES(_npages) ((_npages) >> (20 - PAGE_SHIFT))
#endif
#ifndef GBYTES_2_PAGES
#define GBYTES_2_PAGES(_nbytes) ((_nbytes) << (30 - PAGE_SHIFT))
#endif
#ifndef PAGES_2_GBYTES
#define PAGES_2_GBYTES(_npages) ((_npages) >> (30 - PAGE_SHIFT))
#endif
#ifndef BYTES_2_MBYTES
#define BYTES_2_MBYTES(_nbytes) ((_nbytes) >> 20)
#endif
#ifndef MBYTES_2_BYTES
#define MBYTES_2_BYTES(_nbytes) ((uint64)(_nbytes) << 20)
#endif
#ifndef VM_PAE_LARGE_PAGE_SHIFT
#define VM_PAE_LARGE_PAGE_SHIFT 21
#endif
#ifndef VM_PAE_LARGE_PAGE_SIZE
#define VM_PAE_LARGE_PAGE_SIZE (1 << VM_PAE_LARGE_PAGE_SHIFT)
#endif
#ifndef VM_PAE_LARGE_PAGE_MASK
#define VM_PAE_LARGE_PAGE_MASK (VM_PAE_LARGE_PAGE_SIZE - 1)
#endif
#ifndef VM_PAE_LARGE_2_SMALL_PAGES
#define VM_PAE_LARGE_2_SMALL_PAGES (BYTES_2_PAGES(VM_PAE_LARGE_PAGE_SIZE))
#endif
#ifndef NR_MPNS_PER_PAGE
#define NR_MPNS_PER_PAGE (PAGE_SIZE / sizeof(MPN))
#endif
/*
* Word operations
*/
#ifndef LOWORD
#define LOWORD(_dw) ((_dw) & 0xffff)
#endif
#ifndef HIWORD
#define HIWORD(_dw) (((_dw) >> 16) & 0xffff)
#endif
#ifndef LOBYTE
#define LOBYTE(_w) ((_w) & 0xff)
#endif
#ifndef HIBYTE
#define HIBYTE(_w) (((_w) >> 8) & 0xff)
#endif
#define HIDWORD(_qw) ((uint32)((_qw) >> 32))
#define LODWORD(_qw) ((uint32)(_qw))
#define QWORD(_hi, _lo) ((((uint64)(_hi)) << 32) | ((uint32)(_lo)))
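/*
 * Worked examples (illustrative): for _qw == 0x1122334455667788,
 *
 *   HIDWORD(_qw) == 0x11223344
 *   LODWORD(_qw) == 0x55667788
 *   QWORD(0x11223344, 0x55667788) == 0x1122334455667788
 */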
/*
* Deposit a field _src at _pos bits from the right,
* with a length of _len, into the integer _target.
*/
#define DEPOSIT_BITS(_src,_pos,_len,_target) { \
unsigned mask = ((1 << _len) - 1); \
unsigned shiftedmask = ((1 << _len) - 1) << _pos; \
_target = (_target & ~shiftedmask) | ((_src & mask) << _pos); \
}
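/*
 * Worked example (illustrative): depositing the 3-bit value 0x5 at bit
 * position 4 of t == 0xff:
 *
 *   unsigned t = 0xff;
 *   DEPOSIT_BITS(0x5, 4, 3, t);   // t becomes 0xdf
 */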
/*
* Get return address.
*/
#ifdef _MSC_VER
#ifdef __cplusplus
extern "C"
#endif
void *_ReturnAddress(void);
#pragma intrinsic(_ReturnAddress)
#define GetReturnAddress() _ReturnAddress()
#elif __GNUC__
#define GetReturnAddress() __builtin_return_address(0)
#endif
#ifdef __GNUC__
#ifndef sun
static INLINE_SINGLE_CALLER uintptr_t
GetFrameAddr(void)
{
uintptr_t bp;
#if !(__GNUC__ == 4 && (__GNUC_MINOR__ == 0 || __GNUC_MINOR__ == 1))
bp = (uintptr_t)__builtin_frame_address(0);
#else
/*
* We use this assembly hack due to a bug discovered in gcc 4.1.1.
* The bug was fixed in 4.2.0; assume it originated with 4.0.
* PR147638, PR554369.
*/
__asm__ __volatile__(
# if defined(VM_X86_64)
"movq %%rbp, %0\n"
# else
"movl %%ebp, %0\n"
# endif
: "=g" (bp));
#endif
return bp;
}
/*
* Returns the frame pointer of the calling function.
* Equivalent to __builtin_frame_address(1).
*/
static INLINE_SINGLE_CALLER uintptr_t
GetCallerFrameAddr(void)
{
return *(uintptr_t*)GetFrameAddr();
}
#endif // sun
#endif // __GNUC__
/*
* Data prefetch was added in gcc 3.1.1
* http://www.gnu.org/software/gcc/gcc-3.1/changes.html
*/
#ifdef __GNUC__
# if ((__GNUC__ > 3) || (__GNUC__ == 3 && __GNUC_MINOR__ > 1) || \
(__GNUC__ == 3 && __GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL__ >= 1))
# define PREFETCH_R(var) __builtin_prefetch((var), 0 /* read */, \
3 /* high temporal locality */)
# define PREFETCH_W(var) __builtin_prefetch((var), 1 /* write */, \
3 /* high temporal locality */)
# else
# define PREFETCH_R(var) ((void)(var))
# define PREFETCH_W(var) ((void)(var))
# endif
#endif /* __GNUC__ */
#ifdef USERLEVEL // {
/*
* Note this might be a problem on NT b/c while sched_yield guarantees it
* moves you to the end of your priority list, Sleep(0) offers no such
* guarantee. Bummer. --Jeremy.
*/
#if defined(N_PLAT_NLM)
/* We do not have YIELD() as we do not need it yet... */
#elif defined(_WIN32)
# define YIELD() Sleep(0)
#elif defined(VMKERNEL)
/* We don't have a YIELD macro in the vmkernel */
#else
# include <sched.h> // For sched_yield. Don't ask. --Jeremy.
# define YIELD() sched_yield()
#endif
/*
* Standardize some Posix names on Windows.
*/
#ifdef _WIN32 // {
#define snprintf _snprintf
#define strtok_r strtok_s
#if (_MSC_VER < 1500)
#define vsnprintf _vsnprintf
#endif
typedef int uid_t;
typedef int gid_t;
static INLINE void
sleep(unsigned int sec)
{
Sleep(sec * 1000);
}
static INLINE int
usleep(unsigned long usec)
{
Sleep(CEILING(usec, 1000));
return 0;
}
typedef int pid_t;
#define F_OK 0
#define X_OK 1
#define W_OK 2
#define R_OK 4
#endif // }
/*
* Macro for username comparison.
*/
#ifdef _WIN32 // {
#define USERCMP(x,y) Str_Strcasecmp(x,y)
#else
#define USERCMP(x,y) strcmp(x,y)
#endif // }
#endif // }
#ifndef va_copy
#ifdef _WIN32
/*
* Windows needs va_copy. This works for both 32 and 64-bit Windows
* based on inspection of how varargs.h from the Visual C CRTL is
* implemented. (Future versions of the RTL may break this).
*/
#define va_copy(dest, src) ((dest) = (src))
#elif defined(__APPLE__) && defined(KERNEL)
#include "availabilityMacOS.h"
#if MAC_OS_X_VERSION_MIN_REQUIRED >= 1050
// The Mac OS 10.5 kernel SDK defines va_copy in stdarg.h.
#include <stdarg.h>
#else
/*
* The Mac OS 10.4 kernel SDK needs va_copy. Based on inspection of
* stdarg.h from the MacOSX10.4u.sdk kernel framework, this should
* work.
*/
#define va_copy(dest, src) ((dest) = (src))
#endif // MAC_OS_X_VERSION_MIN_REQUIRED
#elif defined(__GNUC__) && (__GNUC__ < 3)
/*
* Old versions of gcc recognize __va_copy, but not va_copy.
*/
#define va_copy(dest, src) __va_copy(dest, src)
#endif // _WIN32
#endif // va_copy
/*
* This one is outside USERLEVEL because it's used by
* files compiled into the Windows hgfs driver or the display
* driver.
*/
#ifdef _WIN32
#define PATH_MAX 256
#ifndef strcasecmp
#define strcasecmp(_s1,_s2) _stricmp((_s1),(_s2))
#endif
#ifndef strncasecmp
#define strncasecmp(_s1,_s2,_n) _strnicmp((_s1),(_s2),(_n))
#endif
#endif
#if defined __linux__ && !defined __KERNEL__ && !defined MODULE && \
!defined VMM && !defined FROBOS && !defined __ANDROID__
#include <features.h>
#if __GLIBC_PREREQ(2, 1) && !defined GLIBC_VERSION_21
#define GLIBC_VERSION_21
#endif
#if __GLIBC_PREREQ(2, 2) && !defined GLIBC_VERSION_22
#define GLIBC_VERSION_22
#endif
#if __GLIBC_PREREQ(2, 3) && !defined GLIBC_VERSION_23
#define GLIBC_VERSION_23
#endif
#if __GLIBC_PREREQ(2, 4) && !defined GLIBC_VERSION_24
#define GLIBC_VERSION_24
#endif
#if __GLIBC_PREREQ(2, 5) && !defined GLIBC_VERSION_25
#define GLIBC_VERSION_25
#endif
#if __GLIBC_PREREQ(2, 12) && !defined GLIBC_VERSION_212
#define GLIBC_VERSION_212
#endif
#endif
/*
* Convenience macros and definitions. Can often be used instead of #ifdef.
*/
#undef DEBUG_ONLY
#ifdef VMX86_DEBUG
#define vmx86_debug 1
#define DEBUG_ONLY(x) x
#else
#define vmx86_debug 0
#define DEBUG_ONLY(x)
#endif
#ifdef VMX86_STATS
#define vmx86_stats 1
#define STATS_ONLY(x) x
#else
#define vmx86_stats 0
#define STATS_ONLY(x)
#endif
#ifdef VMX86_DEVEL
#define vmx86_devel 1
#define DEVEL_ONLY(x) x
#else
#define vmx86_devel 0
#define DEVEL_ONLY(x)
#endif
#ifdef VMX86_LOG
#define vmx86_log 1
#define LOG_ONLY(x) x
#else
#define vmx86_log 0
#define LOG_ONLY(x)
#endif
#ifdef VMX86_BETA
#define vmx86_beta 1
#define BETA_ONLY(x) x
#else
#define vmx86_beta 0
#define BETA_ONLY(x)
#endif
#ifdef VMX86_RELEASE
#define vmx86_release 1
#define RELEASE_ONLY(x) x
#else
#define vmx86_release 0
#define RELEASE_ONLY(x)
#endif
#ifdef VMX86_SERVER
#define vmx86_server 1
#define SERVER_ONLY(x) x
#define HOSTED_ONLY(x)
#else
#define vmx86_server 0
#define SERVER_ONLY(x)
#define HOSTED_ONLY(x) x
#endif
#ifdef VMKERNEL
#define vmkernel 1
#define VMKERNEL_ONLY(x) x
#else
#define vmkernel 0
#define VMKERNEL_ONLY(x)
#endif
#ifdef _WIN32
#define WIN32_ONLY(x) x
#define POSIX_ONLY(x)
#define vmx86_win32 1
#else
#define WIN32_ONLY(x)
#define POSIX_ONLY(x) x
#define vmx86_win32 0
#endif
#ifdef __linux__
#define vmx86_linux 1
#define LINUX_ONLY(x) x
#else
#define vmx86_linux 0
#define LINUX_ONLY(x)
#endif
#ifdef __APPLE__
#define vmx86_apple 1
#define APPLE_ONLY(x) x
#else
#define vmx86_apple 0
#define APPLE_ONLY(x)
#endif
#ifdef VMM
#define VMM_ONLY(x) x
#define USER_ONLY(x)
#else
#define VMM_ONLY(x)
#define USER_ONLY(x) x
#endif
/* VMVISOR ifdef only allowed in the vmkernel */
#ifdef VMKERNEL
#ifdef VMVISOR
#define vmvisor 1
#define VMVISOR_ONLY(x) x
#else
#define vmvisor 0
#define VMVISOR_ONLY(x)
#endif
#endif
#ifdef _WIN32
#define VMW_INVALID_HANDLE INVALID_HANDLE_VALUE
#else
#define VMW_INVALID_HANDLE (-1LL)
#endif
#ifdef _WIN32
#define fsync(fd) _commit(fd)
#define fileno(f) _fileno(f)
#else
#endif
/*
* Debug output macros for Windows drivers (the Eng variant is for
* display/printer drivers only).
*/
#ifdef _WIN32
#ifndef USES_OLD_WINDDK
#if defined(VMX86_LOG)
#ifdef _WIN64
#define WinDrvPrint(arg, ...) DbgPrintEx(DPFLTR_IHVDRIVER_ID, (ULONG)~0, arg, __VA_ARGS__)
#else
#define WinDrvPrint(arg, ...) DbgPrint(arg, __VA_ARGS__)
#endif
#define WinDrvEngPrint(arg, ...) EngDbgPrint(arg, __VA_ARGS__)
#else
#define WinDrvPrint(arg, ...)
#define WinDrvEngPrint(arg, ...)
#endif
#endif
#endif // _WIN32
#ifdef HOSTED_LG_PG
#define hosted_lg_pg 1
#else
#define hosted_lg_pg 0
#endif
/*
* Use to initialize cbSize for this structure to preserve < Vista
* compatibility.
*/
#define NONCLIENTMETRICSINFO_V1_SIZE CCSIZEOF_STRUCT(NONCLIENTMETRICS, \
lfMessageFont)
/* This is not intended to be thread-safe. */
#define DO_ONCE(code) \
do { \
static Bool _doOnceDone = FALSE; \
if (UNLIKELY(!_doOnceDone)) { \
_doOnceDone = TRUE; \
code; \
} \
} while (0)
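/*
 * Illustrative usage (the logging call is hypothetical): the body runs only
 * on the first pass through this code path; as noted above, DO_ONCE is not
 * thread-safe.
 *
 *   DO_ONCE(Log("one-time initialization message\n"));
 */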
/*
* Bug 827422 and 838523.
*/
#if defined __GNUC__ && __GNUC__ >= 4
#define VISIBILITY_HIDDEN __attribute__((visibility("hidden")))
#else
#define VISIBILITY_HIDDEN /* nothing */
#endif
#endif // ifndef _VM_BASIC_DEFS_H_
vmmemctl-only/shared/compat_statfs.h 0000444 0000000 0000000 00000002306 12275350061 016645 0 ustar root root /*********************************************************
* Copyright (C) 2006 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef __COMPAT_STATFS_H__
# define __COMPAT_STATFS_H__
/* vfs.h simply includes statfs.h, but it knows what directory statfs.h is in. */
#include <linux/vfs.h>
/* 2.5.74 renamed struct statfs to kstatfs. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 74)
#define compat_kstatfs kstatfs
#else
#define compat_kstatfs statfs
#endif
#endif /* __COMPAT_STATFS_H__ */
vmmemctl-only/shared/compat_semaphore.h 0000444 0000000 0000000 00000003142 12275350061 017323 0 ustar root root /*********************************************************
* Copyright (C) 2002 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef __COMPAT_SEMAPHORE_H__
# define __COMPAT_SEMAPHORE_H__
/* <= 2.6.25 have asm only, 2.6.26 has both, and 2.6.27-rc2+ has linux only. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
# include <asm/semaphore.h>
#else
# include <linux/semaphore.h>
#endif
/*
* The init_MUTEX_LOCKED() API appeared in 2.2.18, and is also in
* 2.2.17-21mdk --hpreg
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 18)
#ifndef init_MUTEX_LOCKED
#define init_MUTEX_LOCKED(_sem) *(_sem) = MUTEX_LOCKED
#endif
#ifndef DECLARE_MUTEX
#define DECLARE_MUTEX(name) struct semaphore name = MUTEX
#endif
#ifndef DECLARE_MUTEX_LOCKED
#define DECLARE_MUTEX_LOCKED(name) struct semaphore name = MUTEX_LOCKED
#endif
#endif
#endif /* __COMPAT_SEMAPHORE_H__ */
vmmemctl-only/shared/vm_basic_asm_x86_64.h 0000444 0000000 0000000 00000035733 12275350062 017452 0 ustar root root /*********************************************************
* Copyright (C) 1998-2004 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* vm_basic_asm_x86_64.h
*
* Basic x86_64 asm macros.
*/
#ifndef _VM_BASIC_ASM_X86_64_H_
#define _VM_BASIC_ASM_X86_64_H_
#define INCLUDE_ALLOW_USERLEVEL
#define INCLUDE_ALLOW_MODULE
#define INCLUDE_ALLOW_VMMON
#define INCLUDE_ALLOW_VMK_MODULE
#define INCLUDE_ALLOW_VMKERNEL
#define INCLUDE_ALLOW_DISTRIBUTE
#define INCLUDE_ALLOW_VMCORE
#include "includeCheck.h"
#ifndef VM_X86_64
#error "This file is x86-64 only!"
#endif
#ifdef _MSC_VER
#ifdef __cplusplus
extern "C" {
#endif
uint64 _umul128(uint64 multiplier, uint64 multiplicand,
uint64 *highProduct);
int64 _mul128(int64 multiplier, int64 multiplicand,
int64 *highProduct);
uint64 __shiftright128(uint64 lowPart, uint64 highPart, uint8 shift);
#ifdef __cplusplus
}
#endif
#pragma intrinsic(_umul128, _mul128, __shiftright128)
#endif // _MSC_VER
/*
* GET_CURRENT_RIP
*
* Return an approximation of the current instruction pointer. For example for a
* function call
* foo.c
* L123: Foo(GET_CURRENT_RIP())
*
* The return value from GET_CURRENT_RIP will point a debugger to L123.
*/
#if defined(__GNUC__)
#define GET_CURRENT_RIP() ({ \
void *__rip; \
asm("lea 0(%%rip), %0;\n\t" \
: "=r" (__rip)); \
__rip; \
})
#endif
/*
* FXSAVE/FXRSTOR
* save/restore SIMD/MMX fpu state
*
* The pointer passed in must be 16-byte aligned.
*
* Intel and AMD processors behave differently w.r.t. fxsave/fxrstor. Intel
* processors unconditionally save the exception pointer state (instruction
* ptr., data ptr., and error instruction opcode). FXSAVE_ES1 and FXRSTOR_ES1
* work correctly for Intel processors.
*
* AMD processors only save the exception pointer state if ES=1. This leads to a
* security hole whereby one process/VM can inspect the state of another process
* VM. The AMD recommended workaround involves clobbering the exception pointer
* state unconditionally, and this is implemented in FXRSTOR_AMD_ES0. Note that
* FXSAVE_ES1 will only save the exception pointer state for AMD processors if
* ES=1.
*
* The workaround (FXRSTOR_AMD_ES0) only costs 1 cycle more than just doing an
* fxrstor, on both AMD Opteron and Intel Core CPUs.
*/
#if defined(__GNUC__)
static INLINE void
FXSAVE_ES1(void *save)
{
__asm__ __volatile__ ("fxsaveq %0 \n" : "=m" (*(uint8 *)save) : : "memory");
}
static INLINE void
FXSAVE_COMPAT_ES1(void *save)
{
__asm__ __volatile__ ("fxsave %0 \n" : "=m" (*(uint8 *)save) : : "memory");
}
static INLINE void
FXRSTOR_ES1(const void *load)
{
__asm__ __volatile__ ("fxrstorq %0 \n"
: : "m" (*(const uint8 *)load) : "memory");
}
static INLINE void
FXRSTOR_COMPAT_ES1(const void *load)
{
__asm__ __volatile__ ("fxrstor %0 \n"
: : "m" (*(const uint8 *)load) : "memory");
}
static INLINE void
FXRSTOR_AMD_ES0(const void *load)
{
uint64 dummy = 0;
__asm__ __volatile__
("fnstsw %%ax \n" // Grab x87 ES bit
"bt $7,%%ax \n" // Test ES bit
"jnc 1f \n" // Jump if ES=0
"fnclex \n" // ES=1. Clear it so fild doesn't trap
"1: \n"
"ffree %%st(7) \n" // Clear tag bit - avoid poss. stack overflow
"fildl %0 \n" // Dummy Load from "safe address" changes all
// x87 exception pointers.
"fxrstorq %1 \n"
:
: "m" (dummy), "m" (*(const uint8 *)load)
: "ax", "memory");
}
#endif /* __GNUC__ */
/*
* XSAVE/XRSTOR
* save/restore GSSE/SIMD/MMX fpu state
*
* The pointer passed in must be 64-byte aligned.
* See above comment for more information.
*/
#if defined(__GNUC__) && (defined(VMM) || defined(VMKERNEL) || defined(FROBOS))
static INLINE void
XSAVE_ES1(void *save, uint64 mask)
{
#if __GNUC__ < 4 || __GNUC__ == 4 && __GNUC_MINOR__ == 1
__asm__ __volatile__ (
".byte 0x48, 0x0f, 0xae, 0x21 \n"
:
: "c" ((uint8 *)save), "a" ((uint32)mask), "d" ((uint32)(mask >> 32))
: "memory");
#else
__asm__ __volatile__ (
"xsaveq %0 \n"
: "=m" (*(uint8 *)save)
: "a" ((uint32)mask), "d" ((uint32)(mask >> 32))
: "memory");
#endif
}
static INLINE void
XSAVE_COMPAT_ES1(void *save, uint64 mask)
{
#if __GNUC__ < 4 || __GNUC__ == 4 && __GNUC_MINOR__ == 1
__asm__ __volatile__ (
".byte 0x0f, 0xae, 0x21 \n"
:
: "c" ((uint8 *)save), "a" ((uint32)mask), "d" ((uint32)(mask >> 32))
: "memory");
#else
__asm__ __volatile__ (
"xsave %0 \n"
: "=m" (*(uint8 *)save)
: "a" ((uint32)mask), "d" ((uint32)(mask >> 32))
: "memory");
#endif
}
static INLINE void
XSAVEOPT_ES1(void *save, uint64 mask)
{
__asm__ __volatile__ (
".byte 0x48, 0x0f, 0xae, 0x31 \n"
:
: "c" ((uint8 *)save), "a" ((uint32)mask), "d" ((uint32)(mask >> 32))
: "memory");
}
static INLINE void
XRSTOR_ES1(const void *load, uint64 mask)
{
#if __GNUC__ < 4 || __GNUC__ == 4 && __GNUC_MINOR__ == 1
__asm__ __volatile__ (
".byte 0x48, 0x0f, 0xae, 0x29 \n"
:
: "c" ((const uint8 *)load),
"a" ((uint32)mask), "d" ((uint32)(mask >> 32))
: "memory");
#else
__asm__ __volatile__ (
"xrstorq %0 \n"
:
: "m" (*(const uint8 *)load),
"a" ((uint32)mask), "d" ((uint32)(mask >> 32))
: "memory");
#endif
}
static INLINE void
XRSTOR_COMPAT_ES1(const void *load, uint64 mask)
{
#if __GNUC__ < 4 || __GNUC__ == 4 && __GNUC_MINOR__ == 1
__asm__ __volatile__ (
".byte 0x0f, 0xae, 0x29 \n"
:
: "c" ((const uint8 *)load),
"a" ((uint32)mask), "d" ((uint32)(mask >> 32))
: "memory");
#else
__asm__ __volatile__ (
"xrstor %0 \n"
:
: "m" (*(const uint8 *)load),
"a" ((uint32)mask), "d" ((uint32)(mask >> 32))
: "memory");
#endif
}
static INLINE void
XRSTOR_AMD_ES0(const void *load, uint64 mask)
{
uint64 dummy = 0;
__asm__ __volatile__
("fnstsw %%ax \n" // Grab x87 ES bit
"bt $7,%%ax \n" // Test ES bit
"jnc 1f \n" // Jump if ES=0
"fnclex \n" // ES=1. Clear it so fild doesn't trap
"1: \n"
"ffree %%st(7) \n" // Clear tag bit - avoid poss. stack overflow
"fildl %0 \n" // Dummy Load from "safe address" changes all
// x87 exception pointers.
"mov %%ebx, %%eax \n"
#if __GNUC__ < 4 || __GNUC__ == 4 && __GNUC_MINOR__ == 1
".byte 0x48, 0x0f, 0xae, 0x29 \n"
:
: "m" (dummy), "c" ((const uint8 *)load),
"b" ((uint32)mask), "d" ((uint32)(mask >> 32))
#else
"xrstorq %1 \n"
:
: "m" (dummy), "m" (*(const uint8 *)load),
"b" ((uint32)mask), "d" ((uint32)(mask >> 32))
#endif
: "eax", "memory");
}
#endif /* __GNUC__ */
/*
* XTEST
* Return TRUE if processor is in transaction region.
*
*/
#if defined(__GNUC__) && (defined(VMM) || defined(VMKERNEL) || defined(FROBOS))
static INLINE Bool
xtest(void)
{
uint8 al;
__asm__ __volatile__(".byte 0x0f, 0x01, 0xd6 # xtest \n"
"setnz %%al\n"
: "=a"(al) : : "cc");
return al;
}
#endif /* __GNUC__ */
/*
*-----------------------------------------------------------------------------
*
* Mul64x6464 --
*
* Unsigned integer by fixed point multiplication:
* result = multiplicand * multiplier >> shift
*
* Unsigned 64-bit integer multiplicand.
* Unsigned 64-bit fixed point multiplier, represented as
* multiplier >> shift, where shift < 64.
* Unsigned 64-bit integer product.
*
* Implementation:
* Multiply 64x64 bits to yield a full 128-bit product.
* Shift result in RDX:RAX right by "shift".
* Return the low-order 64 bits of the above.
*
* Result:
* Product
*
*-----------------------------------------------------------------------------
*/
#if defined(__GNUC__)
static INLINE uint64
Mul64x6464(uint64 multiplicand,
uint64 multiplier,
uint32 shift)
{
uint64 result, dummy;
__asm__("mulq %3 \n\t"
"shrdq %b4, %1, %0 \n\t"
: "=a" (result),
"=d" (dummy)
: "0" (multiplier),
"rm" (multiplicand),
"c" (shift)
: "cc");
return result;
}
#elif defined(_MSC_VER)
static INLINE uint64
Mul64x6464(uint64 multiplicand, uint64 multiplier, uint32 shift)
{
uint64 tmplo, tmphi;
tmplo = _umul128(multiplicand, multiplier, &tmphi);
return __shiftright128(tmplo, tmphi, (uint8) shift);
}
#endif
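/*
 * Worked example (illustrative): the fixed-point ratio 3/8 is represented as
 * multiplier 3 with shift 3, so
 *
 *   Mul64x6464(1000, 3, 3) == 375   // (1000 * 3) >> 3
 */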
/*
*-----------------------------------------------------------------------------
*
* Muls64x64s64 --
*
* Signed integer by fixed point multiplication:
* result = multiplicand * multiplier >> shift
*
* Signed 64-bit integer multiplicand.
* Unsigned 64-bit fixed point multiplier, represented as
* multiplier >> shift, where shift < 64.
* Signed 64-bit integer product.
*
* Implementation:
* Multiply 64x64 bits to yield a full 128-bit product.
* Shift result in RDX:RAX right by "shift".
* Return the low-order 64 bits of the above.
*
* Note: using an unsigned shift instruction is correct because
* shift < 64 and we return only the low 64 bits of the shifted
* result.
*
* Result:
* Product
*
*-----------------------------------------------------------------------------
*/
#if defined(__GNUC__)
static INLINE int64
Muls64x64s64(int64 multiplicand, int64 multiplier, uint32 shift)
{
int64 result, dummy;
__asm__("imulq %3 \n\t"
"shrdq %b4, %1, %0 \n\t"
: "=a" (result),
"=d" (dummy)
: "0" (multiplier),
"rm" (multiplicand),
"c" (shift)
: "cc");
return result;
}
#elif defined(_MSC_VER)
static INLINE int64
Muls64x64s64(int64 multiplicand, int64 multiplier, uint32 shift)
{
int64 tmplo, tmphi;
tmplo = _mul128(multiplicand, multiplier, &tmphi);
return __shiftright128(tmplo, tmphi, (uint8) shift);
}
#endif
/*
*-----------------------------------------------------------------------------
*
* Mul64x3264 --
*
* Unsigned integer by fixed point multiplication:
* result = multiplicand * multiplier >> shift
*
* Unsigned 64-bit integer multiplicand.
* Unsigned 32-bit fixed point multiplier, represented as
* multiplier >> shift, where shift < 64.
* Unsigned 64-bit integer product.
*
* Implementation:
* Multiply 64x64 bits to yield a full 128-bit product.
* Shift result in RDX:RAX right by "shift".
* Return the low-order 64 bits of the above.
*
* Result:
* Return the low-order 64 bits of ((multiplicand * multiplier) >> shift)
*
*-----------------------------------------------------------------------------
*/
static INLINE uint64
Mul64x3264(uint64 multiplicand, uint32 multiplier, uint32 shift)
{
return Mul64x6464(multiplicand, multiplier, shift);
}
/*
*-----------------------------------------------------------------------------
*
* Muls64x32s64 --
*
* Signed integer by fixed point multiplication:
* result = (multiplicand * multiplier) >> shift
*
* Signed 64-bit integer multiplicand.
* Unsigned 32-bit fixed point multiplier, represented as
* multiplier >> shift, where shift < 64.
* Signed 64-bit integer product.
*
* Implementation:
* Multiply 64x64 bits to yield a full 128-bit product.
* Shift result in RDX:RAX right by "shift".
* Return the low-order 64 bits of the above.
*
* Result:
* Return the low-order 64 bits of ((multiplicand * multiplier) >> shift)
*
*-----------------------------------------------------------------------------
*/
static INLINE int64
Muls64x32s64(int64 multiplicand, uint32 multiplier, uint32 shift)
{
return Muls64x64s64(multiplicand, multiplier, shift);
}
#if defined(__GNUC__)
static INLINE void *
uint64set(void *dst, uint64 val, uint64 count)
{
int dummy0;
int dummy1;
__asm__ __volatile__("\t"
"cld" "\n\t"
"rep ; stosq" "\n"
: "=c" (dummy0), "=D" (dummy1)
: "0" (count), "1" (dst), "a" (val)
: "memory", "cc");
return dst;
}
#endif
/*
*-----------------------------------------------------------------------------
*
* Div643232 --
*
* Unsigned integer division:
* The dividend is 64-bit wide
* The divisor is 32-bit wide
* The quotient is 32-bit wide
*
* Use this function only if you are certain that the quotient will fit in
* 32 bits. If it does not, the 32-bit version generates a #DE exception,
* while this 64-bit version silently truncates the quotient. So please be
* careful.
*
* Results:
* Quotient and remainder
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
#if defined(__GNUC__) || defined(_MSC_VER)
static INLINE void
Div643232(uint64 dividend, // IN
uint32 divisor, // IN
uint32 *quotient, // OUT
uint32 *remainder) // OUT
{
*quotient = (uint32)(dividend / divisor);
*remainder = (uint32)(dividend % divisor);
}
#endif
/*
*-----------------------------------------------------------------------------
*
* Div643264 --
*
* Unsigned integer division:
* The dividend is 64-bit wide
* The divisor is 32-bit wide
* The quotient is 64-bit wide
*
* Results:
* Quotient and remainder
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
#if defined(__GNUC__)
static INLINE void
Div643264(uint64 dividend, // IN
uint32 divisor, // IN
uint64 *quotient, // OUT
uint32 *remainder) // OUT
{
*quotient = dividend / divisor;
*remainder = dividend % divisor;
}
#endif
#endif // _VM_BASIC_ASM_X86_64_H_
vmmemctl-only/shared/kernelStubsLinux.c 0000444 0000000 0000000 00000022635 12275350045 017323 0 ustar root root /*********************************************************
* Copyright (C) 2006 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* kernelStubsLinux.c
*
* This file contains implementations of common userspace functions in terms
* that the Linux kernel can understand.
*/
/* Must come before any kernel header file */
#include "driver-config.h"
#include "kernelStubs.h"
#include "compat_kernel.h"
#include "compat_page.h"
#include "compat_sched.h"
#include <linux/slab.h>
#include "vm_assert.h"
/*
*-----------------------------------------------------------------------------
*
* Panic --
*
* Prints the debug message and stops the system.
*
* Results:
* None.
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
void
Panic(const char *fmt, ...) // IN
{
va_list args;
char *result;
va_start(args, fmt);
result = Str_Vasprintf(NULL, fmt, args);
va_end(args);
if (result) {
printk(KERN_EMERG "%s", result);
}
BUG();
while (1); // Avoid compiler warning.
}
/*
*----------------------------------------------------------------------
*
* Str_Strcpy--
*
* Wrapper for strcpy that checks for buffer overruns.
*
* Results:
* Same as strcpy.
*
* Side effects:
* None.
*
*----------------------------------------------------------------------
*/
char *
Str_Strcpy(char *buf, // OUT
const char *src, // IN
size_t maxSize) // IN
{
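/* stack[-1] below is a best-effort attempt to report the caller's return address (assumes a cdecl-style stack layout). */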
unsigned int *stack = (unsigned int *)&buf;
size_t len;
len = strlen(src);
if (len >= maxSize) {
Panic("%s:%d Buffer too small 0x%x\n", __FILE__,__LINE__,
stack[-1]);
}
return memcpy(buf, src, len + 1);
}
/*
*----------------------------------------------------------------------
*
* Str_Vsnprintf --
*
* Compatibility wrapper between different libc versions.
*
* Results:
* int - number of bytes written (not including NULL terminate character),
* -1 on overflow (insufficient space for NULL terminate is considered
* overflow)
*
* NB: on overflow the buffer WILL be null terminated
*
* Side effects:
* None
*
*----------------------------------------------------------------------
*/
int
Str_Vsnprintf(char *str, // OUT
size_t size, // IN
const char *format, // IN
va_list arguments) // IN
{
int retval;
retval = vsnprintf(str, size, format, arguments);
/*
* Linux glibc 2.0.x (which we shouldn't be linking against) returns -1
* and null terminates, but glibc 2.1.x follows C99 and returns the number
* of characters that would have been written.
*/
if (retval >= size) {
return -1;
}
return retval;
}
/*
*-----------------------------------------------------------------------------
*
* Str_Vasprintf --
*
* Allocate and format a string, using the GNU libc way to specify the
* format (i.e. optionally allow the use of positional parameters)
*
* Results:
* The allocated string on success (if 'length' is not NULL, *length
* is set to the length of the allocated string)
* NULL on failure
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
char *
Str_Vasprintf(size_t *length, // OUT
const char *format, // IN
va_list arguments) // IN
{
/*
* Simple implementation of Str_Vasprintf when userlevel libraries are not
* available (e.g. for use in drivers). We just fallback to vsnprintf,
* doubling if we didn't have enough space.
*/
unsigned int bufSize;
char *buf;
int retval;
bufSize = strlen(format);
buf = NULL;
do {
/*
* Initial allocation of strlen(format) * 2. Should this be tunable?
* XXX Yes, this could overflow and spin forever when you get near 2GB
* allocations. I don't care. --rrdharan
*/
va_list args2;
bufSize *= 2;
buf = realloc(buf, bufSize);
if (!buf) {
return NULL;
}
va_copy(args2, arguments);
retval = Str_Vsnprintf(buf, bufSize, format, args2);
va_end(args2);
} while (retval == -1);
if (length) {
*length = retval;
}
/*
* Try to trim the buffer here to save memory?
*/
return buf;
}
/*
*-----------------------------------------------------------------------------
*
* Str_Asprintf --
*
* Same as Str_Vasprintf(), but parameters are passed inline --hpreg
*
* Results:
* Same as Str_Vasprintf()
*
* Side effects:
* Same as Str_Vasprintf()
*
*-----------------------------------------------------------------------------
*/
char *
Str_Asprintf(size_t *length, // OUT
const char *format, // IN
...) // IN
{
va_list arguments;
char *result;
va_start(arguments, format);
result = Str_Vasprintf(length, format, arguments);
va_end(arguments);
return result;
}
/*
*-----------------------------------------------------------------------------
*
* strdup --
*
* Duplicates a string.
*
* Results:
* A pointer to memory containing the duplicated string or NULL if no
* memory was available.
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
char *
strdup(const char *source) // IN
{
char *target = NULL;
if (source) {
/*
* We call our special implementation of malloc() because the users of
* strdup() will call free(), and that'll decrement the pointer before
* freeing it. Thus, we need to make sure that the allocated block
* also stores the block length before the block itself (see malloc()
* below).
*/
unsigned int len = strlen(source);
target = malloc(len + 1);
if (target) {
memcpy(target, source, len + 1);
}
}
return target;
}
/*
*----------------------------------------------------------------------------
*
* malloc --
*
* Allocate memory using kmalloc. There is no realloc
* equivalent, so we roll our own by padding each allocation with
* 4 (or 8 for 64 bit guests) extra bytes to store the block length.
*
* Results:
* Pointer to driver heap memory, offset by 4 (or 8)
* bytes from the real block pointer.
*
* Side effects:
* None.
*
*----------------------------------------------------------------------------
*/
void *
malloc(size_t size) // IN
{
size_t *ptr;
ptr = kmalloc(size + sizeof size, GFP_KERNEL);
if (ptr) {
*ptr++ = size;
}
return ptr;
}
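/*
* Resulting layout (illustrative sketch for malloc(12) on a 32-bit guest,
* where sizeof(size_t) == 4, so kmalloc() is asked for 16 bytes):
*
*    base (from kmalloc):  [ size_t length = 12 ][ 12 usable bytes ... ]
*                          ^                     ^
*                          kfree()'d by free()   pointer returned to caller
*/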
/*
*---------------------------------------------------------------------------
*
* free --
*
* Free memory allocated by a previous call to malloc, calloc or realloc.
*
* Results:
* None.
*
* Side effects:
* Calls kfree to free the real (base) pointer.
*
*---------------------------------------------------------------------------
*/
void
free(void *mem) // IN
{
if (mem) {
size_t *dataPtr = (size_t *)mem;
kfree(--dataPtr);
}
}
/*
*----------------------------------------------------------------------------
*
* calloc --
*
* Malloc and zero.
*
* Results:
* Pointer to driver heap memory (see malloc, above).
*
* Side effects:
* None.
*
*----------------------------------------------------------------------------
*/
void *
calloc(size_t num, // IN
size_t len) // IN
{
size_t size;
void *ptr;
if (len != 0 && num > (size_t)-1 / len) {
return NULL; /* num * len would overflow. */
}
size = num * len;
ptr = malloc(size);
if (ptr) {
memset(ptr, 0, size);
}
return ptr;
}
/*
*----------------------------------------------------------------------------
*
* realloc --
*
* Since the driver heap has no realloc equivalent, we have to roll our
* own. Fortunately, we can retrieve the block size of every block we
* hand out since we stashed it at allocation time (see malloc above).
*
* Results:
* Pointer to memory block valid for 'newSize' bytes, or NULL if
* allocation failed.
*
* Side effects:
* Could copy memory around.
*
*----------------------------------------------------------------------------
*/
void *
realloc(void *ptr, // IN
size_t newSize) // IN
{
void *newPtr;
size_t *dataPtr;
size_t length, lenUsed;
dataPtr = (size_t *)ptr;
length = ptr ? dataPtr[-1] : 0;
if (newSize == 0) {
if (ptr) {
free(ptr);
newPtr = NULL;
} else {
newPtr = malloc(newSize);
}
} else if (newSize == length) {
newPtr = ptr;
} else if ((newPtr = malloc(newSize))) {
if (length < newSize) {
lenUsed = length;
} else {
lenUsed = newSize;
}
memcpy(newPtr, ptr, lenUsed);
free(ptr);
}
return newPtr;
}
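/*
* Example behavior (illustrative sketch):
*
*    char *p = malloc(16);
*    char *q = realloc(p, 64);  // On success the first 16 bytes are copied
*                               // and 'p' is freed.
*    if (!q) {
*       free(p);                // On failure the original block is untouched.
*    }
*/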
vmmemctl-only/shared/vmware_pack_end.h 0000444 0000000 0000000 00000002470 12275350062 017126 0 ustar root root /*********************************************************
* Copyright (C) 2002 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* vmware_pack_end.h --
*
* End of structure packing. See vmware_pack_init.h for details.
*
* Note that we do not use the following construct in this include file,
* because we want to emit the code every time the file is included --hpreg
*
* #ifndef foo
* # define foo
* ...
* #endif
*
*/
#include "vmware_pack_init.h"
#ifdef _MSC_VER
# pragma pack(pop)
#elif __GNUC__
__attribute__((__packed__))
#else
# error Compiler packing...
#endif
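/*
* Typical pairing (illustrative sketch, assuming the companion
* vmware_pack_begin.h header that emits the matching pack(push) /
* packed-attribute prologue):
*
*    typedef
*    #include "vmware_pack_begin.h"
*    struct MyWireFormat {
*       uint32 id;
*       uint8 flags;
*    }
*    #include "vmware_pack_end.h"
*    MyWireFormat;
*/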
vmmemctl-only/shared/vm_assert.h 0000444 0000000 0000000 00000025722 12275350062 016011 0 ustar root root /*********************************************************
* Copyright (C) 1998-2004 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* vm_assert.h --
*
* The basic assertion facility for all VMware code.
*
* For proper use, see
* http://vmweb.vmware.com/~mts/WebSite/guide/programming/asserts.html
*/
#ifndef _VM_ASSERT_H_
#define _VM_ASSERT_H_
#define INCLUDE_ALLOW_USERLEVEL
#define INCLUDE_ALLOW_MODULE
#define INCLUDE_ALLOW_VMMON
#define INCLUDE_ALLOW_VMKERNEL
#define INCLUDE_ALLOW_VMKDRIVERS
#define INCLUDE_ALLOW_VMK_MODULE
#define INCLUDE_ALLOW_DISTRIBUTE
#define INCLUDE_ALLOW_VMCORE
#define INCLUDE_ALLOW_VMIROM
#include "includeCheck.h"
// XXX not necessary except some places include vm_assert.h improperly
#include "vm_basic_types.h"
#include "vm_basic_defs.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
* Some bits of vmcore are used in VMKernel code and cannot have
* the VMKERNEL define due to other header dependencies.
*/
#if defined(VMKERNEL) && !defined(VMKPANIC)
#define VMKPANIC 1
#endif
/*
* Internal macros, functions, and strings
*
* The monitor wants to save space at call sites, so it has specialized
* functions for each situation. User level wants to save on implementation
* so it uses generic functions.
*/
#if !defined VMM || defined MONITOR_APP // {
#if defined VMKPANIC
// vmkernel Panic() function does not want a trailing newline.
#define _ASSERT_PANIC(name) \
Panic(_##name##Fmt, __FILE__, __LINE__)
#define _ASSERT_PANIC_BUG(bug, name) \
Panic(_##name##Fmt " bugNr=%d", __FILE__, __LINE__, bug)
#define _ASSERT_PANIC_NORETURN(name) \
Panic_NoReturn(_##name##Fmt, __FILE__, __LINE__)
#else /* !VMKPANIC */
#define _ASSERT_PANIC(name) \
Panic(_##name##Fmt "\n", __FILE__, __LINE__)
#define _ASSERT_PANIC_BUG(bug, name) \
Panic(_##name##Fmt " bugNr=%d\n", __FILE__, __LINE__, bug)
#endif /* VMKPANIC */
#ifdef VMX86_DEVEL
# define _ASSERT_WARNING(name) \
Warning(_##name##Fmt "\n", __FILE__, __LINE__)
#else
# define _ASSERT_WARNING(name) \
Log(_##name##Fmt "\n", __FILE__, __LINE__)
#endif
#endif // }
// These don't have a trailing newline so a bug number can be tacked on.
#define _AssertPanicFmt "PANIC %s:%d"
#define _AssertAssertFmt "ASSERT %s:%d"
#define _AssertNotImplementedFmt "NOT_IMPLEMENTED %s:%d"
#define _AssertNotReachedFmt "NOT_REACHED %s:%d"
#define _AssertMemAllocFmt "MEM_ALLOC %s:%d"
#define _AssertNotTestedFmt "NOT_TESTED %s:%d"
/*
* Panic and log functions
*/
void Log(const char *fmt, ...) PRINTF_DECL(1, 2);
void Warning(const char *fmt, ...) PRINTF_DECL(1, 2);
#if defined VMKPANIC
void Panic_SaveRegs(void);
#ifdef VMX86_DEBUG
void Panic_NoSave(const char *fmt, ...) PRINTF_DECL(1, 2);
#else
NORETURN void Panic_NoSave(const char *fmt, ...) PRINTF_DECL(1, 2);
#endif
NORETURN void Panic_NoSaveNoReturn(const char *fmt, ...)
PRINTF_DECL(1, 2);
#define Panic(fmt...) do { \
Panic_SaveRegs(); \
Panic_NoSave(fmt); \
} while(0)
#define Panic_NoReturn(fmt...) do { \
Panic_SaveRegs(); \
Panic_NoSaveNoReturn(fmt); \
} while(0)
#else
NORETURN void Panic(const char *fmt, ...) PRINTF_DECL(1, 2);
#endif
void LogThrottled(uint32 *count, const char *fmt, ...)
PRINTF_DECL(2, 3);
void WarningThrottled(uint32 *count, const char *fmt, ...)
PRINTF_DECL(2, 3);
/*
* Stress testing: redefine ASSERT_IFNOT() to taste
*/
#ifndef ASSERT_IFNOT
/*
* PR 271512: When compiling with gcc, catch assignments inside an ASSERT.
*
* 'UNLIKELY' is defined with __builtin_expect, which does not warn when
* passed an assignment (gcc bug 36050). To get around this, we put 'cond'
* in an 'if' statement and make sure it never gets executed by putting
* that inside of 'if (0)'. We use gcc's statement expression syntax to
* make ASSERT an expression because some code uses it that way.
*
* Since statement expression syntax is a gcc extension and since it's
* not clear if this is a problem with other compilers, the ASSERT
* definition was not changed for them. Using a bare 'cond' with the
* ternary operator may provide a solution.
*/
#ifdef __GNUC__
#define ASSERT_IFNOT(cond, panic) \
({if (UNLIKELY(!(cond))) { panic; if (0) { if (cond) { ; } } } (void)0;})
#else
#define ASSERT_IFNOT(cond, panic) \
(UNLIKELY(!(cond)) ? (panic) : (void)0)
#endif
#endif
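/*
* Example of the bug class this catches (illustrative sketch): with the
* gcc definition above, a mistyped assignment such as
*
*    ASSERT(refCount = 0);   // meant '=='
*
* is replicated inside the dead 'if (0) { if (cond) ... }' branch, where
* gcc's -Wparentheses can flag it even though UNLIKELY()'s
* __builtin_expect would swallow the warning.
*/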
/*
* Assert, panic, and log macros
*
* Some of these are redefined below under !VMX86_DEBUG.
* ASSERT() is special cased because of interaction with Windows DDK.
*/
#if defined VMX86_DEBUG || defined ASSERT_ALWAYS_AVAILABLE
#undef ASSERT
#define ASSERT(cond) \
ASSERT_IFNOT(cond, _ASSERT_PANIC(AssertAssert))
#endif
#define ASSERT_BUG(bug, cond) \
ASSERT_IFNOT(cond, _ASSERT_PANIC_BUG(bug, AssertAssert))
#define ASSERT_BUG_DEBUGONLY(bug, cond) ASSERT_BUG(bug, cond)
#define PANIC() _ASSERT_PANIC(AssertPanic)
#define PANIC_BUG(bug) _ASSERT_PANIC_BUG(bug, AssertPanic)
#ifdef VMKPANIC
#define ASSERT_OR_IN_PANIC(cond) ASSERT((cond) || Panic_IsSystemInPanic())
#endif
#define ASSERT_NOT_IMPLEMENTED(cond) \
ASSERT_IFNOT(cond, NOT_IMPLEMENTED())
#define ASSERT_NOT_IMPLEMENTED_BUG(bug, cond) \
ASSERT_IFNOT(cond, NOT_IMPLEMENTED_BUG(bug))
#if defined VMKPANIC && defined VMX86_DEBUG
#define NOT_IMPLEMENTED() _ASSERT_PANIC_NORETURN(AssertNotImplemented)
#else
#define NOT_IMPLEMENTED() _ASSERT_PANIC(AssertNotImplemented)
#endif
#define NOT_IMPLEMENTED_BUG(bug) _ASSERT_PANIC_BUG(bug, AssertNotImplemented)
#if defined VMKPANIC && defined VMX86_DEBUG
#define NOT_REACHED() _ASSERT_PANIC_NORETURN(AssertNotReached)
#else
#define NOT_REACHED() _ASSERT_PANIC(AssertNotReached)
#endif
#define NOT_REACHED_BUG(bug) _ASSERT_PANIC_BUG(bug, AssertNotReached)
#define ASSERT_MEM_ALLOC(cond) \
ASSERT_IFNOT(cond, _ASSERT_PANIC(AssertMemAlloc))
#ifdef VMX86_DEVEL
#define ASSERT_DEVEL(cond) ASSERT(cond)
#else
#define ASSERT_DEVEL(cond) ((void) 0)
#endif
#define ASSERT_NO_INTERRUPTS() ASSERT(!INTERRUPTS_ENABLED())
#define ASSERT_HAS_INTERRUPTS() ASSERT(INTERRUPTS_ENABLED())
#define NOT_TESTED() _ASSERT_WARNING(AssertNotTested)
#define ASSERT_NOT_TESTED(cond) (UNLIKELY(!(cond)) ? NOT_TESTED() : (void)0)
#define NOT_TESTED_ONCE() DO_ONCE(NOT_TESTED())
#define NOT_TESTED_1024() \
do { \
static uint16 count = 0; \
if (UNLIKELY(count == 0)) { NOT_TESTED(); } \
count = (count + 1) & 1023; \
} while (0)
#define LOG_ONCE(_s) DO_ONCE(Log _s)
#ifdef VMX86_DEVEL
#define DEPRECATED(_fix) DO_ONCE( \
Warning("%s:%d: %s is DEPRECATED; %s\n", \
__FILE__, __LINE__, __FUNCTION__, \
_fix))
#else
#define DEPRECATED(_fix) do {} while (0)
#endif
/*
* Redefine macros that are only in debug versions
*/
#if !defined VMX86_DEBUG && !defined ASSERT_ALWAYS_AVAILABLE // {
#undef ASSERT
#define ASSERT(cond) ((void) 0)
#undef ASSERT_BUG_DEBUGONLY
#define ASSERT_BUG_DEBUGONLY(bug, cond) ((void) 0)
#undef ASSERT_LENGTH
#define ASSERT_LENGTH(real, expected) ((void) 0)
/*
* Expand NOT_REACHED() as appropriate for each situation.
*
* Mainly, we want the compiler to infer the same control-flow
* information as it would from Panic(). Otherwise, different
* compilation options will lead to different control-flow-derived
* errors, causing some make targets to fail while others succeed.
*
* VC++ has the __assume() built-in function which we don't trust
* (see bug 43485); gcc has no such construct; we just panic in
* userlevel code. The monitor doesn't want to pay the size penalty
* (measured at 212 bytes for the release vmm for a minimal infinite
* loop; panic would cost even more) so it does without and lives
* with the inconsistency.
*/
#ifdef VMM
#undef NOT_REACHED
#define NOT_REACHED() ((void) 0)
#else
// keep debug definition
#endif
#undef ASSERT_LOG_UNEXPECTED
#define ASSERT_LOG_UNEXPECTED(bug, cond) ((void) 0)
#undef LOG_UNEXPECTED
#define LOG_UNEXPECTED(bug) ((void) 0)
#undef ASSERT_NOT_TESTED
#define ASSERT_NOT_TESTED(cond) ((void) 0)
#undef NOT_TESTED
#define NOT_TESTED() ((void) 0)
#undef NOT_TESTED_ONCE
#define NOT_TESTED_ONCE() ((void) 0)
#undef NOT_TESTED_1024
#define NOT_TESTED_1024() ((void) 0)
#endif // !VMX86_DEBUG }
/*
* Compile-time assertions.
*
* ASSERT_ON_COMPILE does not use the common
* switch (0) { case 0: case (e): ; } trick because some compilers (e.g. MSVC)
* generate code for it.
*
* The implementation uses both enum and typedef because the typedef alone is
* insufficient; gcc allows arrays to be declared with non-constant expressions
* (even in typedefs, where it makes no sense).
*
* NOTE: if GCC ever changes so that it ignores unused types altogether, this
* assert might not fire! We explicitly mark it as unused because GCC 4.8+
* uses -Wunused-local-typedefs as part of -Wall, which means the typedef will
* generate a warning.
*/
#define ASSERT_ON_COMPILE(e) \
do { \
enum { AssertOnCompileMisused = ((e) ? 1 : -1) }; \
UNUSED_TYPE(typedef char AssertOnCompileFailed[AssertOnCompileMisused]); \
} while (0)
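/*
* Example inside a function body (illustrative sketch):
*
*    ASSERT_ON_COMPILE(sizeof(uint32) == 4);   // fails to compile if false
*/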
/*
* To put an ASSERT_ON_COMPILE() outside a function, wrap it
* in MY_ASSERTS(). The first parameter must be unique in
* each .c file where it appears. For example,
*
* MY_ASSERTS(FS3_INT,
* ASSERT_ON_COMPILE(sizeof(FS3_DiskLock) == 128);
* ASSERT_ON_COMPILE(sizeof(FS3_DiskLockReserved) == DISK_BLOCK_SIZE);
* ASSERT_ON_COMPILE(sizeof(FS3_DiskBlock) == DISK_BLOCK_SIZE);
* ASSERT_ON_COMPILE(sizeof(Hardware_DMIUUID) == 16);
* )
*
* Caution: ASSERT() within MY_ASSERTS() is silently ignored.
* The same goes for anything else not evaluated at compile time.
*/
#define MY_ASSERTS(name, assertions) \
static INLINE void name(void) { \
assertions \
}
#ifdef __cplusplus
} /* extern "C" */
#endif
#endif /* ifndef _VM_ASSERT_H_ */
vmmemctl-only/shared/backdoor_types.h 0000444 0000000 0000000 00000006727 12275350062 017022 0 ustar root root /*********************************************************
* Copyright (C) 1999 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* backdoor_types.h --
*
* Type definitions for backdoor interaction code.
*/
#ifndef _BACKDOOR_TYPES_H_
#define _BACKDOOR_TYPES_H_
#ifndef VM_I386
#error The backdoor protocol is only supported on x86 architectures.
#endif
/*
* These #defines are intended for defining register structs as part of
* existing named unions. If the union should encapsulate the register
* (and nothing else), use DECLARE_REG_NAMED_STRUCT defined below.
*/
#define DECLARE_REG32_STRUCT \
struct { \
uint16 low; \
uint16 high; \
} halfs; \
uint32 word
#define DECLARE_REG64_STRUCT \
DECLARE_REG32_STRUCT; \
struct { \
uint32 low; \
uint32 high; \
} words; \
uint64 quad
#ifndef VM_X86_64
#define DECLARE_REG_STRUCT DECLARE_REG32_STRUCT
#else
#define DECLARE_REG_STRUCT DECLARE_REG64_STRUCT
#endif
#define DECLARE_REG_NAMED_STRUCT(_r) \
union { DECLARE_REG_STRUCT; } _r
/*
* Some of the registers are expressed by semantic name, because if they were
* expressed as register structs declared above, we could only address them
* by fixed size (half-word, word, quad, etc.) instead of by varying size
* (size_t, uintptr_t).
*
* To be cleaner, these registers are expressed ONLY by semantic name,
* rather than by a union of the semantic name and a register struct.
*/
typedef union {
struct {
DECLARE_REG_NAMED_STRUCT(ax);
size_t size; /* Register bx. */
DECLARE_REG_NAMED_STRUCT(cx);
DECLARE_REG_NAMED_STRUCT(dx);
DECLARE_REG_NAMED_STRUCT(si);
DECLARE_REG_NAMED_STRUCT(di);
} in;
struct {
DECLARE_REG_NAMED_STRUCT(ax);
DECLARE_REG_NAMED_STRUCT(bx);
DECLARE_REG_NAMED_STRUCT(cx);
DECLARE_REG_NAMED_STRUCT(dx);
DECLARE_REG_NAMED_STRUCT(si);
DECLARE_REG_NAMED_STRUCT(di);
} out;
} Backdoor_proto;
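/*
* Example of driving the low-bandwidth protocol (illustrative sketch;
* BDOOR_MAGIC and BDOOR_CMD_GETSCREENSIZE are assumed to come from
* backdoor_def.h, and Backdoor() from backdoor.h):
*
*    Backdoor_proto bp;
*
*    bp.in.ax.word = BDOOR_MAGIC;
*    bp.in.cx.halfs.low = BDOOR_CMD_GETSCREENSIZE;
*    Backdoor(&bp);
*    // The reply is now in bp.out.ax.word.
*/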
typedef union {
struct {
DECLARE_REG_NAMED_STRUCT(ax);
DECLARE_REG_NAMED_STRUCT(bx);
size_t size; /* Register cx. */
DECLARE_REG_NAMED_STRUCT(dx);
uintptr_t srcAddr; /* Register si. */
uintptr_t dstAddr; /* Register di. */
DECLARE_REG_NAMED_STRUCT(bp);
} in;
struct {
DECLARE_REG_NAMED_STRUCT(ax);
DECLARE_REG_NAMED_STRUCT(bx);
DECLARE_REG_NAMED_STRUCT(cx);
DECLARE_REG_NAMED_STRUCT(dx);
DECLARE_REG_NAMED_STRUCT(si);
DECLARE_REG_NAMED_STRUCT(di);
DECLARE_REG_NAMED_STRUCT(bp);
} out;
} Backdoor_proto_hb;
MY_ASSERTS(BACKDOOR_STRUCT_SIZES,
ASSERT_ON_COMPILE(sizeof(Backdoor_proto) == 6 * sizeof(uintptr_t));
ASSERT_ON_COMPILE(sizeof(Backdoor_proto_hb) == 7 * sizeof(uintptr_t));
)
#undef DECLARE_REG_STRUCT
#endif /* _BACKDOOR_TYPES_H_ */
vmmemctl-only/shared/compat_mm.h 0000444 0000000 0000000 00000006630 12275350061 015756 0 ustar root root /*********************************************************
* Copyright (C) 2002 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef __COMPAT_MM_H__
# define __COMPAT_MM_H__
#include <linux/mm.h>
/* The get_page() API appeared in 2.3.7 --hpreg */
/* Sometime during development it became a function instead of a macro --petr */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0) && !defined(get_page)
# define get_page(_page) atomic_inc(&(_page)->count)
/* The __free_page() API is exported in 2.1.67 --hpreg */
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 1, 67)
# define put_page __free_page
# else
# include "compat_page.h"
# define page_to_phys(_page) (page_to_pfn(_page) << PAGE_SHIFT)
# define put_page(_page) free_page(page_to_phys(_page))
# endif
#endif
/* page_count() is a 2.4.0 invention. Unfortunately it is unavailable in some
* RedHat kernels (for example 2.4.21-4-RHEL3). */
/* It has been a function since 2.6.0, and hopefully RedHat will not play
* silly games with mm_inline.h again... */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) && !defined(page_count)
# define page_count(page) atomic_read(&(page)->count)
#endif
/* 2.2.x uses 0 instead of some define */
#ifndef NOPAGE_SIGBUS
#define NOPAGE_SIGBUS (0)
#endif
/* 2.2.x does not have HIGHMEM support */
#ifndef GFP_HIGHUSER
#define GFP_HIGHUSER (GFP_USER)
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0)
#include "compat_page.h"
static inline struct page * alloc_pages(unsigned int gfp_mask, unsigned int order)
{
unsigned long addr;
addr = __get_free_pages(gfp_mask, order);
if (!addr) {
return NULL;
}
return virt_to_page(addr);
}
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
#endif
/*
* In 2.4.14, the logic behind the UnlockPage macro was moved to the
* unlock_page() function. Later (in 2.5.12), the UnlockPage macro was removed
* altogether, and nowadays everyone uses unlock_page().
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 14)
#define compat_unlock_page(page) UnlockPage(page)
#else
#define compat_unlock_page(page) unlock_page(page)
#endif
/*
* In 2.4.10, vmtruncate was changed from returning void to returning int.
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 10)
#define compat_vmtruncate(inode, size) \
({ \
int result = 0; \
vmtruncate(inode, size); \
result; \
})
#else
#define compat_vmtruncate(inode, size) vmtruncate(inode, size)
#endif
#endif /* __COMPAT_MM_H__ */
vmmemctl-only/shared/compat_pci.h 0000444 0000000 0000000 00000005152 12275350061 016116 0 ustar root root /*********************************************************
* Copyright (C) 1999 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* compat_pci.h: PCI compatibility wrappers.
*/
#ifndef __COMPAT_PCI_H__
#define __COMPAT_PCI_H__
#include "compat_ioport.h"
#include <linux/pci.h>
#ifndef DMA_BIT_MASK
# define DMA_BIT_MASK(n) DMA_##n##BIT_MASK
#endif
/*
* Power Management related compat wrappers.
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 10)
# define compat_pci_save_state(pdev) pci_save_state((pdev), NULL)
# define compat_pci_restore_state(pdev) pci_restore_state((pdev), NULL)
#else
# define compat_pci_save_state(pdev) pci_save_state((pdev))
# define compat_pci_restore_state(pdev) pci_restore_state((pdev))
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 11)
# define pm_message_t u32
# define compat_pci_choose_state(pdev, state) (state)
# define PCI_D0 0
# define PCI_D3hot 3
#else
# define compat_pci_choose_state(pdev, state) pci_choose_state((pdev), (state))
#endif
/* 2.6.14 changed the PCI shutdown callback */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 14)
# define COMPAT_PCI_SHUTDOWN(func) .driver = { .shutdown = (func), }
# define COMPAT_PCI_DECLARE_SHUTDOWN(func, var) (func)(struct device *(var))
# define COMPAT_PCI_TO_DEV(dev) (to_pci_dev(dev))
#else
# define COMPAT_PCI_SHUTDOWN(func) .shutdown = (func)
# define COMPAT_PCI_DECLARE_SHUTDOWN(func, var) (func)(struct pci_dev *(var))
# define COMPAT_PCI_TO_DEV(dev) (dev)
#endif
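/*
* Example of declaring a shutdown handler portably (illustrative sketch;
* 'MyShutdown' and 'myDriver' are placeholders):
*
*    static void COMPAT_PCI_DECLARE_SHUTDOWN(MyShutdown, dev);
*
*    static struct pci_driver myDriver = {
*       .name = "example",
*       COMPAT_PCI_SHUTDOWN(MyShutdown),
*    };
*/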
/* 2.6.26 introduced the device_set_wakeup_enable() function */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
# define compat_device_set_wakeup_enable(dev, val) do {} while(0)
#else
# define compat_device_set_wakeup_enable(dev, val) \
device_set_wakeup_enable(dev, val)
#endif
#endif /* __COMPAT_PCI_H__ */
vmmemctl-only/shared/vmci_kernel_if.h 0000444 0000000 0000000 00000042654 12275350062 016765 0 ustar root root /*********************************************************
* Copyright (C) 2006-2011 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* vmci_kernel_if.h --
*
* This file defines helper functions for VMCI host _and_ guest
* kernel code. It must work for the Windows, Mac OS, vmkernel, Linux and
* Solaris kernels, using defines where necessary.
*/
#ifndef _VMCI_KERNEL_IF_H_
#define _VMCI_KERNEL_IF_H_
#if !defined(linux) && !defined(_WIN32) && !defined(__APPLE__) && \
!defined(VMKERNEL) && !defined(SOLARIS)
# error "Platform not supported."
#endif
#if defined(_WIN32)
# include <ntddk.h>
#endif
#if defined(linux) && !defined(VMKERNEL)
# include "driver-config.h"
# include "compat_cred.h"
# include "compat_module.h"
# include "compat_semaphore.h"
# include "compat_spinlock.h"
# include "compat_version.h"
# include <linux/wait.h>
#endif // linux
#ifdef __APPLE__
# include <IOKit/IOLib.h>
# include <mach/task.h>
# include <mach/semaphore.h>
# include <sys/kauth.h>
#endif
#ifdef VMKERNEL
# include "splock.h"
# include "semaphore_ext.h"
# include "vmkapi.h"
# include "world_dist.h"
#endif
#ifdef SOLARIS
# include <sys/ddi.h>
# include <sys/kmem.h>
# include <sys/mutex.h>
# include <sys/poll.h>
# include <sys/semaphore.h>
# include <sys/sunddi.h>
# include <sys/types.h>
#endif
#include "vm_basic_types.h"
#include "vmci_defs.h"
#if defined(VMKERNEL)
# include "list.h"
#else
# include "dbllnklst.h"
#endif
/* Flags for specifying memory type. */
#define VMCI_MEMORY_NORMAL 0x0
#define VMCI_MEMORY_ATOMIC 0x1
#define VMCI_MEMORY_NONPAGED 0x2
/* Platform specific type definitions. */
#if defined(VMKERNEL)
# define VMCI_EXPORT_SYMBOL(_SYMBOL) VMK_MODULE_EXPORT_SYMBOL(_SYMBOL);
#elif defined(linux)
# define VMCI_EXPORT_SYMBOL(_symbol) EXPORT_SYMBOL(_symbol);
#elif defined(__APPLE__)
# define VMCI_EXPORT_SYMBOL(_symbol) __attribute__((visibility("default")))
#else
# define VMCI_EXPORT_SYMBOL(_symbol)
#endif
#if defined(VMKERNEL)
typedef SP_SpinLock VMCILock;
typedef SP_IRQL VMCILockFlags;
typedef Semaphore VMCIEvent;
typedef Semaphore VMCIMutex;
typedef World_ID VMCIHostVmID;
typedef uint32 VMCIHostUser;
typedef PPN *VMCIQPGuestMem;
#elif defined(linux)
typedef spinlock_t VMCILock;
typedef unsigned long VMCILockFlags;
typedef wait_queue_head_t VMCIEvent;
typedef struct semaphore VMCIMutex;
typedef PPN *VMCIPpnList; /* List of PPNs in produce/consume queue. */
typedef uid_t VMCIHostUser;
typedef VA64 VMCIQPGuestMem;
#elif defined(__APPLE__)
typedef IOLock *VMCILock;
typedef unsigned long VMCILockFlags;
typedef struct {
IOLock *lock;
DblLnkLst_Links waiters;
int buffered;
} VMCIEvent;
typedef IOLock *VMCIMutex;
typedef void *VMCIPpnList; /* Actually a pointer to the C++ Object IOMemoryDescriptor */
typedef uid_t VMCIHostUser;
typedef VA64 *VMCIQPGuestMem;
#elif defined(_WIN32)
typedef KSPIN_LOCK VMCILock;
typedef KIRQL VMCILockFlags;
typedef KEVENT VMCIEvent;
typedef FAST_MUTEX VMCIMutex;
typedef PMDL VMCIPpnList; /* MDL to map the produce/consume queue. */
typedef PSID VMCIHostUser;
typedef VA64 *VMCIQPGuestMem;
#elif defined(SOLARIS)
typedef kmutex_t VMCILock;
typedef unsigned long VMCILockFlags;
typedef ksema_t VMCIEvent;
typedef kmutex_t VMCIMutex;
typedef PPN *VMCIPpnList; /* List of PPNs in produce/consume queue. */
typedef uid_t VMCIHostUser;
typedef VA64 VMCIQPGuestMem;
#endif // VMKERNEL
/* Callback needed for correctly waiting on events. */
typedef int (*VMCIEventReleaseCB)(void *clientData);
/*
* Internal locking dependencies within VMCI:
* * CONTEXTFIRE < CONTEXT, CONTEXTLIST, EVENT, HASHTABLE
* * DOORBELL < HASHTABLE
* * QPHIBERNATE < EVENT
*/
#ifdef VMKERNEL
typedef Lock_Rank VMCILockRank;
typedef SemaRank VMCISemaRank;
#define VMCI_SEMA_RANK_QPHEADER (SEMA_RANK_FS - 1)
#define VMCI_LOCK_RANK_MAX (MIN(SP_RANK_WAIT, \
SP_RANK_HEAPLOCK_DYNAMIC) - 1)
#else
typedef unsigned long VMCILockRank;
typedef unsigned long VMCISemaRank;
#define VMCI_LOCK_RANK_MAX 0x0fff
#define VMCI_SEMA_RANK_QPHEADER 0x0fff
#endif // VMKERNEL
#define VMCI_LOCK_RANK_CONTEXT VMCI_LOCK_RANK_MAX
#define VMCI_LOCK_RANK_CONTEXTLIST VMCI_LOCK_RANK_MAX
#define VMCI_LOCK_RANK_DATAGRAMVMK VMCI_LOCK_RANK_MAX
#define VMCI_LOCK_RANK_EVENT VMCI_LOCK_RANK_MAX
#define VMCI_LOCK_RANK_HASHTABLE VMCI_LOCK_RANK_MAX
#define VMCI_LOCK_RANK_RESOURCE VMCI_LOCK_RANK_MAX
#define VMCI_LOCK_RANK_QPHEADER VMCI_LOCK_RANK_MAX
#define VMCI_LOCK_RANK_DOORBELL (VMCI_LOCK_RANK_HASHTABLE - 1)
#define VMCI_LOCK_RANK_CONTEXTFIRE (MIN(VMCI_LOCK_RANK_CONTEXT, \
MIN(VMCI_LOCK_RANK_CONTEXTLIST, \
MIN(VMCI_LOCK_RANK_EVENT, \
VMCI_LOCK_RANK_HASHTABLE))) - 1)
#define VMCI_LOCK_RANK_QPHIBERNATE (VMCI_LOCK_RANK_EVENT - 1)
#define VMCI_LOCK_RANK_PACKET_QP (VMCI_LOCK_RANK_QPHEADER - 1)
//#define VMCI_LOCK_RANK_PACKET_QP 0xffd /* For vVol */
#define VMCI_SEMA_RANK_QUEUEPAIRLIST (VMCI_SEMA_RANK_QPHEADER - 1)
#define VMCI_SEMA_RANK_GUESTMEM (VMCI_SEMA_RANK_QUEUEPAIRLIST - 1)
/*
* Host specific struct used for signalling.
*/
typedef struct VMCIHost {
#if defined(VMKERNEL)
World_ID vmmWorldID[2]; /*
* First one is the active one and the second
* one is shadow world during FSR.
*/
#elif defined(linux)
wait_queue_head_t waitQueue;
#elif defined(__APPLE__)
struct Socket *socket; /* vmci Socket object on Mac OS. */
#elif defined(_WIN32)
KEVENT *callEvent; /* Ptr to userlevel event used when signalling
* new pending guestcalls in kernel.
*/
#elif defined(SOLARIS)
struct pollhead pollhead; /* Per datagram handle pollhead structure to
* be treated as a black-box. None of its
* fields should be referenced.
*/
#endif
} VMCIHost;
/*
* Guest device port I/O.
*/
#if defined(linux)
typedef unsigned short int VMCIIoPort;
typedef int VMCIIoHandle;
#elif defined(_WIN32)
typedef PUCHAR VMCIIoPort;
typedef int VMCIIoHandle;
#elif defined(SOLARIS)
typedef uint8_t * VMCIIoPort;
typedef ddi_acc_handle_t VMCIIoHandle;
#elif defined(__APPLE__)
typedef unsigned short int VMCIIoPort;
typedef void *VMCIIoHandle;
#endif // __APPLE__
void VMCI_ReadPortBytes(VMCIIoHandle handle, VMCIIoPort port, uint8 *buffer,
size_t bufferLength);
int VMCI_InitLock(VMCILock *lock, char *name, VMCILockRank rank);
void VMCI_CleanupLock(VMCILock *lock);
void VMCI_GrabLock(VMCILock *lock, VMCILockFlags *flags);
void VMCI_ReleaseLock(VMCILock *lock, VMCILockFlags flags);
void VMCI_GrabLock_BH(VMCILock *lock, VMCILockFlags *flags);
void VMCI_ReleaseLock_BH(VMCILock *lock, VMCILockFlags flags);
void VMCIHost_InitContext(VMCIHost *hostContext, uintptr_t eventHnd);
void VMCIHost_ReleaseContext(VMCIHost *hostContext);
void VMCIHost_SignalCall(VMCIHost *hostContext);
void VMCIHost_ClearCall(VMCIHost *hostContext);
Bool VMCIHost_WaitForCallLocked(VMCIHost *hostContext,
VMCILock *lock,
VMCILockFlags *flags,
Bool useBH);
#ifdef VMKERNEL
int VMCIHost_ContextToHostVmID(VMCIHost *hostContext, VMCIHostVmID *hostVmID);
int VMCIHost_ContextHasUuid(VMCIHost *hostContext, const char *uuid);
void VMCIHost_SetActiveHnd(VMCIHost *hostContext, uintptr_t eventHnd);
Bool VMCIHost_RemoveHnd(VMCIHost *hostContext, uintptr_t eventHnd);
Bool VMCIHost_IsActiveHnd(VMCIHost *hostContext, uintptr_t eventHnd);
void VMCIHost_SetInactiveHnd(VMCIHost *hostContext, uintptr_t eventHnd);
uint32 VMCIHost_NumHnds(VMCIHost *hostContext);
uintptr_t VMCIHost_GetActiveHnd(VMCIHost *hostContext);
void VMCIHost_SignalBitmap(VMCIHost *hostContext);
#endif
#if defined(_WIN32)
/*
* On Windows, Driver Verifier will panic() if we leak memory when we are
* unloaded. It dumps the leaked blocks for us along with callsites, which
* it handily tracks, but if we embed ExAllocate() inside a function, then
* the callsite is useless. So make this a macro on this platform only.
*/
# define VMCI_AllocKernelMem(_sz, _f) \
ExAllocatePoolWithTag((((_f) & VMCI_MEMORY_NONPAGED) ? \
NonPagedPool : PagedPool), \
(_sz), 'MMTC')
#else // _WIN32
void *VMCI_AllocKernelMem(size_t size, int flags);
#endif // _WIN32
void VMCI_FreeKernelMem(void *ptr, size_t size);
int VMCI_CopyToUser(VA64 dst, const void *src, size_t len);
Bool VMCIWellKnownID_AllowMap(VMCIId wellKnownID,
VMCIPrivilegeFlags privFlags);
int VMCIHost_CompareUser(VMCIHostUser *user1, VMCIHostUser *user2);
void VMCI_CreateEvent(VMCIEvent *event);
void VMCI_DestroyEvent(VMCIEvent *event);
void VMCI_SignalEvent(VMCIEvent *event);
void VMCI_WaitOnEvent(VMCIEvent *event, VMCIEventReleaseCB releaseCB,
void *clientData);
#if (defined(__APPLE__) || defined(__linux__) || defined(_WIN32)) && !defined(VMKERNEL)
Bool VMCI_WaitOnEventInterruptible(VMCIEvent *event,
VMCIEventReleaseCB releaseCB,
void *clientData);
#endif
#if !defined(VMKERNEL) && (defined(__linux__) || defined(_WIN32) || \
defined(__APPLE__) || defined(SOLARIS))
int VMCI_CopyFromUser(void *dst, VA64 src, size_t len);
#endif
typedef void (VMCIWorkFn)(void *data);
Bool VMCI_CanScheduleDelayedWork(void);
int VMCI_ScheduleDelayedWork(VMCIWorkFn *workFn, void *data);
int VMCIMutex_Init(VMCIMutex *mutex, char *name, VMCILockRank rank);
void VMCIMutex_Destroy(VMCIMutex *mutex);
void VMCIMutex_Acquire(VMCIMutex *mutex);
void VMCIMutex_Release(VMCIMutex *mutex);
#if defined(SOLARIS) || defined(_WIN32) || defined(__APPLE__)
int VMCIKernelIf_Init(void);
void VMCIKernelIf_Exit(void);
#if defined(_WIN32)
void VMCIKernelIf_DrainDelayedWork(void);
#endif // _WIN32
#endif // SOLARIS || _WIN32 || __APPLE__ || VMKERNEL
#if !defined(VMKERNEL) && (defined(__linux__) || defined(_WIN32) || \
defined(SOLARIS) || defined(__APPLE__))
void *VMCI_AllocQueue(uint64 size, uint32 flags);
void VMCI_FreeQueue(void *q, uint64 size);
typedef struct PPNSet {
uint64 numProducePages;
uint64 numConsumePages;
VMCIPpnList producePPNs;
VMCIPpnList consumePPNs;
Bool initialized;
} PPNSet;
int VMCI_AllocPPNSet(void *produceQ, uint64 numProducePages, void *consumeQ,
uint64 numConsumePages, PPNSet *ppnSet);
void VMCI_FreePPNSet(PPNSet *ppnSet);
int VMCI_PopulatePPNList(uint8 *callBuf, const PPNSet *ppnSet);
#endif
struct VMCIQueue;
struct PageStoreAttachInfo;
struct VMCIQueue *VMCIHost_AllocQueue(uint64 queueSize);
void VMCIHost_FreeQueue(struct VMCIQueue *queue, uint64 queueSize);
#if defined(VMKERNEL)
typedef World_Handle *VMCIGuestMemID;
#define INVALID_VMCI_GUEST_MEM_ID NULL
#else
typedef uint32 VMCIGuestMemID;
#define INVALID_VMCI_GUEST_MEM_ID 0
#endif
#if defined(VMKERNEL) || defined(__linux__) || defined(_WIN32) || \
defined(__APPLE__)
struct QueuePairPageStore;
int VMCIHost_RegisterUserMemory(unsigned int index,
struct QueuePairPageStore *pageStore,
struct VMCIQueue *produceQ,
struct VMCIQueue *consumeQ);
void VMCIHost_UnregisterUserMemory(unsigned int index,
struct VMCIQueue *produceQ,
struct VMCIQueue *consumeQ);
int VMCIHost_MapQueues(unsigned int index,
struct VMCIQueue *produceQ,
struct VMCIQueue *consumeQ,
uint32 flags);
int VMCIHost_UnmapQueues(unsigned int index,
VMCIGuestMemID gid,
struct VMCIQueue *produceQ,
struct VMCIQueue *consumeQ);
void VMCI_InitQueueMutex(struct VMCIQueue *produceQ,
struct VMCIQueue *consumeQ);
void VMCI_CleanupQueueMutex(struct VMCIQueue *produceQ,
struct VMCIQueue *consumeQ);
int VMCI_AcquireQueueMutex(struct VMCIQueue *queue, Bool canBlock);
void VMCI_ReleaseQueueMutex(struct VMCIQueue *queue);
#else // Below are the guest OS'es without host side support.
# define VMCI_InitQueueMutex(_pq, _cq)
# define VMCI_CleanupQueueMutex(_pq, _cq) do { } while (0)
# define VMCI_AcquireQueueMutex(_q, _cb) VMCI_SUCCESS
# define VMCI_ReleaseQueueMutex(_q) do { } while (0)
# define VMCIHost_RegisterUserMemory(_idx, _ps, _pq, _cq) VMCI_ERROR_UNAVAILABLE
# define VMCIHost_UnregisterUserMemory(_idx, _pq, _cq) do { } while (0)
# define VMCIHost_MapQueues(_idx, _pq, _cq, _f) VMCI_SUCCESS
# define VMCIHost_UnmapQueues(_idx, _gid, _pq, _cq) VMCI_SUCCESS
#endif
#if defined(VMKERNEL)
void VMCIHost_MarkQueuesAvailable(unsigned int index,
struct VMCIQueue *produceQ,
struct VMCIQueue *consumeQ);
void VMCIHost_MarkQueuesUnavailable(unsigned int index,
struct VMCIQueue *produceQ,
struct VMCIQueue *consumeQ);
#else
# define VMCIHost_MarkQueuesAvailable(_idx, _q, _p) do { } while (0)
# define VMCIHost_MarkQueuesUnavailable(_idx, _q, _p) do { } while(0)
#endif
#if defined(VMKERNEL) || defined(__linux__)
void VMCI_LockQueueHeader(struct VMCIQueue *queue);
void VMCI_UnlockQueueHeader(struct VMCIQueue *queue);
#else
# define VMCI_LockQueueHeader(_q) ASSERT_NOT_IMPLEMENTED(FALSE)
# define VMCI_UnlockQueueHeader(_q) ASSERT_NOT_IMPLEMENTED(FALSE)
#endif
#if (!defined(VMKERNEL) && defined(__linux__)) || defined(_WIN32) || \
defined(__APPLE__) || defined(SOLARIS)
int VMCIHost_GetUserMemory(unsigned int index,
VA64 produceUVA, VA64 consumeUVA,
struct VMCIQueue *produceQ,
struct VMCIQueue *consumeQ);
void VMCIHost_ReleaseUserMemory(unsigned int index,
struct VMCIQueue *produceQ,
struct VMCIQueue *consumeQ);
#else
# define VMCIHost_GetUserMemory(_idx, _puva, _cuva, _pq, _cq) VMCI_ERROR_UNAVAILABLE
# define VMCIHost_ReleaseUserMemory(_idx, _pq, _cq) ASSERT_NOT_IMPLEMENTED(FALSE)
#endif
#if defined(_WIN32)
Bool VMCI_EnqueueToDevNull(struct VMCIQueue *queue);
int VMCI_ConvertToLocalQueue(struct VMCIQueue *queueInfo,
struct VMCIQueue *otherQueueInfo,
uint64 size, Bool keepContent,
void **oldQueue);
void VMCI_RevertToNonLocalQueue(struct VMCIQueue *queueInfo,
void *nonLocalQueue, uint64 size);
void VMCI_FreeQueueBuffer(void *queue, uint64 size);
Bool VMCI_CanCreate(void);
#else // _WIN32
# define VMCI_EnqueueToDevNull(_q) FALSE
# define VMCI_ConvertToLocalQueue(_pq, _cq, _s, _oq, _kc) VMCI_ERROR_UNAVAILABLE
# define VMCI_RevertToNonLocalQueue(_q, _nlq, _s)
# define VMCI_FreeQueueBuffer(_q, _s)
# define VMCI_CanCreate() TRUE
#endif // !_WIN32
Bool VMCI_GuestPersonalityActive(void);
Bool VMCI_HostPersonalityActive(void);
#if defined(VMKERNEL)
typedef List_Links VMCIListItem;
typedef List_Links VMCIList;
# define VMCIList_Init(_l) List_Init(_l)
# define VMCIList_InitEntry(_e) List_InitElement(_e)
# define VMCIList_Empty(_l) List_IsEmpty(_l)
# define VMCIList_Insert(_e, _l) List_Insert(_e, LIST_ATREAR(_l))
# define VMCIList_Remove(_e) List_Remove(_e)
# define VMCIList_Scan(_cur, _l) LIST_FORALL(_l, _cur)
# define VMCIList_ScanSafe(_cur, _next, _l) LIST_FORALL_SAFE(_l, _cur, _next)
# define VMCIList_Entry(_elem, _type, _field) List_Entry(_elem, _type, _field)
# define VMCIList_First(_l) (VMCIList_Empty(_l)?NULL:List_First(_l))
#else
typedef DblLnkLst_Links VMCIListItem;
typedef DblLnkLst_Links VMCIList;
# define VMCIList_Init(_l) DblLnkLst_Init(_l)
# define VMCIList_InitEntry(_e) DblLnkLst_Init(_e)
# define VMCIList_Empty(_l) (!DblLnkLst_IsLinked(_l))
# define VMCIList_Insert(_e, _l) DblLnkLst_LinkLast(_l, _e)
# define VMCIList_Remove(_e) DblLnkLst_Unlink1(_e)
# define VMCIList_Scan(_cur, _l) DblLnkLst_ForEach(_cur, _l)
# define VMCIList_ScanSafe(_cur, _next, _l) DblLnkLst_ForEachSafe(_cur, _next, _l)
# define VMCIList_Entry(_elem, _type, _field) DblLnkLst_Container(_elem, _type, _field)
# define VMCIList_First(_l) (VMCIList_Empty(_l)?NULL:(_l)->next)
#endif
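/*
* Example of portable list iteration (illustrative sketch; 'Elem' is a
* hypothetical element type):
*
*    typedef struct Elem {
*       int value;
*       VMCIListItem node;
*    } Elem;
*
*    VMCIList list;
*    VMCIListItem *cur;
*
*    VMCIList_Init(&list);
*    // ... add elements with VMCIList_Insert(&e->node, &list) ...
*    VMCIList_Scan(cur, &list) {
*       Elem *e = VMCIList_Entry(cur, Elem, node);
*       // ... use e->value ...
*    }
*/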
#endif // _VMCI_KERNEL_IF_H_
vmmemctl-only/shared/driverLog.h 0000444 0000000 0000000 00000002232 12275350061 015731 0 ustar root root /*********************************************************
* Copyright (C) 2007 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* driverLog.h --
*
* Logging functions for Linux kernel modules.
*/
#ifndef __DRIVERLOG_H__
#define __DRIVERLOG_H__
/*
* The definitions of Warning(), Log(), and Panic() come from vm_assert.h for
* consistency.
*/
#include "vm_assert.h"
void DriverLog_Init(const char *prefix);
#endif /* __DRIVERLOG_H__ */
vmmemctl-only/shared/compat_kernel.h 0000444 0000000 0000000 00000002735 12275350061 016627 0 ustar root root /*********************************************************
* Copyright (C) 2004 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef __COMPAT_KERNEL_H__
# define __COMPAT_KERNEL_H__
#include <asm/unistd.h>
#include <linux/kernel.h>
/*
* container_of was introduced in 2.5.28 but it's easier to check like this.
*/
#ifndef container_of
#define container_of(ptr, type, member) ({ \
const typeof( ((type *)0)->member ) *__mptr = (ptr); \
(type *)( (char *)__mptr - offsetof(type,member) );})
#endif
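/*
* Example (illustrative sketch; 'struct Balloon' and 'nodePtr' are
* placeholders): recover the enclosing structure from a member pointer.
*
*    struct Balloon {
*       int nPages;
*       struct list_head node;
*    };
*
*    struct Balloon *b = container_of(nodePtr, struct Balloon, node);
*/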
/*
* vsnprintf became available in 2.4.10. For older kernels, just fall back on
* vsprintf.
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 10)
#define vsnprintf(str, size, fmt, args) vsprintf(str, fmt, args)
#endif
#endif /* __COMPAT_KERNEL_H__ */
vmmemctl-only/shared/vmciKernelAPI1.h 0000444 0000000 0000000 00000017514 12275350062 016520 0 ustar root root /*********************************************************
* Copyright (C) 2010 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* vmciKernelAPI1.h --
*
* Kernel API (v1) exported from the VMCI host and guest drivers.
*/
#ifndef __VMCI_KERNELAPI_1_H__
#define __VMCI_KERNELAPI_1_H__
#define INCLUDE_ALLOW_MODULE
#define INCLUDE_ALLOW_VMK_MODULE
#define INCLUDE_ALLOW_VMKERNEL
#include "includeCheck.h"
#include "vmci_defs.h"
#include "vmci_call_defs.h"
/* VMCI module namespace on vmkernel. */
#define MOD_VMCI_NAMESPACE "com.vmware.vmci"
/* Define version 1. */
#undef VMCI_KERNEL_API_VERSION
#define VMCI_KERNEL_API_VERSION_1 1
#define VMCI_KERNEL_API_VERSION VMCI_KERNEL_API_VERSION_1
/* Macros to operate on the driver version number. */
#define VMCI_MAJOR_VERSION(v) (((v) >> 16) & 0xffff)
#define VMCI_MINOR_VERSION(v) ((v) & 0xffff)
#if defined(_WIN32)
/* Path to callback object in object manager, for Windows only. */
#define VMCI_CALLBACK_OBJECT_PATH L"\\Callback\\VMCIDetachCB"
#endif // _WIN32
/* VMCI Device Usage API. */
#if defined(linux) && !defined(VMKERNEL)
#define vmci_device_get(_a, _b, _c, _d) 1
#define vmci_device_release(_x)
#else // !linux
typedef void (VMCI_DeviceShutdownFn)(void *deviceRegistration,
void *userData);
Bool vmci_device_get(uint32 *apiVersion,
VMCI_DeviceShutdownFn *deviceShutdownCB,
void *userData, void **deviceRegistration);
void vmci_device_release(void *deviceRegistration);
#endif // !linux
#if defined(_WIN32)
/* Called when the client is unloading, for Windows only. */
void vmci_exit(void);
#endif // _WIN32
/* VMCI Datagram API. */
int vmci_datagram_create_handle(uint32 resourceId, uint32 flags,
VMCIDatagramRecvCB recvCB, void *clientData,
VMCIHandle *outHandle);
int vmci_datagram_create_handle_priv(uint32 resourceID, uint32 flags,
VMCIPrivilegeFlags privFlags,
VMCIDatagramRecvCB recvCB,
void *clientData, VMCIHandle *outHandle);
int vmci_datagram_destroy_handle(VMCIHandle handle);
int vmci_datagram_send(VMCIDatagram *msg);
/* VMCI Utility API. */
VMCIId vmci_get_context_id(void);
#if defined(linux) && !defined(VMKERNEL)
/* Returned value is a bool, 0 for false, 1 for true. */
int vmci_is_context_owner(VMCIId contextID, uid_t uid);
#else // !linux || VMKERNEL
/* Returned value is a VMCI error code. */
int vmci_is_context_owner(VMCIId contextID, void *hostUser);
#endif // !linux || VMKERNEL
uint32 vmci_version(void);
int vmci_cid_2_host_vm_id(VMCIId contextID, void *hostVmID,
size_t hostVmIDLen);
/* VMCI Event API. */
typedef void (*VMCI_EventCB)(VMCIId subID, VMCI_EventData *ed,
void *clientData);
int vmci_event_subscribe(VMCI_Event event,
#if !defined(linux) || defined(VMKERNEL)
uint32 flags,
#endif // !linux || VMKERNEL
VMCI_EventCB callback,
void *callbackData, VMCIId *subID);
int vmci_event_unsubscribe(VMCIId subID);
/* VMCI Context API */
VMCIPrivilegeFlags vmci_context_get_priv_flags(VMCIId contextID);
/* VMCI Queue Pair API. */
typedef struct VMCIQPair VMCIQPair;
int vmci_qpair_alloc(VMCIQPair **qpair, VMCIHandle *handle,
uint64 produceQSize, uint64 consumeQSize, VMCIId peer,
uint32 flags, VMCIPrivilegeFlags privFlags);
int vmci_qpair_detach(VMCIQPair **qpair);
int vmci_qpair_get_produce_indexes(const VMCIQPair *qpair,
uint64 *producerTail, uint64 *consumerHead);
int vmci_qpair_get_consume_indexes(const VMCIQPair *qpair,
uint64 *consumerTail, uint64 *producerHead);
int64 vmci_qpair_produce_free_space(const VMCIQPair *qpair);
int64 vmci_qpair_produce_buf_ready(const VMCIQPair *qpair);
int64 vmci_qpair_consume_free_space(const VMCIQPair *qpair);
int64 vmci_qpair_consume_buf_ready(const VMCIQPair *qpair);
ssize_t vmci_qpair_enqueue(VMCIQPair *qpair, const void *buf, size_t bufSize,
int mode);
ssize_t vmci_qpair_dequeue(VMCIQPair *qpair, void *buf, size_t bufSize,
int mode);
ssize_t vmci_qpair_peek(VMCIQPair *qpair, void *buf, size_t bufSize, int mode);
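/*
* Example of a minimal attach/send/detach sequence (illustrative sketch;
* VMCI_NO_PRIVILEGE_FLAGS, VMCI_INVALID_HANDLE and VMCI_SUCCESS are assumed
* to come from vmci_defs.h, and 'peerId', 'data', 'dataLen' and the 0
* buffer mode are placeholders):
*
*    VMCIQPair *qp;
*    VMCIHandle handle = VMCI_INVALID_HANDLE;
*
*    if (vmci_qpair_alloc(&qp, &handle, 4096, 4096, peerId, 0,
*                         VMCI_NO_PRIVILEGE_FLAGS) == VMCI_SUCCESS) {
*       vmci_qpair_enqueue(qp, data, dataLen, 0);
*       vmci_qpair_detach(&qp);
*    }
*/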
#if defined (SOLARIS) || (defined(__APPLE__) && !defined (VMX86_TOOLS)) || \
(defined(__linux__) && defined(__KERNEL__)) || \
(defined(_WIN32) && defined(WINNT_DDK))
/*
* Environments that support struct iovec
*/
ssize_t vmci_qpair_enquev(VMCIQPair *qpair, void *iov, size_t iovSize,
int mode);
ssize_t vmci_qpair_dequev(VMCIQPair *qpair, void *iov, size_t iovSize,
int mode);
ssize_t vmci_qpair_peekv(VMCIQPair *qpair, void *iov, size_t iovSize,
int mode);
#endif /* Systems that support struct iovec */
/* Typedefs for all of the above, used by the IOCTLs and the kernel library. */
typedef void (VMCI_DeviceReleaseFct)(void *);
typedef int (VMCIDatagram_CreateHndFct)(VMCIId, uint32, VMCIDatagramRecvCB,
void *, VMCIHandle *);
typedef int (VMCIDatagram_CreateHndPrivFct)(VMCIId, uint32, VMCIPrivilegeFlags,
VMCIDatagramRecvCB, void *,
VMCIHandle *);
typedef int (VMCIDatagram_DestroyHndFct)(VMCIHandle);
typedef int (VMCIDatagram_SendFct)(VMCIDatagram *);
typedef VMCIId (VMCI_GetContextIDFct)(void);
typedef uint32 (VMCI_VersionFct)(void);
typedef int (VMCI_ContextID2HostVmIDFct)(VMCIId, void *, size_t);
typedef int (VMCI_IsContextOwnerFct)(VMCIId, void *);
typedef int (VMCIEvent_SubscribeFct)(VMCI_Event, uint32, VMCI_EventCB, void *,
VMCIId *);
typedef int (VMCIEvent_UnsubscribeFct)(VMCIId);
typedef VMCIPrivilegeFlags (VMCIContext_GetPrivFlagsFct)(VMCIId);
typedef int (VMCIQPair_AllocFct)(VMCIQPair **, VMCIHandle *, uint64, uint64,
VMCIId, uint32, VMCIPrivilegeFlags);
typedef int (VMCIQPair_DetachFct)(VMCIQPair **);
typedef int (VMCIQPair_GetProduceIndexesFct)(const VMCIQPair *, uint64 *,
uint64 *);
typedef int (VMCIQPair_GetConsumeIndexesFct)(const VMCIQPair *, uint64 *,
uint64 *);
typedef int64 (VMCIQPair_ProduceFreeSpaceFct)(const VMCIQPair *);
typedef int64 (VMCIQPair_ProduceBufReadyFct)(const VMCIQPair *);
typedef int64 (VMCIQPair_ConsumeFreeSpaceFct)(const VMCIQPair *);
typedef int64 (VMCIQPair_ConsumeBufReadyFct)(const VMCIQPair *);
typedef ssize_t (VMCIQPair_EnqueueFct)(VMCIQPair *, const void *, size_t, int);
typedef ssize_t (VMCIQPair_DequeueFct)(VMCIQPair *, void *, size_t, int);
typedef ssize_t (VMCIQPair_PeekFct)(VMCIQPair *, void *, size_t, int);
typedef ssize_t (VMCIQPair_EnqueueVFct)(VMCIQPair *qpair, void *, size_t, int);
typedef ssize_t (VMCIQPair_DequeueVFct)(VMCIQPair *qpair, void *, size_t, int);
typedef ssize_t (VMCIQPair_PeekVFct)(VMCIQPair *qpair, void *, size_t, int);
#endif /* !__VMCI_KERNELAPI_1_H__ */
vmmemctl-only/shared/community_source.h 0000444 0000000 0000000 00000003742 12275350062 017410 0 ustar root root /*********************************************************
* Copyright (C) 2009 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* community_source.h --
*
* Macros for excluding source code from community.
*/
#ifndef _COMMUNITY_SOURCE_H_
#define _COMMUNITY_SOURCE_H_
#define INCLUDE_ALLOW_USERLEVEL
#define INCLUDE_ALLOW_MODULE
#define INCLUDE_ALLOW_VMMON
#define INCLUDE_ALLOW_VMKERNEL
#define INCLUDE_ALLOW_VMKDRIVERS
#define INCLUDE_ALLOW_VMK_MODULE
#define INCLUDE_ALLOW_DISTRIBUTE
#define INCLUDE_ALLOW_VMCORE
#define INCLUDE_ALLOW_VMIROM
#include "includeCheck.h"
/*
* Convenience macro for COMMUNITY_SOURCE
*/
#undef EXCLUDE_COMMUNITY_SOURCE
#ifdef COMMUNITY_SOURCE
#define EXCLUDE_COMMUNITY_SOURCE(x)
#else
#define EXCLUDE_COMMUNITY_SOURCE(x) x
#endif
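/*
* Example (illustrative sketch; 'InternalOnlyCall' is a placeholder):
*
*    EXCLUDE_COMMUNITY_SOURCE(InternalOnlyCall();)
*
* expands to nothing in Community Source drops and to the call otherwise.
*/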
#undef COMMUNITY_SOURCE_AMD_SECRET
#if !defined(COMMUNITY_SOURCE) || defined(AMD_SOURCE)
/*
* It's ok to include AMD_SECRET source code for non-Community Source,
* or for drops directed at AMD.
*/
#define COMMUNITY_SOURCE_AMD_SECRET
#endif
#undef COMMUNITY_SOURCE_INTEL_SECRET
#if !defined(COMMUNITY_SOURCE) || defined(INTEL_SOURCE)
/*
* It's ok to include INTEL_SECRET source code for non-Community Source,
* or for drops directed at Intel.
*/
#define COMMUNITY_SOURCE_INTEL_SECRET
#endif
#endif
vmmemctl-only/shared/compat_cred.h 0000444 0000000 0000000 00000003013 12275350061 016252 0 ustar root root /*********************************************************
* Copyright (C) 2002 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef __COMPAT_CRED_H__
# define __COMPAT_CRED_H__
/*
* Include linux/cred.h via linux/sched.h - it is not nice, but
* cpp does not have #ifexist...
*/
#include <linux/sched.h>
#if !defined(current_fsuid) && LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
#define current_uid() (current->uid)
#define current_euid() (current->euid)
#define current_fsuid() (current->fsuid)
#define current_gid() (current->gid)
#define current_egid() (current->egid)
#define current_fsgid() (current->fsgid)
#endif
#if !defined(cap_set_full)
/* cap_set_full was removed in kernel version 3.0-rc4. */
#define cap_set_full(_c) do { (_c) = CAP_FULL_SET; } while (0)
#endif
#endif /* __COMPAT_CRED_H__ */
vmmemctl-only/shared/compat_pgtable.h 0000444 0000000 0000000 00000011044 12275350061 016756 0 ustar root root /*********************************************************
* Copyright (C) 2002 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef __COMPAT_PGTABLE_H__
# define __COMPAT_PGTABLE_H__
#if defined(CONFIG_PARAVIRT) && defined(CONFIG_HIGHPTE)
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 21)
# include <asm/paravirt.h>
# undef paravirt_map_pt_hook
# define paravirt_map_pt_hook(type, va, pfn) do {} while (0)
# endif
#endif
#include <asm/pgtable.h>
/* pte_page() API modified in 2.3.23 to return a struct page * --hpreg */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 3, 23)
# define compat_pte_page pte_page
#else
# include "compat_page.h"
# define compat_pte_page(_pte) virt_to_page(pte_page(_pte))
#endif
/* Appeared in 2.5.5 --hpreg */
#ifndef pte_offset_map
/* Appeared in SuSE 8.0's 2.4.18 --hpreg */
# ifdef pte_offset_atomic
# define pte_offset_map pte_offset_atomic
# define pte_unmap pte_kunmap
# else
# define pte_offset_map pte_offset
# define pte_unmap(_pte)
# endif
#endif
/* Appeared in 2.5.74-mmX --petr */
#ifndef pmd_offset_map
# define pmd_offset_map(pgd, address) pmd_offset(pgd, address)
# define pmd_unmap(pmd)
#endif
/*
* Appeared in 2.6.10-rc2-mm1. Older kernels did L4 page tables as
* part of pgd_offset, or they did not have L4 page tables at all.
* In 2.6.11 pml4 -> pgd -> pmd -> pte hierarchy was replaced by
* pgd -> pud -> pmd -> pte hierarchy.
*/
#ifdef PUD_MASK
# define compat_pgd_offset(mm, address) pgd_offset(mm, address)
# define compat_pgd_present(pgd) pgd_present(pgd)
# define compat_pud_offset(pgd, address) pud_offset(pgd, address)
# define compat_pud_present(pud) pud_present(pud)
typedef pgd_t compat_pgd_t;
typedef pud_t compat_pud_t;
#elif defined(pml4_offset)
# define compat_pgd_offset(mm, address) pml4_offset(mm, address)
# define compat_pgd_present(pml4) pml4_present(pml4)
# define compat_pud_offset(pml4, address) pml4_pgd_offset(pml4, address)
# define compat_pud_present(pgd) pgd_present(pgd)
typedef pml4_t compat_pgd_t;
typedef pgd_t compat_pud_t;
#else
# define compat_pgd_offset(mm, address) pgd_offset(mm, address)
# define compat_pgd_present(pgd) pgd_present(pgd)
# define compat_pud_offset(pgd, address) (pgd)
# define compat_pud_present(pud) (1)
typedef pgd_t compat_pgd_t;
typedef pgd_t compat_pud_t;
#endif
#define compat_pgd_offset_k(mm, address) pgd_offset_k(address)
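/*
* Example of an upper-level walk using the wrappers above (illustrative
* sketch; locking, pmd/pte mapping and error handling omitted, and 'mm'
* and 'addr' are placeholders):
*
*    compat_pgd_t *pgd = compat_pgd_offset(mm, addr);
*
*    if (compat_pgd_present(*pgd)) {
*       compat_pud_t *pud = compat_pud_offset(pgd, addr);
*       if (compat_pud_present(*pud)) {
*          pmd_t *pmd = pmd_offset_map(pud, addr);
*          // ... continue down to the pte level ...
*       }
*    }
*/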
/* Introduced somewhere in 2.6.0, + backported to some 2.4 RedHat kernels */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) && !defined(pte_pfn)
# define pte_pfn(pte) page_to_pfn(compat_pte_page(pte))
#endif
/* A page_table_lock field is added to struct mm_struct in 2.3.10 --hpreg */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 3, 10)
# define compat_get_page_table_lock(_mm) (&(_mm)->page_table_lock)
#else
# define compat_get_page_table_lock(_mm) NULL
#endif
/*
* Define VM_PAGE_KERNEL_EXEC for vmapping executable pages.
*
* On ia32, PAGE_KERNEL_EXEC was introduced in 2.6.8.1. Unfortunately it
* accesses __PAGE_KERNEL_EXEC, which is not exported for modules, so we use
* __PAGE_KERNEL and just clear the _PAGE_NX bit from it.
*
* For ia32 kernels before 2.6.8.1 we use PAGE_KERNEL directly; these kernels
* do not have noexec support.
*
* On x86-64 the situation is a bit better: noexec has always been supported,
* but before 2.6.8.1 the flag was named PAGE_KERNEL_EXECUTABLE; it was
* renamed to PAGE_KERNEL_EXEC when ia32 got noexec too (see above).
*/
#ifdef CONFIG_X86
#ifdef _PAGE_NX
#define VM_PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL & ~_PAGE_NX)
#else
#define VM_PAGE_KERNEL_EXEC PAGE_KERNEL
#endif
#else
#ifdef PAGE_KERNEL_EXECUTABLE
#define VM_PAGE_KERNEL_EXEC PAGE_KERNEL_EXECUTABLE
#else
#define VM_PAGE_KERNEL_EXEC PAGE_KERNEL_EXEC
#endif
#endif
#endif /* __COMPAT_PGTABLE_H__ */
vmmemctl-only/backdoorInt.h 0000444 0000000 0000000 00000002010 12275350060 014756 0 ustar root root /*********************************************************
* Copyright (C) 2005 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* backdoorInt.h --
*
* Internal function prototypes for the real backdoor work.
*/
void BackdoorHbIn(Backdoor_proto_hb *bp);
void BackdoorHbOut(Backdoor_proto_hb *bp);
vmmemctl-only/README 0000444 0000000 0000000 00000000705 12275350046 013243 0 ustar root root The files in this directory are the source files for the VMware
Memory Control driver. To build, make certain the
Makefile is correct, especially regarding whether your system is
multi-processor, and then just type:
make
from this directory. A copy of the module will be left in 'vmmemctl.o',
which can then be installed in /lib/modules/<kernel-name>/misc.
If you have any problems or questions, send mail to support@vmware.com
vmmemctl-only/backdoor.h 0000444 0000000 0000000 00000002471 12275350060 014316 0 ustar root root /*********************************************************
* Copyright (C) 1999 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* backdoor.h --
*
* First layer of the internal communication channel between guest
* applications and vmware
*/
#ifndef _BACKDOOR_H_
#define _BACKDOOR_H_
#include "vm_basic_types.h"
#include "vm_assert.h"
#include "backdoor_types.h"
void
Backdoor(Backdoor_proto *bp); // IN/OUT
void
Backdoor_InOut(Backdoor_proto *bp); // IN/OUT
void
Backdoor_HbOut(Backdoor_proto_hb *bp); // IN/OUT
void
Backdoor_HbIn(Backdoor_proto_hb *bp); // IN/OUT
#endif /* _BACKDOOR_H_ */
vmmemctl-only/backdoor_balloon.h 0000444 0000000 0000000 00000003110 12275350046 016017 0 ustar root root /*********************************************************
* Copyright (C) 2007 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* backdoor_balloon.h --
*
* This file provides a wrapper for using the more generic backdoor library
* together with the vmballoon-specific backdoor.
*/
#ifndef _BACKDOOR_BALLOON_H_
#define _BACKDOOR_BALLOON_H_
#include "vmballoon.h"
#include "backdoor.h"
#include "balloon_def.h"
int Backdoor_MonitorStart(Balloon *b, uint32 protoVersion);
int Backdoor_MonitorGuestType(Balloon *b);
int Backdoor_MonitorGetTarget(Balloon *b, uint32 *target);
int Backdoor_MonitorLockPage(Balloon *b, PPN64 ppn);
int Backdoor_MonitorUnlockPage(Balloon *b, PPN64 ppn);
int Backdoor_MonitorLockPagesBatched(Balloon *b, PPN64 ppn, uint32 nPages);
int Backdoor_MonitorUnlockPagesBatched(Balloon *b, PPN64 ppn, uint32 nPages);
#endif /* _BACKDOOR_BALLOON_H_ */
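/*
 * Minimal usage sketch for this wrapper API (an illustration, not part
 * of the driver). It assumes a zero-initialized Balloon "b" and that
 * BALLOON_CAPABILITIES from balloonInt.h is in scope; error handling
 * beyond the status checks is omitted.
 *
 *    uint32 target;
 *    int status = Backdoor_MonitorStart(b, BALLOON_CAPABILITIES);
 *    if (status == BALLOON_SUCCESS) {
 *       (void) Backdoor_MonitorGuestType(b);
 *       status = Backdoor_MonitorGetTarget(b, &target);
 *    }
 */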
vmmemctl-only/os.h 0000444 0000000 0000000 00000003554 12275350046 013162 0 ustar root root /*********************************************************
* Copyright (C) 2000 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* os.h --
*
* Definitions for OS-specific wrapper functions required by "vmmemctl".
*/
#ifndef OS_H
#define OS_H
#include "vm_basic_types.h"
#include "balloon_def.h"
/*
* Types
*/
typedef uintptr_t PageHandle;
typedef uintptr_t Mapping;
#define PAGE_HANDLE_INVALID 0
#define MAPPING_INVALID 0
/*
* Operations
*/
extern void OS_MemZero(void *ptr, size_t size);
extern void OS_MemCopy(void *dest, const void *src, size_t size);
extern void *OS_Malloc(size_t size);
extern void OS_Free(void *ptr, size_t size);
extern void OS_Yield(void);
extern unsigned long OS_ReservedPageGetLimit(void);
extern PA64 OS_ReservedPageGetPA(PageHandle handle);
extern PageHandle OS_ReservedPageGetHandle(PA64 pa);
extern PageHandle OS_ReservedPageAlloc(int canSleep);
extern void OS_ReservedPageFree(PageHandle handle);
extern Mapping OS_MapPageHandle(PageHandle handle);
extern void *OS_Mapping2Addr(Mapping mapping);
extern void OS_UnmapPage(Mapping mapping);
#endif /* OS_H */
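/*
 * Usage sketch (illustrative only): the intended pairing of the page
 * primitives above. The canSleep value 1 and PAGE_SIZE (supplied by the
 * kernel headers) are assumptions.
 *
 *    PageHandle h = OS_ReservedPageAlloc(1);   // 1 == may sleep
 *    if (h != PAGE_HANDLE_INVALID) {
 *       Mapping m = OS_MapPageHandle(h);
 *       if (m != MAPPING_INVALID) {
 *          OS_MemZero(OS_Mapping2Addr(m), PAGE_SIZE);
 *          OS_UnmapPage(m);
 *       }
 *       OS_ReservedPageFree(h);
 *    }
 */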
vmmemctl-only/balloonInt.h 0000444 0000000 0000000 00000004637 12275350046 014645 0 ustar root root /*********************************************************
* Copyright (C) 2012 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef BALLOONINT_H_
# define BALLOONINT_H_
/*
* Compile-Time Options
*/
#define BALLOON_NAME "vmmemctl"
#define BALLOON_NAME_VERBOSE "VMware memory control driver"
#if defined __linux__ || defined __FreeBSD__ || defined _WIN32
/*
* FIXME: Even if the driver supports batched commands, keep using the
* non-batched ones until more testing has been done.
*/
#define BALLOON_CAPABILITIES BALLOON_BASIC_CMDS
#else
#define BALLOON_CAPABILITIES BALLOON_BASIC_CMDS
#endif
#define BALLOON_RATE_ADAPT 1
#define BALLOON_DEBUG 1
#define BALLOON_DEBUG_VERBOSE 0
#define BALLOON_POLL_PERIOD 1 /* sec */
#define BALLOON_NOSLEEP_ALLOC_MAX 16384
#define BALLOON_RATE_ALLOC_MIN 512
#define BALLOON_RATE_ALLOC_MAX 2048
#define BALLOON_RATE_ALLOC_INC 16
#define BALLOON_RATE_FREE_MIN 512
#define BALLOON_RATE_FREE_MAX 16384
#define BALLOON_RATE_FREE_INC 16
/*
* Move it to bora/public/balloon_def.h later, if needed. Note that
* BALLOON_PAGE_ALLOC_FAILURE is an internal error code used for
* distinguishing page allocation failures from monitor-backdoor errors.
* We use value 1000 because all monitor-backdoor error codes are < 1000.
*/
#define BALLOON_PAGE_ALLOC_FAILURE 1000
#define BALLOON_STATS
#ifdef BALLOON_STATS
#define STATS_INC(stat) (stat)++
#define STATS_DEC(stat) (stat)--
#else
#define STATS_INC(stat)
#define STATS_DEC(stat)
#endif
#define PPN_2_PA(_ppn) ((PPN64)(_ppn) << PAGE_SHIFT)
#define PA_2_PPN(_pa) ((_pa) >> PAGE_SHIFT)
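/*
 * Worked example (illustrative): with 4k pages (PAGE_SHIFT == 12),
 * PPN_2_PA(0x12345) == 0x12345000 and PA_2_PPN(0x12345000) == 0x12345,
 * so PA_2_PPN(PPN_2_PA(ppn)) == ppn for any PPN.
 */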
#endif /* !BALLOONINT_H_ */
vmmemctl-only/backdoorGcc32.c 0000444 0000000 0000000 00000014036 12275350060 015073 0 ustar root root /*********************************************************
* Copyright (C) 2005 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* backdoorGcc32.c --
*
* Implements the real work for guest-side backdoor for GCC, 32-bit
* target (supports inline ASM, GAS syntax). The asm sections are marked
* volatile since vmware can change the registers' content without the
* compiler knowing it.
*
* XXX
* I tried to write this more cleanly, but:
* - There is no way to specify an "ebp" constraint
* - "ebp" is ignored when specified as a clobbered register
* - gas barfs when there are more than 10 operands
* - gas 2.7.2.3, depending on the order of the operands, can
* mis-assemble without any warning
* --hpreg
*
* Note that the problems with gas noted above might no longer be relevant
* now that we've upgraded most of our compiler versions.
* --rrdharan
*/
#ifdef __cplusplus
extern "C" {
#endif
#include "backdoor.h"
#include "backdoorInt.h"
/*
*----------------------------------------------------------------------------
*
* Backdoor_InOut --
*
* Send a low-bandwidth basic request (16 bytes) to vmware, and return its
* reply (24 bytes).
*
* Results:
* Host-side response returned in bp IN/OUT parameter.
*
* Side effects:
* Pokes the backdoor.
*
*----------------------------------------------------------------------------
*/
void
Backdoor_InOut(Backdoor_proto *myBp) // IN/OUT
{
uint32 dummy;
__asm__ __volatile__(
#ifdef __PIC__
"pushl %%ebx" "\n\t"
#endif
"pushl %%eax" "\n\t"
"movl 20(%%eax), %%edi" "\n\t"
"movl 16(%%eax), %%esi" "\n\t"
"movl 12(%%eax), %%edx" "\n\t"
"movl 8(%%eax), %%ecx" "\n\t"
"movl 4(%%eax), %%ebx" "\n\t"
"movl (%%eax), %%eax" "\n\t"
"inl %%dx, %%eax" "\n\t"
"xchgl %%eax, (%%esp)" "\n\t"
"movl %%edi, 20(%%eax)" "\n\t"
"movl %%esi, 16(%%eax)" "\n\t"
"movl %%edx, 12(%%eax)" "\n\t"
"movl %%ecx, 8(%%eax)" "\n\t"
"movl %%ebx, 4(%%eax)" "\n\t"
"popl (%%eax)" "\n\t"
#ifdef __PIC__
"popl %%ebx" "\n\t"
#endif
: "=a" (dummy)
: "0" (myBp)
/*
* vmware can modify the whole VM state without the compiler knowing
* it. So far it does not modify EFLAGS. --hpreg
*/
:
#ifndef __PIC__
"ebx",
#endif
"ecx", "edx", "esi", "edi", "memory"
);
}
/*
*-----------------------------------------------------------------------------
*
* BackdoorHbIn --
* BackdoorHbOut --
*
* Send a high-bandwidth basic request to vmware, and return its
* reply.
*
* Results:
* Host-side response returned in bp IN/OUT parameter.
*
* Side effects:
* Pokes the high-bandwidth backdoor port.
*
*-----------------------------------------------------------------------------
*/
void
BackdoorHbIn(Backdoor_proto_hb *myBp) // IN/OUT
{
uint32 dummy;
__asm__ __volatile__(
#ifdef __PIC__
"pushl %%ebx" "\n\t"
#endif
"pushl %%ebp" "\n\t"
"pushl %%eax" "\n\t"
"movl 24(%%eax), %%ebp" "\n\t"
"movl 20(%%eax), %%edi" "\n\t"
"movl 16(%%eax), %%esi" "\n\t"
"movl 12(%%eax), %%edx" "\n\t"
"movl 8(%%eax), %%ecx" "\n\t"
"movl 4(%%eax), %%ebx" "\n\t"
"movl (%%eax), %%eax" "\n\t"
"cld" "\n\t"
"rep; insb" "\n\t"
"xchgl %%eax, (%%esp)" "\n\t"
"movl %%ebp, 24(%%eax)" "\n\t"
"movl %%edi, 20(%%eax)" "\n\t"
"movl %%esi, 16(%%eax)" "\n\t"
"movl %%edx, 12(%%eax)" "\n\t"
"movl %%ecx, 8(%%eax)" "\n\t"
"movl %%ebx, 4(%%eax)" "\n\t"
"popl (%%eax)" "\n\t"
"popl %%ebp" "\n\t"
#ifdef __PIC__
"popl %%ebx" "\n\t"
#endif
: "=a" (dummy)
: "0" (myBp)
/*
* vmware can modify the whole VM state without the compiler knowing
* it. --hpreg
*/
:
#ifndef __PIC__
"ebx",
#endif
"ecx", "edx", "esi", "edi", "memory", "cc"
);
}
void
BackdoorHbOut(Backdoor_proto_hb *myBp) // IN/OUT
{
uint32 dummy;
__asm__ __volatile__(
#ifdef __PIC__
"pushl %%ebx" "\n\t"
#endif
"pushl %%ebp" "\n\t"
"pushl %%eax" "\n\t"
"movl 24(%%eax), %%ebp" "\n\t"
"movl 20(%%eax), %%edi" "\n\t"
"movl 16(%%eax), %%esi" "\n\t"
"movl 12(%%eax), %%edx" "\n\t"
"movl 8(%%eax), %%ecx" "\n\t"
"movl 4(%%eax), %%ebx" "\n\t"
"movl (%%eax), %%eax" "\n\t"
"cld" "\n\t"
"rep; outsb" "\n\t"
"xchgl %%eax, (%%esp)" "\n\t"
"movl %%ebp, 24(%%eax)" "\n\t"
"movl %%edi, 20(%%eax)" "\n\t"
"movl %%esi, 16(%%eax)" "\n\t"
"movl %%edx, 12(%%eax)" "\n\t"
"movl %%ecx, 8(%%eax)" "\n\t"
"movl %%ebx, 4(%%eax)" "\n\t"
"popl (%%eax)" "\n\t"
"popl %%ebp" "\n\t"
#ifdef __PIC__
"popl %%ebx" "\n\t"
#endif
: "=a" (dummy)
: "0" (myBp)
:
#ifndef __PIC__
"ebx",
#endif
"ecx", "edx", "esi", "edi", "memory", "cc"
);
}
#ifdef __cplusplus
}
#endif
vmmemctl-only/balloon_def.h 0000444 0000000 0000000 00000017660 12275350062 015006 0 ustar root root /*********************************************************
* Copyright (C) 2000-2012 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* balloon_def.h --
*
* Definitions for server "balloon" mechanism for reclaiming
* physical memory from a VM.
*/
#ifndef _BALLOON_DEF_H
#define _BALLOON_DEF_H
#define INCLUDE_ALLOW_VMX
#define INCLUDE_ALLOW_VMCORE
#define INCLUDE_ALLOW_VMKERNEL
#define INCLUDE_ALLOW_MODULE
#include "includeCheck.h"
#include "vm_basic_types.h"
#include "vm_basic_defs.h"
#include "vm_assert.h"
/*
* constants
*/
/* backdoor port */
#define BALLOON_BDOOR_PORT (0x5670)
#define BALLOON_BDOOR_MAGIC (0x456c6d6f)
/*
* Backdoor commands availability:
*
* +====================+======================+
* | CMD | Capabilities |
* +--------------------+----------------------+
* | START | Always available (*) |
* | TARGET | Always available |
* | LOCK | BASIC_CMDS |
* | UNLOCK | BASIC_CMDS |
* | GUEST_ID | Always available |
* | BATCHED_LOCK | BATCHED_CMDS |
* | BATCHED_UNLOCK | BATCHED_CMDS |
* +====================+======================+
*
* (*) The START command was slightly modified when more than the
* basic commands became available: it returns
* BALLOON_SUCCESS_WITH_CAPABILITIES with the available capabilities
* stored in %ecx. Previously, a versioned protocol was used, and the
* protocol that should be used was also returned in %ecx. Protocol
* version 2 was the initial version and the only one shipped. Version 3
* was temporarily used internally but caused several issues due to
* protocol mismatches between monitor and guest.
*
*/
/* backdoor command numbers */
#define BALLOON_BDOOR_CMD_START (0)
#define BALLOON_BDOOR_CMD_TARGET (1)
#define BALLOON_BDOOR_CMD_LOCK (2)
#define BALLOON_BDOOR_CMD_UNLOCK (3)
#define BALLOON_BDOOR_CMD_GUEST_ID (4)
/* Command 5 was briefly used between 1881144 and 1901153. */
#define BALLOON_BDOOR_CMD_BATCHED_LOCK (6)
#define BALLOON_BDOOR_CMD_BATCHED_UNLOCK (7)
/* balloon capabilities */
typedef enum {
/*
* Bit 0 is not used and should not be used: due to an issue with
* protocol v3, and to avoid ambiguity between protocol v3 and
* capabilities, leave this bit as 0. That way, by masking guest
* capabilities with monitor capabilities, bit 0 will always be set
* to 0, and buggy v3 tools will automatically switch to unbatched
* LOCK and UNLOCK.
*/
BALLOON_BASIC_CMDS = (1 << 1),
BALLOON_BATCHED_CMDS = (1 << 2)
} BalloonCapabilities;
/* use config value for max balloon size */
#define BALLOON_MAX_SIZE_USE_CONFIG (0)
/*
* Guest identities
*
* Note: all values should fit in 32 bits
*/
typedef enum {
BALLOON_GUEST_UNKNOWN = 0,
BALLOON_GUEST_LINUX = 1,
BALLOON_GUEST_BSD = 2,
BALLOON_GUEST_WINDOWS_NT4 = 3,
BALLOON_GUEST_WINDOWS_NT5 = 4,
BALLOON_GUEST_SOLARIS = 5,
BALLOON_GUEST_MACOS = 6,
BALLOON_GUEST_FROBOS = 7,
} BalloonGuest;
/* error codes */
#define BALLOON_SUCCESS (0)
#define BALLOON_FAILURE (-1)
#define BALLOON_ERROR_CMD_INVALID (1)
#define BALLOON_ERROR_PPN_INVALID (2)
#define BALLOON_ERROR_PPN_LOCKED (3)
#define BALLOON_ERROR_PPN_UNLOCKED (4)
#define BALLOON_ERROR_PPN_PINNED (5)
#define BALLOON_ERROR_PPN_NOTNEEDED (6)
#define BALLOON_ERROR_RESET (7)
#define BALLOON_ERROR_BUSY (8)
#define BALLOON_SUCCESS_WITH_CAPABILITIES (0x03000000)
/*
* BatchPage.
*/
#define BALLOON_BATCH_MAX_PAGES (PAGE_SIZE / sizeof(PA64))
/*
* We use the fact that for 4k pages the 12 LSBs are set to 0, so we
* can store data in them and mask those bits off when we need the
* real PA.
*
* +=============+==========+========+
* | | | |
* | Page number | Reserved | Status |
* | | | |
* +=============+==========+========+
* 64 PAGE_SHIFT 6 0
*
* For now, only 4k pages are supported by the monitor, but by using
* the reserved bits we can later add flags indicating whether the
* page is a 2M page or a 1G page.
*
* The reserved field should be set to 0.
*
*/
#define BALLOON_BATCH_STATUS_MASK MASK64(5)
#define BALLOON_BATCH_PAGE_MASK (~MASK64(PAGE_SHIFT))
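/*
 * Worked example (illustrative, assuming MASK64(n) yields the n low
 * bits and PAGE_SHIFT == 12): the batch entry 0x12345003 decodes to
 * PA 0x12345000 (entry & BALLOON_BATCH_PAGE_MASK) and status 3,
 * i.e. BALLOON_ERROR_PPN_LOCKED (entry & BALLOON_BATCH_STATUS_MASK).
 */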
typedef struct BalloonBatchPage {
PA64 pages[BALLOON_BATCH_MAX_PAGES];
} BalloonBatchPage;
/*
*-----------------------------------------------------------------------------
*
* Balloon_BatchGetPA --
*
* Get the page stored in the batch page at idx.
*
* Results:
* See above.
*
* Side effects:
* None.
*
*-----------------------------------------------------------------------------
*/
static INLINE PA64
Balloon_BatchGetPA(BalloonBatchPage *batchPage, // IN
uint16 idx) // IN
{
ASSERT(idx < BALLOON_BATCH_MAX_PAGES);
return batchPage->pages[idx] & BALLOON_BATCH_PAGE_MASK;
}
/*
*-----------------------------------------------------------------------------
*
* Balloon_BatchGetStatus --
*
* Get the error code associated with a page.
*
* Results:
* See above.
*
* Side effects:
* None.
*
*-----------------------------------------------------------------------------
*/
static INLINE uint8
Balloon_BatchGetStatus(BalloonBatchPage *batchPage, // IN
uint16 idx) // IN
{
ASSERT(idx < BALLOON_BATCH_MAX_PAGES);
return (uint8)(batchPage->pages[idx] & BALLOON_BATCH_STATUS_MASK);
}
/*
*-----------------------------------------------------------------------------
*
* Balloon_BatchSetPA --
*
* Store the page in the batch page at idx.
*
* Results:
* None.
*
* Side effects:
* None.
*
*-----------------------------------------------------------------------------
*/
static INLINE void
Balloon_BatchSetPA(BalloonBatchPage *batchPage, // IN
uint16 idx, // IN
PA64 pa) // IN
{
ASSERT(idx < BALLOON_BATCH_MAX_PAGES);
ASSERT((pa & ~BALLOON_BATCH_PAGE_MASK) == 0);
batchPage->pages[idx] = pa;
}
/*
*-----------------------------------------------------------------------------
*
* Balloon_BatchSetStatus --
*
* Set the error code associated with a page.
*
* Results:
* None.
*
* Side effects:
* None.
*
*-----------------------------------------------------------------------------
*/
static INLINE void
Balloon_BatchSetStatus(BalloonBatchPage *batchPage, // IN
uint16 idx, // IN
int error) // IN
{
PA64 pa = Balloon_BatchGetPA(batchPage, idx);
ASSERT(idx < BALLOON_BATCH_MAX_PAGES);
ASSERT(error <= BALLOON_ERROR_BUSY && error >= BALLOON_FAILURE);
batchPage->pages[idx] = pa | (PPN64)error;
}
MY_ASSERTS(BALLOON_BATCH_SIZE,
ASSERT_ON_COMPILE(sizeof(BalloonBatchPage) == PAGE_SIZE);
)
#endif /* _BALLOON_DEF_H */
vmmemctl-only/kernelStubsLinux.c 0000444 0000000 0000000 00000022635 12275350045 016055 0 ustar root root /*********************************************************
* Copyright (C) 2006 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* kernelStubsLinux.c
*
* This file contains implementations of common userspace functions in terms
* that the Linux kernel can understand.
*/
/* Must come before any kernel header file */
#include "driver-config.h"
#include "kernelStubs.h"
#include "compat_kernel.h"
#include "compat_page.h"
#include "compat_sched.h"
#include <linux/slab.h>
#include "vm_assert.h"
/*
*-----------------------------------------------------------------------------
*
* Panic --
*
* Prints the debug message and stops the system.
*
* Results:
* None.
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
void
Panic(const char *fmt, ...) // IN
{
va_list args;
char *result;
va_start(args, fmt);
result = Str_Vasprintf(NULL, fmt, args);
va_end(args);
if (result) {
printk(KERN_EMERG "%s", result);
}
BUG();
while (1); // Avoid compiler warning.
}
/*
*----------------------------------------------------------------------
*
* Str_Strcpy --
*
* Wrapper for strcpy that checks for buffer overruns.
*
* Results:
* Same as strcpy.
*
* Side effects:
* None.
*
*----------------------------------------------------------------------
*/
char *
Str_Strcpy(char *buf, // OUT
const char *src, // IN
size_t maxSize) // IN
{
unsigned int *stack = (unsigned int *)&buf;
size_t len;
len = strlen(src);
if (len >= maxSize) {
Panic("%s:%d Buffer too small 0x%x\n", __FILE__,__LINE__,
stack[-1]);
}
return memcpy(buf, src, len + 1);
}
/*
*----------------------------------------------------------------------
*
* Str_Vsnprintf --
*
* Compatibility wrapper between different libc versions
*
* Results:
* int - number of bytes written (not including the NUL termination
* character); -1 on overflow (insufficient space for the NUL
* terminator is considered overflow)
*
* NB: on overflow the buffer WILL be null terminated
*
* Side effects:
* None
*
*----------------------------------------------------------------------
*/
int
Str_Vsnprintf(char *str, // OUT
size_t size, // IN
const char *format, // IN
va_list arguments) // IN
{
int retval;
retval = vsnprintf(str, size, format, arguments);
/*
* Linux glibc 2.0.x (which we shouldn't be linking against) returns -1
* and null terminates, but glibc 2.1.x follows C99 and returns the
* number of characters that would have been written.
*/
if (retval >= size) {
return -1;
}
return retval;
}
/*
*-----------------------------------------------------------------------------
*
* Str_Vasprintf --
*
* Allocate and format a string, using the GNU libc way to specify the
* format (i.e. optionally allow the use of positional parameters)
*
* Results:
* The allocated string on success (if 'length' is not NULL, *length
* is set to the length of the allocated string)
* NULL on failure
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
char *
Str_Vasprintf(size_t *length, // OUT
const char *format, // IN
va_list arguments) // IN
{
/*
* Simple implementation of Str_Vasprintf when userlevel libraries are not
* available (e.g. for use in drivers). We just fall back to vsnprintf,
* doubling the buffer if we didn't have enough space.
*/
unsigned int bufSize;
char *buf;
int retval;
bufSize = strlen(format);
buf = NULL;
do {
/*
* Initial allocation of strlen(format) * 2. Should this be tunable?
* XXX Yes, this could overflow and spin forever when you get near 2GB
* allocations. I don't care. --rrdharan
*/
va_list args2;
bufSize *= 2;
buf = realloc(buf, bufSize);
if (!buf) {
return NULL;
}
va_copy(args2, arguments);
retval = Str_Vsnprintf(buf, bufSize, format, args2);
va_end(args2);
} while (retval == -1);
if (length) {
*length = retval;
}
/*
* Try to trim the buffer here to save memory?
*/
return buf;
}
/*
*-----------------------------------------------------------------------------
*
* Str_Asprintf --
*
* Same as Str_Vasprintf(), but parameters are passed inline --hpreg
*
* Results:
* Same as Str_Vasprintf()
*
* Side effects:
* Same as Str_Vasprintf()
*
*-----------------------------------------------------------------------------
*/
char *
Str_Asprintf(size_t *length, // OUT
const char *format, // IN
...) // IN
{
va_list arguments;
char *result;
va_start(arguments, format);
result = Str_Vasprintf(length, format, arguments);
va_end(arguments);
return result;
}
/*
*-----------------------------------------------------------------------------
*
* strdup --
*
* Duplicates a string.
*
* Results:
* A pointer to memory containing the duplicated string or NULL if no
* memory was available.
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
char *
strdup(const char *source) // IN
{
char *target = NULL;
if (source) {
/*
* We call our special implementation of malloc() because the users of
* strdup() will call free(), and that'll decrement the pointer before
* freeing it. Thus, we need to make sure that the allocated block
* also stores the block length before the block itself (see malloc()
* below).
*/
unsigned int len = strlen(source);
target = malloc(len + 1);
if (target) {
memcpy(target, source, len + 1);
}
}
return target;
}
/*
*----------------------------------------------------------------------------
*
* malloc --
*
* Allocate memory using kmalloc. There is no realloc
* equivalent, so we roll our own by padding each allocation with
* 4 (or 8 for 64-bit guests) extra bytes to store the block length.
*
* Results:
* Pointer to driver heap memory, offset by 4 (or 8)
* bytes from the real block pointer.
*
* Side effects:
* None.
*
*----------------------------------------------------------------------------
*/
void *
malloc(size_t size) // IN
{
size_t *ptr;
ptr = kmalloc(size + sizeof size, GFP_KERNEL);
if (ptr) {
*ptr++ = size;
}
return ptr;
}
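/*
 * Layout sketch (illustrative): on a 64-bit kernel, malloc(100) asks
 * kmalloc for 108 bytes. The first sizeof(size_t) bytes hold the
 * length 100, and the pointer handed back to the caller is the kmalloc
 * block plus sizeof(size_t); free() below recovers the base pointer
 * with --dataPtr before calling kfree().
 */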
/*
*---------------------------------------------------------------------------
*
* free --
*
* Free memory allocated by a previous call to malloc, calloc or realloc.
*
* Results:
* None.
*
* Side effects:
* Calls kfree to free the real (base) pointer.
*
*---------------------------------------------------------------------------
*/
void
free(void *mem) // IN
{
if (mem) {
size_t *dataPtr = (size_t *)mem;
kfree(--dataPtr);
}
}
/*
*----------------------------------------------------------------------------
*
* calloc --
*
* Malloc and zero.
*
* Results:
* Pointer to driver heap memory (see malloc, above).
*
* Side effects:
* None.
*
*----------------------------------------------------------------------------
*/
void *
calloc(size_t num, // IN
size_t len) // IN
{
size_t size;
void *ptr;
size = num * len;
ptr = malloc(size);
if (ptr) {
memset(ptr, 0, size);
}
return ptr;
}
/*
*----------------------------------------------------------------------------
*
* realloc --
*
* Since the driver heap has no realloc equivalent, we have to roll our
* own. Fortunately, we can retrieve the block size of every block we
* hand out since we stashed it at allocation time (see malloc above).
*
* Results:
* Pointer to memory block valid for 'newSize' bytes, or NULL if
* allocation failed.
*
* Side effects:
* Could copy memory around.
*
*----------------------------------------------------------------------------
*/
void *
realloc(void* ptr, // IN
size_t newSize) // IN
{
void *newPtr;
size_t *dataPtr;
size_t length, lenUsed;
dataPtr = (size_t *)ptr;
length = ptr ? dataPtr[-1] : 0;
if (newSize == 0) {
if (ptr) {
free(ptr);
newPtr = NULL;
} else {
newPtr = malloc(newSize);
}
} else if (newSize == length) {
newPtr = ptr;
} else if ((newPtr = malloc(newSize))) {
if (length < newSize) {
lenUsed = length;
} else {
lenUsed = newSize;
}
memcpy(newPtr, ptr, lenUsed);
free(ptr);
}
return newPtr;
}
vmmemctl-only/Makefile.kernel 0000444 0000000 0000000 00000004463 12275350046 015307 0 ustar root root #!/usr/bin/make -f
##########################################################
# Copyright (C) 1998 VMware, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation version 2 and no later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
##########################################################
####
#### VMware vmballoon Makefile to be distributed externally
####
INCLUDE += -I.
obj-m += $(DRIVER).o
$(DRIVER)-y := $(subst $(SRCROOT)/, , $(patsubst %.c, %.o, $(wildcard $(SRCROOT)/*.c)))
#
# In open-vm-tools, we need to compile the common sources from the lib directory.
#
ifdef OVT_SOURCE_DIR
VMMEMCTL_PATH := $(shell cd $(SRCROOT) && pwd)
VMMEMCTL_SHARED_PATH := $(OVT_SOURCE_DIR)/modules/shared/vmmemctl
LIBBACKDOOR_PATH := $(call VMLIB_PATH,backdoor)
INCLUDE += -I$(LIBBACKDOOR_PATH)
INCLUDE += -I$(VMMEMCTL_SHARED_PATH)
LIBBACKDOOR := backdoorGcc32.o
LIBBACKDOOR += backdoorGcc64.o
VMMEMCTL_SHARED := vmballoon.o kernelStubsLinux.o
$(addprefix $(VMMEMCTL_PATH)/,$(LIBBACKDOOR)): $(VMMEMCTL_PATH)/%.o: $(LIBBACKDOOR_PATH)/%.c
$(Q)$(rule_cc_o_c)
$(addprefix $(VMMEMCTL_PATH)/,$(VMMEMCTL_SHARED)): $(VMMEMCTL_PATH)/%.o: $(VMMEMCTL_SHARED_PATH)/%.c
$(Q)$(rule_cc_o_c)
$(DRIVER)-y += $(LIBBACKDOOR)
$(DRIVER)-y += $(VMMEMCTL_SHARED)
endif
EXTRA_CFLAGS := $(CC_OPTS) $(INCLUDE)
#
# On a 32-bit machine, strip out 64-bit backdoor code, and vice versa.
#
ifeq ($(CONFIG_X86_64),y)
$(DRIVER)-y := $(filter-out backdoorGcc32.o, $($(DRIVER)-y))
else
$(DRIVER)-y := $(filter-out backdoorGcc64.o, $($(DRIVER)-y))
endif
clean:
rm -rf $(wildcard $(DRIVER).mod.c $(DRIVER).ko .tmp_versions \
Module.symvers Modules.symvers Module.markers modules.order \
$(foreach dir,./,$(addprefix $(dir),.*.cmd .*.o.flags *.o)))
vmmemctl-only/backdoor_balloon.c 0000444 0000000 0000000 00000022460 12275350046 016023 0 ustar root root /*********************************************************
* Copyright (C) 2012 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#include "backdoor_balloon.h"
#include "backdoor.h"
#include "balloon_def.h"
#include "os.h"
/*
*----------------------------------------------------------------------
*
* BackdoorCmd --
*
* Do the balloon hypercall to the vmkernel.
*
* Results:
* The hypercall status is returned, and "out", if not NULL, is
* filled from bx or cx depending on the command.
*
* Side effects:
* None.
*
*----------------------------------------------------------------------
*/
static int
BackdoorCmd(uint16 cmd, // IN
size_t arg1, // IN
uint32 arg2, // IN
uint32 *out, // OUT
int *resetFlag) // OUT
{
Backdoor_proto bp;
int status;
/* prepare backdoor args */
bp.in.cx.halfs.low = cmd;
bp.in.size = arg1;
bp.in.si.word = arg2;
/* invoke backdoor */
bp.in.ax.word = BALLOON_BDOOR_MAGIC;
bp.in.dx.halfs.low = BALLOON_BDOOR_PORT;
Backdoor_InOut(&bp);
status = bp.out.ax.word;
/* set flag if reset requested */
if (status == BALLOON_ERROR_RESET) {
*resetFlag = 1;
}
if (out) {
if (cmd == BALLOON_BDOOR_CMD_START) {
*out = bp.out.cx.word;
} else {
*out = bp.out.bx.word;
}
}
return status;
}
/*
*----------------------------------------------------------------------
*
* Backdoor_MonitorStart --
*
* Attempts to contact monitor via backdoor to begin operation.
*
* Results:
* Returns BALLOON_SUCCESS if successful, otherwise error code.
*
* Side effects:
* None.
*
*----------------------------------------------------------------------
*/
int
Backdoor_MonitorStart(Balloon *b, // IN
uint32 protoVersion) // IN
{
uint32 capabilities;
int status = BackdoorCmd(BALLOON_BDOOR_CMD_START, protoVersion, 0,
&capabilities, &b->resetFlag);
/*
* If the return code is BALLOON_SUCCESS_WITH_CAPABILITIES, then ESX is
* sending the common capabilities supported by the monitor and the
* guest in cx.
*/
if (status == BALLOON_SUCCESS_WITH_CAPABILITIES) {
b->hypervisorCapabilities = capabilities;
status = BALLOON_SUCCESS;
} else if (status == BALLOON_SUCCESS) {
b->hypervisorCapabilities = BALLOON_BASIC_CMDS;
}
/* update stats */
STATS_INC(b->stats.start);
if (status != BALLOON_SUCCESS) {
STATS_INC(b->stats.startFail);
}
return status;
}
/*
*----------------------------------------------------------------------
*
* Backdoor_MonitorGuestType --
*
* Attempts to contact monitor and report guest OS identity.
*
* Results:
* Returns BALLOON_SUCCESS if successful, otherwise error code.
*
* Side effects:
* None.
*
*----------------------------------------------------------------------
*/
int
Backdoor_MonitorGuestType(Balloon *b) // IN
{
int status = BackdoorCmd(BALLOON_BDOOR_CMD_GUEST_ID, b->guestType, 0,
NULL, &b->resetFlag);
/* update stats */
STATS_INC(b->stats.guestType);
if (status != BALLOON_SUCCESS) {
STATS_INC(b->stats.guestTypeFail);
}
return status;
}
/*
*----------------------------------------------------------------------
*
* Backdoor_MonitorGetTarget --
*
* Attempts to contact monitor via backdoor to obtain desired
* balloon size.
*
* Predicts the maximum achievable balloon size and sends it
* to the vmm => vmkernel via the vEbx register.
*
* OS_ReservedPageGetLimit() returns either the predicted max balloon
* pages or BALLOON_MAX_SIZE_USE_CONFIG. In the latter scenario,
* vmkernel uses global config options for determining a guest's max
* balloon size. Note that older vmballoon drivers set vEbx to
* BALLOON_MAX_SIZE_USE_CONFIG, i.e., value 0 (zero), so vmkernel
* will fall back to config-based max balloon size estimation.
*
* Results:
* If successful, sets "target" to value obtained from monitor,
* and returns BALLOON_SUCCESS. Otherwise returns error code.
*
* Side effects:
* None.
*
*----------------------------------------------------------------------
*/
int
Backdoor_MonitorGetTarget(Balloon *b, // IN
uint32 *target) // OUT
{
unsigned long limit;
uint32 limit32;
int status;
limit = OS_ReservedPageGetLimit();
/* Ensure limit fits in 32-bits */
limit32 = (uint32)limit;
if (limit32 != limit) {
return BALLOON_FAILURE;
}
status = BackdoorCmd(BALLOON_BDOOR_CMD_TARGET, limit, 0, target,
&b->resetFlag);
/* update stats */
STATS_INC(b->stats.target);
if (status != BALLOON_SUCCESS) {
STATS_INC(b->stats.targetFail);
}
return status;
}
/*
*----------------------------------------------------------------------
*
* Backdoor_MonitorLockPage --
*
* Attempts to contact monitor and add PPN corresponding to
* the page handle to set of "balloon locked" pages.
* If the current protocol supports batching, it will balloon all
* PPNs listed in the batch page.
*
* Results:
* Returns BALLOON_SUCCESS if successful, otherwise error code.
*
* Side effects:
* None.
*
*----------------------------------------------------------------------
*/
int
Backdoor_MonitorLockPage(Balloon *b, // IN
PPN64 ppn) // IN
{
int status;
uint32 ppn32 = (uint32)ppn;
/* Ensure PPN fits in 32-bits, i.e. guest memory is limited to 16TB. */
if (ppn32 != ppn) {
return BALLOON_ERROR_PPN_INVALID;
}
status = BackdoorCmd(BALLOON_BDOOR_CMD_LOCK, ppn32, 0, NULL,
&b->resetFlag);
/* update stats */
STATS_INC(b->stats.lock);
if (status != BALLOON_SUCCESS) {
STATS_INC(b->stats.lockFail);
}
return status;
}
/*
*----------------------------------------------------------------------
*
* Backdoor_MonitorUnlockPage --
*
* Attempts to contact monitor and remove PPN corresponding to
* the page handle from set of "balloon locked" pages.
* If the current protocol supports batching, it will remove all
* the PPNs listed in the batch page.
*
* Results:
* Returns BALLOON_SUCCESS if successful, otherwise error code.
*
* Side effects:
* None.
*
*----------------------------------------------------------------------
*/
int
Backdoor_MonitorUnlockPage(Balloon *b, // IN
PPN64 ppn) // IN
{
int status;
uint32 ppn32 = (uint32)ppn;
/* Ensure PPN fits in 32-bits, i.e. guest memory is limited to 16TB. */
if (ppn32 != ppn) {
return BALLOON_ERROR_PPN_INVALID;
}
status = BackdoorCmd(BALLOON_BDOOR_CMD_UNLOCK, ppn32, 0, NULL,
&b->resetFlag);
/* update stats */
STATS_INC(b->stats.unlock);
if (status != BALLOON_SUCCESS) {
STATS_INC(b->stats.unlockFail);
}
return status;
}
/*
*----------------------------------------------------------------------
*
* Backdoor_MonitorLockPagesBatched --
*
* Balloon all PPNs listed in the batch page.
*
* Results:
* Returns BALLOON_SUCCESS if successful, otherwise error code.
*
* Side effects:
* None.
*
*----------------------------------------------------------------------
*/
int
Backdoor_MonitorLockPagesBatched(Balloon *b, // IN
PPN64 ppn, // IN
uint32 nPages) // IN
{
int status = BackdoorCmd(BALLOON_BDOOR_CMD_BATCHED_LOCK,
(size_t)ppn, nPages, NULL, &b->resetFlag);
/* update stats */
STATS_INC(b->stats.lock);
if (status != BALLOON_SUCCESS) {
STATS_INC(b->stats.lockFail);
}
return status;
}
/*
*----------------------------------------------------------------------
*
* Backdoor_MonitorUnlockPagesBatched --
*
* Unballoon all PPNs listed in the batch page.
*
* Results:
* Returns BALLOON_SUCCESS if successful, otherwise error code.
*
* Side effects:
* None.
*
*----------------------------------------------------------------------
*/
int
Backdoor_MonitorUnlockPagesBatched(Balloon *b, // IN
PPN64 ppn, // IN
uint32 nPages) // IN
{
int status = BackdoorCmd(BALLOON_BDOOR_CMD_BATCHED_UNLOCK,
(size_t)ppn, nPages, NULL, &b->resetFlag);
/* update stats */
STATS_INC(b->stats.unlock);
if (status != BALLOON_SUCCESS) {
STATS_INC(b->stats.unlockFail);
}
return status;
}
vmmemctl-only/vmmemctl_version.h 0000444 0000000 0000000 00000002251 12275350046 016123 0 ustar root root /*********************************************************
* Copyright (C) 2007 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* vmmemctl_version.h --
*
* Version definitions for the Linux memory ballooning driver.
*/
#ifndef _VMMEMCTL_VERSION_H_
#define _VMMEMCTL_VERSION_H_
#define VMMEMCTL_DRIVER_VERSION 1.2.1.2
#define VMMEMCTL_DRIVER_VERSION_COMMAS 1,2,1,2
#define VMMEMCTL_DRIVER_VERSION_STRING "1.2.1.2"
#endif /* _VMMEMCTL_VERSION_H_ */
vmmemctl-only/vmballoon.c 0000444 0000000 0000000 00000074004 12275350046 014523 0 ustar root root /*********************************************************
* Copyright (C) 2000 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* vmballoon.c --
*
* VMware physical memory management driver for Unix-ish
* (Linux, FreeBSD, Solaris, Mac OS) guests. The driver acts like
* a "balloon" that can be inflated to reclaim physical pages by
* reserving them in the guest and invalidating them in the
* monitor, freeing up the underlying machine pages so they can
* be allocated to other guests. The balloon can also be
* deflated to allow the guest to use more physical memory.
* Higher level policies can control the sizes of balloons in VMs
* in order to manage physical memory resources.
*/
#ifdef __cplusplus
extern "C" {
#endif
/*
* Includes
*/
#include "os.h"
#include "backdoor.h"
#include "backdoor_balloon.h"
#include "vmballoon.h"
/*
* Constants
*/
#ifndef NULL
#define NULL 0
#endif
/*
* When the guest is under memory pressure, use a reduced page allocation
* rate for the next several cycles.
*/
#define SLOW_PAGE_ALLOCATION_CYCLES 4
/* Maximum number of page allocations without yielding processor */
#define BALLOON_ALLOC_YIELD_THRESHOLD 1024
/*
* Balloon operations
*/
static int BalloonPageFree(Balloon *b);
static int BalloonAdjustSize(Balloon *b, uint32 target);
static void BalloonReset(Balloon *b);
static void BalloonAddPage(Balloon *b, uint16 idx, PageHandle page);
static void BalloonAddPageBatched(Balloon *b, uint16 idx, PageHandle page);
static int BalloonLock(Balloon *b, uint16 nPages);
static int BalloonLockBatched(Balloon *b, uint16 nPages);
static int BalloonUnlock(Balloon *b, uint16 nPages);
static int BalloonUnlockBatched(Balloon *b, uint16 nPages);
/*
* Globals
*/
static Balloon globalBalloon;
static const BalloonOps balloonOps = {
.addPage = BalloonAddPage,
.lock = BalloonLock,
.unlock = BalloonUnlock
};
static const struct BalloonOps balloonOpsBatched = {
.addPage = BalloonAddPageBatched,
.lock = BalloonLockBatched,
.unlock = BalloonUnlockBatched
};
/*
*----------------------------------------------------------------------
*
* Balloon_GetStats --
*
* Returns information about balloon state, including the current and
* target size, rates for allocating and freeing pages, and statistics
* about past activity.
*
* Results:
* None
*
* Side effects:
* None
*
*----------------------------------------------------------------------
*/
const BalloonStats *
Balloon_GetStats(void)
{
Balloon *b = &globalBalloon;
BalloonStats *stats = &b->stats;
/*
* Fill in additional information about size and rates, which is
* normally kept in the Balloon structure itself.
*/
stats->nPages = b->nPages;
stats->nPagesTarget = b->nPagesTarget;
stats->rateNoSleepAlloc = BALLOON_NOSLEEP_ALLOC_MAX;
stats->rateAlloc = b->rateAlloc;
stats->rateFree = b->rateFree;
return stats;
}
/*
*----------------------------------------------------------------------
*
* BalloonChunk_Create --
*
* Creates a new BalloonChunk object capable of tracking
* BALLOON_CHUNK_PAGES PAs.
*
* We do not bother to define two versions (NOSLEEP and CANSLEEP)
* of OS_Malloc because Chunk_Create does not require a new page
* often.
*
* Results:
* On success: initialized BalloonChunk
* On failure: NULL
*
* Side effects:
* None
*
*----------------------------------------------------------------------
*/
static BalloonChunk *
BalloonChunk_Create(void)
{
BalloonChunk *chunk;
/* allocate memory, fail if unable */
chunk = OS_Malloc(sizeof *chunk);
if (chunk == NULL) {
return NULL;
}
/* initialize */
OS_MemZero(chunk, sizeof *chunk);
DblLnkLst_Init(&chunk->node);
return chunk;
}
/*
*----------------------------------------------------------------------
*
* BalloonChunk_Destroy --
*
* Reclaims all storage associated with specified BalloonChunk.
*
* Results:
* None.
*
* Side effects:
* None.
*
*----------------------------------------------------------------------
*/
static void
BalloonChunk_Destroy(BalloonChunk *chunk) // IN
{
/* reclaim storage */
OS_Free(chunk, sizeof *chunk);
}
/*
*----------------------------------------------------------------------
*
* Balloon_Deallocate --
*
* Frees all allocated pages.
*
* Results:
* None.
*
* Side effects:
* None.
*
*----------------------------------------------------------------------
*/
static void
Balloon_Deallocate(Balloon *b) // IN
{
unsigned int cnt = 0;
/* free all pages, skipping monitor unlock */
while (b->nChunks > 0) {
(void) BalloonPageFree(b);
if (++cnt >= b->rateFree) {
cnt = 0;
OS_Yield();
}
}
/* Release the batch page */
if (b->batchPageMapping != MAPPING_INVALID) {
OS_UnmapPage(b->batchPageMapping);
b->batchPageMapping = MAPPING_INVALID;
b->batchPage = NULL;
}
if (b->pageHandle != PAGE_HANDLE_INVALID) {
OS_ReservedPageFree(b->pageHandle);
b->pageHandle = PAGE_HANDLE_INVALID;
}
}
/*
*----------------------------------------------------------------------
*
* BalloonInitBatching --
*
* Allocate and map the batch page.
*
* Results:
* BALLOON_SUCCESS or an error code in case of failure.
*
* Side effects:
* None.
*
*----------------------------------------------------------------------
*/
static int
BalloonInitBatching(Balloon *b) // IN
{
b->batchMaxPages = BALLOON_BATCH_MAX_PAGES;
b->pageHandle = OS_ReservedPageAlloc(BALLOON_PAGE_ALLOC_NOSLEEP);
if (b->pageHandle == PAGE_HANDLE_INVALID) {
return BALLOON_PAGE_ALLOC_FAILURE;
}
b->batchPageMapping = OS_MapPageHandle(b->pageHandle);
if (b->batchPageMapping == MAPPING_INVALID) {
OS_ReservedPageFree(b->pageHandle);
b->pageHandle = PAGE_HANDLE_INVALID;
return BALLOON_PAGE_ALLOC_FAILURE;
}
b->batchPage = OS_Mapping2Addr(b->batchPageMapping);
return BALLOON_SUCCESS;
}
/*
*----------------------------------------------------------------------
*
* BalloonReset --
*
* Resets balloon "b" to empty state. Frees all allocated pages
* and attempts to reset contact with the monitor.
*
* Results:
* None.
*
* Side effects:
* Schedules next execution of balloon timer handler.
*
*----------------------------------------------------------------------
*/
static void
BalloonReset(Balloon *b) // IN
{
int status;
/* free all pages, skipping monitor unlock */
Balloon_Deallocate(b);
status = Backdoor_MonitorStart(b, BALLOON_CAPABILITIES);
if (status != BALLOON_SUCCESS) {
return;
}
if ((b->hypervisorCapabilities & BALLOON_BATCHED_CMDS) != 0) {
status = BalloonInitBatching(b);
if (status != BALLOON_SUCCESS) {
/*
* We failed to initialize batching in the guest; inform
* the monitor by sending a null capability.
*
* The guest will retry initialization in one second.
*/
Backdoor_MonitorStart(b, 0);
return;
}
}
if ((b->hypervisorCapabilities & BALLOON_BATCHED_CMDS) != 0) {
b->balloonOps = &balloonOpsBatched;
} else if ((b->hypervisorCapabilities & BALLOON_BASIC_CMDS) != 0) {
b->balloonOps = &balloonOps;
b->batchMaxPages = 1;
}
/* clear flag */
b->resetFlag = FALSE;
/* report guest type */
(void) Backdoor_MonitorGuestType(b);
}
/*
*----------------------------------------------------------------------
*
* Balloon_QueryAndExecute --
*
* Contacts monitor via backdoor to obtain balloon size target,
* and starts adjusting balloon size to achieve target by allocating
* or deallocating pages. Resets balloon if requested by the monitor.
*
* Results:
* None.
*
* Side effects:
* None.
*
*----------------------------------------------------------------------
*/
void
Balloon_QueryAndExecute(void)
{
Balloon *b = &globalBalloon;
uint32 target = 0; // Silence compiler warning.
int status;
/* update stats */
STATS_INC(b->stats.timer);
/* reset, if specified */
if (b->resetFlag) {
BalloonReset(b);
}
/* contact monitor via backdoor */
status = Backdoor_MonitorGetTarget(b, &target);
/* decrement slowPageAllocationCycles counter */
if (b->slowPageAllocationCycles > 0) {
b->slowPageAllocationCycles--;
}
if (status == BALLOON_SUCCESS) {
/* update target, adjust size */
b->nPagesTarget = target;
(void) BalloonAdjustSize(b, target);
}
}
/*
*----------------------------------------------------------------------
*
* BalloonErrorPageStore --
*
* Attempt to add "page" to list of non-balloonable pages
* associated with "b".
*
* Results:
* On success: BALLOON_SUCCESS
* On failure: BALLOON_FAILURE (non-balloonable page list is already full)
*
* Side effects:
* None.
*
*----------------------------------------------------------------------
*/
static int
BalloonErrorPageStore(Balloon *b, // IN
PageHandle page) // IN
{
/* fail if list already full */
if (b->errors.pageCount >= BALLOON_ERROR_PAGES) {
return BALLOON_FAILURE;
}
/* add page to list */
b->errors.page[b->errors.pageCount++] = page;
STATS_INC(b->stats.primErrorPageAlloc);
return BALLOON_SUCCESS;
}
/*
*----------------------------------------------------------------------
*
* BalloonErrorPagesFree --
*
* Deallocates all pages on the list of non-balloonable pages
* associated with "b".
*
* Results:
* None.
*
* Side effects:
* None.
*
*----------------------------------------------------------------------
*/
static void
BalloonErrorPagesFree(Balloon *b) // IN
{
unsigned int i;
/* free all non-balloonable "error" pages */
for (i = 0; i < b->errors.pageCount; i++) {
OS_ReservedPageFree(b->errors.page[i]);
b->errors.page[i] = PAGE_HANDLE_INVALID;
STATS_INC(b->stats.primErrorPageFree);
}
b->errors.pageCount = 0;
}
/*
*----------------------------------------------------------------------
*
* BalloonGetChunk --
*
* Attempt to find a "chunk" with a free slot to store locked page.
* Try to allocate new chunk if all existing chunks are full.
*
* Results:
* Returns NULL on failure.
*
* Side effects:
* None.
*
*----------------------------------------------------------------------
*/
static BalloonChunk *
BalloonGetChunk(Balloon *b) // IN
{
BalloonChunk *chunk;
/* Get first chunk from the list */
if (DblLnkLst_IsLinked(&b->chunks)) {
chunk = DblLnkLst_Container(b->chunks.next, BalloonChunk, node);
if (chunk->pageCount < BALLOON_CHUNK_PAGES) {
/* This chunk has free slots, use it */
return chunk;
}
}
/* create new chunk */
chunk = BalloonChunk_Create();
if (chunk != NULL) {
DblLnkLst_LinkFirst(&b->chunks, &chunk->node);
/* update stats */
b->nChunks++;
}
return chunk;
}
/*
*----------------------------------------------------------------------
*
* BalloonGetChunkOrFallback --
*
* Attempt to find a "chunk" with a free slot to store locked page.
* If the allocation fails, use the previously allocated
* fallbackChunk.
*
* Results:
* A valid chunk.
*
* Side effects:
* None.
*
*----------------------------------------------------------------------
*/
static BalloonChunk *
BalloonGetChunkOrFallback(Balloon *b) // IN
{
BalloonChunk *chunk = BalloonGetChunk(b);
if (chunk == NULL) {
ASSERT(b->fallbackChunk != NULL);
chunk = b->fallbackChunk;
b->fallbackChunk = NULL;
DblLnkLst_LinkFirst(&b->chunks, &chunk->node);
b->nChunks++;
}
return chunk;
}
/*
*----------------------------------------------------------------------
*
* BalloonPageStore --
*
* Add "page" to the given "chunk".
*
* Results:
* None.
*
* Side effects:
* None.
*
*----------------------------------------------------------------------
*/
static void
BalloonPageStore(BalloonChunk *chunk, PageHandle page)
{
chunk->page[chunk->pageCount++] = page;
}
/*
*----------------------------------------------------------------------
*
* BalloonPageFree --
*
* Attempts to deallocate a physical page, deflating balloon "b".
* Never informs monitor.
*
* Results:
* Returns BALLOON_SUCCESS if successful, otherwise error code.
*
* Side effects:
* None.
*
*----------------------------------------------------------------------
*/
static int
BalloonPageFree(Balloon *b) // IN
{
BalloonChunk *chunk;
PageHandle page;
if (!DblLnkLst_IsLinked(&b->chunks)) {
/* The chunk list is empty, the balloon cannot be deflated */
return BALLOON_FAILURE;
}
chunk = DblLnkLst_Container(b->chunks.next, BalloonChunk, node);
/* deallocate last page */
page = chunk->page[--chunk->pageCount];
/* deallocate page */
OS_ReservedPageFree(page);
STATS_INC(b->stats.primFree);
/* update balloon size */
b->nPages--;
/* reclaim chunk, if empty */
if (chunk->pageCount == 0) {
/* destroy empty chunk */
DblLnkLst_Unlink1(&chunk->node);
BalloonChunk_Destroy(chunk);
/* update stats */
b->nChunks--;
}
return BALLOON_SUCCESS;
}
/*
*----------------------------------------------------------------------
*
* BalloonInflate --
*
* Attempts to allocate physical pages to inflate balloon.
*
* Results:
* Returns BALLOON_SUCCESS if successful, otherwise error code.
*
* Side effects:
* None.
*
*----------------------------------------------------------------------
*/
static int
BalloonInflate(Balloon *b, // IN
uint32 target) // IN
{
uint32 goal, nPages;
unsigned int rate;
unsigned int i;
unsigned int allocations = 0;
int status = 0;
BalloonPageAllocType allocType = BALLOON_PAGE_ALLOC_NOSLEEP;
/*
* First try NOSLEEP page allocations to inflate balloon.
*
* If we do not throttle nosleep allocations, we can drain all
* free pages in the guest quickly (if the balloon target is high).
* As a side-effect, draining free pages helps to inform (force)
* the guest to start swapping if balloon target is not met yet,
* which is a desired behavior. However, the balloon driver can consume
* all available CPU cycles if too many pages are allocated in a
* second. Therefore, we throttle nosleep allocations even when
* the guest is not under memory pressure. OTOH, if we have already
* predicted that the guest is under memory pressure, then we
* slow down page allocations considerably.
*/
goal = target - b->nPages;
/*
* Start with the NOSLEEP allocation rate, which may be higher
* than the sleeping allocation rate.
*/
rate = b->slowPageAllocationCycles ?
b->rateAlloc : BALLOON_NOSLEEP_ALLOC_MAX;
nPages = 0;
for (i = 0; i < goal; i++) {
PageHandle handle;
STATS_INC(b->stats.primAlloc[allocType]);
handle = OS_ReservedPageAlloc(allocType);
if (handle == PAGE_HANDLE_INVALID) {
STATS_INC(b->stats.primAllocFail[allocType]);
if (allocType == BALLOON_PAGE_ALLOC_CANSLEEP) {
/*
* CANSLEEP page allocation failed, so guest is under severe
* memory pressure. Quickly decrease allocation rate.
*/
b->rateAlloc = MAX(b->rateAlloc / 2, BALLOON_RATE_ALLOC_MIN);
break;
}
/*
* NOSLEEP page allocation failed, so the guest is under memory
* pressure. Slow down page allocations for the next few cycles so
* that the guest gets out of memory pressure. Also, if we have
* already allocated b->rateAlloc pages, pause; otherwise switch to
* sleeping allocations.
*/
b->slowPageAllocationCycles = SLOW_PAGE_ALLOCATION_CYCLES;
if (allocations >= b->rateAlloc) {
break;
}
allocType = BALLOON_PAGE_ALLOC_CANSLEEP;
/* Lower rate for sleeping allocations. */
rate = b->rateAlloc;
continue;
}
allocations++;
b->balloonOps->addPage(b, nPages++, handle);
if (nPages == b->batchMaxPages) {
status = b->balloonOps->lock(b, nPages);
nPages = 0;
if (status != BALLOON_SUCCESS) {
break;
}
}
if (allocations % BALLOON_ALLOC_YIELD_THRESHOLD == 0) {
OS_Yield();
}
if (allocations >= rate) {
/* We allocated enough pages, let's take a break. */
break;
}
}
if (nPages > 0) {
status = b->balloonOps->lock(b, nPages);
}
/*
* We reached our goal without failures, so try increasing the
* allocation rate.
*/
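/*
* For example, if the loop completed i == 3 * b->rateAlloc
* iterations without failure, mult is 3 and the rate grows by
* 3 * BALLOON_RATE_ALLOC_INC, capped at BALLOON_RATE_ALLOC_MAX.
*/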
if (status == BALLOON_SUCCESS && i >= b->rateAlloc) {
unsigned int mult = i / b->rateAlloc;
b->rateAlloc = MIN(b->rateAlloc + mult * BALLOON_RATE_ALLOC_INC,
BALLOON_RATE_ALLOC_MAX);
}
/* release non-balloonable pages and report success */
BalloonErrorPagesFree(b);
return BALLOON_SUCCESS;
}
/*
*----------------------------------------------------------------------
*
* BalloonLockBatched --
*
* Lock all the batched pages previously stored by
* BalloonAddPageBatched.
*
* Results:
* BALLOON_SUCCESS or an error code.
*
* Side effects:
* None.
*
*----------------------------------------------------------------------
*/
static int
BalloonLockBatched(Balloon *b, // IN
uint16 nPages) // IN
{
int status;
uint32 i;
uint32 nLockedPages;
PageHandle handle;
PPN64 batchPagePPN;
BalloonChunk *chunk = NULL;
batchPagePPN = PA_2_PPN(OS_ReservedPageGetPA(b->pageHandle));
/*
* Make sure that we will always have an available chunk before doing
* the LOCK_BATCHED call.
*/
ASSERT(b->batchMaxPages < BALLOON_CHUNK_PAGES);
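/*
* Pre-allocate the fallback chunk now so that
* BalloonGetChunkOrFallback() cannot fail inside the loop below;
* if it is never consumed, it is destroyed at "out".
*/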
b->fallbackChunk = BalloonChunk_Create();
if (b->fallbackChunk == NULL) {
status = BALLOON_PAGE_ALLOC_FAILURE;
} else {
status = Backdoor_MonitorLockPagesBatched(b, batchPagePPN, nPages);
}
if (status != BALLOON_SUCCESS) {
for (i = 0; i < nPages; i++) {
PA64 pa = Balloon_BatchGetPA(b->batchPage, i);
handle = OS_ReservedPageGetHandle(pa);
OS_ReservedPageFree(handle);
}
goto out;
}
nLockedPages = 0;
for (i = 0; i < nPages; i++) {
PA64 pa;
int error;
pa = Balloon_BatchGetPA(b->batchPage, i);
handle = OS_ReservedPageGetHandle(pa);
error = Balloon_BatchGetStatus(b->batchPage, i);
if (error != BALLOON_SUCCESS) {
switch (error) {
case BALLOON_ERROR_PPN_PINNED:
case BALLOON_ERROR_PPN_INVALID:
if (BalloonErrorPageStore(b, handle) == BALLOON_SUCCESS) {
break;
}
// Fallthrough.
case BALLOON_ERROR_RESET:
case BALLOON_ERROR_PPN_NOTNEEDED:
OS_ReservedPageFree(handle);
break;
default:
/*
* If we get here, there is definitely a bug in the driver that
* needs to be fixed; it is less clear whether PINNED and
* INVALID PPN errors should also be considered driver bugs.
*/
ASSERT(FALSE);
}
continue;
}
if (chunk == NULL) {
chunk = BalloonGetChunkOrFallback(b);
}
BalloonPageStore(chunk, handle);
if (chunk->pageCount == BALLOON_CHUNK_PAGES) {
chunk = NULL;
}
nLockedPages++;
}
b->nPages += nLockedPages;
out:
if (b->fallbackChunk != NULL) {
BalloonChunk_Destroy(b->fallbackChunk);
b->fallbackChunk = NULL;
}
return status;
}
/*
*----------------------------------------------------------------------
*
* BalloonUnlockBatched --
*
* Unlock all the batched pages previously stored by
* BalloonAddPageBatched.
*
* Results:
* BALLOON_SUCCESS or an error code.
*
* Side effects:
* None.
*
*----------------------------------------------------------------------
*/
static int
BalloonUnlockBatched(Balloon *b, // IN
uint16 nPages) // IN
{
uint32 i;
int status = BALLOON_SUCCESS;
uint32 nUnlockedPages;
PPN64 batchPagePPN;
BalloonChunk *chunk = NULL;
batchPagePPN = PA_2_PPN(OS_ReservedPageGetPA(b->pageHandle));
status = Backdoor_MonitorUnlockPagesBatched(b, batchPagePPN, nPages);
if (status != BALLOON_SUCCESS) {
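/*
* The monitor rejected the whole batch: put every page back on
* the chunk list so that it remains tracked as ballooned memory.
*/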
for (i = 0; i < nPages; i++) {
PA64 pa = Balloon_BatchGetPA(b->batchPage, i);
PageHandle handle = OS_ReservedPageGetHandle(pa);
chunk = BalloonGetChunkOrFallback(b);
BalloonPageStore(chunk, handle);
}
goto out;
}
nUnlockedPages = 0;
for (i = 0; i < nPages; i++) {
int error = Balloon_BatchGetStatus(b->batchPage, i);
PA64 pa = Balloon_BatchGetPA(b->batchPage, i);
PageHandle handle = OS_ReservedPageGetHandle(pa);
if (error != BALLOON_SUCCESS) {
chunk = BalloonGetChunkOrFallback(b);
BalloonPageStore(chunk, handle);
continue;
}
OS_ReservedPageFree(handle);
STATS_INC(b->stats.primFree);
nUnlockedPages++;
}
b->nPages -= nUnlockedPages;
out:
if (b->fallbackChunk != NULL) {
BalloonChunk_Destroy(b->fallbackChunk);
b->fallbackChunk = NULL;
}
return status;
}
/*
*----------------------------------------------------------------------
*
* BalloonAddPageBatched --
*
* Add a page to the batch page; it will be ballooned later.
*
* Results:
* Nothing.
*
* Side effects:
* None.
*
*----------------------------------------------------------------------
*/
static void
BalloonAddPageBatched(Balloon *b, // IN
uint16 idx, // IN
PageHandle page) // IN
{
PA64 pa = OS_ReservedPageGetPA(page);
Balloon_BatchSetPA(b->batchPage, idx, pa);
}
/*
*----------------------------------------------------------------------
*
* BalloonLock --
*
* Lock a page, previously stored with a call to BalloonAddPage,
* by notifying the monitor.
*
* Results:
* BALLOON_SUCCESS or an error code.
*
* Side effects:
* None.
*
*----------------------------------------------------------------------
*/
static int
BalloonLock(Balloon *b, // IN
uint16 nPages) // IN
{
PPN pagePPN;
BalloonChunk *chunk;
int status;
/* Get the chunk to store allocated page. */
chunk = BalloonGetChunk(b);
if (chunk == NULL) {
OS_ReservedPageFree(b->pageHandle);
status = BALLOON_PAGE_ALLOC_FAILURE;
goto out;
}
/* inform monitor via backdoor */
pagePPN = PA_2_PPN(OS_ReservedPageGetPA(b->pageHandle));
status = Backdoor_MonitorLockPage(b, pagePPN);
if (status != BALLOON_SUCCESS) {
int old_status = status;
if (status == BALLOON_ERROR_RESET ||
status == BALLOON_ERROR_PPN_NOTNEEDED) {
OS_ReservedPageFree(b->pageHandle);
goto out;
}
/* place on list of non-balloonable pages, retry allocation */
status = BalloonErrorPageStore(b, b->pageHandle);
if (status != BALLOON_SUCCESS) {
OS_ReservedPageFree(b->pageHandle);
goto out;
}
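/* The page is parked on the error list; report the original monitor error. */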
status = old_status;
goto out;
}
/* track allocated page */
BalloonPageStore(chunk, b->pageHandle);
/* update balloon size */
b->nPages++;
out:
b->pageHandle = PAGE_HANDLE_INVALID;
return status;
}
/*
*----------------------------------------------------------------------
*
* BalloonUnlock --
*
* Unlock a page, previously stored with a call to
* BalloonAddPage, by notifying the monitor.
*
* Results:
* BALLOON_SUCCESS or an error code.
*
* Side effects:
* None.
*
*----------------------------------------------------------------------
*/
static int
BalloonUnlock(Balloon *b, // IN
uint16 nPages) // IN
{
PPN pagePPN = PA_2_PPN(OS_ReservedPageGetPA(b->pageHandle));
int status = Backdoor_MonitorUnlockPage(b, pagePPN);
if (status != BALLOON_SUCCESS) {
BalloonChunk *chunk = BalloonGetChunkOrFallback(b);
BalloonPageStore(chunk, b->pageHandle);
goto out;
}
OS_ReservedPageFree(b->pageHandle);
STATS_INC(b->stats.primFree);
/* update balloon size */
b->nPages--;
out:
b->pageHandle = PAGE_HANDLE_INVALID;
if (b->fallbackChunk != NULL) {
BalloonChunk_Destroy(b->fallbackChunk);
b->fallbackChunk = NULL;
}
return status;
}
/*
*----------------------------------------------------------------------
*
* BalloonAddPage --
*
* Stage a page to be ballooned by the next lock operation. In the
* non-batched case only one page can be in flight at a time, so
* "idx" is unused.
*
* Results:
* Nothing.
*
* Side effects:
* None.
*
*----------------------------------------------------------------------
*/
static void
BalloonAddPage(Balloon *b, // IN
uint16 idx, // IN
PageHandle page) // IN
{
ASSERT(b->pageHandle == PAGE_HANDLE_INVALID);
b->pageHandle = page;
}
/*
*----------------------------------------------------------------------
*
* BalloonDeflate --
*
* Frees physical pages to deflate balloon.
*
* Results:
* Returns BALLOON_SUCCESS if successful, otherwise error code.
*
* Side effects:
* Frees guest pages and adapts the deallocation rate.
*
*----------------------------------------------------------------------
*/
static int
BalloonDeflate(Balloon *b, // IN
uint32 target) // IN
{
int status = BALLOON_SUCCESS;
uint32 goal, nPages;
BalloonChunk *chunk = NULL;
goal = b->nPages - target;
/* limit deallocation rate */
goal = MIN(goal, b->rateFree);
nPages = 0;
for ( ; goal > 0; goal--) {
PageHandle lockedHandle;
if (chunk == NULL) {
if (!DblLnkLst_IsLinked(&b->chunks)) {
/* The chunk list is empty; the balloon cannot be deflated */
status = BALLOON_FAILURE;
goto out;
}
chunk = DblLnkLst_Container(b->chunks.next, BalloonChunk, node);
}
lockedHandle = chunk->page[--chunk->pageCount];
if (!chunk->pageCount) {
DblLnkLst_Unlink1(&chunk->node);
/*
* Do not free the chunk; we may need it if the UNLOCK command fails.
*/
b->fallbackChunk = chunk;
b->nChunks--;
chunk = NULL;
}
b->balloonOps->addPage(b, nPages++, lockedHandle);
if (nPages == b->batchMaxPages) {
status = b->balloonOps->unlock(b, nPages);
nPages = 0;
if (status != BALLOON_SUCCESS) {
if (BALLOON_RATE_ADAPT) {
/* quickly decrease rate if error */
b->rateFree = MAX(b->rateFree / 2, BALLOON_RATE_FREE_MIN);
}
goto out;
}
}
}
if (nPages) {
status = b->balloonOps->unlock(b, nPages);
}
if (status == BALLOON_SUCCESS && BALLOON_RATE_ADAPT) {
/* slowly increase rate if no errors */
b->rateFree = MIN(b->rateFree + BALLOON_RATE_FREE_INC,
BALLOON_RATE_FREE_MAX);
}
out:
return status;
}
/*
*----------------------------------------------------------------------
*
* BalloonAdjustSize --
*
* Attempts to allocate or deallocate physical pages in order
* to reach desired "target" size for balloon "b".
*
* Results:
* Returns BALLOON_SUCCESS if successful, otherwise error code.
*
* Side effects:
* None.
*
*----------------------------------------------------------------------
*/
static int
BalloonAdjustSize(Balloon *b, // IN
uint32 target) // IN
{
if (b->nPages < target) {
return BalloonInflate(b, target);
} else if (b->nPages > target) {
return BalloonDeflate(b, target);
} else {
/* already at target */
return BALLOON_SUCCESS;
}
}
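/*
* Illustrative sketch (not part of the driver): one way a periodic
* service routine might drive the balloon toward the monitor's
* target using BalloonAdjustSize(). "Balloon_QueryTargetExample" is
* a hypothetical name; the real driver obtains the target through
* its own backdoor protocol.
*
*    static void
*    BalloonServiceExample(Balloon *b)
*    {
*       uint32 target;
*
*       if (Balloon_QueryTargetExample(b, &target) == BALLOON_SUCCESS) {
*          (void) BalloonAdjustSize(b, target);
*       }
*    }
*/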
/*
*----------------------------------------------------------------------
*
* Balloon_Init --
*
* Initializes state of the balloon.
*
* Results:
* Returns TRUE on success (always).
*
* Side effects:
* Initializes the global balloon state.
*
*----------------------------------------------------------------------
*/
Bool
Balloon_Init(BalloonGuest guestType)
{
Balloon *b = &globalBalloon;
DblLnkLst_Init(&b->chunks);
b->guestType = guestType;
/* initialize rates */
b->rateAlloc = BALLOON_RATE_ALLOC_MAX;
b->rateFree = BALLOON_RATE_FREE_MAX;
/* initialize reset flag */
b->resetFlag = TRUE;
b->hypervisorCapabilities = 0;
b->pageHandle = PAGE_HANDLE_INVALID;
b->batchPageMapping = MAPPING_INVALID;
b->batchPage = NULL;
return TRUE;
}
/*
*----------------------------------------------------------------------
*
* Balloon_Cleanup --
*
* Cleanup after ballooning.
*
* Results:
* None
*
* Side effects:
* None
*
*----------------------------------------------------------------------
*/
void
Balloon_Cleanup(void)
{
Balloon *b = &globalBalloon;
/*
* Deallocate all reserved memory and reset the connection with the
* monitor. Reset the connection before deallocating memory to avoid
* the potential for additional spurious resets caused by the guest
* touching deallocated pages.
*/
Backdoor_MonitorStart(b, BALLOON_CAPABILITIES);
Balloon_Deallocate(b);
}
#ifdef __cplusplus
}
#endif