Initial export.
git-svn-id: http://v8.googlecode.com/svn/trunk@2 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/ChangeLog b/ChangeLog
new file mode 100644
index 0000000..892e97a
--- /dev/null
+++ b/ChangeLog
@@ -0,0 +1,4 @@
+2008-07-03: Version 0.1.0 (125876)
+
+ Initial export.
+
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..2f61dc7
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,46 @@
+This license applies to all parts of V8 that are not externally
+maintained libraries. The externally maintained libraries used by V8
+are:
+
+ - Jscre, located under third_party/jscre. This code is copyrighted
+ by the University of Cambridge and Apple Inc. and released under a
+ 2-clause BSD license.
+
+ - Dtoa, located under third_party/dtoa. This code is copyrighted by
+ David M. Gay and released under an MIT license.
+
+ - Strongtalk assembler, the basis of the files assembler-arm-inl.h,
+ assembler-arm.cc, assembler-arm.h, assembler-ia32-inl.h,
+ assembler-ia32.cc, assembler-ia32.h, assembler.cc and assembler.h.
+ This code is copyrighted by Sun Microsystems Inc. and released
+ under a 3-clause BSD license.
+
+These libraries have their own licenses; we recommend you read them,
+as their terms may differ from the terms below.
+
+Copyright 2006-2008, Google Inc. All rights reserved.
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+ * Neither the name of Google Inc. nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/SConstruct b/SConstruct
new file mode 100644
index 0000000..81f2bf0
--- /dev/null
+++ b/SConstruct
@@ -0,0 +1,127 @@
+# Copyright 2008 Google Inc. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import platform
+import sys
+from os.path import join, dirname, abspath
+root_dir = dirname(File('SConstruct').rfile().abspath)
+sys.path.append(join(root_dir, 'tools'))
+import js2c
+
+
+def Abort(message):
+ print message
+ sys.exit(1)
+
+
+def GuessOS():
+ id = platform.system()
+ if id == 'Linux':
+ return 'linux'
+ elif id == 'Darwin':
+ return 'macos'
+ elif id == 'Windows':
+ return 'win32'
+ else:
+ Abort("Don't know how to build v8 for OS '%s'." % id)
+
+
+def GuessProcessor():
+ id = platform.machine()
+ if id.startswith('arm'):
+ return 'arm'
+ elif (not id) or id.startswith('x86'):
+ return 'ia32'
+ else:
+ Abort("Don't know how to build v8 for processor '%s'." % id)
+
+
+def GuessToolchain(os):
+ tools = Environment()['TOOLS']
+ if 'gcc' in tools:
+ if os == 'macos' and 'Kernel Version 8' in platform.version():
+ return 'gcc-darwin'
+ else:
+ return 'gcc'
+ elif 'msvc' in tools:
+ return 'msvc'
+ else:
+ tools = ', '.join(tools)
+ Abort("Don't know how to build v8 using these tools: %s" % tools)
+
+
+def GetOptions():
+ result = Options()
+ os_guess = GuessOS()
+ toolchain_guess = GuessToolchain(os_guess)
+ processor_guess = GuessProcessor()
+ result.Add('mode', 'debug or release', 'release')
+ result.Add('toolchain', 'the toolchain to use (gcc, gcc-darwin or msvc)', toolchain_guess)
+ result.Add('os', 'the os to build for (linux, macos or win32)', os_guess)
+ result.Add('processor', 'the processor to build for (arm or ia32)', processor_guess)
+ result.Add('snapshot', 'build using snapshots for faster start-up (on, off)', 'off')
+ result.Add('library', 'which type of library to produce (static, shared, default)', 'default')
+ return result
+
+
+def VerifyOptions(env):
+ if not env['mode'] in ['debug', 'release']:
+ Abort("Unknown build mode '%s'." % env['mode'])
+ if not env['toolchain'] in ['gcc', 'gcc-darwin', 'msvc']:
+ Abort("Unknown toolchain '%s'." % env['toolchain'])
+ if not env['os'] in ['linux', 'macos', 'win32']:
+ Abort("Unknown os '%s'." % env['os'])
+ if not env['processor'] in ['arm', 'ia32']:
+ Abort("Unknown processor '%s'." % env['processor'])
+ if not env['snapshot'] in ['on', 'off']:
+ Abort("Illegal value for option snapshot: '%s'." % env['snapshot'])
+ if not env['library'] in ['static', 'shared', 'default']:
+ Abort("Illegal value for option library: '%s'." % env['library'])
+
+
+def Start():
+ opts = GetOptions()
+ env = Environment(options=opts)
+ Help(opts.GenerateHelpText(env))
+ VerifyOptions(env)
+
+ os = env['os']
+ arch = env['processor']
+ toolchain = env['toolchain']
+ mode = env['mode']
+ use_snapshot = (env['snapshot'] == 'on')
+ library_type = env['library']
+
+ env.SConscript(
+ join('src', 'SConscript'),
+ build_dir=mode,
+ exports='toolchain arch os mode use_snapshot library_type',
+ duplicate=False
+ )
+
+
+Start()
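+
+# A sketch of how the options above are supplied on the command line
+# (illustrative values; they must be among those accepted by
+# VerifyOptions):
+#
+#   scons mode=debug toolchain=gcc os=linux processor=ia32 snapshot=off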
diff --git a/public/debug.h b/public/debug.h
new file mode 100644
index 0000000..fce8eda
--- /dev/null
+++ b/public/debug.h
@@ -0,0 +1,114 @@
+// Copyright 2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef _V8_DEBUG
+#define _V8_DEBUG
+
+#include "v8.h"
+
+#if defined(__GNUC__) && (__GNUC__ >= 4)
+#define EXPORT __attribute__((visibility("default")))
+#else
+#define EXPORT
+#endif
+
+/**
+ * Debugger support for the V8 JavaScript engine.
+ */
+namespace v8 {
+
+// Debug events which can occur in the V8 JavaScript engine.
+enum DebugEvent {
+ Break = 1,
+ Exception = 2,
+ NewFunction = 3,
+ BeforeCompile = 4,
+ AfterCompile = 5,
+ PendingRequestProcessed = 6
+};
+
+
+/**
+ * Debug event callback function.
+ *
+ * \param event the debug event that occurred (from the DebugEvent
+ * enumeration)
+ * \param exec_state execution state (JavaScript object)
+ * \param event_data event specific data (JavaScript object)
+ * \param data value passed by the user to AddDebugEventListener
+ */
+typedef void (*DebugEventCallback)(DebugEvent event,
+ Handle<Object> exec_state,
+ Handle<Object> event_data,
+ Handle<Value> data);
+
+
+/**
+ * Debug message callback function.
+ *
+ * \param message the debug message
+ * \param length length of the message
+ */
+typedef void (*DebugMessageHandler)(const uint16_t* message, int length,
+ void* data);
+
+
+class EXPORT Debug {
+ public:
+ // Add a C debug event listener.
+ static bool AddDebugEventListener(DebugEventCallback that,
+ Handle<Value> data = Handle<Value>());
+
+ // Add a JavaScript debug event listener.
+ static bool AddDebugEventListener(v8::Handle<v8::Function> that,
+ Handle<Value> data = Handle<Value>());
+
+ // Remove a C debug event listener.
+ static void RemoveDebugEventListener(DebugEventCallback that);
+
+ // Remove a JavaScript debug event listener.
+ static void RemoveDebugEventListener(v8::Handle<v8::Function> that);
+
+ // Generate a stack dump.
+ static void StackDump();
+
+ // Break execution of JavaScript.
+ static void DebugBreak();
+
+ // Message based interface. The message protocol is JSON.
+ static void SetMessageHandler(DebugMessageHandler handler, void* data = NULL);
+ static void SendCommand(const uint16_t* command, int length);
+};
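+
+// A minimal usage sketch (the listener name is illustrative): a C
+// listener matching DebugEventCallback is registered with the Debug
+// class and invoked for each debug event.
+//
+//   void MyDebugListener(DebugEvent event,
+//                        Handle<Object> exec_state,
+//                        Handle<Object> event_data,
+//                        Handle<Value> data) {
+//     if (event == Break) {
+//       // Inspect exec_state here, then return to resume execution.
+//     }
+//   }
+//
+//   Debug::AddDebugEventListener(MyDebugListener);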
+
+
+} // namespace v8
+
+
+#undef EXPORT
+
+
+#endif // _V8_DEBUG
diff --git a/public/v8.h b/public/v8.h
new file mode 100644
index 0000000..891cb2e
--- /dev/null
+++ b/public/v8.h
@@ -0,0 +1,2086 @@
+// Copyright 2007-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/** \mainpage V8 API Reference Guide
+
+ Add text to introduce,
+
+ point back to code.google.com/apis/v8/index.html
+
+ etc etc etc
+ */
+#ifndef _V8
+#define _V8
+
+#include <stdio.h>
+
+#ifdef _WIN32
+typedef int int32_t;
+typedef unsigned int uint32_t;
+typedef unsigned short uint16_t;
+typedef long long int64_t;
+#else
+#include <stdint.h>
+#endif
+
+/**
+ * The v8 javascript engine.
+ */
+namespace v8 {
+
+class Context;
+class String;
+class Value;
+class Utils;
+class Number;
+class Object;
+class Array;
+class Int32;
+class Uint32;
+class External;
+class Primitive;
+class Boolean;
+class Integer;
+class Function;
+class Date;
+class ImplementationUtilities;
+class Signature;
+template <class T> class Handle;
+template <class T> class Local;
+template <class T> class Persistent;
+class FunctionTemplate;
+class ObjectTemplate;
+class Data;
+
+
+// --- W e a k H a n d l e s
+
+
+/**
+ * A weak reference callback function.
+ *
+ * \param object the weak global object to be reclaimed by the garbage collector
+ * \param parameter the value passed in when making the weak global object
+ */
+typedef void (*WeakReferenceCallback)(Persistent<Object> object,
+ void* parameter);
+
+
+// --- H a n d l e s ---
+
+#define TYPE_CHECK(T, S) \
+ while (false) { \
+ *(static_cast<T**>(0)) = static_cast<S*>(0); \
+ }
+
+/**
+ * An object reference managed by the v8 garbage collector.
+ *
+ * All objects returned from v8 have to be tracked by the garbage
+ * collector so that it knows that the objects are still alive. Also,
+ * because the garbage collector may move objects, it is unsafe to
+ * point directly to an object. Instead, all objects are stored in
+ * handles which are known by the garbage collector and updated
+ * whenever an object moves. Handles should always be passed by value
+ * (except in cases like out-parameters) and they should never be
+ * allocated on the heap.
+ *
+ * There are two types of handles: local and persistent handles.
+ * Local handles are light-weight and transient and typically used in
+ * local operations. They are managed by HandleScopes. Persistent
+ * handles can be used when storing objects across several independent
+ * operations and have to be explicitly deallocated when they're no
+ * longer used.
+ *
+ * It is safe to extract the object stored in the handle by
+ * dereferencing the handle (for instance, to extract the Object* from
+ * a Handle<Object>); the value will still be governed by a handle
+ * behind the scenes and the same rules apply to these values as to
+ * their handles.
+ */
+template <class T> class Handle {
+ public:
+
+ /**
+ * Creates an empty handle.
+ */
+ Handle();
+
+ /**
+ * Creates a new handle for the specified value.
+ */
+ explicit Handle(T* val) : val_(val) { }
+
+ /**
+ * Creates a handle for the contents of the specified handle. This
+ * constructor allows you to pass handles as arguments by value and
+ * assign between handles. However, if you try to assign between
+ * incompatible handles, for instance from a Handle<String> to a
+ * Handle<Number>, it will cause a compile-time error. Assigning
+ * between compatible handles, for instance assigning a
+ * Handle<String> to a variable declared as Handle<Value>, is legal
+ * because String is a subclass of Value.
+ */
+ template <class S> inline Handle(Handle<S> that)
+ : val_(reinterpret_cast<T*>(*that)) {
+ /**
+ * This check fails when trying to convert between incompatible
+ * handles. For example, converting from a Handle<String> to a
+ * Handle<Number>.
+ */
+ TYPE_CHECK(T, S);
+ }
+
+ /**
+ * Returns true if the handle is empty.
+ */
+ bool IsEmpty() { return val_ == 0; }
+
+ T* operator->();
+
+ T* operator*();
+
+ /**
+ * Sets the handle to be empty. IsEmpty() will then return true.
+ */
+ void Clear() { this->val_ = 0; }
+
+ /**
+ * Checks whether two handles are the same.
+ * Returns true if both are empty, or if the objects
+ * to which they refer are identical.
+ * The handles' references are not checked.
+ */
+ template <class S> bool operator==(Handle<S> that) {
+ void** a = reinterpret_cast<void**>(**this);
+ void** b = reinterpret_cast<void**>(*that);
+ if (a == 0) return b == 0;
+ if (b == 0) return false;
+ return *a == *b;
+ }
+
+ /**
+ * Checks whether two handles are different.
+ * Returns true if only one of the handles is empty, or if
+ * the objects to which they refer are different.
+ * The handles' references are not checked.
+ */
+ template <class S> bool operator!=(Handle<S> that) {
+ return !operator==(that);
+ }
+
+ template <class S> static inline Handle<T> Cast(Handle<S> that) {
+ if (that.IsEmpty()) return Handle<T>();
+ return Handle<T>(T::Cast(*that));
+ }
+
+ private:
+ T* val_;
+};
+
+
+/**
+ * A light-weight stack-allocated object handle. All operations
+ * that return objects from within v8 return them in local handles. They
+ * are created within HandleScopes, and all local handles allocated within a
+ * handle scope are destroyed when the handle scope is destroyed. Hence it
+ * is not necessary to explicitly deallocate local handles.
+ */
+template <class T> class Local : public Handle<T> {
+ public:
+ Local();
+ template <class S> inline Local(Local<S> that)
+ : Handle<T>(reinterpret_cast<T*>(*that)) {
+ /**
+ * This check fails when trying to convert between incompatible
+ * handles. For example, converting from a Handle<String> to a
+ * Handle<Number>.
+ */
+ TYPE_CHECK(T, S);
+ }
+ template <class S> inline Local(S* that) : Handle<T>(that) { }
+ template <class S> static inline Local<T> Cast(Local<S> that) {
+ if (that.IsEmpty()) return Local<T>();
+ return Local<T>(T::Cast(*that));
+ }
+
+ /** Create a local handle for the content of another handle.
+ * The referee is kept alive by the local handle even when
+ * the original handle is destroyed/disposed.
+ */
+ static Local<T> New(Handle<T> that);
+};
+
+
+/**
+ * An object reference that is independent of any handle scope. Where
+ * a Local handle only lives as long as the HandleScope where it was
+ * allocated, a Persistent handle remains valid until it is explicitly
+ * disposed.
+ *
+ * A persistent handle contains a reference to a storage cell within
+ * the v8 engine which holds an object value and which is updated by
+ * the garbage collector whenever the object is moved. A new storage
+ * cell can be created using Persistent::New and existing handles can
+ * be disposed using Persistent::Dispose. Since persistent handles
+ * are passed by value you may have many persistent handle objects
+ * that point to the same storage cell. For instance, if you pass a
+ * persistent handle as an argument to a function you will not get two
+ * different storage cells but rather two references to the same
+ * storage cell.
+ */
+template <class T> class Persistent : public Handle<T> {
+ public:
+
+ /**
+ * Creates an empty persistent handle that doesn't point to any
+ * storage cell.
+ */
+ Persistent();
+
+ /**
+ * Creates a persistent handle for the same storage cell as the
+ * specified handle. This constructor allows you to pass persistent
+ * handles as arguments by value and to assign between persistent
+ * handles. However, if you try to assign between incompatible
+ * persistent handles, for instance from a Persistent<String> to a
+ * Persistent<Number>, it will cause a compile-time error. Assigning
+ * between compatible persistent handles, for instance assigning a
+ * Persistent<String> to a variable declared as Persistent<Value>,
+ * is legal because String is a subclass of Value.
+ */
+ template <class S> inline Persistent(Persistent<S> that)
+ : Handle<T>(reinterpret_cast<T*>(*that)) {
+ /**
+ * This check fails when trying to convert between incompatible
+ * handles. For example, converting from a Handle<String> to a
+ * Handle<Number>.
+ */
+ TYPE_CHECK(T, S);
+ }
+
+ template <class S> inline Persistent(S* that) : Handle<T>(that) { }
+
+ template <class S> explicit inline Persistent(Handle<S> that)
+ : Handle<T>(*that) { }
+
+ template <class S> static inline Persistent<T> Cast(Persistent<S> that) {
+ if (that.IsEmpty()) return Persistent<T>();
+ return Persistent<T>(T::Cast(*that));
+ }
+
+ /**
+ * Creates a new persistent handle for an existing (local or
+ * persistent) handle.
+ */
+ static Persistent<T> New(Handle<T> that);
+
+ /**
+ * Releases the storage cell referenced by this persistent handle.
+ * Does not remove the reference to the cell from any handles.
+ * This handle's reference, and any other references to the storage
+ * cell, remain and IsEmpty will still return false.
+ */
+ void Dispose();
+
+ /**
+ * Make the reference to this object weak. When only weak handles
+ * refer to the object, the garbage collector will perform a
+ * callback to the given V8::WeakReferenceCallback function, passing
+ * it the object reference and the given parameters.
+ */
+ void MakeWeak(void* parameters, WeakReferenceCallback callback);
+
+ /** Clears the weak reference to this object.*/
+ void ClearWeak();
+
+ /**
+ * Checks if the handle holds the only reference to an object.
+ */
+ bool IsNearDeath();
+
+ /**
+ * Returns true if the handle's reference is weak.
+ */
+ bool IsWeak();
+
+ private:
+ friend class ImplementationUtilities;
+ friend class ObjectTemplate;
+};
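+
+// A small usage sketch (variable names are illustrative): a persistent
+// handle created from a local handle outlives the enclosing
+// HandleScope and must be disposed explicitly when no longer needed.
+//
+//   Persistent<Object> wrapper = Persistent<Object>::New(local_object);
+//   ...
+//   wrapper.Dispose();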
+
+
+/**
+ * A stack-allocated class that governs a number of local handles.
+ * After a handle scope has been created, all local handles will be
+ * allocated within that handle scope until either the handle scope is
+ * deleted or another handle scope is created. If there is already a
+ * handle scope and a new one is created, all allocations will take
+ * place in the new handle scope until that is deleted. After that,
+ * new handles will again be allocated in the original handle scope.
+ *
+ * After the handle scope of a local handle has been deleted the
+ * garbage collector will no longer track the object stored in the
+ * handle and may deallocate it. The behavior of accessing a handle
+ * for which the handle scope has been deleted is undefined.
+ */
+class HandleScope {
+ public:
+ HandleScope() : previous_(current_), is_closed_(false) {
+ current_.extensions = 0;
+ }
+
+ ~HandleScope() {
+ // TODO(1245391): In a perfect world, there would be a way of not
+ // having to check for explicitly closed scopes, maybe through
+ // subclassing HandleScope?
+ if (!is_closed_) RestorePreviousState();
+ }
+
+ /**
+ * TODO(1245391): Consider introducing a subclass for this.
+ * Closes the handle scope and returns the value as a handle in the
+ * previous scope, which is the new current scope after the call.
+ */
+ template <class T> Local<T> Close(Handle<T> value);
+
+ /**
+ * Counts the number of allocated handles.
+ */
+ static int NumberOfHandles();
+
+ /**
+ * Creates a new handle with the given value.
+ */
+ static void** CreateHandle(void* value);
+
+ private:
+ // Make it impossible to create heap-allocated or illegal handle
+ // scopes by disallowing certain operations.
+ HandleScope(const HandleScope&);
+ void operator=(const HandleScope&);
+ void* operator new(size_t size);
+ void operator delete(void*, size_t);
+
+ class Data {
+ public:
+ int extensions;
+ void** next;
+ void** limit;
+ inline void Initialize() {
+ extensions = -1;
+ next = limit = NULL;
+ }
+ };
+
+ static Data current_;
+ const Data previous_;
+
+ /**
+ * Re-establishes the previous scope state. Should not be called for
+ * any other scope than the current scope and not more than once.
+ */
+ void RestorePreviousState() {
+ if (current_.extensions > 0) DeleteExtensions();
+ current_ = previous_;
+#ifdef DEBUG
+ ZapRange(current_.next, current_.limit);
+#endif
+ }
+
+ // TODO(1245391): Consider creating a subclass for this.
+ bool is_closed_;
+ void** RawClose(void** value);
+
+ /** Deallocates any extensions used by the current scope.*/
+ static void DeleteExtensions();
+
+#ifdef DEBUG
+ // Zaps the handles in the half-open interval [start, end).
+ static void ZapRange(void** start, void** end);
+#endif
+
+ friend class ImplementationUtilities;
+};
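+
+// A minimal sketch of local handle allocation (assumes V8 has been
+// initialized and a context has been entered):
+//
+//   {
+//     HandleScope scope;
+//     Local<String> name = String::New("example");
+//     // 'name' is released when 'scope' is destroyed.
+//   }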
+
+
+// --- S p e c i a l o b j e c t s ---
+
+
+/**
+ * The superclass of values and API object templates.
+ */
+class Data {
+ private:
+ Data();
+};
+
+
+/**
+ * Pre-compilation data that can be associated with a script. This
+ * data can be calculated for a script in advance of actually
+ * compiling it, and stored between compilations. When script data
+ * is given to the compile method compilation will be faster.
+ */
+class ScriptData {
+ public:
+ virtual ~ScriptData() { }
+ static ScriptData* PreCompile(const char* input, int length);
+ static ScriptData* New(unsigned* data, int length);
+
+ virtual int Length() = 0;
+ virtual unsigned* Data() = 0;
+};
+
+
+/**
+ * The origin, within a file, of a script.
+ */
+class ScriptOrigin {
+ public:
+ ScriptOrigin(Handle<String> resource_name,
+ Handle<Integer> resource_line_offset = Handle<Integer>(),
+ Handle<Integer> resource_column_offset = Handle<Integer>())
+ : resource_name_(resource_name),
+ resource_line_offset_(resource_line_offset),
+ resource_column_offset_(resource_column_offset) { }
+ inline Handle<String> ResourceName();
+ inline Handle<Integer> ResourceLineOffset();
+ inline Handle<Integer> ResourceColumnOffset();
+ private:
+ Handle<String> resource_name_;
+ Handle<Integer> resource_line_offset_;
+ Handle<Integer> resource_column_offset_;
+};
+
+
+/**
+ * A compiled javascript script.
+ */
+class Script {
+ public:
+
+ /**
+ * Compiles the specified script. The ScriptOrigin* and ScriptData*
+ * parameters are owned by the caller of Script::Compile. No
+ * references to these objects are kept after compilation finishes.
+ */
+ static Local<Script> Compile(Handle<String> source,
+ ScriptOrigin* origin = NULL,
+ ScriptData* pre_data = NULL);
+
+ Local<Value> Run();
+};
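+
+// A sketch of compiling and running a script (the resource name and
+// source text are illustrative; assumes a HandleScope and an entered
+// context):
+//
+//   ScriptOrigin origin(String::New("example.js"));
+//   Local<Script> script = Script::Compile(String::New("1 + 2"), &origin);
+//   Local<Value> result = script->Run();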
+
+
+/**
+ * An error message.
+ */
+class Message {
+ public:
+ Local<String> Get();
+ Local<Value> GetSourceLine();
+
+ // TODO(1241256): Rewrite (or remove) this method. We don't want to
+ // deal with ownership of the returned string and we want to use
+ // javascript data structures exclusively.
+ char* GetUnderline(char* source_line, char underline_char);
+
+ Handle<String> GetScriptResourceName();
+
+ // TODO(1240903): Remove this when no longer used in WebKit V8
+ // bindings.
+ Handle<Value> GetSourceData();
+
+ int GetLineNumber();
+
+ // TODO(1245381): Print to a string instead of on a FILE.
+ static void PrintCurrentStackTrace(FILE* out);
+};
+
+
+// --- V a l u e ---
+
+
+/**
+ * The superclass of all javascript values and objects.
+ */
+class Value : public Data {
+ public:
+
+ /**
+ * Returns true if this value is the undefined value. See ECMA-262
+ * 4.3.10.
+ */
+ bool IsUndefined();
+
+ /**
+ * Returns true if this value is the null value. See ECMA-262
+ * 4.3.11.
+ */
+ bool IsNull();
+
+ /**
+ * Returns true if this value is true.
+ */
+ bool IsTrue();
+
+ /**
+ * Returns true if this value is false.
+ */
+ bool IsFalse();
+
+ /**
+ * Returns true if this value is an instance of the String type.
+ * See ECMA-262 8.4.
+ */
+ bool IsString();
+
+ /**
+ * Returns true if this value is a function.
+ */
+ bool IsFunction();
+
+ /**
+ * Returns true if this value is an array.
+ */
+ bool IsArray();
+
+ /**
+ * Returns true if this value is an object.
+ */
+ bool IsObject();
+
+ /**
+ * Returns true if this value is a boolean.
+ */
+ bool IsBoolean();
+
+ /**
+ * Returns true if this value is a number.
+ */
+ bool IsNumber();
+
+ /**
+ * Returns true if this value is external.
+ */
+ bool IsExternal();
+
+ /**
+ * Returns true if this value is a 32-bit signed integer.
+ */
+ bool IsInt32();
+
+ Local<Boolean> ToBoolean();
+ Local<Number> ToNumber();
+ Local<String> ToString();
+ Local<String> ToDetailString();
+ Local<Object> ToObject();
+ Local<Integer> ToInteger();
+ Local<Uint32> ToUint32();
+ Local<Int32> ToInt32();
+
+ /**
+ * Attempts to convert a string to an array index.
+ * Returns an empty handle if the conversion fails.
+ */
+ Local<Uint32> ToArrayIndex();
+
+ bool BooleanValue();
+ double NumberValue();
+ int64_t IntegerValue();
+ uint32_t Uint32Value();
+ int32_t Int32Value();
+
+ /** JS == */
+ bool Equals(Handle<Value> that);
+ bool StrictEquals(Handle<Value> that);
+};
+
+
+/**
+ * The superclass of primitive values. See ECMA-262 4.3.2.
+ */
+class Primitive : public Value { };
+
+
+/**
+ * A primitive boolean value (ECMA-262, 4.3.14). Either the true
+ * or false value.
+ */
+class Boolean : public Primitive {
+ public:
+ bool Value();
+ static inline Handle<Boolean> New(bool value);
+};
+
+
+/**
+ * A javascript string value (ECMA-262, 4.3.17).
+ */
+class String : public Primitive {
+ public:
+ int Length();
+
+ /**
+ * Write the contents of the string to an external buffer.
+ * If no arguments are given, expects that the buffer is large
+ * enough to hold the entire string and the NULL terminator. Copies
+ * the contents of the string and the NULL terminator into the
+ * buffer.
+ *
+ * Copies up to length characters into the output buffer.
+ * Only null-terminates if there is enough space in the buffer.
+ *
+ * \param buffer The buffer into which the string will be copied.
+ * \param start The starting position within the string at which
+ * copying begins.
+ * \param length The number of bytes to copy from the string.
+ * \return The number of characters copied to the buffer
+ * excluding the NULL terminator.
+ */
+ int Write(uint16_t* buffer, int start = 0, int length = -1); // UTF-16
+ int WriteAscii(char* buffer,
+ int start = 0,
+ int length = -1); // literally ascii
+
+ /**
+ * Returns true if the string is external
+ */
+ bool IsExternal();
+
+ /**
+ * Returns true if the string is both external and ascii
+ */
+ bool IsExternalAscii();
+ /**
+ * An ExternalStringResource is a wrapper around a two-byte string
+ * buffer that resides outside V8's heap. Implement an
+ * ExternalStringResource to manage the life cycle of the underlying
+ * buffer.
+ */
+ class ExternalStringResource {
+ public:
+ /**
+ * Override the destructor to manage the life cycle of the underlying
+ * buffer.
+ */
+ virtual ~ExternalStringResource() {}
+ /** The string data from the underlying buffer.*/
+ virtual const uint16_t* data() const = 0;
+ /** The length of the string. That is, the number of two-byte characters.*/
+ virtual size_t length() const = 0;
+ protected:
+ ExternalStringResource() {}
+ private:
+ ExternalStringResource(const ExternalStringResource&);
+ void operator=(const ExternalStringResource&);
+ };
+
+ /**
+ * An ExternalAsciiStringResource is a wrapper around an ascii
+ * string buffer that resides outside V8's heap. Implement an
+ * ExternalAsciiStringResource to manage the life cycle of the
+ * underlying buffer.
+ */
+
+ class ExternalAsciiStringResource {
+ public:
+ /**
+ * Override the destructor to manage the life cycle of the underlying
+ * buffer.
+ */
+ virtual ~ExternalAsciiStringResource() {}
+ /** The string data from the underlying buffer.*/
+ virtual const char* data() const = 0;
+ /** The number of ascii characters in the string.*/
+ virtual size_t length() const = 0;
+ protected:
+ ExternalAsciiStringResource() {}
+ private:
+ ExternalAsciiStringResource(const ExternalAsciiStringResource&);
+ void operator=(const ExternalAsciiStringResource&);
+ };
+
+ /**
+ * Get the ExternalStringResource for an external string. Only
+ * valid if IsExternal() returns true.
+ */
+ ExternalStringResource* GetExternalStringResource();
+
+ /**
+ * Get the ExternalAsciiStringResource for an external ascii string.
+ * Only valid if IsExternalAscii() returns true.
+ */
+ ExternalAsciiStringResource* GetExternalAsciiStringResource();
+
+ static String* Cast(v8::Value* obj);
+
+ /**
+ * Allocates a new string from either utf-8 encoded or ascii data.
+ * The second parameter 'length' gives the buffer length.
+ * If the data is utf-8 encoded, the caller must
+ * be careful to supply the length parameter.
+ * If it is not given, the function calls
+ * 'strlen' to determine the buffer length, which might be
+ * wrong if a '\0' character is embedded in 'data'.
+ */
+ static Local<String> New(const char* data, int length = -1);
+
+ /** Allocates a new string from utf16 data.*/
+ static Local<String> New(const uint16_t* data, int length = -1);
+
+ /** Creates a symbol, or returns the existing one if it already exists.*/
+ static Local<String> NewSymbol(const char* data, int length = -1);
+
+ /**
+ * Creates a new external string using the data defined in the given
+ * resource. The resource is deleted when the external string is no
+ * longer live on V8's heap. The caller of this function should not
+ * delete or modify the resource. Neither should the underlying buffer be
+ * deallocated or modified except through the destructor of the
+ * external string resource.
+ */
+ static Local<String> NewExternal(ExternalStringResource* resource);
+
+ /**
+ * Creates a new external string using the ascii data defined in the given
+ * resource. The resource is deleted when the external string is no
+ * longer live on V8's heap. The caller of this function should not
+ * delete or modify the resource. Neither should the underlying buffer be
+ * deallocated or modified except through the destructor of the
+ * external string resource.
+ */
+ static Local<String> NewExternal(ExternalAsciiStringResource* resource);
+
+ /** Creates an undetectable string from the supplied character data.*/
+ static Local<String> NewUndetectable(const char* data, int length = -1);
+
+ /** Creates an undetectable string from the supplied two-byte character data.*/
+ static Local<String> NewUndetectable(const uint16_t* data, int length = -1);
+
+ /**
+ * Converts an object to an ascii string.
+ * Useful if you want to print the object.
+ */
+ class AsciiValue {
+ public:
+ explicit AsciiValue(Handle<v8::Value> obj);
+ ~AsciiValue();
+ char* operator*() { return str_; }
+ private:
+ char* str_;
+ };
+
+ /**
+ * Converts an object to a two-byte string.
+ */
+ class Value {
+ public:
+ explicit Value(Handle<v8::Value> obj);
+ ~Value();
+ uint16_t* operator*() { return str_; }
+ private:
+ uint16_t* str_;
+ };
+};
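+
+// A sketch of converting a value to printable ascii (assumes 'result'
+// is a non-empty handle obtained elsewhere):
+//
+//   String::AsciiValue ascii(result);
+//   printf("%s\n", *ascii);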
+
+
+/**
+ * A javascript number value (ECMA-262, 4.3.20)
+ */
+class Number : public Primitive {
+ public:
+ double Value();
+ static Local<Number> New(double value);
+ static Number* Cast(v8::Value* obj);
+ private:
+ Number();
+};
+
+
+/**
+ * A javascript value representing a signed integer.
+ */
+class Integer : public Number {
+ public:
+ static Local<Integer> New(int32_t value);
+ int64_t Value();
+ static Integer* Cast(v8::Value* obj);
+ private:
+ Integer();
+};
+
+
+/**
+ * A javascript value representing a 32-bit signed integer.
+ */
+class Int32 : public Integer {
+ public:
+ int32_t Value();
+ private:
+ Int32();
+};
+
+
+/**
+ * A javascript value representing a 32-bit unsigned integer.
+ */
+class Uint32 : public Integer {
+ public:
+ uint32_t Value();
+ private:
+ Uint32();
+};
+
+
+/**
+ * An instance of the built-in Date constructor (ECMA-262, 15.9).
+ */
+class Date : public Value {
+ public:
+ static Local<Value> New(double time);
+};
+
+
+enum PropertyAttribute {
+ None = 0,
+ ReadOnly = 1 << 0,
+ DontEnum = 1 << 1,
+ DontDelete = 1 << 2
+};
+
+/**
+ * A javascript object (ECMA-262, 4.3.3)
+ */
+class Object : public Value {
+ public:
+ bool Set(Handle<Value> key,
+ Handle<Value> value,
+ PropertyAttribute attribs = None);
+ Local<Value> Get(Handle<Value> key);
+
+ // TODO(1245389): Replace the type-specific versions of these
+ // functions with generic ones that accept a Handle<Value> key.
+ bool Has(Handle<String> key);
+ bool Delete(Handle<String> key);
+ bool Has(uint32_t index);
+ bool Delete(uint32_t index);
+
+ /**
+ * Get the prototype object. This does not skip objects marked to
+ * be skipped by __proto__ and it does not consult the security
+ * handler.
+ */
+ Local<Value> GetPrototype();
+
+ /**
+ * Call builtin Object.prototype.toString on this object.
+ * This is different from Value::ToString(), which may call a
+ * user-defined toString function. This one does not.
+ */
+ Local<String> ObjectProtoToString();
+
+ // TODO(1245384): Naming, consistent.
+ int InternalFieldCount();
+ Local<Value> GetInternal(int index);
+ void SetInternal(int index, Handle<Value> value);
+
+ // Testers for local properties.
+ bool HasRealNamedProperty(Handle<String> key);
+ bool HasRealIndexedProperty(uint32_t index);
+ bool HasRealNamedCallbackProperty(Handle<String> key);
+
+ /**
+ * If result.IsEmpty() no real property was located in the prototype chain.
+ * This means interceptors in the prototype chain are not called.
+ */
+ Handle<Value> GetRealNamedPropertyInPrototypeChain(Handle<String> key);
+
+ /** Tests for a named lookup interceptor.*/
+ bool HasNamedLookupInterceptor();
+
+ /** Tests for an index lookup interceptor.*/
+ bool HasIndexedLookupInterceptor();
+
+
+ static Local<Object> New();
+ static Object* Cast(Value* obj);
+ private:
+ Object();
+};
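+
+// A sketch of basic property access (assumes a HandleScope and an
+// entered context):
+//
+//   Local<Object> obj = Object::New();
+//   obj->Set(String::New("x"), Number::New(42));
+//   Local<Value> x = obj->Get(String::New("x"));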
+
+
+/**
+ * An instance of the built-in array constructor (ECMA-262, 15.4.2).
+ */
+class Array : public Object {
+ public:
+ uint32_t Length();
+
+ static Local<Array> New(int length = 0);
+ static Array* Cast(Value* obj);
+ private:
+ Array();
+};
+
+
+/**
+ * A javascript function object (ECMA-262, 15.3).
+ */
+class Function : public Object {
+ public:
+ Local<Object> NewInstance();
+ Local<Object> NewInstance(int argc, Handle<Value> argv[]);
+ Local<Value> Call(Handle<Object> recv, int argc, Handle<Value> argv[]);
+ void SetName(Handle<String> name);
+ Handle<Value> GetName();
+ static Function* Cast(Value* obj);
+ private:
+ Function();
+};
+
+
+/**
+ * A javascript value that wraps a c++ void*. This type of value is
+ * mainly used to associate c++ data structures with javascript
+ * objects.
+ */
+class External : public Value {
+ public:
+ static Local<External> New(void* value);
+ static External* Cast(Value* obj);
+ void* Value();
+ private:
+ External();
+};
+
+
+// --- T e m p l a t e s ---
+
+
+/**
+ * The superclass of object and function templates.
+ */
+class Template : public Data {
+ public:
+ /** Adds a property to each instance created by this template.*/
+ void Set(Handle<String> name, Handle<Data> value,
+ PropertyAttribute attributes = None);
+ inline void Set(const char* name, Handle<Data> value);
+ private:
+ Template();
+
+ friend class ObjectTemplate;
+ friend class FunctionTemplate;
+};
+
+
+/**
+ * The argument information given to function call callbacks. This
+ * class provides access to information about the context of the call,
+ * including the receiver, the number and values of arguments, and
+ * the holder of the function.
+ */
+class Arguments {
+ public:
+ inline int Length() const;
+ inline Local<Value> operator[](int i) const;
+ inline Local<Function> Callee() const;
+ inline Local<Object> This() const;
+ inline Local<Object> Holder() const;
+ inline bool IsConstructCall() const;
+ inline Local<Value> Data() const;
+ private:
+ Arguments();
+ friend class ImplementationUtilities;
+ inline Arguments(Local<Value> data,
+ Local<Object> holder,
+ Local<Function> callee,
+ bool is_construct_call,
+ void** values, int length);
+ Local<Value> data_;
+ Local<Object> holder_;
+ Local<Function> callee_;
+ bool is_construct_call_;
+ void** values_;
+ int length_;
+};
+
+
+/**
+ * The information passed to an accessor callback about the context
+ * of the property access.
+ */
+class AccessorInfo {
+ public:
+ inline AccessorInfo(Local<Object> self,
+ Local<Value> data,
+ Local<Object> holder)
+ : self_(self), data_(data), holder_(holder) { }
+ inline Local<Value> Data() const;
+ inline Local<Object> This() const;
+ inline Local<Object> Holder() const;
+ private:
+ Local<Object> self_;
+ Local<Value> data_;
+ Local<Object> holder_;
+};
+
+
+typedef Handle<Value> (*InvocationCallback)(const Arguments& args);
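+
+// A sketch of an InvocationCallback (the callback name is
+// illustrative); the Arguments object gives access to the receiver and
+// the actual arguments:
+//
+//   Handle<Value> Add(const Arguments& args) {
+//     return Number::New(args[0]->NumberValue() + args[1]->NumberValue());
+//   }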
+
+typedef int (*LookupCallback)(Local<Object> self, Local<String> name);
+
+/**
+ * Accessor[Getter|Setter] are used as callback functions when
+ * setting|getting a particular property. See ObjectTemplate::SetAccessor.
+ */
+typedef Handle<Value> (*AccessorGetter)(Local<String> property,
+ const AccessorInfo& info);
+
+
+typedef void (*AccessorSetter)(Local<String> property,
+ Local<Value> value,
+ const AccessorInfo& info);
+
+
+/**
+ * NamedProperty[Getter|Setter] are used as interceptors on objects.
+ * See ObjectTemplate::SetNamedPropertyHandler.
+ */
+typedef Handle<Value> (*NamedPropertyGetter)(Local<String> property,
+ const AccessorInfo& info);
+
+
+/**
+ * Returns the value if the setter intercepts the request.
+ * Otherwise, returns an empty handle.
+ */
+typedef Handle<Value> (*NamedPropertySetter)(Local<String> property,
+ Local<Value> value,
+ const AccessorInfo& info);
+
+
+/**
+ * Returns a non-empty handle if the interceptor intercepts the request.
+ * A true result indicates that the property was found.
+ */
+typedef Handle<Boolean> (*NamedPropertyQuery)(Local<String> property,
+ const AccessorInfo& info);
+
+
+/**
+ * Returns a non-empty handle if the deleter intercepts the request.
+ * Otherwise, the return value is the value of the delete expression.
+ */
+typedef Handle<Boolean> (*NamedPropertyDeleter)(Local<String> property,
+ const AccessorInfo& info);
+
+/**
+ * TODO(758124): Add documentation?
+ */
+typedef Handle<Array> (*NamedPropertyEnumerator)(const AccessorInfo& info);
+
+/**
+ * TODO(758124): Add documentation?
+ */
+typedef Handle<Value> (*IndexedPropertyGetter)(uint32_t index,
+ const AccessorInfo& info);
+
+
+/**
+ * Returns the value if the setter intercepts the request.
+ * Otherwise, returns an empty handle.
+ */
+typedef Handle<Value> (*IndexedPropertySetter)(uint32_t index,
+ Local<Value> value,
+ const AccessorInfo& info);
+
+
+/**
+ * Returns a non-empty handle if the interceptor intercepts the request.
+ * A true result indicates that the property was found.
+ */
+typedef Handle<Boolean> (*IndexedPropertyQuery)(uint32_t index,
+ const AccessorInfo& info);
+
+/**
+ * Returns a non-empty handle if the deleter intercepts the request.
+ * Otherwise, the return value is the value of the delete expression.
+ */
+typedef Handle<Boolean> (*IndexedPropertyDeleter)(uint32_t index,
+ const AccessorInfo& info);
+
+
+typedef Handle<Array> (*IndexedPropertyEnumerator)(const AccessorInfo& info);
+
+
+/**
+ * TODO(758124): Clarify documentation? Determines whether host
+ * objects can read or write an accessor? (What does the default
+ * allow? Both or neither?) If a host object needs an access check and
+ * the check fails, some properties (accessors created by the API) are
+ * still accessible. Such properties have an AccessControl setting that
+ * allows read or write access.
+ */
+enum AccessControl {
+ DEFAULT = 0,
+ ALL_CAN_READ = 1,
+ ALL_CAN_WRITE = 2
+};
+
+
+/**
+ * Undocumented security features.
+ */
+enum AccessType {
+ ACCESS_GET,
+ ACCESS_SET,
+ ACCESS_HAS,
+ ACCESS_DELETE,
+ ACCESS_KEYS
+};
+
+typedef bool (*NamedSecurityCallback)(Local<Object> global,
+ Local<Value> key,
+ AccessType type,
+ Local<Value> data);
+
+typedef bool (*IndexedSecurityCallback)(Local<Object> global,
+ uint32_t index,
+ AccessType type,
+ Local<Value> data);
+
+
+/**
+ * TODO(758124): Make sure this documentation is up to date.
+ *
+ * A FunctionTemplate is used to create functions at runtime. There can only be
+ * ONE function created from a given FunctionTemplate in an environment.
+ *
+ * A FunctionTemplate can have properties; these properties are added to the
+ * function object when it is created.
+ *
+ * A FunctionTemplate has a corresponding instance template which is used to
+ * create object instances when the function is used as a constructor. Properties
+ * added to the instance template are added to each object instance.
+ *
+ * A FunctionTemplate can have a prototype template. The prototype template
+ * is used to create the prototype object of the function.
+ *
+ * The following example illustrates the relationship between a
+ * FunctionTemplate and its various pieces:
+ *
+ * v8::Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New();
+ * t->Set("func_property", v8::Number::New(1));
+ *
+ * v8::Local<v8::Template> proto_t = t->PrototypeTemplate();
+ * proto_t->Set("proto_method", v8::FunctionTemplate::New(InvokeCallback));
+ * proto_t->Set("proto_const", v8::Number::New(2));
+ *
+ * v8::Local<v8::ObjectTemplate> instance_t = t->InstanceTemplate();
+ * instance_t->SetAccessor("instance_accessor", InstanceAccessorCallback);
+ * instance_t->SetNamedPropertyHandler(PropertyHandlerCallback, ...);
+ * instance_t->Set("instance_property", Number::New(3));
+ *
+ * v8::Local<v8::Function> function = t->GetFunction();
+ * v8::Local<v8::Object> instance = function->NewInstance();
+ *
+ * Using "function" as the JS variable name of the function object
+ * and "instance" for the instance object created above, the following
+ * JavaScript statements hold:
+ *
+ * func_property in function == true
+ * function.func_property == 1
+ *
+ * function.prototype.proto_method() invokes 'InvokeCallback'
+ * function.prototype.proto_const == 2
+ *
+ * instance instanceof function == true
+ * instance.instance_accessor calls InstanceAccessorCallback
+ * instance.instance_property == 3
+ *
+ *
+ * Inheritance:
+ *
+ * A FunctionTemplate can inherit from another one by calling the Inherit
+ * method. The following graph illustrates the semantics of inheritance:
+ *
+ * FunctionTemplate Parent -> Parent() . prototype -> { }
+ * ^ ^
+ * | Inherit(Parent) | .__proto__
+ * | |
+ * FunctionTemplate Child -> Child() . prototype -> { }
+ *
+ * When a FunctionTemplate 'Child' inherits from 'Parent', the prototype
+ * object of the Child() function has __proto__ pointing to the Parent()
+ * function's prototype object. An instance of the Child function has all
+ * properties on its parents' instance templates.
+ *
+ * Let Parent be the FunctionTemplate initialized in the previous section and
+ * create a Child function template by:
+ *
+ * Local<FunctionTemplate> parent = t;
+ * Local<FunctionTemplate> child = FunctionTemplate::New();
+ * child->Inherit(parent);
+ *
+ * Local<Function> child_function = child->GetFunction();
+ * Local<Object> child_instance = child_function->NewInstance();
+ *
+ * The following JS code holds:
+ * child_function.prototype.__proto__ == function.prototype;
+ * child_instance.instance_accessor calls InstanceAccessorCallback
+ * child_instance.instance_property == 3;
+ */
+class FunctionTemplate : public Template {
+ public:
+ /** Creates a function template.*/
+ static Local<FunctionTemplate> New(InvocationCallback callback = 0,
+ Handle<Value> data = Handle<Value>(),
+ Handle<Signature> signature =
+ Handle<Signature>());
+ /** Returns the unique function instance in the current execution context.*/
+ Local<Function> GetFunction();
+
+ void SetCallHandler(InvocationCallback callback,
+ Handle<Value> data = Handle<Value>());
+ void SetLookupHandler(LookupCallback handler);
+
+ Local<ObjectTemplate> InstanceTemplate();
+
+ /** Causes the function template to inherit from a parent function template.*/
+ void Inherit(Handle<FunctionTemplate> parent);
+
+ /**
+ * A PrototypeTemplate is the template used to create the prototype object
+ * of the function created by this template.
+ */
+ Local<ObjectTemplate> PrototypeTemplate();
+
+ int InternalFieldCount();
+
+ /** Sets the number of internal fields on the object template.*/
+ void SetInternalFieldCount(int value);
+
+ void SetClassName(Handle<String> name);
+
+ /**
+ * Determines whether the __proto__ accessor ignores instances of
+ * the function template. Call with true to make the __proto__ accessor
+ * ignore such instances, and with false (the default) to make it not
+ * ignore them.
+ * TODO(758124): What does "not ignored" mean?
+ */
+ void SetHiddenPrototype(bool value);
+
+ /**
+ * Returns true if the given object is an instance of this function template.
+ */
+ bool HasInstance(Handle<Value> object);
+
+ private:
+ FunctionTemplate();
+ void AddInstancePropertyAccessor(Handle<String> name,
+ AccessorGetter getter,
+ AccessorSetter setter,
+ Handle<Value> data,
+ AccessControl settings,
+ PropertyAttribute attributes);
+ void SetNamedInstancePropertyHandler(NamedPropertyGetter getter,
+ NamedPropertySetter setter,
+ NamedPropertyQuery query,
+ NamedPropertyDeleter remover,
+ NamedPropertyEnumerator enumerator,
+ Handle<Value> data);
+ void SetIndexedInstancePropertyHandler(IndexedPropertyGetter getter,
+ IndexedPropertySetter setter,
+ IndexedPropertyQuery query,
+ IndexedPropertyDeleter remover,
+ IndexedPropertyEnumerator enumerator,
+ Handle<Value> data);
+ void SetInstanceCallAsFunctionHandler(InvocationCallback callback,
+ Handle<Value> data);
+
+ friend class Context;
+ friend class ObjectTemplate;
+};
+
+
+/**
+ * ObjectTemplate: (TODO(758124): Add comments.)
+ */
+class ObjectTemplate : public Template {
+ public:
+ static Local<ObjectTemplate> New();
+ /** Creates a new instance of this template.*/
+ Local<Object> NewInstance();
+
+ /**
+ * Sets an accessor on the object template.
+ * \param name (TODO(758124): Describe)
+ * \param getter (TODO(758124): Describe)
+ * \param setter (TODO(758124): Describe)
+ * \param data (TODO(758124): Describe)
+ * \param settings settings must be one of:
+ *   DEFAULT = 0, ALL_CAN_READ = 1, or ALL_CAN_WRITE = 2
+ * \param attribute (TODO(758124): Describe)
+ */
+ void SetAccessor(Handle<String> name,
+ AccessorGetter getter,
+ AccessorSetter setter = 0,
+ Handle<Value> data = Handle<Value>(),
+ AccessControl settings = DEFAULT,
+ PropertyAttribute attribute = None);
+
+ /**
+ * Sets a named property handler on the object template.
+ * \param getter (TODO(758124): Describe)
+ * \param setter (TODO(758124): Describe)
+ * \param query (TODO(758124): Describe)
+ * \param deleter (TODO(758124): Describe)
+ * \param enumerator (TODO(758124): Describe)
+ * \param data (TODO(758124): Describe)
+ */
+ void SetNamedPropertyHandler(NamedPropertyGetter getter,
+ NamedPropertySetter setter = 0,
+ NamedPropertyQuery query = 0,
+ NamedPropertyDeleter deleter = 0,
+ NamedPropertyEnumerator enumerator = 0,
+ Handle<Value> data = Handle<Value>());
+
+ /**
+ * Sets an indexed property handler on the object template.
+ * \param getter (TODO(758124): Describe)
+ * \param setter (TODO(758124): Describe)
+ * \param query (TODO(758124): Describe)
+ * \param deleter (TODO(758124): Describe)
+ * \param enumerator (TODO(758124): Describe)
+ * \param data (TODO(758124): Describe)
+ */
+ void SetIndexedPropertyHandler(IndexedPropertyGetter getter,
+ IndexedPropertySetter setter = 0,
+ IndexedPropertyQuery query = 0,
+ IndexedPropertyDeleter deleter = 0,
+ IndexedPropertyEnumerator enumerator = 0,
+ Handle<Value> data = Handle<Value>());
+ /**
+ * Sets the callback to be used when calling instances created from
+ * this template as a function. If no callback is set, instances
+ * behave like normal javascript objects that cannot be called as a
+ * function.
+ */
+ void SetCallAsFunctionHandler(InvocationCallback callback,
+ Handle<Value> data = Handle<Value>());
+
+ /** Make object instances of the template undetectable.*/
+ void MarkAsUndetectable();
+
+ /** TODO(758124): Clarify documentation: Object instances of the
+ * template need an access check.*/
+ void SetAccessCheckCallbacks(NamedSecurityCallback named_handler,
+ IndexedSecurityCallback indexed_handler,
+ Handle<Value> data = Handle<Value>());
+
+ private:
+ ObjectTemplate();
+ static Local<ObjectTemplate> New(Handle<FunctionTemplate> constructor);
+ friend class FunctionTemplate;
+};
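+
+// A sketch of installing an accessor on an object template (the getter
+// and property names are illustrative):
+//
+//   Handle<Value> XGetter(Local<String> property, const AccessorInfo& info) {
+//     return Number::New(42);
+//   }
+//
+//   Local<ObjectTemplate> templ = ObjectTemplate::New();
+//   templ->SetAccessor(String::New("x"), XGetter);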
+
+
+/**
+ * A function signature which specifies the receivers and arguments
+ * a function can legally be called with.
+ */
+class Signature : public Data {
+ public:
+ static Local<Signature> New(Handle<FunctionTemplate> receiver =
+ Handle<FunctionTemplate>(),
+ int argc = 0,
+ Handle<FunctionTemplate> argv[] = 0);
+ private:
+ Signature();
+};
+
+
+/**
+ * A utility for determining the type of objects based on which
+ * template they were constructed from.
+ */
+class TypeSwitch : public Data {
+ public:
+ static Local<TypeSwitch> New(Handle<FunctionTemplate> type);
+ static Local<TypeSwitch> New(int argc, Handle<FunctionTemplate> types[]);
+ int match(Handle<Value> value);
+ private:
+ TypeSwitch();
+};
+
+
+// --- E x t e n s i o n s ---
+
+
+/**
+ * Ignore
+ */
+class Extension {
+ public:
+ Extension(const char* name,
+ const char* source = 0,
+ int dep_count = 0,
+ const char** deps = 0);
+ virtual ~Extension() { }
+ virtual v8::Handle<v8::FunctionTemplate>
+ GetNativeFunction(v8::Handle<v8::String> name) {
+ return v8::Handle<v8::FunctionTemplate>();
+ }
+
+ const char* name() { return name_; }
+ const char* source() { return source_; }
+ int dependency_count() { return dep_count_; }
+ const char** dependencies() { return deps_; }
+ void set_auto_enable(bool value) { auto_enable_ = value; }
+ bool auto_enable() { return auto_enable_; }
+
+ private:
+ const char* name_;
+ const char* source_;
+ int dep_count_;
+ const char** deps_;
+ bool auto_enable_;
+};
+
+
+void RegisterExtension(Extension* extension);
+
+
+/**
+ * Ignore
+ */
+class DeclareExtension {
+ public:
+ inline DeclareExtension(Extension* extension) {
+ RegisterExtension(extension);
+ }
+};
+
+
+// --- S t a t i c s ---
+
+
+Handle<Primitive> Undefined();
+Handle<Primitive> Null();
+Handle<Boolean> True();
+Handle<Boolean> False();
+
+
+/**
+ * A set of constraints that specifies the limits of the runtime's
+ * memory use.
+ */
+class ResourceConstraints {
+ public:
+ ResourceConstraints();
+ int max_young_space_size() { return max_young_space_size_; }
+ void set_max_young_space_size(int value) { max_young_space_size_ = value; }
+ int max_old_space_size() { return max_old_space_size_; }
+ void set_max_old_space_size(int value) { max_old_space_size_ = value; }
+ uint32_t* stack_limit() { return stack_limit_; }
+ void set_stack_limit(uint32_t* value) { stack_limit_ = value; }
+ private:
+ int max_young_space_size_;
+ int max_old_space_size_;
+ uint32_t* stack_limit_;
+};
+
+
+bool SetResourceConstraints(ResourceConstraints* constraints);
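+
+// A sketch of overriding the default constraints before initialization
+// (kYoungSpaceSize is a placeholder; consult the implementation for
+// the units and defaults):
+//
+//   ResourceConstraints constraints;
+//   constraints.set_max_young_space_size(kYoungSpaceSize);
+//   SetResourceConstraints(&constraints);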
+
+
+// --- E x c e p t i o n s ---
+
+
+typedef void (*FatalErrorCallback)(const char* location, const char* message);
+
+
+typedef void (*MessageCallback)(Handle<Message> message, Handle<Value> data);
+
+
+/**
+ * Schedules an exception to be thrown when returning to javascript. When an
+ * exception has been scheduled it is illegal to invoke any javascript
+ * operation; the caller must return immediately and only after the exception
+ * has been handled does it become legal to invoke javascript operations.
+ */
+Handle<Value> ThrowException(Handle<Value> exception);
+
+/**
+ * Create new error objects by calling the corresponding error object
+ * constructor with the message.
+ */
+class Exception {
+ public:
+ static Local<Value> RangeError(Handle<String> message);
+ static Local<Value> ReferenceError(Handle<String> message);
+ static Local<Value> SyntaxError(Handle<String> message);
+ static Local<Value> TypeError(Handle<String> message);
+ static Local<Value> Error(Handle<String> message);
+};
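+
+// A sketch of throwing an exception from a callback (the message text
+// is illustrative):
+//
+//   return ThrowException(Exception::TypeError(String::New("bad argument")));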
+
+
+/**
+ * Ignore
+ */
+struct VersionInfo {
+ int major, minor, build_major, build_minor, revision;
+};
+
+// --- C o u n t e r s C a l l b a c k s
+
+typedef int* (*CounterLookupCallback)(const wchar_t* name);
+
+// --- F a i l e d A c c e s s C h e c k C a l l b a c k ---
+typedef void (*FailedAccessCheckCallback)(Local<Object> target,
+ AccessType type,
+ Local<Value> data);
+
+// --- G a r b a g e C o l l e c t i o n C a l l b a c k s
+
+/**
+ * Applications can register a callback function which is called
+ * before and after a major Garbage Collection.
+ * Allocations are not allowed in the callback function; you therefore
+ * cannot manipulate objects (set or delete properties, for example),
+ * since it is likely such operations will result in the allocation of objects.
+ */
+typedef void (*GCCallback)();
+
+
+// --- C o n t e x t G e n e r a t o r
+
+/**
+ * Applications must provide a callback function which is called to generate
+ * a context if a context wasn't deserialized from the snapshot.
+ */
+
+typedef Persistent<Context> (*ContextGenerator)();
+
+
+/**
+ * Container class for static utility functions.
+ */
+class V8 {
+ public:
+ static void SetFatalErrorHandler(FatalErrorCallback that);
+
+ // TODO(758124): Clarify documentation: Prevent top level from
+ // calling V8::FatalProcessOutOfMemory if HasOutOfMemoryException();
+ static void IgnoreOutOfMemoryException();
+
+ // Check if V8 is dead.
+ static bool IsDead();
+
+ /**
+ * TODO(758124): Clarify documentation - what is the "ones" in
+ * "existing ones": Adds a message listener, does not overwrite any
+ * existing ones with the same callback function.
+ */
+ static bool AddMessageListener(MessageCallback that,
+ Handle<Value> data = Handle<Value>());
+
+ /**
+ * Removes all message listeners that use the specified callback function.
+ */
+ static void RemoveMessageListeners(MessageCallback that);
+
+ /**
+ * Sets v8 flags from a string.
+ * TODO(758124): Describe flags?
+ */
+ static void SetFlagsFromString(const char* str, int length);
+
+ /** Sets the version fields in the given VersionInfo struct.*/
+ static void GetVersion(VersionInfo* info);
+
+ /**
+ * Enables the host application to provide a mechanism for recording
+ * statistics counters.
+ */
+ static void SetCounterFunction(CounterLookupCallback);
+
+ /**
+ * Enables the computation of a sliding window of states. The sliding
+ * window information is recorded in statistics counters.
+ */
+ static void EnableSlidingStateWindow();
+
+ /** Sets the callback function for reporting failed access checks.*/
+ static void SetFailedAccessCheckCallbackFunction(FailedAccessCheckCallback);
+
+ /**
+ * Enables the host application to receive a notification before a major GC.
+ * Allocations are not allowed in the callback function; you therefore
+ * cannot manipulate objects (set or delete properties, for example)
+ * since it is likely that such operations will result in the allocation
+ * of objects.
+ */
+ static void SetGlobalGCPrologueCallback(GCCallback);
+
+ /**
+ * Enables the host application to receive a notification after a major GC.
+ * (TODO(758124): is the following true for this one too?)
+ * Allocations are not allowed in the callback function; you therefore
+ * cannot manipulate objects (set or delete properties, for example)
+ * since it is likely that such operations will result in the allocation
+ * of objects.
+ */
+ static void SetGlobalGCEpilogueCallback(GCCallback);
+
+ /**
+ * Allows the host application to group objects together. If one object
+ * in the group is alive, all objects in the group are alive.
+ * After each GC, object groups are removed. It is intended to be used
+ * in the before-GC callback function to simulate DOM tree connections
+ * among JS wrapper objects.
+ */
+ static void AddObjectToGroup(void* id, Persistent<Object> obj);
+
+ /**
+ * Initializes from snapshot if possible. Otherwise, attempts to initialize
+ * from scratch.
+ */
+ static bool Initialize();
+
+ private:
+ V8();
+
+ static void** GlobalizeReference(void** handle);
+ static void DisposeGlobal(void** global_handle);
+ static void MakeWeak(void** global_handle, void* data, WeakReferenceCallback);
+ static void ClearWeak(void** global_handle);
+ static bool IsGlobalNearDeath(void** global_handle);
+ static bool IsGlobalWeak(void** global_handle);
+
+ template <class T> friend class Handle;
+ template <class T> friend class Local;
+ template <class T> friend class Persistent;
+ friend class Context;
+};
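+
+// A minimal sketch of typical embedder start-up calls (OnFatalError and
+// OnMessage are hypothetical callbacks matching the typedefs above):
+//
+//   static void OnFatalError(const char* location, const char* message) {
+//     // Log the failure and abort.
+//   }
+//   static void OnMessage(v8::Handle<v8::Message> message,
+//                         v8::Handle<v8::Value> data) {
+//     // Report the error message to the embedder.
+//   }
+//   ...
+//   v8::V8::SetFatalErrorHandler(OnFatalError);
+//   v8::V8::AddMessageListener(OnMessage);
+//   v8::V8::Initialize();
+//   v8::VersionInfo info;
+//   v8::V8::GetVersion(&info);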
+
+
+/**
+ * An external exception handler.
+ */
+class TryCatch {
+ public:
+
+ /**
+ * Creates a new try/catch block and registers it with v8.
+ */
+ TryCatch();
+
+ /**
+ * Unregisters and deletes this try/catch block.
+ */
+ ~TryCatch();
+
+ /**
+ * Returns true if an exception has been caught by this try/catch block.
+ */
+ bool HasCaught();
+
+ /**
+ * Returns the exception caught by this try/catch block. If no exception has
+ * been caught an empty handle is returned.
+ *
+ * The returned handle is valid until this TryCatch block has been destroyed.
+ */
+ Local<Value> Exception();
+
+ /**
+ * Clears any exceptions that may have been caught by this try/catch block.
+ * After this method has been called, HasCaught() will return false.
+ *
+ * It is not necessary to clear a try/catch block before using it again; if
+ * another exception is thrown the previously caught exception will just be
+ * overwritten. However, it is often a good idea since it makes it easier
+ * to determine which operation threw a given exception.
+ */
+ void Reset();
+
+ void SetVerbose(bool value);
+
+ public:
+ TryCatch* next_;
+ void* exception_;
+ bool is_verbose_;
+};
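+
+// A minimal usage sketch (DoSomethingThatMayThrow is a hypothetical
+// embedder function that may end up calling v8::ThrowException):
+//
+//   v8::TryCatch try_catch;
+//   v8::Handle<v8::Value> result = DoSomethingThatMayThrow();
+//   if (try_catch.HasCaught()) {
+//     v8::Local<v8::Value> exception = try_catch.Exception();
+//     // ... report the exception, then optionally ...
+//     try_catch.Reset();
+//   }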
+
+
+// --- C o n t e x t ---
+
+
+/**
+ * Ignore
+ */
+class ExtensionConfiguration {
+ public:
+ ExtensionConfiguration(int name_count, const char* names[])
+ : name_count_(name_count), names_(names) { }
+ private:
+ friend class ImplementationUtilities;
+ int name_count_;
+ const char** names_;
+};
+
+
+/**
+ * A sandboxed execution context with its own set of built-in objects
+ * and functions.
+ */
+class Context {
+ public:
+ Local<Object> Global();
+
+ static Persistent<Context> New(ExtensionConfiguration* extensions = 0,
+ Handle<ObjectTemplate> global_template =
+ Handle<ObjectTemplate>(),
+ Handle<Value> global_object = Handle<Value>());
+
+ /** Returns the context that is on the top of the stack.*/
+ static Local<Context> Current();
+
+ /** Returns the security context used to start JS execution.*/
+ static Local<Context> GetSecurityContext();
+
+ /**
+ * Sets the security token for the context. To access an object in
+ * another context, the security tokens must match.
+ */
+ void SetSecurityToken(Handle<Value> token);
+
+ /** Returns the security token of this context.*/
+ Handle<Value> GetSecurityToken();
+
+ void Enter();
+ void Exit();
+
+ /** Returns true if the context has experienced an out of memory situation.*/
+ bool HasOutOfMemoryException();
+
+ /** Returns true if called from within a context.*/
+ static bool InContext();
+
+ /** Returns true if called from within a security context.*/
+ static bool InSecurityContext();
+
+ /**
+ * Stack-allocated class which sets the execution context for all
+ * operations executed within a local scope.
+ */
+ class Scope {
+ public:
+ inline Scope(Handle<Context> context) : context_(context) {
+ context_->Enter();
+ }
+ inline ~Scope() { context_->Exit(); }
+ private:
+ Handle<Context> context_;
+ };
+
+ private:
+ friend class Value;
+ friend class Script;
+ friend class Object;
+ friend class Function;
+};
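+
+// A minimal usage sketch of creating and entering a context (HandleScope
+// is declared earlier in this header):
+//
+//   v8::HandleScope handle_scope;
+//   v8::Persistent<v8::Context> context = v8::Context::New();
+//   {
+//     v8::Context::Scope context_scope(context);
+//     // Compile and run scripts against this context here.
+//   }
+//   context.Dispose();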
+
+
+/**
+ * Multiple threads in V8 are allowed, but only one thread at a time is
+ * allowed to use V8. The definition of 'using V8' includes accessing
+ * handles or holding onto object pointers obtained from V8 handles.
+ * It is up to the user of V8 to ensure (perhaps with locking) that
+ * this constraint is not violated.
+ *
+ * If you wish to start using V8 in a thread you can do this by constructing
+ * a v8::Locker object. After the code using V8 has completed for the
+ * current thread you can call the destructor. This can be combined
+ * with C++ scope-based construction as follows:
+ *
+ * ...
+ * {
+ * v8::Locker locker;
+ * ...
+ * // Code using V8 goes here.
+ * ...
+ * } // Destructor called here
+ *
+ * If you wish to stop using V8 in a thread A you can do this either
+ * by destroying the v8::Locker object as above or by constructing a
+ * v8::Unlocker object:
+ *
+ * {
+ * v8::Unlocker unlocker;
+ * ...
+ * // Code not using V8 goes here while V8 can run in another thread.
+ * ...
+ * } // Destructor called here.
+ *
+ * The Unlocker object is intended for use in a long-running callback
+ * from V8, where you want to release the V8 lock for other threads to
+ * use.
+ *
+ * The v8::Locker is a recursive lock. That is, you can lock more than
+ * once in a given thread. This can be useful if you have code that can
+ * be called either from code that holds the lock or from code that does
+ * not. The Unlocker is not recursive, so you cannot have several
+ * Unlockers on the stack at once, and you cannot use an Unlocker in a
+ * thread that is not inside a Locker's scope.
+ *
+ * An unlocker will unlock several lockers if it has to and reinstate
+ * the correct depth of locking on its destruction. For example:
+ *
+ * // V8 not locked.
+ * {
+ * v8::Locker locker;
+ * // V8 locked.
+ * {
+ * v8::Locker another_locker;
+ * // V8 still locked (2 levels).
+ * {
+ * v8::Unlocker unlocker;
+ * // V8 not locked.
+ * }
+ * // V8 locked again (2 levels).
+ * }
+ * // V8 still locked (1 level).
+ * }
+ * // V8 no longer locked.
+ */
+class Unlocker {
+ public:
+ Unlocker();
+ ~Unlocker();
+};
+
+
+class Locker {
+ public:
+ Locker();
+ ~Locker();
+#ifdef DEBUG
+ static void AssertIsLocked();
+#else
+ static inline void AssertIsLocked() { }
+#endif
+ /*
+ * Fires a timer every n ms that will switch between
+ * multiple threads that are in contention for the V8 lock.
+ */
+ static void StartPreemption(int every_n_ms);
+ static void StopPreemption();
+ private:
+ bool has_lock_;
+ bool top_level_;
+};
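+
+// A minimal sketch of enabling preemption between contending threads
+// (the interval below is illustrative):
+//
+//   v8::Locker::StartPreemption(100);  // switch roughly every 100 ms
+//   ...
+//   v8::Locker::StopPreemption();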
+
+
+
+// --- I m p l e m e n t a t i o n ---
+
+template <class T>
+Handle<T>::Handle() : val_(0) { }
+
+
+template <class T>
+Local<T>::Local() : Handle<T>() { }
+
+
+template <class T>
+Local<T> Local<T>::New(Handle<T> that) {
+ if (that.IsEmpty()) return Local<T>();
+ void** p = reinterpret_cast<void**>(*that);
+ return Local<T>(reinterpret_cast<T*>(HandleScope::CreateHandle(*p)));
+}
+
+
+template <class T>
+Persistent<T> Persistent<T>::New(Handle<T> that) {
+ if (that.IsEmpty()) return Persistent<T>();
+ void** p = reinterpret_cast<void**>(*that);
+ return Persistent<T>(reinterpret_cast<T*>(V8::GlobalizeReference(p)));
+}
+
+
+template <class T>
+bool Persistent<T>::IsNearDeath() {
+ if (this->IsEmpty()) return false;
+ return V8::IsGlobalNearDeath(reinterpret_cast<void**>(**this));
+}
+
+
+template <class T>
+bool Persistent<T>::IsWeak() {
+ if (this->IsEmpty()) return false;
+ return V8::IsGlobalWeak(reinterpret_cast<void**>(**this));
+}
+
+
+template <class T>
+void Persistent<T>::Dispose() {
+ if (this->IsEmpty()) return;
+ V8::DisposeGlobal(reinterpret_cast<void**>(**this));
+}
+
+
+template <class T>
+Persistent<T>::Persistent() : Handle<T>() { }
+
+template <class T>
+void Persistent<T>::MakeWeak(void* parameters, WeakReferenceCallback callback) {
+ V8::MakeWeak(reinterpret_cast<void**>(**this), parameters, callback);
+}
+
+template <class T>
+void Persistent<T>::ClearWeak() {
+ V8::ClearWeak(reinterpret_cast<void**>(**this));
+}
+
+template <class T>
+T* Handle<T>::operator->() {
+ return val_;
+}
+
+
+template <class T>
+T* Handle<T>::operator*() {
+ return val_;
+}
+
+
+Local<Value> Arguments::operator[](int i) const {
+ if (i < 0 || length_ <= i) return Local<Value>(*Undefined());
+ return Local<Value>(reinterpret_cast<Value*>(values_ - i));
+}
+
+
+Local<Function> Arguments::Callee() const {
+ return callee_;
+}
+
+
+Local<Object> Arguments::This() const {
+ return Local<Object>(reinterpret_cast<Object*>(values_ + 1));
+}
+
+
+Local<Object> Arguments::Holder() const {
+ return holder_;
+}
+
+
+Local<Value> Arguments::Data() const {
+ return data_;
+}
+
+
+bool Arguments::IsConstructCall() const {
+ return is_construct_call_;
+}
+
+
+int Arguments::Length() const {
+ return length_;
+}
+
+
+Local<Value> AccessorInfo::Data() const {
+ return data_;
+}
+
+
+Local<Object> AccessorInfo::This() const {
+ return self_;
+}
+
+
+Local<Object> AccessorInfo::Holder() const {
+ return holder_;
+}
+
+
+template <class T>
+Local<T> HandleScope::Close(Handle<T> value) {
+ void** after = RawClose(reinterpret_cast<void**>(*value));
+ return Local<T>(reinterpret_cast<T*>(after));
+}
+
+Handle<String> ScriptOrigin::ResourceName() {
+ return resource_name_;
+}
+
+
+Handle<Integer> ScriptOrigin::ResourceLineOffset() {
+ return resource_line_offset_;
+}
+
+
+Handle<Integer> ScriptOrigin::ResourceColumnOffset() {
+ return resource_column_offset_;
+}
+
+
+Handle<Boolean> Boolean::New(bool value) {
+ return value ? True() : False();
+}
+
+
+void Template::Set(const char* name, v8::Handle<Data> value) {
+ Set(v8::String::New(name), value);
+}
+
+
+/**
+ * \example evaluator.cc
+ * A simple evaluator that takes a list of expressions on the
+ * command-line and executes them.
+ */
+
+
+/**
+ * \example process.cc
+ */
+
+
+} // namespace v8
+
+
+#undef EXPORT
+#undef TYPE_CHECK
+
+
+#endif // _V8
diff --git a/src/SConscript b/src/SConscript
new file mode 100644
index 0000000..b41a275
--- /dev/null
+++ b/src/SConscript
@@ -0,0 +1,392 @@
+# Copyright 2008 Google Inc. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import sys
+from os.path import join, dirname, abspath
+root_dir = dirname(File('SConstruct').rfile().abspath)
+sys.path.append(join(root_dir, 'tools'))
+import js2c
+Import('toolchain arch os mode use_snapshot library_type')
+
+
+BUILD_OPTIONS_MAP = {
+ 'gcc': {
+ 'debug': {
+ 'default': {
+ 'CCFLAGS': '$DIALECTFLAGS $WARNINGFLAGS -g -O0',
+ 'CPPDEFINES': ['ENABLE_LOGGING_AND_PROFILING', 'ENABLE_DISASSEMBLER', 'DEBUG'],
+ 'CXXFLAGS': '$CCFLAGS -fno-rtti -fno-exceptions',
+ 'DIALECTFLAGS': '-ansi',
+ 'LIBS': 'pthread',
+ 'WARNINGFLAGS': '-pedantic -Wall -W -Wno-unused-parameter -Werror'
+ },
+ 'dtoa': {
+ 'CCFLAGS': '$DIALECTFLAGS $WARNINGFLAGS -g -O0',
+ 'CPPDEFINES': ['ENABLE_LOGGING_AND_PROFILING', 'ENABLE_DISASSEMBLER', 'DEBUG'],
+ 'CXXFLAGS': '$CCFLAGS -fno-rtti -fno-exceptions',
+ 'DIALECTFLAGS': '-ansi',
+ 'LIBS': 'pthread',
+ 'WARNINGFLAGS': '-Werror'
+ },
+ 'jscre': {
+ 'CCFLAGS': '$DIALECTFLAGS $WARNINGFLAGS -g -O0',
+ 'CPPDEFINES': ['ENABLE_LOGGING_AND_PROFILING', 'ENABLE_DISASSEMBLER', 'DEBUG', 'SUPPORT_UTF8', 'NO_RECURSE', 'SUPPORT_UCP'],
+ 'CXXFLAGS': '$CCFLAGS -fno-rtti -fno-exceptions',
+ 'DIALECTFLAGS': '-ansi',
+ 'LIBS': 'pthread',
+ 'WARNINGFLAGS': '-w'
+ }
+ },
+ 'release': {
+ 'default': {
+ 'CCFLAGS': '$DIALECTFLAGS $WARNINGFLAGS -O2',
+ 'CPPDEFINES': ['ENABLE_LOGGING_AND_PROFILING'],
+ 'CXXFLAGS': '$CCFLAGS -fno-rtti -fno-exceptions',
+ 'DIALECTFLAGS': '-ansi',
+ 'LIBS': 'pthread',
+ 'WARNINGFLAGS': '-pedantic -Wall -W -Wno-unused-parameter -Werror'
+ },
+ 'dtoa': {
+ 'CCFLAGS': '$DIALECTFLAGS $WARNINGFLAGS -O2',
+ 'CPPDEFINES': ['ENABLE_LOGGING_AND_PROFILING'],
+ 'CXXFLAGS': '$CCFLAGS -fno-rtti -fno-exceptions',
+ 'DIALECTFLAGS': '-ansi',
+ 'LIBS': 'pthread',
+ 'WARNINGFLAGS': '-Werror'
+ },
+ 'jscre': {
+ 'CCFLAGS': '$DIALECTFLAGS $WARNINGFLAGS -O2',
+ 'CPPDEFINES': ['ENABLE_LOGGING_AND_PROFILING', 'SUPPORT_UTF8', 'NO_RECURSE', 'SUPPORT_UCP'],
+ 'CXXFLAGS': '$CCFLAGS -fno-rtti -fno-exceptions',
+ 'DIALECTFLAGS': '-ansi',
+ 'LIBS': 'pthread',
+ 'WARNINGFLAGS': '-w'
+ }
+ }
+ },
+ 'gcc-darwin': {
+ 'debug': {
+ 'default': {
+ 'CCFLAGS': '$DIALECTFLAGS $WARNINGFLAGS -g -O0',
+ 'CPPDEFINES': ['ENABLE_LOGGING_AND_PROFILING', 'ENABLE_DISASSEMBLER', 'DEBUG'],
+ 'CXXFLAGS': '$CCFLAGS -fno-rtti -fno-exceptions',
+ 'DIALECTFLAGS': '-ansi',
+ 'LIBS': 'pthread',
+ 'WARNINGFLAGS': '-pedantic -Wall -W -Wno-unused-parameter -Werror'
+ },
+ 'dtoa': {
+ 'CCFLAGS': '$DIALECTFLAGS $WARNINGFLAGS -g -O0',
+ 'CPPDEFINES': ['ENABLE_LOGGING_AND_PROFILING', 'ENABLE_DISASSEMBLER', 'DEBUG'],
+ 'CXXFLAGS': '$CCFLAGS -fno-rtti -fno-exceptions',
+ 'DIALECTFLAGS': '-ansi',
+ 'LIBS': 'pthread',
+ 'WARNINGFLAGS': '-Werror'
+ },
+ 'jscre': {
+ 'CCFLAGS': '$DIALECTFLAGS $WARNINGFLAGS -g -O0',
+ 'CPPDEFINES': ['ENABLE_LOGGING_AND_PROFILING', 'ENABLE_DISASSEMBLER', 'DEBUG', 'SUPPORT_UTF8', 'NO_RECURSE', 'SUPPORT_UCP'],
+ 'CXXFLAGS': '$CCFLAGS -fno-rtti -fno-exceptions',
+ 'DIALECTFLAGS': '-ansi',
+ 'LIBS': 'pthread',
+ 'WARNINGFLAGS': '-w'
+ }
+ },
+ 'release': {
+ 'default': {
+ 'CCFLAGS': '$DIALECTFLAGS $WARNINGFLAGS -O2',
+ 'CPPDEFINES': ['ENABLE_LOGGING_AND_PROFILING'],
+ 'CXXFLAGS': '$CCFLAGS -fno-rtti -fno-exceptions',
+ 'DIALECTFLAGS': '-ansi',
+ 'LIBS': 'pthread',
+ 'WARNINGFLAGS': '-pedantic -Wall -W -Wno-unused-parameter -Werror'
+ },
+ 'dtoa': {
+ 'CCFLAGS': '$DIALECTFLAGS $WARNINGFLAGS -O2',
+ 'CPPDEFINES': ['ENABLE_LOGGING_AND_PROFILING'],
+ 'CXXFLAGS': '$CCFLAGS -fno-rtti -fno-exceptions',
+ 'DIALECTFLAGS': '-ansi',
+ 'LIBS': 'pthread',
+ 'WARNINGFLAGS': '-Werror'
+ },
+ 'jscre': {
+ 'CCFLAGS': '$DIALECTFLAGS $WARNINGFLAGS -O2',
+ 'CPPDEFINES': ['ENABLE_LOGGING_AND_PROFILING', 'SUPPORT_UTF8', 'NO_RECURSE', 'SUPPORT_UCP'],
+ 'CXXFLAGS': '$CCFLAGS -fno-rtti -fno-exceptions',
+ 'DIALECTFLAGS': '-ansi',
+ 'LIBS': 'pthread',
+ 'WARNINGFLAGS': '-w'
+ }
+ }
+ },
+ 'msvc': {
+ 'debug': {
+ 'default': {
+ 'ARFLAGS': '/NOLOGO',
+ 'CCFLAGS': '$DIALECTFLAGS $WARNINGFLAGS /Od /Gm /MTd',
+ 'CCPDBFLAGS': '/Zi',
+ 'CPPDEFINES': ['WIN32', '_CRT_SECURE_NO_DEPRECATE', '_CRT_NONSTDC_NO_DEPRECATE', '_USE_32BIT_TIME_T', 'PCRE_STATIC', 'ENABLE_LOGGING_AND_PROFILING', 'DEBUG', '_DEBUG', 'ENABLE_DISASSEMBLER'],
+ 'CXXFLAGS': '$CCFLAGS /EHsc /GS- /GR-',
+ 'DIALECTFLAGS': '/nologo',
+ 'LIBS': 'WS2_32',
+ 'LINKFLAGS': '/NOLOGO /SUBSYSTEM:CONSOLE /MACHINE:X86 /INCREMENTAL:NO /DEBUG',
+ 'PDB': '${TARGET}.pdb',
+ 'WARNINGFLAGS': '/W3 /WX /wd4355 /wd4800'
+ },
+ 'dtoa': {
+ 'ARFLAGS': '/NOLOGO',
+ 'CCFLAGS': '$DIALECTFLAGS $WARNINGFLAGS /Od /Gm /MTd',
+ 'CCPDBFLAGS': '/Zi',
+ 'CPPDEFINES': ['WIN32', '_CRT_SECURE_NO_DEPRECATE', '_CRT_NONSTDC_NO_DEPRECATE', '_USE_32BIT_TIME_T', 'PCRE_STATIC', 'ENABLE_LOGGING_AND_PROFILING', 'DEBUG', '_DEBUG', 'ENABLE_DISASSEMBLER'],
+ 'CXXFLAGS': '$CCFLAGS /EHsc /GS- /GR-',
+ 'DIALECTFLAGS': '/nologo',
+ 'LIBS': 'WS2_32',
+ 'LINKFLAGS': '/NOLOGO /SUBSYSTEM:CONSOLE /MACHINE:X86 /INCREMENTAL:NO /DEBUG',
+ 'PDB': '${TARGET}.pdb',
+ 'WARNINGFLAGS': '/WX /wd4018 /wd4244'
+ },
+ 'jscre': {
+ 'ARFLAGS': '/NOLOGO',
+ 'CCFLAGS': '$DIALECTFLAGS $WARNINGFLAGS /Od /Gm /MTd',
+ 'CCPDBFLAGS': '/Zi',
+ 'CPPDEFINES': ['WIN32', '_CRT_SECURE_NO_DEPRECATE', '_CRT_NONSTDC_NO_DEPRECATE', '_USE_32BIT_TIME_T', 'PCRE_STATIC', 'ENABLE_LOGGING_AND_PROFILING', 'DEBUG', '_DEBUG', 'ENABLE_DISASSEMBLER', 'SUPPORT_UTF8', 'NO_RECURSE', 'SUPPORT_UCP'],
+ 'CXXFLAGS': '$CCFLAGS /EHsc /GS- /GR-',
+ 'DIALECTFLAGS': '/nologo',
+ 'LIBS': 'WS2_32',
+ 'LINKFLAGS': '/NOLOGO /SUBSYSTEM:CONSOLE /MACHINE:X86 /INCREMENTAL:NO /DEBUG',
+ 'PDB': '${TARGET}.pdb',
+ 'WARNINGFLAGS': '/WX /wd4003 /wd4005 /wd4018 /wd4133'
+ }
+ },
+ 'release': {
+ 'default': {
+ 'ARFLAGS': '/NOLOGO',
+ 'CCFLAGS': '$DIALECTFLAGS $WARNINGFLAGS /O2 /MT',
+ 'CCPDBFLAGS': '/Zi',
+ 'CPPDEFINES': ['WIN32', '_CRT_SECURE_NO_DEPRECATE', '_CRT_NONSTDC_NO_DEPRECATE', '_USE_32BIT_TIME_T', 'PCRE_STATIC', 'ENABLE_LOGGING_AND_PROFILING'],
+ 'CXXFLAGS': '$CCFLAGS /EHsc /GS- /GR-',
+ 'DIALECTFLAGS': '/nologo',
+ 'LIBS': 'WS2_32',
+ 'LINKFLAGS': '/NOLOGO /SUBSYSTEM:CONSOLE /MACHINE:X86 /INCREMENTAL:NO /OPT:REF /OPT:ICF /SUBSYSTEM:CONSOLE',
+ 'PDB': '${TARGET}.pdb',
+ 'WARNINGFLAGS': '/W3 /WX /wd4355 /wd4800'
+ },
+ 'dtoa': {
+ 'ARFLAGS': '/NOLOGO',
+ 'CCFLAGS': '$DIALECTFLAGS $WARNINGFLAGS /O2 /MT',
+ 'CCPDBFLAGS': '/Zi',
+ 'CPPDEFINES': ['WIN32', '_CRT_SECURE_NO_DEPRECATE', '_CRT_NONSTDC_NO_DEPRECATE', '_USE_32BIT_TIME_T', 'PCRE_STATIC', 'ENABLE_LOGGING_AND_PROFILING'],
+ 'CXXFLAGS': '$CCFLAGS /EHsc /GS- /GR-',
+ 'DIALECTFLAGS': '/nologo',
+ 'LIBS': 'WS2_32',
+ 'LINKFLAGS': '/NOLOGO /SUBSYSTEM:CONSOLE /MACHINE:X86 /INCREMENTAL:NO /OPT:REF /OPT:ICF /SUBSYSTEM:CONSOLE',
+ 'PDB': '${TARGET}.pdb',
+ 'WARNINGFLAGS': '/WX /wd4018 /wd4244'
+ },
+ 'jscre': {
+ 'ARFLAGS': '/NOLOGO',
+ 'CCFLAGS': '$DIALECTFLAGS $WARNINGFLAGS /O2 /MT',
+ 'CCPDBFLAGS': '/Zi',
+ 'CPPDEFINES': ['WIN32', '_CRT_SECURE_NO_DEPRECATE', '_CRT_NONSTDC_NO_DEPRECATE', '_USE_32BIT_TIME_T', 'PCRE_STATIC', 'ENABLE_LOGGING_AND_PROFILING', 'SUPPORT_UTF8', 'NO_RECURSE', 'SUPPORT_UCP'],
+ 'CXXFLAGS': '$CCFLAGS /EHsc /GS- /GR-',
+ 'DIALECTFLAGS': '/nologo',
+ 'LIBS': 'WS2_32',
+ 'LINKFLAGS': '/NOLOGO /SUBSYSTEM:CONSOLE /MACHINE:X86 /INCREMENTAL:NO /OPT:REF /OPT:ICF /SUBSYSTEM:CONSOLE',
+ 'PDB': '${TARGET}.pdb',
+ 'WARNINGFLAGS': '/WX /wd4003 /wd4005 /wd4018 /wd4133'
+ }
+ }
+ }
+}
+
+
+PLATFORM_INDEPENDENT_SOURCES = '''
+accessors.cc
+allocation.cc
+api.cc
+assembler.cc
+ast.cc
+bootstrapper.cc
+builtins.cc
+checks.cc
+code-stubs.cc
+codegen.cc
+compiler.cc
+contexts.cc
+conversions.cc
+counters.cc
+dateparser.cc
+debug.cc
+execution.cc
+factory.cc
+flags.cc
+frames.cc
+global-handles.cc
+handles.cc
+hashmap.cc
+heap.cc
+ic.cc
+jsregexp.cc
+log.cc
+mark-compact.cc
+messages.cc
+objects-debug.cc
+objects.cc
+parser.cc
+prettyprinter.cc
+property.cc
+rewriter.cc
+runtime.cc
+scanner.cc
+scopeinfo.cc
+scopes.cc
+serialize.cc
+snapshot-common.cc
+spaces.cc
+string-stream.cc
+stub-cache.cc
+token.cc
+top.cc
+unicode.cc
+usage-analyzer.cc
+utils.cc
+v8-counters.cc
+v8.cc
+v8threads.cc
+variables.cc
+zone.cc
+'''.split()
+
+
+PLATFORM_DEPENDENT_SOURCES = {
+ 'arch:arm': ['assembler-arm.cc', 'builtins-arm.cc', 'codegen-arm.cc', 'cpu-arm.cc', 'disasm-arm.cc', 'disassembler-arm.cc', 'frames-arm.cc', 'ic-arm.cc', 'macro-assembler-arm.cc', 'simulator-arm.cc', 'stub-cache-arm.cc'],
+ 'arch:ia32': ['assembler-ia32.cc', 'builtins-ia32.cc', 'codegen-ia32.cc', 'cpu-ia32.cc', 'disasm-ia32.cc', 'disassembler-ia32.cc', 'frames-ia32.cc', 'ic-ia32.cc', 'macro-assembler-ia32.cc', 'simulator-ia32.cc', 'stub-cache-ia32.cc'],
+ 'os:linux': ['platform-linux.cc'],
+ 'os:macos': ['platform-macos.cc'],
+ 'os:win32': ['platform-win32.cc']
+}
+
+
+LIBRARY_FILES = '''
+runtime.js
+v8natives.js
+array.js
+string.js
+uri.js
+math.js
+messages.js
+apinatives.js
+debug-delay.js
+mirror-delay.js
+date-delay.js
+regexp-delay.js
+'''.split()
+
+
+JSCRE_FILES = '''
+pcre_compile.cpp
+pcre_exec.cpp
+pcre_tables.cpp
+pcre_ucp_searchfuncs.cpp
+pcre_xclass.cpp
+'''.split()
+
+
+def Abort(message):
+ print message
+ sys.exit(1)
+
+
+def BuildObject(env, input, **kw):
+ if library_type == 'static':
+ return env.StaticObject(input, **kw)
+ elif library_type == 'shared':
+ return env.SharedObject(input, **kw)
+ else:
+ return env.Object(input, **kw)
+
+
+def ConfigureBuild():
+ env = Environment()
+ options = BUILD_OPTIONS_MAP[toolchain][mode]['default']
+ env.Replace(**options)
+ env['BUILDERS']['JS2C'] = Builder(action=js2c.JS2C)
+ env['BUILDERS']['Snapshot'] = Builder(action='$SOURCE $TARGET --logfile $LOGFILE')
+
+ # Build the standard platform-independent source files.
+ source_files = PLATFORM_INDEPENDENT_SOURCES
+ source_files += PLATFORM_DEPENDENT_SOURCES["arch:%s" % arch]
+ source_files += PLATFORM_DEPENDENT_SOURCES["os:%s" % os]
+ full_source_files = [s for s in source_files]
+
+ # Combine the javascript library files into a single C++ file and
+ # compile it.
+ library_files = [s for s in LIBRARY_FILES]
+ library_files.append('macros.py')
+ libraries_src, libraries_empty_src = env.JS2C(['libraries.cc', 'libraries_empty.cc'], library_files)
+ libraries_obj = BuildObject(env, libraries_src, CPPPATH=['.'])
+
+ # Build JSCRE.
+ jscre_env = env.Copy()
+ jscre_options = BUILD_OPTIONS_MAP[toolchain][mode]['jscre']
+ jscre_env.Replace(**jscre_options)
+ jscre_files = [join('third_party', 'jscre', s) for s in JSCRE_FILES]
+ jscre_obj = BuildObject(jscre_env, jscre_files)
+
+ # Build dtoa.
+ dtoa_env = env.Copy()
+ dtoa_options = BUILD_OPTIONS_MAP[toolchain][mode]['dtoa']
+ dtoa_env.Replace(**dtoa_options)
+ dtoa_files = ['dtoa-config.c']
+ dtoa_obj = BuildObject(dtoa_env, dtoa_files)
+
+ full_source_objs = BuildObject(env, full_source_files)
+ non_snapshot_files = [jscre_obj, dtoa_obj, full_source_objs]
+
+ # Create snapshot if necessary.
+ empty_snapshot_obj = BuildObject(env, 'snapshot-empty.cc')
+ if use_snapshot:
+ mksnapshot_src = 'mksnapshot.cc'
+ mksnapshot = env.Program('mksnapshot', [mksnapshot_src, libraries_obj, non_snapshot_files, empty_snapshot_obj], PDB='mksnapshot.exe.pdb')
+ snapshot_cc = env.Snapshot('snapshot.cc', mksnapshot, LOGFILE=File('snapshot.log').abspath)
+ snapshot_obj = BuildObject(env, snapshot_cc, CPPPATH=['.'])
+ libraries_obj = BuildObject(env, libraries_empty_src, CPPPATH=['.'])
+ else:
+ snapshot_obj = empty_snapshot_obj
+
+ all_files = [non_snapshot_files, libraries_obj, snapshot_obj]
+ if library_type == 'static':
+ env.StaticLibrary('v8', all_files)
+ elif library_type == 'shared':
+ # There seems to be a glitch in the way scons decides where to put
+ # .pdb files when compiling with msvc, so we specify it manually.
+ # This should not affect any other platforms.
+ env.SharedLibrary('v8', all_files, PDB='v8.dll.pdb')
+ else:
+ env.Library('v8', all_files)
+
+
+ConfigureBuild()
diff --git a/src/accessors.cc b/src/accessors.cc
new file mode 100644
index 0000000..8c5bf89
--- /dev/null
+++ b/src/accessors.cc
@@ -0,0 +1,511 @@
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "accessors.h"
+#include "execution.h"
+#include "factory.h"
+#include "scopeinfo.h"
+#include "top.h"
+#include "zone-inl.h"
+
+namespace v8 { namespace internal {
+
+
+template <class C>
+static C* FindInPrototypeChain(Object* obj, bool* found_it) {
+ ASSERT(!*found_it);
+ while (!Is<C>(obj)) {
+ if (obj == Heap::null_value()) return NULL;
+ obj = obj->GetPrototype();
+ }
+ *found_it = true;
+ return C::cast(obj);
+}
+
+
+// Entry point that never should be called.
+Object* Accessors::IllegalSetter(JSObject*, Object*, void*) {
+ UNREACHABLE();
+ return NULL;
+}
+
+
+Object* Accessors::IllegalGetAccessor(Object* object, void*) {
+ UNREACHABLE();
+ return object;
+}
+
+
+Object* Accessors::ReadOnlySetAccessor(JSObject*, Object* value, void*) {
+ // According to ECMA-262, section 8.6.2.2, page 28, setting
+ // read-only properties must be silently ignored.
+ return value;
+}
+
+
+//
+// Accessors::ArrayLength
+//
+
+
+Object* Accessors::ArrayGetLength(Object* object, void*) {
+ // Traverse the prototype chain until we reach an array.
+ bool found_it = false;
+ JSArray* holder = FindInPrototypeChain<JSArray>(object, &found_it);
+ if (!found_it) return Smi::FromInt(0);
+ return holder->length();
+}
+
+
+// The helper function will 'flatten' Number objects.
+Object* Accessors::FlattenNumber(Object* value) {
+ if (value->IsNumber() || !value->IsJSValue()) return value;
+ JSValue* wrapper = JSValue::cast(value);
+ ASSERT(
+ Top::context()->global_context()->number_function()->has_initial_map());
+ Map* number_map =
+ Top::context()->global_context()->number_function()->initial_map();
+ if (wrapper->map() == number_map) return wrapper->value();
+ return value;
+}
+
+
+Object* Accessors::ArraySetLength(JSObject* object, Object* value, void*) {
+ value = FlattenNumber(value);
+
+ // Need to call methods that may trigger GC.
+ HandleScope scope;
+
+ // Protect raw pointers.
+ Handle<JSObject> object_handle(object);
+ Handle<Object> value_handle(value);
+
+ bool has_exception;
+ Handle<Object> uint32_v = Execution::ToUint32(value_handle, &has_exception);
+ if (has_exception) return Failure::Exception();
+ Handle<Object> number_v = Execution::ToNumber(value_handle, &has_exception);
+ if (has_exception) return Failure::Exception();
+
+ // Restore raw pointers.
+ object = *object_handle;
+ value = *value_handle;
+
+ if (uint32_v->Number() == number_v->Number()) {
+ if (object->IsJSArray()) {
+ return JSArray::cast(object)->SetElementsLength(*uint32_v);
+ } else {
+ // This means one of the object's prototypes is a JSArray and
+ // the object does not have a 'length' property.
+ return object->AddProperty(Heap::length_symbol(), value, NONE);
+ }
+ }
+
+ return Top::Throw(*Factory::NewRangeError("invalid_array_length",
+ HandleVector<Object>(NULL, 0)));
+}
+
+
+const AccessorDescriptor Accessors::ArrayLength = {
+ ArrayGetLength,
+ ArraySetLength,
+ 0
+};
+
+
+//
+// Accessors::StringLength
+//
+
+
+Object* Accessors::StringGetLength(Object* object, void*) {
+ Object* value = object;
+ if (object->IsJSValue()) value = JSValue::cast(object)->value();
+ if (value->IsString()) return Smi::FromInt(String::cast(value)->length());
+ // If object is not a string we return 0 to be compatible with WebKit.
+ // Note: Firefox returns the length of ToString(object).
+ return Smi::FromInt(0);
+}
+
+
+const AccessorDescriptor Accessors::StringLength = {
+ StringGetLength,
+ IllegalSetter,
+ 0
+};
+
+
+//
+// Accessors::ScriptSource
+//
+
+
+Object* Accessors::ScriptGetSource(Object* object, void*) {
+ Object* script = JSValue::cast(object)->value();
+ return Script::cast(script)->source();
+}
+
+
+const AccessorDescriptor Accessors::ScriptSource = {
+ ScriptGetSource,
+ IllegalSetter,
+ 0
+};
+
+
+//
+// Accessors::ScriptName
+//
+
+
+Object* Accessors::ScriptGetName(Object* object, void*) {
+ Object* script = JSValue::cast(object)->value();
+ return Script::cast(script)->name();
+}
+
+
+const AccessorDescriptor Accessors::ScriptName = {
+ ScriptGetName,
+ IllegalSetter,
+ 0
+};
+
+
+//
+// Accessors::ScriptLineOffset
+//
+
+
+Object* Accessors::ScriptGetLineOffset(Object* object, void*) {
+ Object* script = JSValue::cast(object)->value();
+ return Script::cast(script)->line_offset();
+}
+
+
+const AccessorDescriptor Accessors::ScriptLineOffset = {
+ ScriptGetLineOffset,
+ IllegalSetter,
+ 0
+};
+
+
+//
+// Accessors::ScriptColumnOffset
+//
+
+
+Object* Accessors::ScriptGetColumnOffset(Object* object, void*) {
+ Object* script = JSValue::cast(object)->value();
+ return Script::cast(script)->column_offset();
+}
+
+
+const AccessorDescriptor Accessors::ScriptColumnOffset = {
+ ScriptGetColumnOffset,
+ IllegalSetter,
+ 0
+};
+
+
+//
+// Accessors::ScriptType
+//
+
+
+Object* Accessors::ScriptGetType(Object* object, void*) {
+ Object* script = JSValue::cast(object)->value();
+ return Script::cast(script)->type();
+}
+
+
+const AccessorDescriptor Accessors::ScriptType = {
+ ScriptGetType,
+ IllegalSetter,
+ 0
+};
+
+
+//
+// Accessors::FunctionPrototype
+//
+
+
+Object* Accessors::FunctionGetPrototype(Object* object, void*) {
+ bool found_it = false;
+ JSFunction* function = FindInPrototypeChain<JSFunction>(object, &found_it);
+ if (!found_it) return Heap::undefined_value();
+ if (!function->has_prototype()) {
+ Object* prototype = Heap::AllocateFunctionPrototype(function);
+ if (prototype->IsFailure()) return prototype;
+ Object* result = function->SetPrototype(prototype);
+ if (result->IsFailure()) return result;
+ }
+ return function->prototype();
+}
+
+
+Object* Accessors::FunctionSetPrototype(JSObject* object,
+ Object* value,
+ void*) {
+ bool found_it = false;
+ JSFunction* function = FindInPrototypeChain<JSFunction>(object, &found_it);
+ if (!found_it) return Heap::undefined_value();
+ if (function->has_initial_map()) {
+ // If the function has allocated the initial map,
+ // replace it with a copy containing the new prototype.
+ Object* new_map = function->initial_map()->Copy();
+ if (new_map->IsFailure()) return new_map;
+ Object* result = Map::cast(new_map)->EnsureNoMapTransitions();
+ if (result->IsFailure()) return result;
+ function->set_initial_map(Map::cast(new_map));
+ }
+ Object* prototype = function->SetPrototype(value);
+ if (prototype->IsFailure()) return prototype;
+ ASSERT(function->prototype() == value);
+ return function;
+}
+
+
+const AccessorDescriptor Accessors::FunctionPrototype = {
+ FunctionGetPrototype,
+ FunctionSetPrototype,
+ 0
+};
+
+
+//
+// Accessors::FunctionLength
+//
+
+
+Object* Accessors::FunctionGetLength(Object* object, void*) {
+ bool found_it = false;
+ JSFunction* function = FindInPrototypeChain<JSFunction>(object, &found_it);
+ if (!found_it) return Smi::FromInt(0);
+ // Check if already compiled.
+ if (!function->is_compiled()) {
+ // If the function isn't compiled yet, its length has not yet been
+ // computed correctly. Compile it now and return the right length.
+ HandleScope scope;
+ Handle<JSFunction> function_handle(function);
+ if (!CompileLazy(function_handle, KEEP_EXCEPTION)) {
+ return Failure::Exception();
+ }
+ return Smi::FromInt(function_handle->shared()->length());
+ } else {
+ return Smi::FromInt(function->shared()->length());
+ }
+}
+
+
+const AccessorDescriptor Accessors::FunctionLength = {
+ FunctionGetLength,
+ ReadOnlySetAccessor,
+ 0
+};
+
+
+//
+// Accessors::FunctionName
+//
+
+
+Object* Accessors::FunctionGetName(Object* object, void*) {
+ bool found_it = false;
+ JSFunction* holder = FindInPrototypeChain<JSFunction>(object, &found_it);
+ if (!found_it) return Heap::undefined_value();
+ return holder->shared()->name();
+}
+
+
+const AccessorDescriptor Accessors::FunctionName = {
+ FunctionGetName,
+ ReadOnlySetAccessor,
+ 0
+};
+
+
+//
+// Accessors::FunctionArguments
+//
+
+
+Object* Accessors::FunctionGetArguments(Object* object, void*) {
+ HandleScope scope;
+ bool found_it = false;
+ JSFunction* holder = FindInPrototypeChain<JSFunction>(object, &found_it);
+ if (!found_it) return Heap::undefined_value();
+ Handle<JSFunction> function(holder);
+
+ // Find the top invocation of the function by traversing frames.
+ for (JavaScriptFrameIterator it; !it.done(); it.Advance()) {
+ // Skip all frames that aren't invocations of the given function.
+ JavaScriptFrame* frame = it.frame();
+ if (frame->function() != *function) continue;
+
+ // If there is an arguments variable in the stack, we return that.
+ int index = ScopeInfo<>::StackSlotIndex(frame->FindCode(),
+ Heap::arguments_symbol());
+ if (index >= 0) return frame->GetExpression(index);
+
+ // If there isn't an arguments variable in the stack, we need to
+ // find the frame that holds the actual arguments passed to the
+ // function on the stack.
+ it.AdvanceToArgumentsFrame();
+ frame = it.frame();
+
+ // Get the number of arguments and construct an arguments object
+ // mirror for the right frame.
+ const int length = frame->GetProvidedParametersCount();
+ Handle<JSObject> arguments = Factory::NewArgumentsObject(function, length);
+
+ // Copy the parameters to the arguments object.
+ FixedArray* array = FixedArray::cast(arguments->elements());
+ ASSERT(array->length() == length);
+ for (int i = 0; i < length; i++) array->set(i, frame->GetParameter(i));
+
+ // Return the freshly allocated arguments object.
+ return *arguments;
+ }
+
+ // No frame corresponding to the given function found. Return null.
+ return Heap::null_value();
+}
+
+
+const AccessorDescriptor Accessors::FunctionArguments = {
+ FunctionGetArguments,
+ ReadOnlySetAccessor,
+ 0
+};
+
+
+//
+// Accessors::FunctionCaller
+//
+
+
+Object* Accessors::FunctionGetCaller(Object* object, void*) {
+ HandleScope scope;
+ bool found_it = false;
+ JSFunction* holder = FindInPrototypeChain<JSFunction>(object, &found_it);
+ if (!found_it) return Heap::undefined_value();
+ Handle<JSFunction> function(holder);
+
+ // Find the top invocation of the function by traversing frames.
+ for (JavaScriptFrameIterator it; !it.done(); it.Advance()) {
+ // Skip all frames that aren't invocations of the given function.
+ if (it.frame()->function() != *function) continue;
+ // Once we have found the frame, we need to go to the caller
+ // frame. This may require skipping through a number of top-level
+ // frames, e.g. frames for scripts rather than functions.
+ while (true) {
+ it.Advance();
+ if (it.done()) return Heap::null_value();
+ JSFunction* caller = JSFunction::cast(it.frame()->function());
+ if (!caller->shared()->is_toplevel()) return caller;
+ }
+ }
+
+ // No frame corresponding to the given function found. Return null.
+ return Heap::null_value();
+}
+
+
+const AccessorDescriptor Accessors::FunctionCaller = {
+ FunctionGetCaller,
+ ReadOnlySetAccessor,
+ 0
+};
+
+
+//
+// Accessors::ObjectPrototype
+//
+
+
+Object* Accessors::ObjectGetPrototype(Object* receiver, void*) {
+ Object* current = receiver->GetPrototype();
+ while (current->IsJSObject() &&
+ JSObject::cast(current)->map()->is_hidden_prototype()) {
+ current = current->GetPrototype();
+ }
+ return current;
+}
+
+
+Object* Accessors::ObjectSetPrototype(JSObject* receiver,
+ Object* value,
+ void*) {
+ // Before we can set the prototype we need to be sure
+ // prototype cycles are prevented.
+ // It is sufficient to validate that the receiver is not in the new
+ // prototype chain.
+
+ // Silently ignore the change if value is not a JSObject or null.
+ // SpiderMonkey behaves this way.
+ if (!value->IsJSObject() && !value->IsNull()) return value;
+
+ for (Object* pt = value; pt != Heap::null_value(); pt = pt->GetPrototype()) {
+ if (JSObject::cast(pt) == receiver) {
+ // Cycle detected.
+ HandleScope scope;
+ return Top::Throw(*Factory::NewError("cyclic_proto",
+ HandleVector<Object>(NULL, 0)));
+ }
+ }
+
+ // Find the first object in the chain whose prototype object is not
+ // hidden and set the new prototype on that object.
+ JSObject* current = receiver;
+ Object* current_proto = receiver->GetPrototype();
+ while (current_proto->IsJSObject() &&
+ JSObject::cast(current_proto)->map()->is_hidden_prototype()) {
+ current = JSObject::cast(current_proto);
+ current_proto = current_proto->GetPrototype();
+ }
+
+ // Set the new prototype of the object.
+ Object* new_map = current->map()->Copy();
+ if (new_map->IsFailure()) return new_map;
+ Object* result = Map::cast(new_map)->EnsureNoMapTransitions();
+ if (result->IsFailure()) return result;
+ Map::cast(new_map)->set_prototype(value);
+ current->set_map(Map::cast(new_map));
+
+ // To be consistent with other Set functions, return the value.
+ return value;
+}
+
+
+const AccessorDescriptor Accessors::ObjectPrototype = {
+ ObjectGetPrototype,
+ ObjectSetPrototype,
+ 0
+};
+
+} } // namespace v8::internal
diff --git a/src/accessors.h b/src/accessors.h
new file mode 100644
index 0000000..d81a0bd
--- /dev/null
+++ b/src/accessors.h
@@ -0,0 +1,97 @@
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ACCESSORS_H_
+#define V8_ACCESSORS_H_
+
+namespace v8 { namespace internal {
+
+// The list of accessor descriptors. This is a second-order macro
+// taking a macro to be applied to all accessor descriptor names.
+#define ACCESSOR_DESCRIPTOR_LIST(V) \
+ V(FunctionPrototype) \
+ V(FunctionLength) \
+ V(FunctionName) \
+ V(FunctionArguments) \
+ V(FunctionCaller) \
+ V(ArrayLength) \
+ V(StringLength) \
+ V(ScriptSource) \
+ V(ScriptName) \
+ V(ScriptLineOffset) \
+ V(ScriptColumnOffset) \
+ V(ScriptType) \
+ V(ObjectPrototype)
+
+// Accessors contains all predefined proxy accessors.
+
+class Accessors : public AllStatic {
+ public:
+ // Accessor descriptors.
+#define ACCESSOR_DESCRIPTOR_DECLARATION(name) \
+ static const AccessorDescriptor name;
+ ACCESSOR_DESCRIPTOR_LIST(ACCESSOR_DESCRIPTOR_DECLARATION)
+#undef ACCESSOR_DESCRIPTOR_DECLARATION
+
+ enum DescriptorId {
+#define ACCESSOR_DESCRIPTOR_DECLARATION(name) \
+ k##name,
+ ACCESSOR_DESCRIPTOR_LIST(ACCESSOR_DESCRIPTOR_DECLARATION)
+#undef ACCESSOR_DESCRIPTOR_DECLARATION
+ descriptorCount
+ };
+
+ // Accessor functions called directly from the runtime system.
+ static Object* FunctionGetPrototype(Object* object, void*);
+ static Object* FunctionSetPrototype(JSObject* object, Object* value, void*);
+ private:
+ // Accessor functions only used through the descriptor.
+ static Object* FunctionGetLength(Object* object, void*);
+ static Object* FunctionGetName(Object* object, void*);
+ static Object* FunctionGetArguments(Object* object, void*);
+ static Object* FunctionGetCaller(Object* object, void*);
+ static Object* ArraySetLength(JSObject* object, Object* value, void*);
+ static Object* ArrayGetLength(Object* object, void*);
+ static Object* StringGetLength(Object* object, void*);
+ static Object* ScriptGetName(Object* object, void*);
+ static Object* ScriptGetSource(Object* object, void*);
+ static Object* ScriptGetLineOffset(Object* object, void*);
+ static Object* ScriptGetColumnOffset(Object* object, void*);
+ static Object* ScriptGetType(Object* object, void*);
+ static Object* ObjectGetPrototype(Object* receiver, void*);
+ static Object* ObjectSetPrototype(JSObject* receiver, Object* value, void*);
+
+ // Helper functions.
+ static Object* FlattenNumber(Object* value);
+ static Object* IllegalSetter(JSObject*, Object*, void*);
+ static Object* IllegalGetAccessor(Object* object, void*);
+ static Object* ReadOnlySetAccessor(JSObject*, Object* value, void*);
+};
+
+} } // namespace v8::internal
+
+#endif // V8_ACCESSORS_H_
diff --git a/src/allocation.cc b/src/allocation.cc
new file mode 100644
index 0000000..6650636
--- /dev/null
+++ b/src/allocation.cc
@@ -0,0 +1,187 @@
+// Copyright 2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+
+#include "v8.h"
+
+namespace v8 { namespace internal {
+
+
+void* Malloced::New(size_t size) {
+ ASSERT(NativeAllocationChecker::allocation_allowed());
+ void* result = malloc(size);
+ if (result == NULL) V8::FatalProcessOutOfMemory("Malloced operator new");
+ return result;
+}
+
+
+void Malloced::Delete(void* p) {
+ free(p);
+}
+
+
+void Malloced::FatalProcessOutOfMemory() {
+ V8::FatalProcessOutOfMemory("Out of memory");
+}
+
+
+#ifdef DEBUG
+
+static void* invalid = static_cast<void*>(NULL);
+
+void* Embedded::operator new(size_t size) {
+ UNREACHABLE();
+ return invalid;
+}
+
+
+void Embedded::operator delete(void* p) {
+ UNREACHABLE();
+}
+
+
+void* AllStatic::operator new(size_t size) {
+ UNREACHABLE();
+ return invalid;
+}
+
+
+void AllStatic::operator delete(void* p) {
+ UNREACHABLE();
+}
+
+#endif
+
+
+char* StrDup(const char* str) {
+ int length = strlen(str);
+ char* result = NewArray<char>(length + 1);
+ memcpy(result, str, length * kCharSize);
+ result[length] = '\0';
+ return result;
+}
+
+
+int NativeAllocationChecker::allocation_disallowed_ = 0;
+
+
+PreallocatedStorage PreallocatedStorage::in_use_list_(0);
+PreallocatedStorage PreallocatedStorage::free_list_(0);
+bool PreallocatedStorage::preallocated_ = false;
+
+
+void PreallocatedStorage::Init(size_t size) {
+ ASSERT(free_list_.next_ == &free_list_);
+ ASSERT(free_list_.previous_ == &free_list_);
+ PreallocatedStorage* free_chunk =
+ reinterpret_cast<PreallocatedStorage*>(new char[size]);
+ free_list_.next_ = free_list_.previous_ = free_chunk;
+ free_chunk->next_ = free_chunk->previous_ = &free_list_;
+ free_chunk->size_ = size - sizeof(PreallocatedStorage);
+ preallocated_ = true;
+}
+
+
+void* PreallocatedStorage::New(size_t size) {
+ if (!preallocated_) {
+ return FreeStoreAllocationPolicy::New(size);
+ }
+ ASSERT(free_list_.next_ != &free_list_);
+ ASSERT(free_list_.previous_ != &free_list_);
+ size = (size + kPointerSize - 1) & ~(kPointerSize - 1);
+ // Search for exact fit.
+ for (PreallocatedStorage* storage = free_list_.next_;
+ storage != &free_list_;
+ storage = storage->next_) {
+ if (storage->size_ == size) {
+ storage->Unlink();
+ storage->LinkTo(&in_use_list_);
+ return reinterpret_cast<void*>(storage + 1);
+ }
+ }
+ // Search for first fit.
+ for (PreallocatedStorage* storage = free_list_.next_;
+ storage != &free_list_;
+ storage = storage->next_) {
+ if (storage->size_ >= size + sizeof(PreallocatedStorage)) {
+ storage->Unlink();
+ storage->LinkTo(&in_use_list_);
+ PreallocatedStorage* left_over =
+ reinterpret_cast<PreallocatedStorage*>(
+ reinterpret_cast<char*>(storage + 1) + size);
+ left_over->size_ = storage->size_ - size - sizeof(PreallocatedStorage);
+ ASSERT(size + left_over->size_ + sizeof(PreallocatedStorage) ==
+ storage->size_);
+ storage->size_ = size;
+ left_over->LinkTo(&free_list_);
+ return reinterpret_cast<void*>(storage + 1);
+ }
+ }
+ // Allocation failure.
+ ASSERT(false);
+ return NULL;
+}
+
+
+// We don't attempt to coalesce.
+void PreallocatedStorage::Delete(void* p) {
+ if (p == NULL) {
+ return;
+ }
+ if (!preallocated_) {
+ FreeStoreAllocationPolicy::Delete(p);
+ return;
+ }
+ PreallocatedStorage* storage = reinterpret_cast<PreallocatedStorage*>(p) - 1;
+ ASSERT(storage->next_->previous_ == storage);
+ ASSERT(storage->previous_->next_ == storage);
+ storage->Unlink();
+ storage->LinkTo(&free_list_);
+}
+
+
+void PreallocatedStorage::LinkTo(PreallocatedStorage* other) {
+ next_ = other->next_;
+ other->next_->previous_ = this;
+ previous_ = other;
+ other->next_ = this;
+}
+
+
+void PreallocatedStorage::Unlink() {
+ next_->previous_ = previous_;
+ previous_->next_ = next_;
+}
+
+
+PreallocatedStorage::PreallocatedStorage(size_t size)
+ : size_(size) {
+ previous_ = next_ = this;
+}
+
+} } // namespace v8::internal
diff --git a/src/allocation.h b/src/allocation.h
new file mode 100644
index 0000000..5c9ff5e
--- /dev/null
+++ b/src/allocation.h
@@ -0,0 +1,167 @@
+// Copyright 2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ALLOCATION_H_
+#define V8_ALLOCATION_H_
+
+namespace v8 { namespace internal {
+
+
+// A class that controls whether allocation is allowed. This is for
+// the C++ heap only!
+class NativeAllocationChecker {
+ public:
+ typedef enum { ALLOW, DISALLOW } NativeAllocationAllowed;
+ explicit inline NativeAllocationChecker(NativeAllocationAllowed allowed)
+ : allowed_(allowed) {
+#ifdef DEBUG
+ if (allowed == DISALLOW) {
+ allocation_disallowed_++;
+ }
+#endif
+ }
+ ~NativeAllocationChecker() {
+#ifdef DEBUG
+ if (allowed_ == DISALLOW) {
+ allocation_disallowed_--;
+ }
+#endif
+ ASSERT(allocation_disallowed_ >= 0);
+ }
+ static inline bool allocation_allowed() {
+ return allocation_disallowed_ == 0;
+ }
+ private:
+ // This static counter ensures that NativeAllocationCheckers can be nested.
+ static int allocation_disallowed_;
+ // This flag applies to this particular instance.
+ NativeAllocationAllowed allowed_;
+};
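+
+// A minimal usage sketch: disallow C++ heap allocation within a scope.
+// In debug builds, Malloced::New and NewArray assert against this flag.
+//
+//   {
+//     NativeAllocationChecker checker(NativeAllocationChecker::DISALLOW);
+//     // Code here must not allocate through Malloced or NewArray.
+//   }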
+
+
+// Superclass for classes managed with new & delete.
+class Malloced {
+ public:
+ void* operator new(size_t size) { return New(size); }
+ void operator delete(void* p) { Delete(p); }
+
+ static void FatalProcessOutOfMemory();
+ static void* New(size_t size);
+ static void Delete(void* p);
+};
+
+
+// A macro is used for defining the base class used for embedded instances.
+// The reason is that some compilers allocate a minimum of one word for the
+// superclass. The macro prevents the use of new & delete in debug mode.
+// In release mode we are not willing to pay this overhead.
+
+#ifdef DEBUG
+// Superclass for classes with instances allocated inside stack
+// activations or inside other objects.
+class Embedded {
+ public:
+ void* operator new(size_t size);
+ void operator delete(void* p);
+};
+#define BASE_EMBEDDED : public Embedded
+#else
+#define BASE_EMBEDDED
+#endif
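+
+// A minimal sketch of how BASE_EMBEDDED is intended to be used (Example
+// is a hypothetical class name):
+//
+//   class Example BASE_EMBEDDED {
+//    public:
+//     // Instances live inside other objects or on the stack; in debug
+//     // builds, heap-allocating one with new triggers UNREACHABLE().
+//   };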
+
+
+// Superclass for classes only using statics.
+class AllStatic {
+#ifdef DEBUG
+ public:
+ void* operator new(size_t size);
+ void operator delete(void* p);
+#endif
+};
+
+
+template <typename T>
+static T* NewArray(int size) {
+ ASSERT(NativeAllocationChecker::allocation_allowed());
+ T* result = new T[size];
+ if (result == NULL) Malloced::FatalProcessOutOfMemory();
+ return result;
+}
+
+
+template <typename T>
+static void DeleteArray(T* array) {
+ delete[] array;
+}
+
+
+// The normal strdup function uses malloc. This version of StrDup
+// uses new and calls the FatalProcessOutOfMemory handler if
+// allocation fails.
+char* StrDup(const char* str);
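+
+// A minimal usage sketch; since StrDup allocates with NewArray, the copy
+// should be released with DeleteArray:
+//
+//   char* copy = StrDup("hello");
+//   ...
+//   DeleteArray(copy);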
+
+
+// Allocation policy for allocating in the C free store using malloc
+// and free. Used as the default policy for lists.
+class FreeStoreAllocationPolicy {
+ public:
+ INLINE(static void* New(size_t size)) { return Malloced::New(size); }
+ INLINE(static void Delete(void* p)) { Malloced::Delete(p); }
+};
+
+
+// Allocation policy for allocating in preallocated space.
+// Used as an allocation policy for ScopeInfo when generating
+// stack traces.
+class PreallocatedStorage : public AllStatic {
+ public:
+ explicit PreallocatedStorage(size_t size);
+ size_t size() { return size_; }
+ static void* New(size_t size);
+ static void Delete(void* p);
+
+ // Preallocate a set number of bytes.
+ static void Init(size_t size);
+
+ private:
+ size_t size_;
+ PreallocatedStorage* previous_;
+ PreallocatedStorage* next_;
+ static bool preallocated_;
+
+ static PreallocatedStorage in_use_list_;
+ static PreallocatedStorage free_list_;
+
+ void LinkTo(PreallocatedStorage* other);
+ void Unlink();
+ DISALLOW_IMPLICIT_CONSTRUCTORS(PreallocatedStorage);
+};
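+
+// A minimal usage sketch (the byte counts are illustrative):
+//
+//   PreallocatedStorage::Init(32768);      // reserve the preallocated arena
+//   void* block = PreallocatedStorage::New(128);
+//   // ... use the block while generating a stack trace ...
+//   PreallocatedStorage::Delete(block);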
+
+
+} } // namespace v8::internal
+
+#endif // V8_ALLOCATION_H_
diff --git a/src/api.cc b/src/api.cc
new file mode 100644
index 0000000..950cdf3
--- /dev/null
+++ b/src/api.cc
@@ -0,0 +1,2730 @@
+// Copyright 2007-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "api.h"
+#include "bootstrapper.h"
+#include "compiler.h"
+#include "debug.h"
+#include "execution.h"
+#include "global-handles.h"
+#include "platform.h"
+#include "serialize.h"
+#include "snapshot.h"
+
+
+namespace i = v8::internal;
+#define LOG_API(expr) LOG(ApiEntryCall(expr))
+
+
+namespace v8 {
+
+
+#define ON_BAILOUT(location, code) \
+ if (IsDeadCheck(location)) { \
+ code; \
+ UNREACHABLE(); \
+ }
+
+
+#define EXCEPTION_PREAMBLE() \
+ thread_local.IncrementCallDepth(); \
+ ASSERT(!i::Top::external_caught_exception()); \
+ bool has_pending_exception = false
+
+
+#define EXCEPTION_BAILOUT_CHECK(value) \
+ do { \
+ thread_local.DecrementCallDepth(); \
+ if (has_pending_exception) { \
+ if (thread_local.CallDepthIsZero() && i::Top::is_out_of_memory()) { \
+ if (!thread_local.IgnoreOutOfMemory()) \
+ i::V8::FatalProcessOutOfMemory(NULL); \
+ } \
+ bool call_depth_is_zero = thread_local.CallDepthIsZero(); \
+ i::Top::optional_reschedule_exception(call_depth_is_zero); \
+ return value; \
+ } \
+ } while (false)
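+
+// The two macros above are used together throughout this file: open the
+// preamble, perform a call into the VM that records failure in
+// has_pending_exception, and bail out with a default value if an exception
+// is pending. A representative sketch (fun, recv, argc and argv stand for
+// the caller's own arguments):
+//
+//   EXCEPTION_PREAMBLE();
+//   i::Handle<i::Object> returned =
+//       i::Execution::Call(fun, recv, argc, argv, &has_pending_exception);
+//   EXCEPTION_BAILOUT_CHECK(Local<Value>());
+//   return Utils::ToLocal(returned);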
+
+
+// --- D a t a   t h a t   i s   s p e c i f i c   t o   a   t h r e a d ---
+
+
+static i::HandleScopeImplementer thread_local;
+
+
+// --- E x c e p t i o n   B e h a v i o r ---
+
+
+static bool has_shut_down = false;
+static FatalErrorCallback exception_behavior = NULL;
+
+
+static void DefaultFatalErrorHandler(const char* location,
+ const char* message) {
+ API_Fatal(location, message);
+}
+
+
+
+static FatalErrorCallback& GetFatalErrorHandler() {
+ if (exception_behavior == NULL) {
+ exception_behavior = DefaultFatalErrorHandler;
+ }
+ return exception_behavior;
+}
+
+
+
+// When V8 cannot allocate memory, FatalProcessOutOfMemory is called.
+// The default fatal error handler is called and execution is stopped.
+void i::V8::FatalProcessOutOfMemory(const char* location) {
+ has_shut_down = true;
+ FatalErrorCallback callback = GetFatalErrorHandler();
+ callback(location, "Allocation failed - process out of memory");
+ // If the callback returns, we stop execution.
+ UNREACHABLE();
+}
+
+
+void V8::SetFatalErrorHandler(FatalErrorCallback that) {
+ exception_behavior = that;
+}
+
+
+bool Utils::ReportApiFailure(const char* location, const char* message) {
+ FatalErrorCallback callback = GetFatalErrorHandler();
+ callback(location, message);
+ has_shut_down = true;
+ return false;
+}
+
+
+bool V8::IsDead() {
+ return has_shut_down;
+}
+
+
+static inline bool ApiCheck(bool condition,
+ const char* location,
+ const char* message) {
+ return condition ? true : Utils::ReportApiFailure(location, message);
+}
+
+
+static bool ReportV8Dead(const char* location) {
+ FatalErrorCallback callback = GetFatalErrorHandler();
+ callback(location, "V8 is no longer useable");
+ return true;
+}
+
+
+static bool ReportEmptyHandle(const char* location) {
+ FatalErrorCallback callback = GetFatalErrorHandler();
+ callback(location, "Reading from empty handle");
+ return true;
+}
+
+
+/**
+ * IsDeadCheck checks that the VM is usable. If, for instance, the VM has run
+ * out of memory at some point this check will fail. It should be called on
+ * entry to all methods that touch anything in the heap, except destructors
+ * which you sometimes can't avoid calling after the VM has crashed. Functions
+ * that call EnsureInitialized or ON_BAILOUT don't have to also call
+ * IsDeadCheck. ON_BAILOUT has the advantage over EnsureInitialized that you
+ * can arrange to return if the VM is dead. This is needed to ensure that no
+ * VM heap allocations are attempted on a dead VM. EnsureInitialized has the
+ * advantage over ON_BAILOUT that it actually initializes the VM if this has
+ * not yet been done.
+ */
+static inline bool IsDeadCheck(const char* location) {
+ return has_shut_down ? ReportV8Dead(location) : false;
+}
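+
+// The comment above translates into the following entry-check patterns, both
+// of which appear later in this file (sketch):
+//
+//   Local<String> Value::ToString() {
+//     if (IsDeadCheck("v8::Value::ToString()")) return Local<String>();
+//     ...
+//   }
+//
+//   Local<Value> Script::Run() {
+//     ON_BAILOUT("v8::Script::Run()", return Local<Value>());
+//     ...
+//   }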
+
+
+static inline bool EmptyCheck(const char* location, v8::Handle<v8::Data> obj) {
+ return obj.IsEmpty() ? ReportEmptyHandle(location) : false;
+}
+
+
+static inline bool EmptyCheck(const char* location, v8::Data* obj) {
+ return (obj == 0) ? ReportEmptyHandle(location) : false;
+}
+
+// --- S t a t i c s ---
+
+
+static i::StringInputBuffer write_input_buffer;
+
+
+static void EnsureInitialized(const char* location) {
+ if (IsDeadCheck(location)) return;
+ ApiCheck(v8::V8::Initialize(), location, "Error initializing V8");
+}
+
+
+v8::Handle<v8::Primitive> ImplementationUtilities::Undefined() {
+ if (IsDeadCheck("v8::Undefined()")) return v8::Handle<v8::Primitive>();
+ EnsureInitialized("v8::Undefined()");
+ return v8::Handle<Primitive>(ToApi<Primitive>(i::Factory::undefined_value()));
+}
+
+
+v8::Handle<v8::Primitive> ImplementationUtilities::Null() {
+ if (IsDeadCheck("v8::Null()")) return v8::Handle<v8::Primitive>();
+ EnsureInitialized("v8::Null()");
+ return v8::Handle<Primitive>(ToApi<Primitive>(i::Factory::null_value()));
+}
+
+
+v8::Handle<v8::Boolean> ImplementationUtilities::True() {
+ if (IsDeadCheck("v8::True()")) return v8::Handle<v8::Boolean>();
+ EnsureInitialized("v8::True()");
+ return v8::Handle<v8::Boolean>(ToApi<Boolean>(i::Factory::true_value()));
+}
+
+
+v8::Handle<v8::Boolean> ImplementationUtilities::False() {
+ if (IsDeadCheck("v8::False()")) return v8::Handle<v8::Boolean>();
+ EnsureInitialized("v8::False()");
+ return v8::Handle<v8::Boolean>(ToApi<Boolean>(i::Factory::false_value()));
+}
+
+
+void V8::SetFlagsFromString(const char* str, int length) {
+ i::FlagList::SetFlagsFromString(str, length);
+}
+
+
+v8::Handle<Value> ThrowException(v8::Handle<v8::Value> value) {
+ if (IsDeadCheck("v8::ThrowException()")) return v8::Handle<Value>();
+ i::Top::ScheduleThrow(*Utils::OpenHandle(*value));
+ return v8::Undefined();
+}
+
+
+RegisteredExtension* RegisteredExtension::first_extension_ = NULL;
+
+
+RegisteredExtension::RegisteredExtension(Extension* extension)
+ : extension_(extension), state_(UNVISITED) { }
+
+
+void RegisteredExtension::Register(RegisteredExtension* that) {
+ that->next_ = RegisteredExtension::first_extension_;
+ RegisteredExtension::first_extension_ = that;
+}
+
+
+void RegisterExtension(Extension* that) {
+ RegisteredExtension* extension = new RegisteredExtension(that);
+ RegisteredExtension::Register(extension);
+}
+
+
+Extension::Extension(const char* name,
+ const char* source,
+ int dep_count,
+ const char** deps)
+ : name_(name),
+ source_(source),
+ dep_count_(dep_count),
+ deps_(deps),
+ auto_enable_(false) { }
+
+
+v8::Handle<Primitive> Undefined() {
+ LOG_API("Undefined");
+ return ImplementationUtilities::Undefined();
+}
+
+
+v8::Handle<Primitive> Null() {
+ LOG_API("Null");
+ return ImplementationUtilities::Null();
+}
+
+
+v8::Handle<Boolean> True() {
+ LOG_API("True");
+ return ImplementationUtilities::True();
+}
+
+
+v8::Handle<Boolean> False() {
+ LOG_API("False");
+ return ImplementationUtilities::False();
+}
+
+
+ResourceConstraints::ResourceConstraints()
+ : max_young_space_size_(0),
+ max_old_space_size_(0),
+ stack_limit_(NULL) { }
+
+
+bool SetResourceConstraints(ResourceConstraints* constraints) {
+ bool result = i::Heap::ConfigureHeap(constraints->max_young_space_size(),
+ constraints->max_old_space_size());
+ if (!result) return false;
+ if (constraints->stack_limit() != NULL) {
+ uintptr_t limit = reinterpret_cast<uintptr_t>(constraints->stack_limit());
+ i::StackGuard::SetStackLimit(limit);
+ }
+ return true;
+}
+
+
+void** V8::GlobalizeReference(void** obj) {
+ LOG_API("Persistent::New");
+ if (IsDeadCheck("V8::Persistent::New")) return NULL;
+ i::Handle<i::Object> result =
+ i::GlobalHandles::Create(*reinterpret_cast<i::Object**>(obj));
+ return reinterpret_cast<void**>(result.location());
+}
+
+
+void V8::MakeWeak(void** object, void* parameters,
+ WeakReferenceCallback callback) {
+ LOG_API("MakeWeak");
+ i::GlobalHandles::MakeWeak(reinterpret_cast<i::Object**>(object), parameters,
+ callback);
+}
+
+
+void V8::ClearWeak(void** obj) {
+ LOG_API("ClearWeak");
+ i::GlobalHandles::ClearWeakness(reinterpret_cast<i::Object**>(obj));
+}
+
+
+bool V8::IsGlobalNearDeath(void** obj) {
+ LOG_API("IsGlobalNearDeath");
+ if (has_shut_down) return false;
+ return i::GlobalHandles::IsNearDeath(reinterpret_cast<i::Object**>(obj));
+}
+
+
+bool V8::IsGlobalWeak(void** obj) {
+ LOG_API("IsGlobalWeak");
+ if (has_shut_down) return false;
+ return i::GlobalHandles::IsWeak(reinterpret_cast<i::Object**>(obj));
+}
+
+
+void V8::DisposeGlobal(void** obj) {
+ LOG_API("DisposeGlobal");
+ if (has_shut_down) return;
+ i::GlobalHandles::Destroy(reinterpret_cast<i::Object**>(obj));
+}
+
+// --- H a n d l e s ---
+
+
+HandleScope::Data HandleScope::current_ = { -1, NULL, NULL };
+
+
+int HandleScope::NumberOfHandles() {
+ int n = thread_local.Blocks()->length();
+ if (n == 0) return 0;
+ return ((n - 1) * i::kHandleBlockSize) +
+ (current_.next - thread_local.Blocks()->last());
+}
+
+
+void** v8::HandleScope::CreateHandle(void* value) {
+ void** result = current_.next;
+ if (result == current_.limit) {
+ // Make sure there's at least one scope on the stack and that the
+ // top of the scope stack isn't a barrier.
+ if (!ApiCheck(current_.extensions >= 0,
+ "v8::HandleScope::CreateHandle()",
+ "Cannot create a handle without a HandleScope")) {
+ return NULL;
+ }
+ // If there's more room in the last block, we use that. This is used
+ // for fast creation of scopes after scope barriers.
+ if (!thread_local.Blocks()->is_empty()) {
+ void** limit = &thread_local.Blocks()->last()[i::kHandleBlockSize];
+ if (current_.limit != limit) {
+ current_.limit = limit;
+ }
+ }
+
+ // If we still haven't found a slot for the handle, we extend the
+ // current handle scope by allocating a new handle block.
+ if (result == current_.limit) {
+ // If there's a spare block, use it for growing the current scope.
+ result = thread_local.GetSpareOrNewBlock();
+ // Add the extension to the global list of blocks, but count the
+ // extension as part of the current scope.
+ thread_local.Blocks()->Add(result);
+ current_.extensions++;
+ current_.limit = &result[i::kHandleBlockSize];
+ }
+ }
+
+ // Update the current next field, set the value in the created
+ // handle, and return the result.
+ ASSERT(result < current_.limit);
+ current_.next = result + 1;
+ *result = value;
+ return result;
+}
+
+
+void Context::Enter() {
+ if (IsDeadCheck("v8::Context::Enter()")) return;
+ i::Handle<i::Context> env = Utils::OpenHandle(this);
+
+ thread_local.AddEnteredContext(i::GlobalHandles::Create(i::Top::context()));
+ i::Top::set_context(*env);
+
+ thread_local.AddSecurityContext(
+ i::GlobalHandles::Create(i::Top::security_context()));
+ i::Top::set_security_context(*env);
+}
+
+
+void Context::Exit() {
+ if (has_shut_down) return;
+
+ // Content of 'last_context' and 'last_security_context' could be NULL.
+ i::Handle<i::Object> last_context = thread_local.RemoveLastEnteredContext();
+ i::Top::set_context(static_cast<i::Context*>(*last_context));
+ i::GlobalHandles::Destroy(last_context.location());
+
+ i::Handle<i::Object> last_security_context =
+ thread_local.RemoveLastSecurityContext();
+ i::Top::set_security_context(
+ static_cast<i::Context*>(*last_security_context));
+ i::GlobalHandles::Destroy(last_security_context.location());
+}
+
+
+void v8::HandleScope::DeleteExtensions() {
+ ASSERT(current_.extensions != 0);
+ thread_local.DeleteExtensions(current_.extensions);
+}
+
+
+#ifdef DEBUG
+void HandleScope::ZapRange(void** start, void** end) {
+ if (start == NULL) return;
+ for (void** p = start; p < end; p++) {
+ *p = reinterpret_cast<void*>(v8::internal::kHandleZapValue);
+ }
+}
+#endif
+
+
+void** v8::HandleScope::RawClose(void** value) {
+ if (!ApiCheck(!is_closed_,
+ "v8::HandleScope::Close()",
+ "Local scope has already been closed")) {
+ return 0;
+ }
+ LOG_API("CloseHandleScope");
+
+ // Read the result before popping the handle block.
+ i::Object* result = reinterpret_cast<i::Object*>(*value);
+ is_closed_ = true;
+ RestorePreviousState();
+
+ // Allocate a new handle on the previous handle block.
+ i::Handle<i::Object> handle(result);
+ return reinterpret_cast<void**>(handle.location());
+}
+
+
+// --- N e a n d e r ---
+
+
+// A constructor cannot easily return an error value; therefore it is
+// necessary to check for a dead VM with ON_BAILOUT before constructing any
+// Neander objects. As a reminder, there is no HandleScope in the
+// NeanderObject constructor. When you add one at the call site, first check
+// that you have ensured the VM is not dead.
+NeanderObject::NeanderObject(int size) {
+ EnsureInitialized("v8::Nowhere");
+ value_ = i::Factory::NewNeanderObject();
+ i::Handle<i::FixedArray> elements = i::Factory::NewFixedArray(size);
+ value_->set_elements(*elements);
+}
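+
+// A typical call site therefore performs the dead-VM check and opens a
+// HandleScope itself before constructing a Neander object, as Template::Set
+// below does (sketch):
+//
+//   if (IsDeadCheck("v8::Template::SetProperty()")) return;
+//   HandleScope scope;
+//   NeanderArray array;  // safe: the VM is known to be alive here
+//   array.add(Utils::OpenHandle(*name));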
+
+
+int NeanderObject::size() {
+ return i::FixedArray::cast(value_->elements())->length();
+}
+
+
+NeanderArray::NeanderArray() : obj_(2) {
+ obj_.set(0, i::Smi::FromInt(0));
+}
+
+
+int NeanderArray::length() {
+ return i::Smi::cast(obj_.get(0))->value();
+}
+
+
+i::Object* NeanderArray::get(int offset) {
+ ASSERT(0 <= offset);
+ ASSERT(offset < length());
+ return obj_.get(offset + 1);
+}
+
+
+// This method cannot easily return an error value; therefore it is necessary
+// to check for a dead VM with ON_BAILOUT before calling it. As a reminder,
+// there is no HandleScope in this method. When you add one at the call site,
+// first check that you have ensured the VM is not dead.
+void NeanderArray::add(i::Handle<i::Object> value) {
+ int length = this->length();
+ int size = obj_.size();
+ if (length == size - 1) {
+ i::Handle<i::FixedArray> new_elms = i::Factory::NewFixedArray(2 * size);
+ for (int i = 0; i < length; i++)
+ new_elms->set(i + 1, get(i));
+ obj_.value()->set_elements(*new_elms);
+ }
+ obj_.set(length + 1, *value);
+ obj_.set(0, i::Smi::FromInt(length + 1));
+}
+
+
+void NeanderArray::set(int index, i::Object* value) {
+ if (index < 0 || index >= this->length()) return;
+ obj_.set(index + 1, value);
+}
+
+
+// --- T e m p l a t e ---
+
+
+static void InitializeTemplate(i::Handle<i::TemplateInfo> that, int type) {
+ that->set_tag(i::Smi::FromInt(type));
+}
+
+
+void Template::Set(v8::Handle<String> name, v8::Handle<Data> value,
+ v8::PropertyAttribute attribute) {
+ if (IsDeadCheck("v8::Template::SetProperty()")) return;
+ HandleScope scope;
+ i::Handle<i::Object> list(Utils::OpenHandle(this)->property_list());
+ if (list->IsUndefined()) {
+ list = NeanderArray().value();
+ Utils::OpenHandle(this)->set_property_list(*list);
+ }
+ NeanderArray array(list);
+ array.add(Utils::OpenHandle(*name));
+ array.add(Utils::OpenHandle(*value));
+ array.add(Utils::OpenHandle(*v8::Integer::New(attribute)));
+}
+
+
+// --- F u n c t i o n   T e m p l a t e ---
+static void InitializeFunctionTemplate(
+ i::Handle<i::FunctionTemplateInfo> info) {
+ info->set_tag(i::Smi::FromInt(Consts::FUNCTION_TEMPLATE));
+ info->set_flag(0);
+}
+
+
+int FunctionTemplate::InternalFieldCount() {
+ if (IsDeadCheck("v8::FunctionTemplate::InternalFieldCount()")) {
+ return 0;
+ }
+ return i::Smi::cast(Utils::OpenHandle(this)->internal_field_count())->value();
+}
+
+
+Local<ObjectTemplate> FunctionTemplate::PrototypeTemplate() {
+ if (IsDeadCheck("v8::FunctionTemplate::PrototypeTemplate()")) {
+ return Local<ObjectTemplate>();
+ }
+ i::Handle<i::Object> result(Utils::OpenHandle(this)->prototype_template());
+ if (result->IsUndefined()) {
+ result = Utils::OpenHandle(*ObjectTemplate::New());
+ Utils::OpenHandle(this)->set_prototype_template(*result);
+ }
+ return Local<ObjectTemplate>(ToApi<ObjectTemplate>(result));
+}
+
+
+void FunctionTemplate::Inherit(v8::Handle<FunctionTemplate> value) {
+ if (IsDeadCheck("v8::FunctionTemplate::Inherit()")) return;
+ Utils::OpenHandle(this)->set_parent_template(*Utils::OpenHandle(*value));
+}
+
+
+void FunctionTemplate::SetInternalFieldCount(int value) {
+ if (IsDeadCheck("v8::FunctionTemplate::SetInternalFieldCount()")) return;
+ ApiCheck(i::Smi::IsValid(value),
+ "v8::FunctionTemplate::SetInternalFieldCount()",
+ "Invalid internal field count");
+ Utils::OpenHandle(this)->set_internal_field_count(i::Smi::FromInt(value));
+}
+
+
+// Serial numbers are used to distinguish function templates so that we can
+// find them in the function cache of the global context.
+static int next_serial_number = 0;
+
+
+Local<FunctionTemplate> FunctionTemplate::New(InvocationCallback callback,
+ v8::Handle<Value> data, v8::Handle<Signature> signature) {
+ EnsureInitialized("v8::FunctionTemplate::New()");
+ LOG_API("FunctionTemplate::New");
+ i::Handle<i::Struct> struct_obj =
+ i::Factory::NewStruct(i::FUNCTION_TEMPLATE_INFO_TYPE);
+ i::Handle<i::FunctionTemplateInfo> obj =
+ i::Handle<i::FunctionTemplateInfo>::cast(struct_obj);
+ InitializeFunctionTemplate(obj);
+ obj->set_serial_number(i::Smi::FromInt(next_serial_number++));
+ obj->set_internal_field_count(i::Smi::FromInt(0));
+ if (callback != 0) {
+ if (data.IsEmpty()) data = v8::Undefined();
+ Utils::ToLocal(obj)->SetCallHandler(callback, data);
+ }
+ obj->set_undetectable(false);
+ obj->set_needs_access_check(false);
+
+ if (!signature.IsEmpty())
+ obj->set_signature(*Utils::OpenHandle(*signature));
+ return Utils::ToLocal(obj);
+}
+
+
+Local<Signature> Signature::New(Handle<FunctionTemplate> receiver,
+ int argc, Handle<FunctionTemplate> argv[]) {
+ EnsureInitialized("v8::Signature::New()");
+ LOG_API("Signature::New");
+ i::Handle<i::Struct> struct_obj =
+ i::Factory::NewStruct(i::SIGNATURE_INFO_TYPE);
+ i::Handle<i::SignatureInfo> obj =
+ i::Handle<i::SignatureInfo>::cast(struct_obj);
+ if (!receiver.IsEmpty()) obj->set_receiver(*Utils::OpenHandle(*receiver));
+ if (argc > 0) {
+ i::Handle<i::FixedArray> args = i::Factory::NewFixedArray(argc);
+ for (int i = 0; i < argc; i++) {
+ if (!argv[i].IsEmpty())
+ args->set(i, *Utils::OpenHandle(*argv[i]));
+ }
+ obj->set_args(*args);
+ }
+ return Utils::ToLocal(obj);
+}
+
+
+Local<TypeSwitch> TypeSwitch::New(Handle<FunctionTemplate> type) {
+ Handle<FunctionTemplate> types[1] = { type };
+ return TypeSwitch::New(1, types);
+}
+
+
+Local<TypeSwitch> TypeSwitch::New(int argc, Handle<FunctionTemplate> types[]) {
+ EnsureInitialized("v8::TypeSwitch::New()");
+ LOG_API("TypeSwitch::New");
+ i::Handle<i::FixedArray> vector = i::Factory::NewFixedArray(argc);
+ for (int i = 0; i < argc; i++)
+ vector->set(i, *Utils::OpenHandle(*types[i]));
+ i::Handle<i::Struct> struct_obj =
+ i::Factory::NewStruct(i::TYPE_SWITCH_INFO_TYPE);
+ i::Handle<i::TypeSwitchInfo> obj =
+ i::Handle<i::TypeSwitchInfo>::cast(struct_obj);
+ obj->set_types(*vector);
+ return Utils::ToLocal(obj);
+}
+
+
+int TypeSwitch::match(v8::Handle<Value> value) {
+ LOG_API("TypeSwitch::match");
+ i::Handle<i::Object> obj = Utils::OpenHandle(*value);
+ i::Handle<i::TypeSwitchInfo> info = Utils::OpenHandle(this);
+ i::FixedArray* types = i::FixedArray::cast(info->types());
+ for (int i = 0; i < types->length(); i++) {
+ if (obj->IsInstanceOf(i::FunctionTemplateInfo::cast(types->get(i))))
+ return i + 1;
+ }
+ return 0;
+}
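+
+// Sketch of the intended use of TypeSwitch (the point_template handle is
+// purely illustrative): match() returns the 1-based index of the first
+// matching template, or 0 if the value is an instance of none of them.
+//
+//   Local<TypeSwitch> type_switch = TypeSwitch::New(point_template);
+//   switch (type_switch->match(value)) {
+//     case 1: /* value is an instance of point_template */ break;
+//     case 0: /* no match */ break;
+//   }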
+
+
+void FunctionTemplate::SetCallHandler(InvocationCallback callback,
+ v8::Handle<Value> data) {
+ if (IsDeadCheck("v8::FunctionTemplate::SetCallHandler()")) return;
+ HandleScope scope;
+ i::Handle<i::Struct> struct_obj =
+ i::Factory::NewStruct(i::CALL_HANDLER_INFO_TYPE);
+ i::Handle<i::CallHandlerInfo> obj =
+ i::Handle<i::CallHandlerInfo>::cast(struct_obj);
+ obj->set_callback(*FromCData(callback));
+ if (data.IsEmpty()) data = v8::Undefined();
+ obj->set_data(*Utils::OpenHandle(*data));
+ Utils::OpenHandle(this)->set_call_code(*obj);
+}
+
+
+void FunctionTemplate::SetLookupHandler(LookupCallback handler) {
+ if (IsDeadCheck("v8::FunctionTemplate::SetLookupHandler()")) return;
+ HandleScope scope;
+ Utils::OpenHandle(this)->set_lookup_callback(*FromCData(handler));
+}
+
+
+void FunctionTemplate::AddInstancePropertyAccessor(
+ v8::Handle<String> name,
+ AccessorGetter getter,
+ AccessorSetter setter,
+ v8::Handle<Value> data,
+ v8::AccessControl settings,
+ v8::PropertyAttribute attributes) {
+ if (IsDeadCheck("v8::FunctionTemplate::AddInstancePropertyAccessor()")) {
+ return;
+ }
+ HandleScope scope;
+ i::Handle<i::AccessorInfo> obj = i::Factory::NewAccessorInfo();
+ ASSERT(getter != NULL);
+ obj->set_getter(*FromCData(getter));
+ obj->set_setter(*FromCData(setter));
+ if (data.IsEmpty()) data = v8::Undefined();
+ obj->set_data(*Utils::OpenHandle(*data));
+ obj->set_name(*Utils::OpenHandle(*name));
+ if (settings & ALL_CAN_READ) obj->set_all_can_read(true);
+ if (settings & ALL_CAN_WRITE) obj->set_all_can_write(true);
+ obj->set_property_attributes(static_cast<PropertyAttributes>(attributes));
+
+ i::Handle<i::Object> list(Utils::OpenHandle(this)->property_accessors());
+ if (list->IsUndefined()) {
+ list = NeanderArray().value();
+ Utils::OpenHandle(this)->set_property_accessors(*list);
+ }
+ NeanderArray array(list);
+ array.add(obj);
+}
+
+
+Local<ObjectTemplate> FunctionTemplate::InstanceTemplate() {
+ if (IsDeadCheck("v8::FunctionTemplate::InstanceTemplate()")
+ || EmptyCheck("v8::FunctionTemplate::InstanceTemplate()", this))
+ return Local<ObjectTemplate>();
+ if (Utils::OpenHandle(this)->instance_template()->IsUndefined()) {
+ Local<ObjectTemplate> templ =
+ ObjectTemplate::New(v8::Handle<FunctionTemplate>(this));
+ Utils::OpenHandle(this)->set_instance_template(*Utils::OpenHandle(*templ));
+ }
+ i::Handle<i::ObjectTemplateInfo> result(i::ObjectTemplateInfo::cast(
+ Utils::OpenHandle(this)->instance_template()));
+ return Utils::ToLocal(result);
+}
+
+
+// --- O b j e c t   T e m p l a t e ---
+
+
+Local<ObjectTemplate> ObjectTemplate::New() {
+ return New(Local<FunctionTemplate>());
+}
+
+
+Local<ObjectTemplate> ObjectTemplate::New(
+ v8::Handle<FunctionTemplate> constructor) {
+ if (IsDeadCheck("v8::ObjectTemplate::New()")) return Local<ObjectTemplate>();
+ EnsureInitialized("v8::ObjectTemplate::New()");
+ LOG_API("ObjectTemplate::New");
+ i::Handle<i::Struct> struct_obj =
+ i::Factory::NewStruct(i::OBJECT_TEMPLATE_INFO_TYPE);
+ i::Handle<i::ObjectTemplateInfo> obj =
+ i::Handle<i::ObjectTemplateInfo>::cast(struct_obj);
+ InitializeTemplate(obj, Consts::OBJECT_TEMPLATE);
+ if (!constructor.IsEmpty())
+ obj->set_constructor(*Utils::OpenHandle(*constructor));
+ return Utils::ToLocal(obj);
+}
+
+
+// Ensure that the object template has a constructor. If no
+// constructor is available, we create one.
+static void EnsureConstructor(ObjectTemplate* object_template) {
+ if (Utils::OpenHandle(object_template)->constructor()->IsUndefined()) {
+ Local<FunctionTemplate> templ = FunctionTemplate::New();
+ i::Handle<i::FunctionTemplateInfo> constructor = Utils::OpenHandle(*templ);
+ constructor->set_instance_template(*Utils::OpenHandle(object_template));
+ Utils::OpenHandle(object_template)->set_constructor(*constructor);
+ }
+}
+
+
+void ObjectTemplate::SetAccessor(v8::Handle<String> name,
+ AccessorGetter getter,
+ AccessorSetter setter,
+ v8::Handle<Value> data,
+ AccessControl settings,
+ PropertyAttribute attribute) {
+ if (IsDeadCheck("v8::ObjectTemplate::SetAccessor()")) return;
+ HandleScope scope;
+ EnsureConstructor(this);
+ i::FunctionTemplateInfo* constructor =
+ i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor());
+ i::Handle<i::FunctionTemplateInfo> cons(constructor);
+ Utils::ToLocal(cons)->AddInstancePropertyAccessor(name,
+ getter,
+ setter,
+ data,
+ settings,
+ attribute);
+}
+
+
+void ObjectTemplate::SetNamedPropertyHandler(NamedPropertyGetter getter,
+ NamedPropertySetter setter,
+ NamedPropertyQuery query,
+ NamedPropertyDeleter remover,
+ NamedPropertyEnumerator enumerator,
+ Handle<Value> data) {
+ if (IsDeadCheck("v8::ObjectTemplate::SetNamedPropertyHandler()")) return;
+ HandleScope scope;
+ EnsureConstructor(this);
+ i::FunctionTemplateInfo* constructor =
+ i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor());
+ i::Handle<i::FunctionTemplateInfo> cons(constructor);
+ Utils::ToLocal(cons)->SetNamedInstancePropertyHandler(getter,
+ setter,
+ query,
+ remover,
+ enumerator,
+ data);
+}
+
+
+void ObjectTemplate::MarkAsUndetectable() {
+ if (IsDeadCheck("v8::ObjectTemplate::MarkAsUndetectable()")) return;
+ HandleScope scope;
+ EnsureConstructor(this);
+ i::FunctionTemplateInfo* constructor =
+ i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor());
+ i::Handle<i::FunctionTemplateInfo> cons(constructor);
+ cons->set_undetectable(true);
+}
+
+
+void ObjectTemplate::SetAccessCheckCallbacks(
+ NamedSecurityCallback named_callback,
+ IndexedSecurityCallback indexed_callback,
+ Handle<Value> data) {
+ if (IsDeadCheck("v8::ObjectTemplate::SetAccessCheckCallbacks()")) return;
+ HandleScope scope;
+ EnsureConstructor(this);
+
+ i::Handle<i::Struct> struct_info =
+ i::Factory::NewStruct(i::ACCESS_CHECK_INFO_TYPE);
+ i::Handle<i::AccessCheckInfo> info =
+ i::Handle<i::AccessCheckInfo>::cast(struct_info);
+ info->set_named_callback(*FromCData(named_callback));
+ info->set_indexed_callback(*FromCData(indexed_callback));
+ if (data.IsEmpty()) data = v8::Undefined();
+ info->set_data(*Utils::OpenHandle(*data));
+
+ i::FunctionTemplateInfo* constructor =
+ i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor());
+ i::Handle<i::FunctionTemplateInfo> cons(constructor);
+ cons->set_needs_access_check(true);
+ cons->set_access_check_info(*info);
+}
+
+
+void ObjectTemplate::SetIndexedPropertyHandler(
+ IndexedPropertyGetter getter,
+ IndexedPropertySetter setter,
+ IndexedPropertyQuery query,
+ IndexedPropertyDeleter remover,
+ IndexedPropertyEnumerator enumerator,
+ Handle<Value> data) {
+ if (IsDeadCheck("v8::ObjectTemplate::SetIndexedPropertyHandler()")) return;
+ HandleScope scope;
+ EnsureConstructor(this);
+ i::FunctionTemplateInfo* constructor =
+ i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor());
+ i::Handle<i::FunctionTemplateInfo> cons(constructor);
+ Utils::ToLocal(cons)->SetIndexedInstancePropertyHandler(getter,
+ setter,
+ query,
+ remover,
+ enumerator,
+ data);
+}
+
+
+void ObjectTemplate::SetCallAsFunctionHandler(InvocationCallback callback,
+ Handle<Value> data) {
+ if (IsDeadCheck("v8::ObjectTemplate::SetCallAsFunctionHandler()")) return;
+ HandleScope scope;
+ EnsureConstructor(this);
+ i::FunctionTemplateInfo* constructor =
+ i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor());
+ i::Handle<i::FunctionTemplateInfo> cons(constructor);
+ Utils::ToLocal(cons)->SetInstanceCallAsFunctionHandler(callback, data);
+}
+
+
+void FunctionTemplate::SetClassName(Handle<String> name) {
+ if (IsDeadCheck("v8::FunctionTemplate::SetClassName()")) return;
+ Utils::OpenHandle(this)->set_class_name(*Utils::OpenHandle(*name));
+}
+
+
+void FunctionTemplate::SetHiddenPrototype(bool value) {
+ if (IsDeadCheck("v8::FunctionTemplate::SetHiddenPrototype()")) return;
+ Utils::OpenHandle(this)->set_hidden_prototype(value);
+}
+
+
+void FunctionTemplate::SetNamedInstancePropertyHandler(
+ NamedPropertyGetter getter,
+ NamedPropertySetter setter,
+ NamedPropertyQuery query,
+ NamedPropertyDeleter remover,
+ NamedPropertyEnumerator enumerator,
+ Handle<Value> data) {
+ if (IsDeadCheck("v8::FunctionTemplate::SetNamedInstancePropertyHandler()")) {
+ return;
+ }
+ HandleScope scope;
+ i::Handle<i::Struct> struct_obj =
+ i::Factory::NewStruct(i::INTERCEPTOR_INFO_TYPE);
+ i::Handle<i::InterceptorInfo> obj =
+ i::Handle<i::InterceptorInfo>::cast(struct_obj);
+ if (getter != 0) obj->set_getter(*FromCData(getter));
+ if (setter != 0) obj->set_setter(*FromCData(setter));
+ if (query != 0) obj->set_query(*FromCData(query));
+ if (remover != 0) obj->set_deleter(*FromCData(remover));
+ if (enumerator != 0) obj->set_enumerator(*FromCData(enumerator));
+ if (data.IsEmpty()) data = v8::Undefined();
+ obj->set_data(*Utils::OpenHandle(*data));
+ Utils::OpenHandle(this)->set_named_property_handler(*obj);
+}
+
+
+void FunctionTemplate::SetIndexedInstancePropertyHandler(
+ IndexedPropertyGetter getter,
+ IndexedPropertySetter setter,
+ IndexedPropertyQuery query,
+ IndexedPropertyDeleter remover,
+ IndexedPropertyEnumerator enumerator,
+ Handle<Value> data) {
+ if (IsDeadCheck(
+ "v8::FunctionTemplate::SetIndexedInstancePropertyHandler()")) {
+ return;
+ }
+ HandleScope scope;
+ i::Handle<i::Struct> struct_obj =
+ i::Factory::NewStruct(i::INTERCEPTOR_INFO_TYPE);
+ i::Handle<i::InterceptorInfo> obj =
+ i::Handle<i::InterceptorInfo>::cast(struct_obj);
+ if (getter != 0) obj->set_getter(*FromCData(getter));
+ if (setter != 0) obj->set_setter(*FromCData(setter));
+ if (query != 0) obj->set_query(*FromCData(query));
+ if (remover != 0) obj->set_deleter(*FromCData(remover));
+ if (enumerator != 0) obj->set_enumerator(*FromCData(enumerator));
+ if (data.IsEmpty()) data = v8::Undefined();
+ obj->set_data(*Utils::OpenHandle(*data));
+ Utils::OpenHandle(this)->set_indexed_property_handler(*obj);
+}
+
+
+void FunctionTemplate::SetInstanceCallAsFunctionHandler(
+ InvocationCallback callback,
+ Handle<Value> data) {
+ if (IsDeadCheck("v8::FunctionTemplate::SetInstanceCallAsFunctionHandler()")) {
+ return;
+ }
+ HandleScope scope;
+ i::Handle<i::Struct> struct_obj =
+ i::Factory::NewStruct(i::CALL_HANDLER_INFO_TYPE);
+ i::Handle<i::CallHandlerInfo> obj =
+ i::Handle<i::CallHandlerInfo>::cast(struct_obj);
+ obj->set_callback(*FromCData(callback));
+ if (data.IsEmpty()) data = v8::Undefined();
+ obj->set_data(*Utils::OpenHandle(*data));
+ Utils::OpenHandle(this)->set_instance_call_handler(*obj);
+}
+
+
+ScriptData* ScriptData::PreCompile(const char* input, int length) {
+ unibrow::Utf8InputBuffer<> buf(input, length);
+ return i::PreParse(&buf, NULL);
+}
+
+
+ScriptData* ScriptData::New(unsigned* data, int length) {
+ return new i::ScriptDataImpl(i::Vector<unsigned>(data, length));
+}
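+
+// Illustrative flow for the pre-parsing support above (the source string is
+// an example only): pre-compile the script once and pass the resulting
+// ScriptData to Script::Compile, defined below.
+//
+//   const char* source = "function add(a, b) { return a + b; }";
+//   ScriptData* pre_data = ScriptData::PreCompile(source, strlen(source));
+//   Local<Script> script =
+//       Script::Compile(String::New(source), NULL, pre_data);
+//   Local<Value> result = script->Run();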
+
+
+// --- S c r i p t ---
+
+
+Local<Script> Script::Compile(v8::Handle<String> source,
+ v8::ScriptOrigin* origin,
+ v8::ScriptData* script_data) {
+ ON_BAILOUT("v8::Script::Compile()", return Local<Script>());
+ LOG_API("Script::Compile");
+ i::Handle<i::String> str = Utils::OpenHandle(*source);
+ i::Handle<i::String> name_obj;
+ int line_offset = 0;
+ int column_offset = 0;
+ if (origin != NULL) {
+ if (!origin->ResourceName().IsEmpty()) {
+ name_obj = Utils::OpenHandle(*origin->ResourceName());
+ }
+ if (!origin->ResourceLineOffset().IsEmpty()) {
+ line_offset = static_cast<int>(origin->ResourceLineOffset()->Value());
+ }
+ if (!origin->ResourceColumnOffset().IsEmpty()) {
+ column_offset = static_cast<int>(origin->ResourceColumnOffset()->Value());
+ }
+ }
+ EXCEPTION_PREAMBLE();
+ i::ScriptDataImpl* pre_data = static_cast<i::ScriptDataImpl*>(script_data);
+ // We assert that the pre-data is sane; in release mode, pre-data that
+ // fails the sanity check is handled by simply ignoring it.
+ ASSERT(pre_data == NULL || pre_data->SanityCheck());
+ // If the pre-data isn't sane, we simply ignore it.
+ if (pre_data != NULL && !pre_data->SanityCheck())
+ pre_data = NULL;
+ i::Handle<i::JSFunction> boilerplate = i::Compiler::Compile(str,
+ name_obj,
+ line_offset,
+ column_offset,
+ NULL,
+ pre_data);
+ has_pending_exception = boilerplate.is_null();
+ EXCEPTION_BAILOUT_CHECK(Local<Script>());
+ i::Handle<i::JSFunction> result =
+ i::Factory::NewFunctionFromBoilerplate(boilerplate,
+ i::Top::global_context());
+ return Local<Script>(ToApi<Script>(result));
+}
+
+
+Local<Value> Script::Run() {
+ ON_BAILOUT("v8::Script::Run()", return Local<Value>());
+ LOG_API("Script::Run");
+ i::Object* raw_result = NULL;
+ {
+ HandleScope scope;
+ i::Handle<i::JSFunction> fun = Utils::OpenHandle(this);
+ EXCEPTION_PREAMBLE();
+ i::Handle<i::Object> global(i::Top::context()->global());
+ i::Handle<i::Object> result =
+ i::Execution::Call(fun, global, 0, NULL, &has_pending_exception);
+ EXCEPTION_BAILOUT_CHECK(Local<Value>());
+ raw_result = *result;
+ }
+ i::Handle<i::Object> result(raw_result);
+ return Utils::ToLocal(result);
+}
+
+
+// --- E x c e p t i o n s ---
+
+
+v8::TryCatch::TryCatch()
+ : next_(i::Top::try_catch_handler()),
+ exception_(i::Heap::the_hole_value()),
+ is_verbose_(false) {
+ i::Top::RegisterTryCatchHandler(this);
+}
+
+
+v8::TryCatch::~TryCatch() {
+ i::Top::UnregisterTryCatchHandler(this);
+}
+
+
+bool v8::TryCatch::HasCaught() {
+ return !reinterpret_cast<i::Object*>(exception_)->IsTheHole();
+}
+
+
+v8::Local<Value> v8::TryCatch::Exception() {
+ if (HasCaught()) {
+ // Check for out of memory exception.
+ i::Object* exception = reinterpret_cast<i::Object*>(exception_);
+ return v8::Utils::ToLocal(i::Handle<i::Object>(exception));
+ } else {
+ return v8::Local<Value>();
+ }
+}
+
+
+void v8::TryCatch::Reset() {
+ exception_ = i::Heap::the_hole_value();
+}
+
+
+void v8::TryCatch::SetVerbose(bool value) {
+ is_verbose_ = value;
+}
+
+
+// --- M e s s a g e ---
+
+
+Local<String> Message::Get() {
+ ON_BAILOUT("v8::Message::Get()", return Local<String>());
+ HandleScope scope;
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ i::Handle<i::String> raw_result = i::MessageHandler::GetMessage(obj);
+ Local<String> result = Utils::ToLocal(raw_result);
+ return scope.Close(result);
+}
+
+
+v8::Handle<String> Message::GetScriptResourceName() {
+ if (IsDeadCheck("v8::Message::GetScriptResourceName()")) {
+ return Local<String>();
+ }
+ HandleScope scope;
+ i::Handle<i::JSObject> obj =
+ i::Handle<i::JSObject>::cast(Utils::OpenHandle(this));
+ // Return this.script.name.
+ i::Handle<i::JSValue> script =
+ i::Handle<i::JSValue>::cast(GetProperty(obj, "script"));
+ i::Handle<i::Object> resource_name(i::Script::cast(script->value())->name());
+ if (!resource_name->IsString()) {
+ return Local<String>();
+ }
+ Local<String> result =
+ Utils::ToLocal(i::Handle<i::String>::cast(resource_name));
+ return scope.Close(result);
+}
+
+
+// TODO(1240903): Remove this when no longer used in WebKit V8 bindings.
+Handle<Value> Message::GetSourceData() {
+ Handle<String> data = GetScriptResourceName();
+ if (data.IsEmpty()) return v8::Undefined();
+ return data;
+}
+
+static i::Handle<i::Object> CallV8HeapFunction(const char* name,
+ i::Handle<i::Object> recv,
+ int argc,
+ i::Object** argv[],
+ bool* has_pending_exception) {
+ i::Handle<i::String> fmt_str = i::Factory::LookupAsciiSymbol(name);
+ i::Object* object_fun = i::Top::builtins()->GetProperty(*fmt_str);
+ i::Handle<i::JSFunction> fun =
+ i::Handle<i::JSFunction>(i::JSFunction::cast(object_fun));
+ i::Handle<i::Object> value =
+ i::Execution::Call(fun, recv, argc, argv, has_pending_exception);
+ return value;
+}
+
+
+static i::Handle<i::Object> CallV8HeapFunction(const char* name,
+ i::Handle<i::Object> data,
+ bool* has_pending_exception) {
+ i::Object** argv[1] = { data.location() };
+ return CallV8HeapFunction(name,
+ i::Top::builtins(),
+ 1,
+ argv,
+ has_pending_exception);
+}
+
+
+int Message::GetLineNumber() {
+ ON_BAILOUT("v8::Message::GetLineNumber()", return -1);
+ HandleScope scope;
+ EXCEPTION_PREAMBLE();
+ i::Handle<i::Object> result = CallV8HeapFunction("GetLineNumber",
+ Utils::OpenHandle(this),
+ &has_pending_exception);
+ EXCEPTION_BAILOUT_CHECK(0);
+ return static_cast<int>(result->Number());
+}
+
+
+Local<Value> Message::GetSourceLine() {
+ ON_BAILOUT("v8::Message::GetSourceLine()", return Local<Value>());
+ HandleScope scope;
+ EXCEPTION_PREAMBLE();
+ i::Handle<i::Object> result = CallV8HeapFunction("GetSourceLine",
+ Utils::OpenHandle(this),
+ &has_pending_exception);
+ EXCEPTION_BAILOUT_CHECK(Local<v8::Value>());
+ return scope.Close(Utils::ToLocal(result));
+}
+
+
+char* Message::GetUnderline(char* source_line, char underline_char) {
+ if (IsDeadCheck("v8::Message::GetUnderline()")) return 0;
+ HandleScope scope;
+
+ i::Handle<i::JSObject> data_obj = Utils::OpenHandle(this);
+ int start_pos = static_cast<int>(GetProperty(data_obj, "startPos")->Number());
+ int end_pos = static_cast<int>(GetProperty(data_obj, "endPos")->Number());
+ EXCEPTION_PREAMBLE();
+ i::Handle<i::Object> start_col_obj = CallV8HeapFunction(
+ "GetPositionInLine",
+ data_obj,
+ &has_pending_exception);
+ EXCEPTION_BAILOUT_CHECK(0);
+ int start_col = static_cast<int>(start_col_obj->Number());
+ int end_col = start_col + (end_pos - start_pos);
+
+ // Any tabs before or between the selected columns have to be
+ // expanded into spaces. We assume that a tab character advances
+ // the cursor to the next 8-character boundary, and by at least
+ // one character.
+ int real_start_col = 0;
+ for (int i = 0; i < start_col; i++) {
+ real_start_col++;
+ if (source_line[i] == '\t') {
+ real_start_col++;
+ while (real_start_col % 8 != 0)
+ real_start_col++;
+ }
+ }
+ int real_end_col = real_start_col;
+ for (int i = start_col; i < end_col; i++) {
+ real_end_col++;
+ if (source_line[i] == '\t') {
+ while (real_end_col % 8 != 0)
+ real_end_col++;
+ }
+ }
+ char* result = i::NewArray<char>(real_end_col + 1);
+ for (int i = 0; i < real_start_col; i++)
+ result[i] = ' ';
+ for (int i = real_start_col; i < real_end_col; i++)
+ result[i] = underline_char;
+ result[real_end_col] = '\0';
+ return result;
+}
+
+
+void Message::PrintCurrentStackTrace(FILE* out) {
+ if (IsDeadCheck("v8::Message::PrintCurrentStackTrace()")) return;
+ i::Top::PrintCurrentStackTrace(out);
+}
+
+
+// --- D a t a ---
+
+bool Value::IsUndefined() {
+ if (IsDeadCheck("v8::Value::IsUndefined()")) return false;
+ return Utils::OpenHandle(this)->IsUndefined();
+}
+
+
+bool Value::IsNull() {
+ if (IsDeadCheck("v8::Value::IsNull()")) return false;
+ return Utils::OpenHandle(this)->IsNull();
+}
+
+
+bool Value::IsTrue() {
+ if (IsDeadCheck("v8::Value::IsTrue()")) return false;
+ return Utils::OpenHandle(this)->IsTrue();
+}
+
+
+bool Value::IsFalse() {
+ if (IsDeadCheck("v8::Value::IsFalse()")) return false;
+ return Utils::OpenHandle(this)->IsFalse();
+}
+
+
+bool Value::IsFunction() {
+ if (IsDeadCheck("v8::Value::IsFunction()")) return false;
+ return Utils::OpenHandle(this)->IsJSFunction();
+}
+
+
+bool Value::IsString() {
+ if (IsDeadCheck("v8::Value::IsString()")) return false;
+ return Utils::OpenHandle(this)->IsString();
+}
+
+
+bool Value::IsArray() {
+ if (IsDeadCheck("v8::Value::IsArray()")) return false;
+ return Utils::OpenHandle(this)->IsJSArray();
+}
+
+
+bool Value::IsObject() {
+ if (IsDeadCheck("v8::Value::IsObject()")) return false;
+ return Utils::OpenHandle(this)->IsJSObject();
+}
+
+
+bool Value::IsNumber() {
+ if (IsDeadCheck("v8::Value::IsNumber()")) return false;
+ return Utils::OpenHandle(this)->IsNumber();
+}
+
+
+bool Value::IsBoolean() {
+ if (IsDeadCheck("v8::Value::IsBoolean()")) return false;
+ return Utils::OpenHandle(this)->IsBoolean();
+}
+
+
+bool Value::IsExternal() {
+ if (IsDeadCheck("v8::Value::IsExternal()")) return false;
+ return Utils::OpenHandle(this)->IsProxy();
+}
+
+
+bool Value::IsInt32() {
+ if (IsDeadCheck("v8::Value::IsInt32()")) return false;
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ if (obj->IsSmi()) return true;
+ if (obj->IsNumber()) {
+ double value = obj->Number();
+ return i::FastI2D(i::FastD2I(value)) == value;
+ }
+ return false;
+}
+
+
+Local<String> Value::ToString() {
+ if (IsDeadCheck("v8::Value::ToString()")) return Local<String>();
+ LOG_API("ToString");
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ i::Handle<i::Object> str;
+ if (obj->IsString()) {
+ str = obj;
+ } else {
+ EXCEPTION_PREAMBLE();
+ str = i::Execution::ToString(obj, &has_pending_exception);
+ EXCEPTION_BAILOUT_CHECK(Local<String>());
+ }
+ return Local<String>(ToApi<String>(str));
+}
+
+
+Local<String> Value::ToDetailString() {
+ if (IsDeadCheck("v8::Value::ToDetailString()")) return Local<String>();
+ LOG_API("ToDetailString");
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ i::Handle<i::Object> str;
+ if (obj->IsString()) {
+ str = obj;
+ } else {
+ EXCEPTION_PREAMBLE();
+ str = i::Execution::ToDetailString(obj, &has_pending_exception);
+ EXCEPTION_BAILOUT_CHECK(Local<String>());
+ }
+ return Local<String>(ToApi<String>(str));
+}
+
+
+Local<v8::Object> Value::ToObject() {
+ if (IsDeadCheck("v8::Value::ToObject()")) return Local<v8::Object>();
+ LOG_API("ToObject");
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ i::Handle<i::Object> val;
+ if (obj->IsJSObject()) {
+ val = obj;
+ } else {
+ EXCEPTION_PREAMBLE();
+ val = i::Execution::ToObject(obj, &has_pending_exception);
+ EXCEPTION_BAILOUT_CHECK(Local<v8::Object>());
+ }
+ return Local<v8::Object>(ToApi<Object>(val));
+}
+
+
+Local<Boolean> Value::ToBoolean() {
+ if (IsDeadCheck("v8::Value::ToBoolean()")) return Local<Boolean>();
+ LOG_API("ToBoolean");
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ i::Handle<i::Object> val =
+ obj->IsBoolean() ? obj : i::Execution::ToBoolean(obj);
+ return Local<Boolean>(ToApi<Boolean>(val));
+}
+
+
+Local<Number> Value::ToNumber() {
+ if (IsDeadCheck("v8::Value::ToNumber()")) return Local<Number>();
+ LOG_API("ToNumber");
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ i::Handle<i::Object> num;
+ if (obj->IsNumber()) {
+ num = obj;
+ } else {
+ EXCEPTION_PREAMBLE();
+ num = i::Execution::ToNumber(obj, &has_pending_exception);
+ EXCEPTION_BAILOUT_CHECK(Local<Number>());
+ }
+ return Local<Number>(ToApi<Number>(num));
+}
+
+
+Local<Integer> Value::ToInteger() {
+ if (IsDeadCheck("v8::Value::ToInteger()")) return Local<Integer>();
+ LOG_API("ToInteger");
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ i::Handle<i::Object> num;
+ if (obj->IsSmi()) {
+ num = obj;
+ } else {
+ EXCEPTION_PREAMBLE();
+ num = i::Execution::ToInteger(obj, &has_pending_exception);
+ EXCEPTION_BAILOUT_CHECK(Local<Integer>());
+ }
+ return Local<Integer>(ToApi<Integer>(num));
+}
+
+
+External* External::Cast(v8::Value* that) {
+ if (IsDeadCheck("v8::External::Cast()")) return 0;
+ i::Handle<i::Object> obj = Utils::OpenHandle(that);
+ ApiCheck(obj->IsProxy(),
+ "v8::External::Cast()",
+ "Could not convert to external");
+ return static_cast<External*>(that);
+}
+
+
+v8::Object* v8::Object::Cast(Value* that) {
+ if (IsDeadCheck("v8::Object::Cast()")) return 0;
+ i::Handle<i::Object> obj = Utils::OpenHandle(that);
+ ApiCheck(obj->IsJSObject(),
+ "v8::Object::Cast()",
+ "Could not convert to object");
+ return static_cast<v8::Object*>(that);
+}
+
+
+v8::Function* v8::Function::Cast(Value* that) {
+ if (IsDeadCheck("v8::Function::Cast()")) return 0;
+ i::Handle<i::Object> obj = Utils::OpenHandle(that);
+ ApiCheck(obj->IsJSFunction(),
+ "v8::Function::Cast()",
+ "Could not convert to function");
+ return static_cast<v8::Function*>(that);
+}
+
+
+v8::String* v8::String::Cast(v8::Value* that) {
+ if (IsDeadCheck("v8::String::Cast()")) return 0;
+ i::Handle<i::Object> obj = Utils::OpenHandle(that);
+ ApiCheck(obj->IsString(),
+ "v8::String::Cast()",
+ "Could not convert to string");
+ return static_cast<v8::String*>(that);
+}
+
+
+v8::Number* v8::Number::Cast(v8::Value* that) {
+ if (IsDeadCheck("v8::Number::Cast()")) return 0;
+ i::Handle<i::Object> obj = Utils::OpenHandle(that);
+ ApiCheck(obj->IsNumber(),
+ "v8::Number::Cast()",
+ "Could not convert to number");
+ return static_cast<v8::Number*>(that);
+}
+
+
+v8::Integer* v8::Integer::Cast(v8::Value* that) {
+ if (IsDeadCheck("v8::Integer::Cast()")) return 0;
+ i::Handle<i::Object> obj = Utils::OpenHandle(that);
+ ApiCheck(obj->IsNumber(),
+ "v8::Integer::Cast()",
+ "Could not convert to number");
+ return static_cast<v8::Integer*>(that);
+}
+
+
+v8::Array* v8::Array::Cast(Value* that) {
+ if (IsDeadCheck("v8::Array::Cast()")) return 0;
+ i::Handle<i::Object> obj = Utils::OpenHandle(that);
+ ApiCheck(obj->IsJSArray(),
+ "v8::Array::Cast()",
+ "Could not convert to array");
+ return static_cast<v8::Array*>(that);
+}
+
+
+bool Value::BooleanValue() {
+ if (IsDeadCheck("v8::Value::BooleanValue()")) return false;
+ LOG_API("BooleanValue");
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ i::Handle<i::Object> value =
+ obj->IsBoolean() ? obj : i::Execution::ToBoolean(obj);
+ return value->IsTrue();
+}
+
+
+double Value::NumberValue() {
+ if (IsDeadCheck("v8::Value::NumberValue()")) return i::OS::nan_value();
+ LOG_API("NumberValue");
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ i::Handle<i::Object> num;
+ if (obj->IsNumber()) {
+ num = obj;
+ } else {
+ EXCEPTION_PREAMBLE();
+ num = i::Execution::ToNumber(obj, &has_pending_exception);
+ EXCEPTION_BAILOUT_CHECK(i::OS::nan_value());
+ }
+ return num->Number();
+}
+
+
+int64_t Value::IntegerValue() {
+ if (IsDeadCheck("v8::Value::IntegerValue()")) return 0;
+ LOG_API("IntegerValue");
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ i::Handle<i::Object> num;
+ if (obj->IsNumber()) {
+ num = obj;
+ } else {
+ EXCEPTION_PREAMBLE();
+ num = i::Execution::ToInteger(obj, &has_pending_exception);
+ EXCEPTION_BAILOUT_CHECK(0);
+ }
+ if (num->IsSmi()) {
+ return i::Smi::cast(*num)->value();
+ } else {
+ return static_cast<int64_t>(num->Number());
+ }
+}
+
+
+Local<Int32> Value::ToInt32() {
+ if (IsDeadCheck("v8::Value::ToInt32()")) return Local<Int32>();
+ LOG_API("ToInt32");
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ i::Handle<i::Object> num;
+ if (obj->IsSmi()) {
+ num = obj;
+ } else {
+ EXCEPTION_PREAMBLE();
+ num = i::Execution::ToInt32(obj, &has_pending_exception);
+ EXCEPTION_BAILOUT_CHECK(Local<Int32>());
+ }
+ return Local<Int32>(ToApi<Int32>(num));
+}
+
+
+Local<Uint32> Value::ToUint32() {
+ if (IsDeadCheck("v8::Value::ToUint32()")) return Local<Uint32>();
+ LOG_API("ToUInt32");
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ i::Handle<i::Object> num;
+ if (obj->IsSmi()) {
+ num = obj;
+ } else {
+ EXCEPTION_PREAMBLE();
+ num = i::Execution::ToUint32(obj, &has_pending_exception);
+ EXCEPTION_BAILOUT_CHECK(Local<Uint32>());
+ }
+ return Local<Uint32>(ToApi<Uint32>(num));
+}
+
+
+Local<Uint32> Value::ToArrayIndex() {
+ if (IsDeadCheck("v8::Value::ToArrayIndex()")) return Local<Uint32>();
+ LOG_API("ToArrayIndex");
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ if (obj->IsSmi()) {
+ if (i::Smi::cast(*obj)->value() >= 0) return Utils::Uint32ToLocal(obj);
+ return Local<Uint32>();
+ }
+ EXCEPTION_PREAMBLE();
+ i::Handle<i::Object> string_obj =
+ i::Execution::ToString(obj, &has_pending_exception);
+ EXCEPTION_BAILOUT_CHECK(Local<Uint32>());
+ i::Handle<i::String> str = i::Handle<i::String>::cast(string_obj);
+ uint32_t index;
+ if (str->AsArrayIndex(&index)) {
+ i::Handle<i::Object> value;
+ if (index <= static_cast<uint32_t>(i::Smi::kMaxValue)) {
+ value = i::Handle<i::Object>(i::Smi::FromInt(index));
+ } else {
+ value = i::Factory::NewNumber(index);
+ }
+ return Utils::Uint32ToLocal(value);
+ }
+ return Local<Uint32>();
+}
+
+
+int32_t Value::Int32Value() {
+ if (IsDeadCheck("v8::Value::Int32Value()")) return 0;
+ LOG_API("Int32Value");
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ if (obj->IsSmi()) {
+ return i::Smi::cast(*obj)->value();
+ } else {
+ LOG_API("Int32Value (slow)");
+ EXCEPTION_PREAMBLE();
+ i::Handle<i::Object> num =
+ i::Execution::ToInt32(obj, &has_pending_exception);
+ EXCEPTION_BAILOUT_CHECK(0);
+ if (num->IsSmi()) {
+ return i::Smi::cast(*num)->value();
+ } else {
+ return static_cast<int32_t>(num->Number());
+ }
+ }
+}
+
+
+bool Value::Equals(Handle<Value> that) {
+ if (IsDeadCheck("v8::Value::Equals()")
+ || EmptyCheck("v8::Value::Equals()", this)
+ || EmptyCheck("v8::Value::Equals()", that))
+ return false;
+ LOG_API("Equals");
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ i::Handle<i::Object> other = Utils::OpenHandle(*that);
+ i::Object** args[1] = { other.location() };
+ EXCEPTION_PREAMBLE();
+ i::Handle<i::Object> result =
+ CallV8HeapFunction("EQUALS", obj, 1, args, &has_pending_exception);
+ EXCEPTION_BAILOUT_CHECK(false);
+ return *result == i::Smi::FromInt(i::EQUAL);
+}
+
+
+bool Value::StrictEquals(Handle<Value> that) {
+ if (IsDeadCheck("v8::Value::StrictEquals()")
+ || EmptyCheck("v8::Value::StrictEquals()", this)
+ || EmptyCheck("v8::Value::StrictEquals()", that))
+ return false;
+ LOG_API("StrictEquals");
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ i::Handle<i::Object> other = Utils::OpenHandle(*that);
+ // Must check HeapNumber first, since NaN !== NaN.
+ if (obj->IsHeapNumber()) {
+ if (!other->IsNumber()) return false;
+ double x = obj->Number();
+ double y = other->Number();
+ // Must check explicitly for NaNs on Windows, but -0 works fine.
+ return x == y && !isnan(x) && !isnan(y);
+ } else if (*obj == *other) { // Also covers Booleans.
+ return true;
+ } else if (obj->IsSmi()) {
+ return other->IsNumber() && obj->Number() == other->Number();
+ } else if (obj->IsString()) {
+ return other->IsString() &&
+ i::String::cast(*obj)->Equals(i::String::cast(*other));
+ } else if (obj->IsUndefined() || obj->IsUndetectableObject()) {
+ return other->IsUndefined() || other->IsUndetectableObject();
+ } else {
+ return false;
+ }
+}
+
+
+uint32_t Value::Uint32Value() {
+ if (IsDeadCheck("v8::Value::Uint32Value()")) return 0;
+ LOG_API("Uint32Value");
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ if (obj->IsSmi()) {
+ return i::Smi::cast(*obj)->value();
+ } else {
+ EXCEPTION_PREAMBLE();
+ i::Handle<i::Object> num =
+ i::Execution::ToUint32(obj, &has_pending_exception);
+ EXCEPTION_BAILOUT_CHECK(0);
+ if (num->IsSmi()) {
+ return i::Smi::cast(*num)->value();
+ } else {
+ return static_cast<uint32_t>(num->Number());
+ }
+ }
+}
+
+
+bool v8::Object::Set(v8::Handle<Value> key, v8::Handle<Value> value,
+ v8::PropertyAttribute attribs) {
+ ON_BAILOUT("v8::Object::Set()", return false);
+ i::Handle<i::Object> self = Utils::OpenHandle(this);
+ i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
+ i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
+ EXCEPTION_PREAMBLE();
+ i::Handle<i::Object> obj = i::SetProperty(
+ self,
+ key_obj,
+ value_obj,
+ static_cast<PropertyAttributes>(attribs));
+ has_pending_exception = obj.is_null();
+ EXCEPTION_BAILOUT_CHECK(false);
+ return true;
+}
+
+
+Local<Value> v8::Object::Get(v8::Handle<Value> key) {
+ ON_BAILOUT("v8::Object::Get()", return Local<v8::Value>());
+ i::Handle<i::Object> self = Utils::OpenHandle(this);
+ i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
+ EXCEPTION_PREAMBLE();
+ i::Handle<i::Object> result = i::GetProperty(self, key_obj);
+ has_pending_exception = result.is_null();
+ EXCEPTION_BAILOUT_CHECK(Local<Value>());
+ return Utils::ToLocal(result);
+}
+
+
+Local<Value> v8::Object::GetPrototype() {
+ ON_BAILOUT("v8::Object::GetPrototype()", return Local<v8::Value>());
+ i::Handle<i::Object> self = Utils::OpenHandle(this);
+ i::Handle<i::Object> result = i::GetPrototype(self);
+ return Utils::ToLocal(result);
+}
+
+
+Local<String> v8::Object::ObjectProtoToString() {
+ ON_BAILOUT("v8::Object::ObjectProtoToString()", return Local<v8::String>());
+ i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+
+ i::Handle<i::Object> name(self->class_name());
+
+ // Native implementation of Object.prototype.toString (v8natives.js):
+ // var c = %ClassOf(this);
+ // if (c === 'Arguments') c = 'Object';
+ // return "[object " + c + "]";
+
+ if (!name->IsString()) {
+ return v8::String::New("[object ]");
+
+ } else {
+ i::Handle<i::String> class_name = i::Handle<i::String>::cast(name);
+ if (class_name->IsEqualTo(i::CStrVector("Arguments"))) {
+ return v8::String::New("[object Object]");
+
+ } else {
+ const char* prefix = "[object ";
+ Local<String> str = Utils::ToLocal(class_name);
+ const char* postfix = "]";
+
+ size_t prefix_len = strlen(prefix);
+ size_t str_len = str->Length();
+ size_t postfix_len = strlen(postfix);
+
+ size_t buf_len = prefix_len + str_len + postfix_len;
+ char* buf = i::NewArray<char>(buf_len);
+
+ // Write prefix.
+ char* ptr = buf;
+ memcpy(ptr, prefix, prefix_len * v8::internal::kCharSize);
+ ptr += prefix_len;
+
+ // Write real content.
+ str->WriteAscii(ptr, 0, str_len);
+ ptr += str_len;
+
+ // Write postfix.
+ memcpy(ptr, postfix, postfix_len * v8::internal::kCharSize);
+
+ // Copy the buffer into a heap-allocated string and return it.
+ Local<String> result = v8::String::New(buf, buf_len);
+ i::DeleteArray(buf);
+ return result;
+ }
+ }
+}
+
+
+bool v8::Object::Delete(v8::Handle<String> key) {
+ ON_BAILOUT("v8::Object::Delete()", return false);
+ HandleScope scope;
+ i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+ i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
+ return i::DeleteProperty(self, key_obj)->IsTrue();
+}
+
+
+bool v8::Object::Has(v8::Handle<String> key) {
+ ON_BAILOUT("v8::Object::Has()", return false);
+ i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+ i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
+ return self->HasProperty(*key_obj);
+}
+
+
+bool v8::Object::Delete(uint32_t index) {
+ ON_BAILOUT("v8::Object::DeleteProperty()", return false);
+ HandleScope scope;
+ i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+ return i::DeleteElement(self, index)->IsTrue();
+}
+
+
+bool v8::Object::Has(uint32_t index) {
+ ON_BAILOUT("v8::Object::HasProperty()", return false);
+ i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+ return self->HasElement(index);
+}
+
+
+bool v8::Object::HasRealNamedProperty(Handle<String> key) {
+ ON_BAILOUT("v8::Object::HasRealNamedProperty()", return false);
+ return Utils::OpenHandle(this)->HasRealNamedProperty(
+ *Utils::OpenHandle(*key));
+}
+
+
+bool v8::Object::HasRealIndexedProperty(uint32_t index) {
+ ON_BAILOUT("v8::Object::HasRealIndexedProperty()", return false);
+ return Utils::OpenHandle(this)->HasRealElementProperty(index);
+}
+
+
+bool v8::Object::HasRealNamedCallbackProperty(Handle<String> key) {
+ ON_BAILOUT("v8::Object::HasRealNamedCallbackProperty()", return false);
+ return Utils::OpenHandle(this)->HasRealNamedCallbackProperty(
+ *Utils::OpenHandle(*key));
+}
+
+
+bool v8::Object::HasNamedLookupInterceptor() {
+ ON_BAILOUT("v8::Object::HasNamedLookupInterceptor()", return false);
+ return Utils::OpenHandle(this)->HasNamedInterceptor();
+}
+
+
+bool v8::Object::HasIndexedLookupInterceptor() {
+ ON_BAILOUT("v8::Object::HasIndexedLookupInterceptor()", return false);
+ return Utils::OpenHandle(this)->HasIndexedInterceptor();
+}
+
+
+Handle<Value> v8::Object::GetRealNamedPropertyInPrototypeChain(
+ Handle<String> key) {
+ ON_BAILOUT("v8::Object::GetRealNamedPropertyInPrototypeChain()",
+ return Local<Value>());
+ i::Handle<i::JSObject> self_obj = Utils::OpenHandle(this);
+ i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
+ i::LookupResult lookup;
+ self_obj->LookupRealNamedPropertyInPrototypes(*key_obj, &lookup);
+ if (lookup.IsValid()) {
+ PropertyAttributes attributes;
+ i::Handle<i::Object> result(self_obj->GetProperty(*self_obj,
+ &lookup,
+ *key_obj,
+ &attributes));
+ return Utils::ToLocal(result);
+ }
+ return Local<Value>(); // No real property was found in prototype chain.
+}
+
+
+Local<v8::Object> Function::NewInstance() {
+ return NewInstance(0, NULL);
+}
+
+
+Local<v8::Object> Function::NewInstance(int argc,
+ v8::Handle<v8::Value> argv[]) {
+ ON_BAILOUT("v8::Function::NewInstance()", return Local<v8::Object>());
+ LOG_API("Function::NewInstance");
+ HandleScope scope;
+ i::Handle<i::JSFunction> function = Utils::OpenHandle(this);
+ STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
+ i::Object*** args = reinterpret_cast<i::Object***>(argv);
+ EXCEPTION_PREAMBLE();
+ i::Handle<i::Object> returned =
+ i::Execution::New(function, argc, args, &has_pending_exception);
+ EXCEPTION_BAILOUT_CHECK(Local<v8::Object>());
+ return scope.Close(Utils::ToLocal(i::Handle<i::JSObject>::cast(returned)));
+}
+
+
+Local<v8::Value> Function::Call(v8::Handle<v8::Object> recv, int argc,
+ v8::Handle<v8::Value> argv[]) {
+ ON_BAILOUT("v8::Function::Call()", return Local<v8::Value>());
+ LOG_API("Function::Call");
+ i::Object* raw_result = NULL;
+ {
+ HandleScope scope;
+ i::Handle<i::JSFunction> fun = Utils::OpenHandle(this);
+ i::Handle<i::Object> recv_obj = Utils::OpenHandle(*recv);
+ STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
+ i::Object*** args = reinterpret_cast<i::Object***>(argv);
+ EXCEPTION_PREAMBLE();
+ i::Handle<i::Object> returned =
+ i::Execution::Call(fun, recv_obj, argc, args, &has_pending_exception);
+ EXCEPTION_BAILOUT_CHECK(Local<Object>());
+ raw_result = *returned;
+ }
+ i::Handle<i::Object> result(raw_result);
+ return Utils::ToLocal(result);
+}
+
+
+void Function::SetName(v8::Handle<v8::String> name) {
+ i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
+ func->shared()->set_name(*Utils::OpenHandle(*name));
+}
+
+
+Handle<Value> Function::GetName() {
+ i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
+ return Utils::ToLocal(i::Handle<i::Object>(func->shared()->name()));
+}
+
+
+int String::Length() {
+ if (IsDeadCheck("v8::String::Length()")) return 0;
+ return Utils::OpenHandle(this)->length();
+}
+
+
+int String::WriteAscii(char* buffer, int start, int length) {
+ if (IsDeadCheck("v8::String::WriteAscii()")) return 0;
+ LOG_API("String::WriteAscii");
+ ASSERT(start >= 0 && length >= -1);
+ i::Handle<i::String> str = Utils::OpenHandle(this);
+ // Flatten the string for efficiency. This applies whether we are
+ // using StringInputBuffer or Get(i) to access the characters.
+ str->TryFlatten();
+ int end = length;
+ if (length == -1 || length > str->length() - start)
+ end = str->length() - start;
+ if (end < 0) return 0;
+ write_input_buffer.Reset(start, *str);
+ int i;
+ for (i = 0; i < end; i++) {
+ char c = static_cast<char>(write_input_buffer.GetNext());
+ if (c == '\0') c = ' ';
+ buffer[i] = c;
+ }
+ if (length == -1 || i < length)
+ buffer[i] = '\0';
+ return i;
+}
+
+
+int String::Write(uint16_t* buffer, int start, int length) {
+ if (IsDeadCheck("v8::String::Write()")) return 0;
+ LOG_API("String::Write");
+ ASSERT(start >= 0 && length >= -1);
+ i::Handle<i::String> str = Utils::OpenHandle(this);
+ // Flatten the string for efficiency. This applies whether we are
+ // using StringInputBuffer or Get(i) to access the characters.
+ str->TryFlatten();
+ int end = length;
+ if (length == -1 || length > str->length() - start)
+ end = str->length() - start;
+ if (end < 0) return 0;
+ write_input_buffer.Reset(start, *str);
+ int i;
+ for (i = 0; i < end; i++)
+ buffer[i] = write_input_buffer.GetNext();
+ if (length == -1 || i < length)
+ buffer[i] = '\0';
+ return i;
+}
+
+
+bool v8::String::IsExternal() {
+ EnsureInitialized("v8::String::IsExternal()");
+ i::Handle<i::String> str = Utils::OpenHandle(this);
+ return str->IsExternalTwoByteString();
+}
+
+
+bool v8::String::IsExternalAscii() {
+ EnsureInitialized("v8::String::IsExternalAscii()");
+ i::Handle<i::String> str = Utils::OpenHandle(this);
+ return str->IsExternalAsciiString();
+}
+
+
+v8::String::ExternalStringResource* v8::String::GetExternalStringResource() {
+ EnsureInitialized("v8::String::GetExternalStringResource()");
+ i::Handle<i::String> str = Utils::OpenHandle(this);
+ ASSERT(str->IsExternalTwoByteString());
+ void* resource = i::Handle<i::ExternalTwoByteString>::cast(str)->resource();
+ return reinterpret_cast<ExternalStringResource*>(resource);
+}
+
+
+v8::String::ExternalAsciiStringResource*
+ v8::String::GetExternalAsciiStringResource() {
+ EnsureInitialized("v8::String::GetExternalAsciiStringResource()");
+ i::Handle<i::String> str = Utils::OpenHandle(this);
+ ASSERT(str->IsExternalAsciiString());
+ void* resource = i::Handle<i::ExternalAsciiString>::cast(str)->resource();
+ return reinterpret_cast<ExternalAsciiStringResource*>(resource);
+}
+
+
+double Number::Value() {
+ if (IsDeadCheck("v8::Number::Value()")) return 0;
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ return obj->Number();
+}
+
+
+bool Boolean::Value() {
+ if (IsDeadCheck("v8::Boolean::Value()")) return false;
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ return obj->IsTrue();
+}
+
+
+int64_t Integer::Value() {
+ if (IsDeadCheck("v8::Integer::Value()")) return 0;
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ if (obj->IsSmi()) {
+ return i::Smi::cast(*obj)->value();
+ } else {
+ return static_cast<int64_t>(obj->Number());
+ }
+}
+
+
+int32_t Int32::Value() {
+ if (IsDeadCheck("v8::Int32::Value()")) return 0;
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ if (obj->IsSmi()) {
+ return i::Smi::cast(*obj)->value();
+ } else {
+ return static_cast<int32_t>(obj->Number());
+ }
+}
+
+
+void* External::Value() {
+ if (IsDeadCheck("v8::External::Value()")) return 0;
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ return reinterpret_cast<void*>(i::Proxy::cast(*obj)->proxy());
+}
+
+
+int v8::Object::InternalFieldCount() {
+ if (IsDeadCheck("v8::Object::InternalFieldCount()")) return 0;
+ i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
+ return obj->GetInternalFieldCount();
+}
+
+
+Local<Value> v8::Object::GetInternal(int index) {
+ if (IsDeadCheck("v8::Object::GetInternal()")) return Local<Value>();
+ i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
+ i::Handle<i::Object> value(obj->GetInternalField(index));
+ return Utils::ToLocal(value);
+}
+
+
+void v8::Object::SetInternal(int index, v8::Handle<Value> value) {
+ if (IsDeadCheck("v8::Object::SetInternal()")) return;
+ i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
+ i::Handle<i::Object> val = Utils::OpenHandle(*value);
+ obj->SetInternalField(index, *val);
+}
+
+
+// --- E n v i r o n m e n t ---
+
+bool v8::V8::Initialize() {
+ if (i::V8::HasBeenSetup()) return true;
+ HandleScope scope;
+ if (i::Snapshot::Initialize()) {
+ i::Serializer::disable();
+ return true;
+ } else {
+ return i::V8::Initialize(NULL);
+ }
+}
+
+
+Persistent<Context> v8::Context::New(v8::ExtensionConfiguration* extensions,
+ v8::Handle<ObjectTemplate> global_template,
+ v8::Handle<Value> global_object) {
+ EnsureInitialized("v8::Context::New()");
+ LOG_API("Context::New");
+ ON_BAILOUT("v8::Context::New()", return Persistent<Context>());
+ // Make sure that the global_template has a constructor.
+ if (!global_template.IsEmpty() &&
+ Utils::OpenHandle(*global_template)->constructor()->IsUndefined()) {
+ Local<FunctionTemplate> templ = FunctionTemplate::New();
+ Utils::OpenHandle(*templ)->set_instance_template(
+ *Utils::OpenHandle(*global_template));
+ i::Handle<i::FunctionTemplateInfo> constructor = Utils::OpenHandle(*templ);
+ Utils::OpenHandle(*global_template)->set_constructor(*constructor);
+ }
+
+ i::Handle<i::Context> env = i::Bootstrapper::CreateEnvironment(
+ Utils::OpenHandle(*global_object),
+ global_template, extensions);
+ if (!ApiCheck(!env.is_null(),
+ "v8::Context::New()",
+ "Could not initialize environment"))
+ return Persistent<Context>();
+ return Persistent<Context>(Utils::ToLocal(env));
+}
+
+
+void v8::Context::SetSecurityToken(Handle<Value> token) {
+ i::Handle<i::Context> env = Utils::OpenHandle(this);
+ i::Handle<i::Object> token_handle = Utils::OpenHandle(*token);
+ // The global object of an environment is always a real global
+ // object with security token and reference to the builtins object.
+ i::JSGlobalObject::cast(env->global())->set_security_token(*token_handle);
+}
+
+
+Handle<Value> v8::Context::GetSecurityToken() {
+ i::Handle<i::Context> env = Utils::OpenHandle(this);
+ i::Object* security_token =
+ i::JSGlobalObject::cast(env->global())->security_token();
+ i::Handle<i::Object> token_handle(security_token);
+ return Utils::ToLocal(token_handle);
+}
+
+
+bool Context::HasOutOfMemoryException() {
+ i::Handle<i::Context> env = Utils::OpenHandle(this);
+ return env->has_out_of_memory();
+}
+
+
+bool Context::InContext() {
+ return i::Top::context() != NULL;
+}
+
+
+bool Context::InSecurityContext() {
+ return i::Top::security_context() != NULL;
+}
+
+
+v8::Local<v8::Context> Context::Current() {
+ if (IsDeadCheck("v8::Context::Current()")) return Local<Context>();
+ i::Handle<i::Context> context(i::Top::global_context());
+ return Utils::ToLocal(context);
+}
+
+
+v8::Local<v8::Context> Context::GetSecurityContext() {
+ if (IsDeadCheck("v8::Context::GetSecurityContext()")) return Local<Context>();
+ ASSERT(i::Top::security_context() != NULL);
+ i::Handle<i::Context> context(i::Top::security_context());
+ return Utils::ToLocal(context);
+}
+
+
+v8::Local<v8::Object> Context::Global() {
+ if (IsDeadCheck("v8::Context::Global()")) return Local<v8::Object>();
+ i::Object** ctx = reinterpret_cast<i::Object**>(this);
+ i::Handle<i::Context> context =
+ i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx));
+ i::Handle<i::JSObject> global(context->global());
+ return Utils::ToLocal(global);
+}
+
+
+Local<v8::Object> ObjectTemplate::NewInstance() {
+ ON_BAILOUT("v8::ObjectTemplate::NewInstance()", return Local<v8::Object>());
+ LOG_API("ObjectTemplate::NewInstance");
+ EXCEPTION_PREAMBLE();
+ i::Handle<i::Object> obj =
+ i::Execution::InstantiateObject(Utils::OpenHandle(this),
+ &has_pending_exception);
+ EXCEPTION_BAILOUT_CHECK(Local<v8::Object>());
+ return Utils::ToLocal(i::Handle<i::JSObject>::cast(obj));
+}
+
+
+Local<v8::Function> FunctionTemplate::GetFunction() {
+ ON_BAILOUT("v8::FunctionTemplate::GetFunction()",
+ return Local<v8::Function>());
+ LOG_API("FunctionTemplate::GetFunction");
+ EXCEPTION_PREAMBLE();
+ i::Handle<i::Object> obj =
+ i::Execution::InstantiateFunction(Utils::OpenHandle(this),
+ &has_pending_exception);
+ EXCEPTION_BAILOUT_CHECK(Local<v8::Function>());
+ return Utils::ToLocal(i::Handle<i::JSFunction>::cast(obj));
+}
+
+
+bool FunctionTemplate::HasInstance(v8::Handle<v8::Value> value) {
+ ON_BAILOUT("v8::FunctionTemplate::HasInstance()", return false);
+ i::Object* obj = *Utils::OpenHandle(*value);
+ return obj->IsInstanceOf(*Utils::OpenHandle(this));
+}
+
+
+Local<External> v8::External::New(void* data) {
+ STATIC_ASSERT(sizeof(data) == sizeof(i::Address));
+ LOG_API("External::New");
+ EnsureInitialized("v8::External::New()");
+ i::Handle<i::Proxy> obj = i::Factory::NewProxy(static_cast<i::Address>(data));
+ return Utils::ToLocal(obj);
+}
+
+
+Local<String> v8::String::New(const char* data, int length) {
+ EnsureInitialized("v8::String::New()");
+ LOG_API("String::New(char)");
+ if (length == -1) length = strlen(data);
+ i::Handle<i::String> result =
+ i::Factory::NewStringFromUtf8(i::Vector<const char>(data, length));
+ return Utils::ToLocal(result);
+}
+
+
+Local<String> v8::String::NewUndetectable(const char* data, int length) {
+ EnsureInitialized("v8::String::NewUndetectable()");
+ LOG_API("String::NewUndetectable(char)");
+ if (length == -1) length = strlen(data);
+ i::Handle<i::String> result =
+ i::Factory::NewStringFromUtf8(i::Vector<const char>(data, length));
+ result->MarkAsUndetectable();
+ return Utils::ToLocal(result);
+}
+
+
+static int TwoByteStringLength(const uint16_t* data) {
+ int length = 0;
+ while (data[length] != '\0') length++;
+ return length;
+}
+
+
+Local<String> v8::String::New(const uint16_t* data, int length) {
+ EnsureInitialized("v8::String::New()");
+ LOG_API("String::New(uint16_)");
+ if (length == -1) length = TwoByteStringLength(data);
+ i::Handle<i::String> result =
+ i::Factory::NewStringFromTwoByte(i::Vector<const uint16_t>(data, length));
+ return Utils::ToLocal(result);
+}
+
+
+Local<String> v8::String::NewUndetectable(const uint16_t* data, int length) {
+ EnsureInitialized("v8::String::NewUndetectable()");
+ LOG_API("String::NewUndetectable(uint16_)");
+ if (length == -1) length = TwoByteStringLength(data);
+ i::Handle<i::String> result =
+ i::Factory::NewStringFromTwoByte(i::Vector<const uint16_t>(data, length));
+ result->MarkAsUndetectable();
+ return Utils::ToLocal(result);
+}
+
+
+i::Handle<i::String> NewExternalStringHandle(
+ v8::String::ExternalStringResource* resource) {
+ i::Handle<i::String> result =
+ i::Factory::NewExternalStringFromTwoByte(resource);
+ return result;
+}
+
+
+i::Handle<i::String> NewExternalAsciiStringHandle(
+ v8::String::ExternalAsciiStringResource* resource) {
+ i::Handle<i::String> result =
+ i::Factory::NewExternalStringFromAscii(resource);
+ return result;
+}
+
+
+static void DisposeExternalString(v8::Persistent<v8::Object> obj,
+ void* parameter) {
+ v8::String::ExternalStringResource* resource =
+ reinterpret_cast<v8::String::ExternalStringResource*>(parameter);
+ const size_t total_size = resource->length() * sizeof(*resource->data());
+ i::Counters::total_external_string_memory.Decrement(total_size);
+ delete resource;
+ obj.Dispose();
+}
+
+
+static void DisposeExternalAsciiString(v8::Persistent<v8::Object> obj,
+ void* parameter) {
+ v8::String::ExternalAsciiStringResource* resource =
+ reinterpret_cast<v8::String::ExternalAsciiStringResource*>(parameter);
+ const size_t total_size = resource->length() * sizeof(*resource->data());
+ i::Counters::total_external_string_memory.Decrement(total_size);
+ delete resource;
+ obj.Dispose();
+}
+
+
+Local<String> v8::String::NewExternal(
+ v8::String::ExternalStringResource* resource) {
+ EnsureInitialized("v8::String::NewExternal()");
+ LOG_API("String::NewExternal");
+ const size_t total_size = resource->length() * sizeof(*resource->data());
+ i::Counters::total_external_string_memory.Increment(total_size);
+ i::Handle<i::String> result = NewExternalStringHandle(resource);
+ i::Handle<i::Object> handle = i::GlobalHandles::Create(*result);
+ i::GlobalHandles::MakeWeak(handle.location(),
+ resource,
+ &DisposeExternalString);
+ return Utils::ToLocal(result);
+}
+
+
+Local<String> v8::String::NewExternal(
+ v8::String::ExternalAsciiStringResource* resource) {
+ EnsureInitialized("v8::String::NewExternal()");
+ LOG_API("String::NewExternal");
+ const size_t total_size = resource->length() * sizeof(*resource->data());
+ i::Counters::total_external_string_memory.Increment(total_size);
+ i::Handle<i::String> result = NewExternalAsciiStringHandle(resource);
+ i::Handle<i::Object> handle = i::GlobalHandles::Create(*result);
+ i::GlobalHandles::MakeWeak(handle.location(),
+ resource,
+ &DisposeExternalAsciiString);
+ return Utils::ToLocal(result);
+}
+
+
+Local<v8::Object> v8::Object::New() {
+ EnsureInitialized("v8::Object::New()");
+ LOG_API("Object::New");
+ i::Handle<i::JSObject> obj =
+ i::Factory::NewJSObject(i::Top::object_function());
+ return Utils::ToLocal(obj);
+}
+
+
+Local<v8::Value> v8::Date::New(double time) {
+ EnsureInitialized("v8::Date::New()");
+ LOG_API("Date::New");
+ EXCEPTION_PREAMBLE();
+ i::Handle<i::Object> obj =
+ i::Execution::NewDate(time, &has_pending_exception);
+ EXCEPTION_BAILOUT_CHECK(Local<v8::Value>());
+ return Utils::ToLocal(obj);
+}
+
+
+Local<v8::Array> v8::Array::New(int length) {
+ EnsureInitialized("v8::Array::New()");
+ LOG_API("Array::New");
+ i::Handle<i::JSArray> obj = i::Factory::NewJSArray(length);
+ return Utils::ToLocal(obj);
+}
+
+
+uint32_t v8::Array::Length() {
+ if (IsDeadCheck("v8::Array::Length()")) return 0;
+ i::Handle<i::JSArray> obj = Utils::OpenHandle(this);
+ i::Object* length = obj->length();
+ if (length->IsSmi()) {
+ return i::Smi::cast(length)->value();
+ } else {
+ return static_cast<uint32_t>(length->Number());
+ }
+}
+
+
+Local<String> v8::String::NewSymbol(const char* data, int length) {
+ EnsureInitialized("v8::String::NewSymbol()");
+ LOG_API("String::NewSymbol(char)");
+ if (length == -1) length = strlen(data);
+ i::Handle<i::String> result =
+ i::Factory::LookupSymbol(i::Vector<const char>(data, length));
+ return Utils::ToLocal(result);
+}
+
+
+Local<Number> v8::Number::New(double value) {
+ EnsureInitialized("v8::Number::New()");
+ i::Handle<i::Object> result = i::Factory::NewNumber(value);
+ return Utils::NumberToLocal(result);
+}
+
+
+Local<Integer> v8::Integer::New(int32_t value) {
+ EnsureInitialized("v8::Integer::New()");
+ if (i::Smi::IsValid(value)) {
+ return Utils::IntegerToLocal(i::Handle<i::Object>(i::Smi::FromInt(value)));
+ }
+ i::Handle<i::Object> result = i::Factory::NewNumber(value);
+ return Utils::IntegerToLocal(result);
+}
+
+
+void V8::IgnoreOutOfMemoryException() {
+ thread_local.SetIgnoreOutOfMemory(true);
+}
+
+
+bool V8::AddMessageListener(MessageCallback that, Handle<Value> data) {
+ EnsureInitialized("v8::V8::AddMessageListener()");
+ ON_BAILOUT("v8::V8::AddMessageListener()", return false);
+ HandleScope scope;
+ NeanderArray listeners(i::Factory::message_listeners());
+ NeanderObject obj(2);
+ obj.set(0, *i::Factory::NewProxy(FUNCTION_ADDR(that)));
+ obj.set(1, data.IsEmpty() ?
+ i::Heap::undefined_value() :
+ *Utils::OpenHandle(*data));
+ listeners.add(obj.value());
+ return true;
+}
+
+
+void V8::RemoveMessageListeners(MessageCallback that) {
+ EnsureInitialized("v8::V8::RemoveMessageListener()");
+ ON_BAILOUT("v8::V8::RemoveMessageListeners()", return);
+ HandleScope scope;
+ NeanderArray listeners(i::Factory::message_listeners());
+ for (int i = 0; i < listeners.length(); i++) {
+ if (listeners.get(i)->IsUndefined()) continue; // skip deleted ones
+
+ NeanderObject listener(i::JSObject::cast(listeners.get(i)));
+ i::Handle<i::Proxy> callback_obj(i::Proxy::cast(listener.get(0)));
+ if (callback_obj->proxy() == FUNCTION_ADDR(that)) {
+ listeners.set(i, i::Heap::undefined_value());
+ }
+ }
+}
+
+
+void V8::SetCounterFunction(CounterLookupCallback callback) {
+ if (IsDeadCheck("v8::V8::SetCounterFunction()")) return;
+ i::StatsTable::SetCounterFunction(callback);
+}
+
+
+void V8::EnableSlidingStateWindow() {
+ if (IsDeadCheck("v8::V8::EnableSlidingStateWindow()")) return;
+ i::Logger::EnableSlidingStateWindow();
+}
+
+
+void V8::SetFailedAccessCheckCallbackFunction(
+ FailedAccessCheckCallback callback) {
+ if (IsDeadCheck("v8::V8::SetFailedAccessCheckCallbackFunction()")) return;
+ i::Top::SetFailedAccessCheckCallback(callback);
+}
+
+
+void V8::AddObjectToGroup(void* group_id, Persistent<Object> obj) {
+ if (IsDeadCheck("v8::V8::AddObjectToGroup()")) return;
+ i::GlobalHandles::AddToGroup(group_id, reinterpret_cast<i::Object**>(*obj));
+}
+
+
+void V8::SetGlobalGCPrologueCallback(GCCallback callback) {
+ if (IsDeadCheck("v8::V8::SetGlobalGCPrologueCallback()")) return;
+ i::Heap::SetGlobalGCPrologueCallback(callback);
+}
+
+
+void V8::SetGlobalGCEpilogueCallback(GCCallback callback) {
+ if (IsDeadCheck("v8::V8::SetGlobalGCEpilogueCallback()")) return;
+ i::Heap::SetGlobalGCEpilogueCallback(callback);
+}
+
+
+String::AsciiValue::AsciiValue(v8::Handle<v8::Value> obj) {
+ EnsureInitialized("v8::String::AsciiValue::AsciiValue()");
+ HandleScope scope;
+ Handle<String> str = obj->ToString();
+ int length = str->Length();
+ str_ = i::NewArray<char>(length + 1);
+ str->WriteAscii(str_);
+}
+
+
+String::AsciiValue::~AsciiValue() {
+ i::DeleteArray(str_);
+}
+
+
+String::Value::Value(v8::Handle<v8::Value> obj) {
+ EnsureInitialized("v8::String::Value::Value()");
+ HandleScope scope;
+ Handle<String> str = obj->ToString();
+ int length = str->Length();
+ str_ = i::NewArray<uint16_t>(length + 1);
+ str->Write(str_);
+}
+
+
+String::Value::~Value() {
+ i::DeleteArray(str_);
+}
+
+Local<Value> Exception::RangeError(v8::Handle<v8::String> raw_message) {
+ LOG_API("RangeError");
+ ON_BAILOUT("v8::Exception::RangeError()", return Local<Value>());
+ i::Object* error;
+ {
+ HandleScope scope;
+ i::Handle<i::String> message = Utils::OpenHandle(*raw_message);
+ i::Handle<i::Object> result = i::Factory::NewRangeError(message);
+ error = *result;
+ }
+ i::Handle<i::Object> result(error);
+ return Utils::ToLocal(result);
+}
+
+Local<Value> Exception::ReferenceError(v8::Handle<v8::String> raw_message) {
+ LOG_API("ReferenceError");
+ ON_BAILOUT("v8::Exception::ReferenceError()", return Local<Value>());
+ i::Object* error;
+ {
+ HandleScope scope;
+ i::Handle<i::String> message = Utils::OpenHandle(*raw_message);
+ i::Handle<i::Object> result = i::Factory::NewReferenceError(message);
+ error = *result;
+ }
+ i::Handle<i::Object> result(error);
+ return Utils::ToLocal(result);
+}
+
+Local<Value> Exception::SyntaxError(v8::Handle<v8::String> raw_message) {
+ LOG_API("SyntaxError");
+ ON_BAILOUT("v8::Exception::SyntaxError()", return Local<Value>());
+ i::Object* error;
+ {
+ HandleScope scope;
+ i::Handle<i::String> message = Utils::OpenHandle(*raw_message);
+ i::Handle<i::Object> result = i::Factory::NewSyntaxError(message);
+ error = *result;
+ }
+ i::Handle<i::Object> result(error);
+ return Utils::ToLocal(result);
+}
+
+Local<Value> Exception::TypeError(v8::Handle<v8::String> raw_message) {
+ LOG_API("TypeError");
+ ON_BAILOUT("v8::Exception::TypeError()", return Local<Value>());
+ i::Object* error;
+ {
+ HandleScope scope;
+ i::Handle<i::String> message = Utils::OpenHandle(*raw_message);
+ i::Handle<i::Object> result = i::Factory::NewTypeError(message);
+ error = *result;
+ }
+ i::Handle<i::Object> result(error);
+ return Utils::ToLocal(result);
+}
+
+Local<Value> Exception::Error(v8::Handle<v8::String> raw_message) {
+ LOG_API("Error");
+ ON_BAILOUT("v8::Exception::Error()", return Local<Value>());
+ i::Object* error;
+ {
+ HandleScope scope;
+ i::Handle<i::String> message = Utils::OpenHandle(*raw_message);
+ i::Handle<i::Object> result = i::Factory::NewError(message);
+ error = *result;
+ }
+ i::Handle<i::Object> result(error);
+ return Utils::ToLocal(result);
+}
+
+
+// --- D e b u g S u p p o r t ---
+
+
+bool Debug::AddDebugEventListener(DebugEventCallback that, Handle<Value> data) {
+ EnsureInitialized("v8::V8::AddDebugEventListener()");
+ ON_BAILOUT("v8::V8::AddDebugEventListener()", return false);
+ HandleScope scope;
+ NeanderArray listeners(i::Factory::debug_event_listeners());
+ NeanderObject obj(2);
+ obj.set(0, *i::Factory::NewProxy(FUNCTION_ADDR(that)));
+ obj.set(1, data.IsEmpty() ?
+ i::Heap::undefined_value() :
+ *Utils::OpenHandle(*data));
+ listeners.add(obj.value());
+ i::Debugger::UpdateActiveDebugger();
+ return true;
+}
+
+
+bool Debug::AddDebugEventListener(v8::Handle<v8::Function> that,
+ Handle<Value> data) {
+ ON_BAILOUT("v8::V8::AddDebugEventListener()", return false);
+ HandleScope scope;
+ NeanderArray listeners(i::Factory::debug_event_listeners());
+ NeanderObject obj(2);
+ obj.set(0, *Utils::OpenHandle(*that));
+ obj.set(1, data.IsEmpty() ?
+ i::Heap::undefined_value() :
+ *Utils::OpenHandle(*data));
+ listeners.add(obj.value());
+ i::Debugger::UpdateActiveDebugger();
+ return true;
+}
+
+
+void Debug::RemoveDebugEventListener(DebugEventCallback that) {
+ EnsureInitialized("v8::V8::RemoveDebugEventListener()");
+ ON_BAILOUT("v8::V8::RemoveDebugEventListener()", return);
+ HandleScope scope;
+ NeanderArray listeners(i::Factory::debug_event_listeners());
+ for (int i = 0; i < listeners.length(); i++) {
+ if (listeners.get(i)->IsUndefined()) continue; // skip deleted ones
+
+ NeanderObject listener(i::JSObject::cast(listeners.get(i)));
+ // When removing a C debug event listener only consider proxy objects.
+ if (listener.get(0)->IsProxy()) {
+ i::Handle<i::Proxy> callback_obj(i::Proxy::cast(listener.get(0)));
+ if (callback_obj->proxy() == FUNCTION_ADDR(that)) {
+ listeners.set(i, i::Heap::undefined_value());
+ }
+ }
+ }
+ i::Debugger::UpdateActiveDebugger();
+}
+
+
+void Debug::RemoveDebugEventListener(v8::Handle<v8::Function> that) {
+ ON_BAILOUT("v8::V8::RemoveDebugEventListener()", return);
+ HandleScope scope;
+ NeanderArray listeners(i::Factory::debug_event_listeners());
+ for (int i = 0; i < listeners.length(); i++) {
+ if (listeners.get(i)->IsUndefined()) continue; // skip deleted ones
+
+ NeanderObject listener(i::JSObject::cast(listeners.get(i)));
+ // When removing a JavaScript debug event listener only consider JavaScript
+ // function objects.
+ if (listener.get(0)->IsJSFunction()) {
+ i::JSFunction* callback = i::JSFunction::cast(listener.get(0));
+ i::Handle<i::JSFunction> callback_fun(callback);
+ if (callback_fun.is_identical_to(Utils::OpenHandle(*that))) {
+ listeners.set(i, i::Heap::undefined_value());
+ }
+ }
+ }
+ i::Debugger::UpdateActiveDebugger();
+}
+
+
+void Debug::DebugBreak() {
+ i::StackGuard::DebugBreak();
+}
+
+
+void Debug::SetMessageHandler(v8::DebugMessageHandler handler, void* data) {
+ i::Debugger::SetMessageHandler(handler, data);
+}
+
+
+void Debug::SendCommand(const uint16_t* command, int length) {
+ i::Debugger::ProcessCommand(i::Vector<const uint16_t>(command, length));
+}
+
+
+namespace internal {
+
+
+HandleScopeImplementer* HandleScopeImplementer::instance() {
+ return &thread_local;
+}
+
+
+char* HandleScopeImplementer::ArchiveThread(char* storage) {
+ return thread_local.ArchiveThreadHelper(storage);
+}
+
+
+char* HandleScopeImplementer::ArchiveThreadHelper(char* storage) {
+ ImplementationUtilities::HandleScopeData* current =
+ ImplementationUtilities::CurrentHandleScope();
+ handle_scope_data_ = *current;
+ memcpy(storage, this, sizeof(*this));
+
+ Initialize();
+ current->Initialize();
+
+ return storage + ArchiveSpacePerThread();
+}
+
+
+int HandleScopeImplementer::ArchiveSpacePerThread() {
+ return sizeof(thread_local);
+}
+
+
+char* HandleScopeImplementer::RestoreThread(char* storage) {
+ return thread_local.RestoreThreadHelper(storage);
+}
+
+
+char* HandleScopeImplementer::RestoreThreadHelper(char* storage) {
+ memcpy(this, storage, sizeof(*this));
+ *ImplementationUtilities::CurrentHandleScope() = handle_scope_data_;
+ return storage + ArchiveSpacePerThread();
+}
+
+
+void HandleScopeImplementer::Iterate(
+ ObjectVisitor* v,
+ List<void**>* blocks,
+ ImplementationUtilities::HandleScopeData* handle_data) {
+ // Iterate over all handles in the blocks except for the last.
+ for (int i = blocks->length() - 2; i >= 0; --i) {
+ Object** block =
+ reinterpret_cast<Object**>(blocks->at(i));
+ v->VisitPointers(block, &block[kHandleBlockSize]);
+ }
+
+ // Iterate over live handles in the last block (if any).
+ if (!blocks->is_empty()) {
+ v->VisitPointers(reinterpret_cast<Object**>(blocks->last()),
+ reinterpret_cast<Object**>(handle_data->next));
+ }
+}
+
+
+void HandleScopeImplementer::Iterate(ObjectVisitor* v) {
+ ImplementationUtilities::HandleScopeData* current =
+ ImplementationUtilities::CurrentHandleScope();
+ Iterate(v, thread_local.Blocks(), current);
+}
+
+
+char* HandleScopeImplementer::Iterate(ObjectVisitor* v, char* storage) {
+ HandleScopeImplementer* thread_local =
+ reinterpret_cast<HandleScopeImplementer*>(storage);
+ List<void**>* blocks_of_archived_thread = thread_local->Blocks();
+ ImplementationUtilities::HandleScopeData* handle_data_of_archived_thread =
+ &thread_local->handle_scope_data_;
+ Iterate(v, blocks_of_archived_thread, handle_data_of_archived_thread);
+
+ return storage + ArchiveSpacePerThread();
+}
+
+} } // namespace v8::internal
diff --git a/src/api.h b/src/api.h
new file mode 100644
index 0000000..6975e57
--- /dev/null
+++ b/src/api.h
@@ -0,0 +1,479 @@
+// Copyright 2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_API_H_
+#define V8_API_H_
+
+#include "factory.h"
+
+namespace v8 {
+
+// Constants used in the implementation of the API. The most natural thing
+// would usually be to place these with the classes that use them, but
+// we want to keep them out of v8.h because it is an externally
+// visible file.
+class Consts {
+ public:
+ enum TemplateType {
+ FUNCTION_TEMPLATE = 0,
+ OBJECT_TEMPLATE = 1
+ };
+};
+
+
+// Utilities for working with neander-objects, primitive
+// env-independent JSObjects used by the api.
+class NeanderObject {
+ public:
+ explicit NeanderObject(int size);
+ inline NeanderObject(v8::internal::Handle<v8::internal::Object> obj);
+ inline NeanderObject(v8::internal::Object* obj);
+ inline v8::internal::Object* get(int index);
+ inline void set(int index, v8::internal::Object* value);
+ inline v8::internal::Handle<v8::internal::JSObject> value() { return value_; }
+ int size();
+ private:
+ v8::internal::Handle<v8::internal::JSObject> value_;
+};
+
+
+// Utilities for working with neander-arrays, a simple extensible
+// array abstraction built on neander-objects.
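+//
+// A minimal usage sketch, mirroring the message listener code in api.cc:
+//
+//   NeanderArray listeners(i::Factory::message_listeners());
+//   NeanderObject obj(2);
+//   obj.set(0, *i::Factory::NewProxy(FUNCTION_ADDR(that)));
+//   listeners.add(obj.value());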
+class NeanderArray {
+ public:
+ NeanderArray();
+ inline NeanderArray(v8::internal::Handle<v8::internal::Object> obj);
+ inline v8::internal::Handle<v8::internal::JSObject> value() {
+ return obj_.value();
+ }
+
+ void add(v8::internal::Handle<v8::internal::Object> value);
+
+ int length();
+
+ v8::internal::Object* get(int index);
+ // Change the value at the given index to the given value. If the index
+ // is out of bounds, the request is ignored.
+ void set(int index, v8::internal::Object* value);
+ private:
+ NeanderObject obj_;
+};
+
+
+NeanderObject::NeanderObject(v8::internal::Handle<v8::internal::Object> obj)
+ : value_(v8::internal::Handle<v8::internal::JSObject>::cast(obj)) { }
+
+
+NeanderObject::NeanderObject(v8::internal::Object* obj)
+ : value_(v8::internal::Handle<v8::internal::JSObject>(
+ v8::internal::JSObject::cast(obj))) { }
+
+
+NeanderArray::NeanderArray(v8::internal::Handle<v8::internal::Object> obj)
+ : obj_(obj) { }
+
+
+v8::internal::Object* NeanderObject::get(int offset) {
+ ASSERT(value()->HasFastElements());
+ return v8::internal::FixedArray::cast(value()->elements())->get(offset);
+}
+
+
+void NeanderObject::set(int offset, v8::internal::Object* value) {
+ ASSERT(value_->HasFastElements());
+ v8::internal::FixedArray::cast(value_->elements())->set(offset, value);
+}
+
+
+template <typename T> static inline T ToCData(v8::internal::Object* obj) {
+ STATIC_ASSERT(sizeof(T) == sizeof(v8::internal::Address));
+ return reinterpret_cast<T>(
+ reinterpret_cast<int>(v8::internal::Proxy::cast(obj)->proxy()));
+}
+
+
+template <typename T>
+static inline v8::internal::Handle<v8::internal::Object> FromCData(T obj) {
+ STATIC_ASSERT(sizeof(T) == sizeof(v8::internal::Address));
+ return v8::internal::Factory::NewProxy(
+ reinterpret_cast<v8::internal::Address>(reinterpret_cast<int>(obj)));
+}
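+
+
+// These two helpers round-trip a pointer-sized C value (typically a callback
+// address) through a Proxy object. Roughly, with FooCallback standing in for
+// an arbitrary function pointer type:
+//
+//   v8::internal::Handle<v8::internal::Object> wrapped = FromCData(callback);
+//   FooCallback unwrapped = ToCData<FooCallback>(*wrapped);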
+
+
+v8::Arguments::Arguments(v8::Local<v8::Value> data,
+ v8::Local<v8::Object> holder,
+ v8::Local<v8::Function> callee,
+ bool is_construct_call,
+ void** values, int length)
+ : data_(data), holder_(holder), callee_(callee),
+ is_construct_call_(is_construct_call),
+ values_(values), length_(length) { }
+
+
+enum ExtensionTraversalState {
+ UNVISITED, VISITED, INSTALLED
+};
+
+
+class RegisteredExtension {
+ public:
+ explicit RegisteredExtension(Extension* extension);
+ static void Register(RegisteredExtension* that);
+ Extension* extension() { return extension_; }
+ RegisteredExtension* next() { return next_; }
+ RegisteredExtension* next_auto() { return next_auto_; }
+ ExtensionTraversalState state() { return state_; }
+ void set_state(ExtensionTraversalState value) { state_ = value; }
+ static RegisteredExtension* first_extension() { return first_extension_; }
+ private:
+ Extension* extension_;
+ RegisteredExtension* next_;
+ RegisteredExtension* next_auto_;
+ ExtensionTraversalState state_;
+ static RegisteredExtension* first_extension_;
+ static RegisteredExtension* first_auto_extension_;
+};
+
+
+class ImplementationUtilities {
+ public:
+ static v8::Handle<v8::Primitive> Undefined();
+ static v8::Handle<v8::Primitive> Null();
+ static v8::Handle<v8::Boolean> True();
+ static v8::Handle<v8::Boolean> False();
+
+ static int GetNameCount(ExtensionConfiguration* that) {
+ return that->name_count_;
+ }
+
+ static const char** GetNames(ExtensionConfiguration* that) {
+ return that->names_;
+ }
+
+ static v8::Arguments NewArguments(Local<Value> data,
+ Local<Object> holder,
+ Local<Function> callee,
+ bool is_construct_call,
+ void** argv, int argc) {
+ return v8::Arguments(data, holder, callee, is_construct_call, argv, argc);
+ }
+
+ // Introduce an alias for the handle scope data to allow non-friends
+ // to access the HandleScope data.
+ typedef v8::HandleScope::Data HandleScopeData;
+
+ static HandleScopeData* CurrentHandleScope() {
+ return &v8::HandleScope::current_;
+ }
+
+#ifdef DEBUG
+ static void ZapHandleRange(void** begin, void** end) {
+ v8::HandleScope::ZapRange(begin, end);
+ }
+#endif
+};
+
+
+class Utils {
+ public:
+ static bool ReportApiFailure(const char* location, const char* message);
+
+ static Local<FunctionTemplate> ToFunctionTemplate(NeanderObject obj);
+ static Local<ObjectTemplate> ToObjectTemplate(NeanderObject obj);
+
+ static inline Local<Context> ToLocal(
+ v8::internal::Handle<v8::internal::Context> obj);
+ static inline Local<Value> ToLocal(
+ v8::internal::Handle<v8::internal::Object> obj);
+ static inline Local<Function> ToLocal(
+ v8::internal::Handle<v8::internal::JSFunction> obj);
+ static inline Local<String> ToLocal(
+ v8::internal::Handle<v8::internal::String> obj);
+ static inline Local<Object> ToLocal(
+ v8::internal::Handle<v8::internal::JSObject> obj);
+ static inline Local<Array> ToLocal(
+ v8::internal::Handle<v8::internal::JSArray> obj);
+ static inline Local<External> ToLocal(
+ v8::internal::Handle<v8::internal::Proxy> obj);
+ static inline Local<Message> MessageToLocal(
+ v8::internal::Handle<v8::internal::Object> obj);
+ static inline Local<Number> NumberToLocal(
+ v8::internal::Handle<v8::internal::Object> obj);
+ static inline Local<Integer> IntegerToLocal(
+ v8::internal::Handle<v8::internal::Object> obj);
+ static inline Local<Uint32> Uint32ToLocal(
+ v8::internal::Handle<v8::internal::Object> obj);
+ static inline Local<FunctionTemplate> ToLocal(
+ v8::internal::Handle<v8::internal::FunctionTemplateInfo> obj);
+ static inline Local<ObjectTemplate> ToLocal(
+ v8::internal::Handle<v8::internal::ObjectTemplateInfo> obj);
+ static inline Local<Signature> ToLocal(
+ v8::internal::Handle<v8::internal::SignatureInfo> obj);
+ static inline Local<TypeSwitch> ToLocal(
+ v8::internal::Handle<v8::internal::TypeSwitchInfo> obj);
+
+ static inline v8::internal::Handle<v8::internal::TemplateInfo>
+ OpenHandle(Template* that);
+ static inline v8::internal::Handle<v8::internal::FunctionTemplateInfo>
+ OpenHandle(FunctionTemplate* that);
+ static inline v8::internal::Handle<v8::internal::ObjectTemplateInfo>
+ OpenHandle(ObjectTemplate* that);
+ static inline v8::internal::Handle<v8::internal::Object>
+ OpenHandle(Data* data);
+ static inline v8::internal::Handle<v8::internal::JSObject>
+ OpenHandle(v8::Object* data);
+ static inline v8::internal::Handle<v8::internal::JSArray>
+ OpenHandle(v8::Array* data);
+ static inline v8::internal::Handle<v8::internal::String>
+ OpenHandle(String* data);
+ static inline v8::internal::Handle<v8::internal::JSFunction>
+ OpenHandle(Script* data);
+ static inline v8::internal::Handle<v8::internal::JSFunction>
+ OpenHandle(Function* data);
+ static inline v8::internal::Handle<v8::internal::JSObject>
+ OpenHandle(Message* message);
+ static inline v8::internal::Handle<v8::internal::Context>
+ OpenHandle(v8::Context* context);
+ static inline v8::internal::Handle<v8::internal::SignatureInfo>
+ OpenHandle(v8::Signature* sig);
+ static inline v8::internal::Handle<v8::internal::TypeSwitchInfo>
+ OpenHandle(v8::TypeSwitch* that);
+};
+
+
+template <class T>
+static inline T* ToApi(v8::internal::Handle<v8::internal::Object> obj) {
+ return reinterpret_cast<T*>(obj.location());
+}
+
+
+template <class T>
+v8::internal::Handle<T> v8::internal::Handle<T>::EscapeFrom(
+ HandleScope* scope) {
+ return Utils::OpenHandle(*scope->Close(Utils::ToLocal(*this)));
+}
+
+
+// Implementations of ToLocal
+
+#define MAKE_TO_LOCAL(Name, From, To) \
+ Local<v8::To> Utils::Name(v8::internal::Handle<v8::internal::From> obj) { \
+ return Local<To>(reinterpret_cast<To*>(obj.location())); \
+ }
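+
+// For instance, MAKE_TO_LOCAL(ToLocal, String, String) below expands to
+// (roughly):
+//
+//   Local<v8::String> Utils::ToLocal(
+//       v8::internal::Handle<v8::internal::String> obj) {
+//     return Local<String>(reinterpret_cast<String*>(obj.location()));
+//   }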
+
+MAKE_TO_LOCAL(ToLocal, Context, Context)
+MAKE_TO_LOCAL(ToLocal, Object, Value)
+MAKE_TO_LOCAL(ToLocal, JSFunction, Function)
+MAKE_TO_LOCAL(ToLocal, String, String)
+MAKE_TO_LOCAL(ToLocal, JSObject, Object)
+MAKE_TO_LOCAL(ToLocal, JSArray, Array)
+MAKE_TO_LOCAL(ToLocal, Proxy, External)
+MAKE_TO_LOCAL(ToLocal, FunctionTemplateInfo, FunctionTemplate)
+MAKE_TO_LOCAL(ToLocal, ObjectTemplateInfo, ObjectTemplate)
+MAKE_TO_LOCAL(ToLocal, SignatureInfo, Signature)
+MAKE_TO_LOCAL(ToLocal, TypeSwitchInfo, TypeSwitch)
+MAKE_TO_LOCAL(MessageToLocal, Object, Message)
+MAKE_TO_LOCAL(NumberToLocal, Object, Number)
+MAKE_TO_LOCAL(IntegerToLocal, Object, Integer)
+MAKE_TO_LOCAL(Uint32ToLocal, Object, Uint32)
+
+#undef MAKE_TO_LOCAL
+
+
+// Implementations of OpenHandle
+
+#define MAKE_OPEN_HANDLE(From, To) \
+ v8::internal::Handle<v8::internal::To> Utils::OpenHandle(v8::From* that) { \
+ return v8::internal::Handle<v8::internal::To>( \
+ reinterpret_cast<v8::internal::To**>(that)); \
+ }
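+
+// Similarly, MAKE_OPEN_HANDLE(Context, Context) below expands to (roughly):
+//
+//   v8::internal::Handle<v8::internal::Context> Utils::OpenHandle(
+//       v8::Context* that) {
+//     return v8::internal::Handle<v8::internal::Context>(
+//         reinterpret_cast<v8::internal::Context**>(that));
+//   }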
+
+MAKE_OPEN_HANDLE(Template, TemplateInfo)
+MAKE_OPEN_HANDLE(FunctionTemplate, FunctionTemplateInfo)
+MAKE_OPEN_HANDLE(ObjectTemplate, ObjectTemplateInfo)
+MAKE_OPEN_HANDLE(Signature, SignatureInfo)
+MAKE_OPEN_HANDLE(TypeSwitch, TypeSwitchInfo)
+MAKE_OPEN_HANDLE(Data, Object)
+MAKE_OPEN_HANDLE(Object, JSObject)
+MAKE_OPEN_HANDLE(Array, JSArray)
+MAKE_OPEN_HANDLE(String, String)
+MAKE_OPEN_HANDLE(Script, JSFunction)
+MAKE_OPEN_HANDLE(Function, JSFunction)
+MAKE_OPEN_HANDLE(Message, JSObject)
+MAKE_OPEN_HANDLE(Context, Context)
+
+#undef MAKE_OPEN_HANDLE
+
+
+namespace internal {
+
+// This class is here in order to be able to declare it a friend of
+// HandleScope. Moving these methods to be members of HandleScope would be
+// neat in some ways, but it would expose external implementation details in
+// our public header file, which is undesirable.
+//
+// There is a singleton instance of this class to hold the per-thread data.
+// For multithreaded V8 programs this data is copied in and out of storage
+// so that the currently executing thread always has its own copy of this
+// data.
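+//
+// The copying goes through the Archive/Restore entry points declared below.
+// A rough sketch of the intended calling pattern (local names are
+// illustrative only):
+//
+//   int size = HandleScopeImplementer::ArchiveSpacePerThread();
+//   char* storage = NewArray<char>(size);
+//   HandleScopeImplementer::ArchiveThread(storage);  // switching away
+//   ...
+//   HandleScopeImplementer::RestoreThread(storage);  // switching back
+//   DeleteArray(storage);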
+class HandleScopeImplementer {
+ public:
+
+ HandleScopeImplementer()
+ : blocks(0),
+ entered_contexts(0),
+ security_contexts(0) {
+ Initialize();
+ }
+
+ void Initialize() {
+ blocks.Initialize(0);
+ entered_contexts.Initialize(0);
+ security_contexts.Initialize(0);
+ spare = NULL;
+ ignore_out_of_memory = false;
+ call_depth = 0;
+ }
+
+ static HandleScopeImplementer* instance();
+
+ // Threading support for handle data.
+ static int ArchiveSpacePerThread();
+ static char* RestoreThread(char* from);
+ static char* ArchiveThread(char* to);
+
+ // Garbage collection support.
+ static void Iterate(v8::internal::ObjectVisitor* v);
+ static char* Iterate(v8::internal::ObjectVisitor* v, char* data);
+
+
+ inline void** GetSpareOrNewBlock();
+ inline void DeleteExtensions(int extensions);
+
+ inline void IncrementCallDepth() {call_depth++;}
+ inline void DecrementCallDepth() {call_depth--;}
+ inline bool CallDepthIsZero() { return call_depth == 0; }
+
+ inline void AddEnteredContext(Handle<Object>);
+ inline Handle<Object> RemoveLastEnteredContext();
+ inline bool HasEnteredContexts();
+ inline void AddSecurityContext(Handle<Object>);
+ inline Handle<Object> RemoveLastSecurityContext();
+ inline bool HasSecurityContexts();
+
+ inline List<void**>* Blocks() { return &blocks; }
+
+ inline bool IgnoreOutOfMemory() { return ignore_out_of_memory; }
+ inline void SetIgnoreOutOfMemory(bool value) { ignore_out_of_memory = value; }
+
+ private:
+ List<void**> blocks;
+ Object** spare;
+ int call_depth;
+ // Used as a stack to keep track of contexts entered.
+ List<Handle<Object> > entered_contexts;
+ // Used as a stack to keep track of security contexts entered.
+ List<Handle<Object> > security_contexts;
+ bool ignore_out_of_memory;
+ // This is only used for threading support.
+ ImplementationUtilities::HandleScopeData handle_scope_data_;
+
+ static void Iterate(ObjectVisitor* v,
+ List<void**>* blocks,
+ ImplementationUtilities::HandleScopeData* handle_data);
+ char* RestoreThreadHelper(char* from);
+ char* ArchiveThreadHelper(char* to);
+
+ DISALLOW_EVIL_CONSTRUCTORS(HandleScopeImplementer);
+};
+
+
+static const int kHandleBlockSize = v8::internal::KB - 2; // fit in one page
+
+
+void HandleScopeImplementer::AddEnteredContext(Handle<Object> context) {
+ entered_contexts.Add(context);
+}
+
+
+Handle<Object> HandleScopeImplementer::RemoveLastEnteredContext() {
+ return entered_contexts.RemoveLast();
+}
+
+
+bool HandleScopeImplementer::HasEnteredContexts() {
+ return !entered_contexts.is_empty();
+}
+
+void HandleScopeImplementer::AddSecurityContext(Handle<Object> context) {
+ security_contexts.Add(context);
+}
+
+
+Handle<Object> HandleScopeImplementer::RemoveLastSecurityContext() {
+ return security_contexts.RemoveLast();
+}
+
+
+bool HandleScopeImplementer::HasSecurityContexts() {
+ return !security_contexts.is_empty();
+}
+
+
+// If there's a spare block, use it for growing the current scope.
+void** HandleScopeImplementer::GetSpareOrNewBlock() {
+ void** block = (spare != NULL) ?
+ reinterpret_cast<void**>(spare) :
+ NewArray<void*>(kHandleBlockSize);
+ spare = NULL;
+ return block;
+}
+
+
+void HandleScopeImplementer::DeleteExtensions(int extensions) {
+ if (spare != NULL) {
+ DeleteArray(spare);
+ spare = NULL;
+ }
+ for (int i = extensions; i > 1; --i) {
+ void** block = blocks.RemoveLast();
+#ifdef DEBUG
+ ImplementationUtilities::ZapHandleRange(block, &block[kHandleBlockSize]);
+#endif
+ DeleteArray(block);
+ }
+ spare = reinterpret_cast<Object**>(blocks.RemoveLast());
+#ifdef DEBUG
+ ImplementationUtilities::ZapHandleRange(
+ reinterpret_cast<void**>(spare),
+ reinterpret_cast<void**>(&spare[kHandleBlockSize]));
+#endif
+}
+
+} } // namespace v8::internal
+
+#endif // V8_API_H_
diff --git a/src/apinatives.js b/src/apinatives.js
new file mode 100644
index 0000000..9ec2090
--- /dev/null
+++ b/src/apinatives.js
@@ -0,0 +1,92 @@
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This file contains infrastructure used by the API. See
+// v8natives.js for an explanation of how these files are processed and
+// loaded.
+
+
+function CreateDate(time) {
+ var date = new ORIGINAL_DATE();
+ date.setTime(time);
+ return date;
+};
+
+
+const kApiFunctionCache = {};
+const functionCache = kApiFunctionCache;
+
+
+function Instantiate(data) {
+ if (!%IsTemplate(data)) return data;
+ var tag = %GetTemplateField(data, kApiTagOffset);
+ switch (tag) {
+ case kFunctionTag:
+ return InstantiateFunction(data);
+ case kNewObjectTag:
+ var Constructor = %GetTemplateField(data, kApiConstructorOffset);
+ var result = Constructor ? new (Instantiate(Constructor))() : {};
+ ConfigureTemplateInstance(result, data);
+ return result;
+ default:
+ throw 'Unknown API tag <' + tag + '>';
+ }
+};
+
+
+function InstantiateFunction(data) {
+ var serialNumber = %GetTemplateField(data, kApiSerialNumberOffset);
+ if (!(serialNumber in kApiFunctionCache)) {
+ kApiFunctionCache[serialNumber] = null;
+ var fun = %CreateApiFunction(data);
+ kApiFunctionCache[serialNumber] = fun;
+ var prototype = %GetTemplateField(data, kApiPrototypeTemplateOffset);
+ fun.prototype = prototype ? Instantiate(prototype) : {};
+ %AddProperty(fun.prototype, "constructor", fun, DONT_ENUM);
+ var parent = %GetTemplateField(data, kApiParentTemplateOffset);
+ if (parent) {
+ var parent_fun = Instantiate(parent);
+ fun.prototype.__proto__ = parent_fun.prototype;
+ }
+ ConfigureTemplateInstance(fun, data);
+ }
+ return kApiFunctionCache[serialNumber];
+};
+
+
+function ConfigureTemplateInstance(obj, data) {
+ var properties = %GetTemplateField(data, kApiPropertyListOffset);
+ if (properties) {
+ for (var i = 0; i < properties[0]; i += 3) {
+ var name = properties[i + 1];
+ var prop_data = properties[i + 2];
+ var attributes = properties[i + 3];
+ var value = Instantiate(prop_data);
+ %SetProperty(obj, name, value, attributes);
+ }
+ }
+};
diff --git a/src/arguments.h b/src/arguments.h
new file mode 100644
index 0000000..418d568
--- /dev/null
+++ b/src/arguments.h
@@ -0,0 +1,70 @@
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARGUMENTS_H_
+#define V8_ARGUMENTS_H_
+
+namespace v8 { namespace internal {
+
+// Arguments provides access to runtime call parameters.
+//
+// It uses the fact that the instance fields of Arguments
+// (length_, arguments_) are "overlayed" with the parameters
+// (no. of parameters, and the parameter pointer) passed so
+// that inside the C++ function, the parameters passed can
+// be accessed conveniently:
+//
+// Object* Runtime_function(Arguments args) {
+// ... use args[i] here ...
+// }
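+//
+// Individual parameters can also be read with a type check through at<T>,
+// e.g. (illustrative only):
+//
+//   Handle<String> name = args.at<String>(0);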
+
+class Arguments BASE_EMBEDDED {
+ public:
+ Object*& operator[] (int index) {
+ ASSERT(0 <= index && index <= length_);
+ return arguments_[-index];
+ }
+
+ template <class S> Handle<S> at(int index) {
+ Object** value = &((*this)[index]);
+ // This cast checks that the object we're accessing does indeed have the
+ // expected type.
+ S::cast(*value);
+ return Handle<S>(reinterpret_cast<S**>(value));
+ }
+
+ // Get the total number of arguments including the receiver.
+ int length() const { return length_ + 1; }
+
+ private:
+ int length_;
+ Object** arguments_;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_ARGUMENTS_H_
diff --git a/src/array.js b/src/array.js
new file mode 100644
index 0000000..105f41b
--- /dev/null
+++ b/src/array.js
@@ -0,0 +1,923 @@
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This file relies on the fact that the following declarations have been made
+// in runtime.js:
+// const $Array = global.Array;
+
+// -------------------------------------------------------------------
+
+// Determines if the array contains the element.
+function Contains(array, element) {
+ var length = array.length;
+ for (var i = 0; i < length; i++) {
+ if (array[i] === element) return true;
+ }
+ return false;
+};
+
+
+// Global list of arrays visited during toString, toLocaleString and
+// join invocations.
+var visited_arrays = new $Array();
+
+
+// Gets a sorted array of array keys. Useful for operations on sparse
+// arrays. Dupes have not been removed.
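+//
+// The intervals argument comes from %GetArrayKeys: a non-negative entry is
+// an individual key, while a negative entry encodes an interval starting at
+// -1 - entry whose length is given by the next entry. For example,
+// [-3, 4, 10] describes the interval of keys 2, 3, 4, 5 (start -1 - -3 == 2,
+// length 4) together with the single key 10.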
+function GetSortedArrayKeys(array, intervals) {
+ var length = intervals.length;
+ var keys = [];
+ for (var k = 0; k < length; k++) {
+ var key = intervals[k];
+ if (key < 0) {
+ var j = -1 - key;
+ var limit = j + intervals[++k];
+ for (; j < limit; j++) {
+ var e = array[j];
+ if (!IS_UNDEFINED(e) || j in array) {
+ keys.push(j);
+ }
+ }
+ } else {
+ // The case where key is undefined also ends here.
+ if (!IS_UNDEFINED(key)) {
+ var e = array[key];
+ if (!IS_UNDEFINED(e) || key in array) {
+ keys.push(key);
+ }
+ }
+ }
+ }
+ keys.sort(function(a, b) { return a - b; });
+ return keys;
+}
+
+
+// Optimized for sparse arrays if separator is ''.
+function SparseJoin(array, len, convert) {
+ var keys = GetSortedArrayKeys(array, %GetArrayKeys(array, len));
+ var builder = new StringBuilder();
+ var last_key = -1;
+ var keys_length = keys.length;
+ for (var i = 0; i < keys_length; i++) {
+ var key = keys[i];
+ if (key != last_key) {
+ var e = array[key];
+ builder.add(convert(e));
+ last_key = key;
+ }
+ }
+ return builder.generate();
+}
+
+
+function UseSparseVariant(object, length, is_array) {
+ return is_array &&
+ length > 1000 &&
+ (!%_IsSmi(length) ||
+ %EstimateNumberOfElements(object) < (length >> 2));
+}
+
+
+function Join(array, length, separator, convert) {
+ if (length == 0) return '';
+
+ var is_array = IS_ARRAY(array);
+
+ if (is_array) {
+ // If the array is cyclic, return the empty string for already
+ // visited arrays.
+ if (Contains(visited_arrays, array)) return '';
+ visited_arrays[visited_arrays.length] = array;
+ }
+
+ // Attempt to convert the elements.
+ try {
+ if (UseSparseVariant(array, length, is_array) && separator === '') {
+ return SparseJoin(array, length, convert);
+ }
+
+ var builder = new StringBuilder();
+
+ for (var i = 0; i < length; i++) {
+ var e = array[i];
+ if (i != 0) builder.add(separator);
+ if (!IS_UNDEFINED(e) || (i in array)) {
+ builder.add(convert(e));
+ }
+ }
+ return builder.generate();
+ } finally {
+ // Make sure to pop the visited array no matter what happens.
+ if (is_array) visited_arrays.pop();
+ }
+};
+
+
+function ConvertToString(e) {
+ if (e == null) return '';
+ else return ToString(e);
+};
+
+
+function ConvertToLocaleString(e) {
+ if (e == null) return '';
+ else {
+ // e_obj's toLocaleString might be overwritten, check if it is a function.
+ // Call ToString if toLocaleString is not a function.
+ // See issue 877615.
+ var e_obj = ToObject(e);
+ if (IS_FUNCTION(e_obj.toLocaleString))
+ return e_obj.toLocaleString();
+ else
+ return ToString(e);
+ }
+};
+
+
+// Part of the optimized splice implementation; uses special array operations
+// to handle sparse arrays in a sensible fashion.
+function SmartSlice(array, start_i, del_count, len, deleted_elements) {
+ // Move deleted elements to a new array (the return value from splice).
+ // Intervals array can contain keys and intervals. See comment in Concat.
+ var intervals = %GetArrayKeys(array, start_i + del_count);
+ var length = intervals.length;
+ for (var k = 0; k < length; k++) {
+ var key = intervals[k];
+ if (key < 0) {
+ var j = -1 - key;
+ var interval_limit = j + intervals[++k];
+ if (j < start_i) {
+ j = start_i;
+ }
+ for (; j < interval_limit; j++) {
+ // ECMA-262 15.4.4.12 line 10. The spec could also be
+ // interpreted such that %HasLocalProperty would be the
+ // appropriate test. We follow KJS in consulting the
+ // prototype.
+ var current = array[j];
+ if (!IS_UNDEFINED(current) || j in array) {
+ deleted_elements[j - start_i] = current;
+ }
+ }
+ } else {
+ if (!IS_UNDEFINED(key)) {
+ if (key >= start_i) {
+ // ECMA-262 15.4.4.12 line 10. The spec could also be
+ // interpreted such that %HasLocalProperty would be the
+ // appropriate test. We follow KJS in consulting the
+ // prototype.
+ var current = array[key];
+ if (!IS_UNDEFINED(current) || key in array) {
+ deleted_elements[key - start_i] = current;
+ }
+ }
+ }
+ }
+ }
+};
+
+
+// Part of the optimized splice implementation; uses special array operations
+// to handle sparse arrays in a sensible fashion.
+function SmartMove(array, start_i, del_count, len, num_additional_args) {
+ // Move data to new array.
+ var new_array = new $Array(len - del_count + num_additional_args);
+ var intervals = %GetArrayKeys(array, len);
+ var length = intervals.length;
+ for (var k = 0; k < length; k++) {
+ var key = intervals[k];
+ if (key < 0) {
+ var j = -1 - key;
+ var interval_limit = j + intervals[++k];
+ while (j < start_i && j < interval_limit) {
+ // The spec could also be interpreted such that
+ // %HasLocalProperty would be the appropriate test. We follow
+ // KJS in consulting the prototype.
+ var current = array[j];
+ if (!IS_UNDEFINED(current) || j in array)
+ new_array[j] = current;
+ j++;
+ }
+ j = start_i + del_count;
+ while (j < interval_limit) {
+ // ECMA-262 15.4.4.12 lines 24 and 41. The spec could also be
+ // interpreted such that %HasLocalProperty would be the
+ // appropriate test. We follow KJS in consulting the
+ // prototype.
+ var current = array[j];
+ if (!IS_UNDEFINED(current) || j in array)
+ new_array[j - del_count + num_additional_args] = current;
+ j++;
+ }
+ } else {
+ if (!IS_UNDEFINED(key)) {
+ if (key < start_i) {
+ // The spec could also be interpreted such that
+ // %HasLocalProperty would be the appropriate test. We follow
+ // KJS in consulting the prototype.
+ var current = array[key];
+ if (!IS_UNDEFINED(current) || key in array)
+ new_array[key] = current;
+ } else if (key >= start_i + del_count) {
+ // ECMA-262 15.4.4.12 lines 24 and 41. The spec could also
+ // be interpreted such that %HasLocalProperty would be the
+ // appropriate test. We follow KJS in consulting the
+ // prototype.
+ var current = array[key];
+ if (!IS_UNDEFINED(current) || key in array)
+ new_array[key - del_count + num_additional_args] = current;
+ }
+ }
+ }
+ }
+ // Move contents of new_array into this array
+ %MoveArrayContents(new_array, array);
+};
+
+
+// This is part of the old simple-minded splice. We are using it either
+// because the receiver is not an array (so we have no choice) or because we
+// know we are not deleting or moving a lot of elements.
+function SimpleSlice(array, start_i, del_count, len, deleted_elements) {
+ for (var i = 0; i < del_count; i++) {
+ var index = start_i + i;
+ // The spec could also be interpreted such that %HasLocalProperty
+ // would be the appropriate test. We follow KJS in consulting the
+ // prototype.
+ var current = array[index];
+ if (!IS_UNDEFINED(current) || index in array)
+ deleted_elements[i] = current;
+ }
+};
+
+
+function SimpleMove(array, start_i, del_count, len, num_additional_args) {
+ if (num_additional_args !== del_count) {
+ // Move the existing elements after the elements to be deleted
+ // to the right position in the resulting array.
+ if (num_additional_args > del_count) {
+ for (var i = len - del_count; i > start_i; i--) {
+ var from_index = i + del_count - 1;
+ var to_index = i + num_additional_args - 1;
+ // The spec could also be interpreted such that
+ // %HasLocalProperty would be the appropriate test. We follow
+ // KJS in consulting the prototype.
+ var current = array[from_index];
+ if (!IS_UNDEFINED(current) || from_index in array) {
+ array[to_index] = current;
+ } else {
+ delete array[to_index];
+ }
+ }
+ } else {
+ for (var i = start_i; i < len - del_count; i++) {
+ var from_index = i + del_count;
+ var to_index = i + num_additional_args;
+ // The spec could also be interpreted such that
+ // %HasLocalProperty would be the appropriate test. We follow
+ // KJS in consulting the prototype.
+ var current = array[from_index];
+ if (!IS_UNDEFINED(current) || from_index in array) {
+ array[to_index] = current;
+ } else {
+ delete array[to_index];
+ }
+ }
+ for (var i = len; i > len - del_count + num_additional_args; i--) {
+ delete array[i - 1];
+ }
+ }
+ }
+};
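For reference, a short usage sketch (not part of this file) of the splice behaviour that SimpleSlice and SimpleMove implement together on a small, dense array: SimpleSlice collects the removed elements, and SimpleMove closes (or opens) the gap left behind.

    var a = ['a', 'b', 'c', 'd', 'e'];
    var removed = a.splice(1, 2, 'X');  // deleted_elements receives ['b', 'c']
    // removed => ['b', 'c']
    // a       => ['a', 'X', 'd', 'e']  -- the tail moved left by one slot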
+
+
+// -------------------------------------------------------------------
+
+
+function ArrayToString() {
+ if (!IS_ARRAY(this)) {
+ throw new $TypeError('Array.prototype.toString is not generic');
+ }
+ return Join(this, this.length, ',', ConvertToString);
+};
+
+
+function ArrayToLocaleString() {
+ if (!IS_ARRAY(this)) {
+ throw new $TypeError('Array.prototype.toLocaleString is not generic');
+ }
+ return Join(this, this.length, ',', ConvertToLocaleString);
+};
+
+
+function ArrayJoin(separator) {
+ if (IS_UNDEFINED(separator)) separator = ',';
+ else separator = ToString(separator);
+ return Join(this, ToUint32(this.length), separator, ConvertToString);
+};
+
+
+// Removes the last element from the array and returns it. See
+// ECMA-262, section 15.4.4.6.
+function ArrayPop() {
+ var n = ToUint32(this.length);
+ if (n == 0) {
+ this.length = n;
+ return;
+ }
+ n--;
+ var value = this[n];
+ this.length = n;
+ delete this[n];
+ return value;
+};
+
+
+// Appends the arguments to the end of the array and returns the new
+// length of the array. See ECMA-262, section 15.4.4.7.
+function ArrayPush() {
+ var n = ToUint32(this.length);
+ var m = %_ArgumentsLength();
+ for (var i = 0; i < m; i++) {
+ this[i+n] = %_Arguments(i);
+ }
+ this.length = n + m;
+ return this.length;
+};
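The push above is deliberately generic: it only reads ToUint32(this.length) and writes indexed properties, so it also works on array-like receivers. A brief usage sketch:

    var a = [1, 2];
    a.push(3, 4);                       // => 4, the new length; a is [1, 2, 3, 4]
    var o = { length: 0 };
    Array.prototype.push.call(o, 'x');  // => 1; o is now { 0: 'x', length: 1 }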
+
+
+function ArrayConcat(arg1) { // length == 1
+ var arg_number = 0, arg_count = %_ArgumentsLength();
+ var n = 0;
+
+ var A = $Array(1 + arg_count);
+ var E = this;
+
+ while (true) {
+ if (IS_ARRAY(E)) {
+ // This is an array of intervals or an array of keys. Keys are
+ // represented by non-negative integers. Intervals are represented by
+ // negative integers, followed by positive counts. The interval start
+ // is determined by subtracting the entry from -1. There may also be
+ // undefined entries in the array which should be skipped.
+ var intervals = %GetArrayKeys(E, E.length);
+ var length = intervals.length;
+ for (var k = 0; k < length; k++) {
+ var key = intervals[k];
+ if (key < 0) {
+ var j = -1 - key;
+ var limit = j + intervals[++k];
+ for (; j < limit; j++) {
+ if (j in E) {
+ A[n + j] = E[j];
+ }
+ }
+ } else {
+ // The case where key is undefined also ends up here.
+ if (!IS_UNDEFINED(key)) {
+ A[n + key] = E[key];
+ }
+ }
+ }
+ n += E.length;
+ } else {
+ A[n++] = E;
+ }
+ if (arg_number == arg_count) break;
+ E = %_Arguments(arg_number++);
+ }
+
+ A.length = n; // the preallocated length may be too large if some arguments were empty arrays
+ return A;
+};
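The keys/intervals encoding described in the comment inside ArrayConcat can be decoded as sketched below. %GetArrayKeys is an internal runtime call, so both the helper name and the sample return value here are illustrative only.

    // Expand a %GetArrayKeys-style result into plain indices (sketch only).
    function DecodeKeys(keys) {
      var indices = [];
      for (var k = 0; k < keys.length; k++) {
        var key = keys[k];
        if (key === undefined) continue;   // undefined entries are skipped
        if (key < 0) {
          var start = -1 - key;            // a negative entry encodes an interval start
          var count = keys[++k];           // followed by a positive count
          for (var j = start; j < start + count; j++) indices.push(j);
        } else {
          indices.push(key);               // a non-negative entry is a single key
        }
      }
      return indices;
    }
    // [-1, 3, 10] means "the interval [0, 3) plus the single index 10":
    DecodeKeys([-1, 3, 10]);               // => [0, 1, 2, 10]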
+
+
+// For implementing reverse() on large, sparse arrays.
+function SparseReverse(array, len) {
+ var keys = GetSortedArrayKeys(array, %GetArrayKeys(array, len));
+ var high_counter = keys.length - 1;
+ var low_counter = 0;
+ while (low_counter <= high_counter) {
+ var i = keys[low_counter];
+ var j = keys[high_counter];
+
+ var j_complement = len - j - 1;
+ var low, high;
+
+ if (j_complement <= i) {
+ high = j;
+ while (keys[--high_counter] == j);
+ low = j_complement;
+ }
+ if (j_complement >= i) {
+ low = i;
+ while (keys[++low_counter] == i);
+ high = len - i - 1;
+ }
+
+ var current_i = array[low];
+ if (!IS_UNDEFINED(current_i) || low in array) {
+ var current_j = array[high];
+ if (!IS_UNDEFINED(current_j) || high in array) {
+ array[low] = current_j;
+ array[high] = current_i;
+ } else {
+ array[high] = current_i;
+ delete array[low];
+ }
+ } else {
+ var current_j = array[high];
+ if (!IS_UNDEFINED(current_j) || high in array) {
+ array[low] = current_j;
+ delete array[high];
+ }
+ }
+ }
+}
+
+
+function ArrayReverse() {
+ var j = ToUint32(this.length) - 1;
+
+ if (UseSparseVariant(this, j, IS_ARRAY(this))) {
+ SparseReverse(this, j+1);
+ return this;
+ }
+
+ for (var i = 0; i < j; i++, j--) {
+ var current_i = this[i];
+ if (!IS_UNDEFINED(current_i) || i in this) {
+ var current_j = this[j];
+ if (!IS_UNDEFINED(current_j) || j in this) {
+ this[i] = current_j;
+ this[j] = current_i;
+ } else {
+ this[j] = current_i;
+ delete this[i];
+ }
+ } else {
+ var current_j = this[j];
+ if (!IS_UNDEFINED(current_j) || j in this) {
+ this[i] = current_j;
+ delete this[j];
+ }
+ }
+ }
+ return this;
+};
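Because both branches above delete the slot opposite a hole instead of storing undefined, reversing a sparse array keeps its holes. A small illustration:

    var a = [1, , 3];   // a hole at index 1
    a.reverse();        // => [3, <hole>, 1]
    1 in a;             // => false -- the hole survives the reversal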
+
+
+function ArrayShift() {
+ var len = ToUint32(this.length);
+
+ if (len === 0) {
+ this.length = 0;
+ return;
+ }
+
+ var first = this[0];
+
+ if (IS_ARRAY(this))
+ SmartMove(this, 0, 1, len, 0);
+ else
+ SimpleMove(this, 0, 1, len, 0);
+
+ this.length = len - 1;
+
+ return first;
+};
+
+
+function ArrayUnshift(arg1) { // length == 1
+ var len = ToUint32(this.length);
+ var num_arguments = %_ArgumentsLength();
+
+ if (IS_ARRAY(this))
+ SmartMove(this, 0, 0, len, num_arguments);
+ else
+ SimpleMove(this, 0, 0, len, num_arguments);
+
+ for (var i = 0; i < num_arguments; i++) {
+ this[i] = %_Arguments(i);
+ }
+
+ this.length = len + num_arguments;
+
+ return len + num_arguments;
+};
+
+
+function ArraySlice(start, end) {
+ var len = ToUint32(this.length);
+ var start_i = TO_INTEGER(start);
+ var end_i = len;
+
+ if (end !== void 0) end_i = TO_INTEGER(end);
+
+ if (start_i < 0) {
+ start_i += len;
+ if (start_i < 0) start_i = 0;
+ } else {
+ if (start_i > len) start_i = len;
+ }
+
+ if (end_i < 0) {
+ end_i += len;
+ if (end_i < 0) end_i = 0;
+ } else {
+ if (end_i > len) end_i = len;
+ }
+
+ var result = [];
+
+ if (end_i < start_i)
+ return result;
+
+ if (IS_ARRAY(this))
+ SmartSlice(this, start_i, end_i - start_i, len, result);
+ else
+ SimpleSlice(this, start_i, end_i - start_i, len, result);
+
+ result.length = end_i - start_i;
+
+ return result;
+};
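A brief usage sketch of the index clamping above: negative indices count from the end, and an end below the start yields an empty result.

    var a = [1, 2, 3, 4, 5];
    a.slice(-3, -1);    // => [3, 4]
    a.slice(3, 1);      // => []  (end_i < start_i)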
+
+
+function ArraySplice(start, delete_count) {
+ var num_arguments = %_ArgumentsLength();
+
+ // SpiderMonkey and KJS return undefined in the case where no
+ // arguments are given instead of using the implicit undefined
+ // arguments. This does not follow ECMA-262, but we do the same for
+ // compatibility.
+ if (num_arguments == 0) return;
+
+ var len = ToUint32(this.length);
+ var start_i = TO_INTEGER(start);
+
+ if (start_i < 0) {
+ start_i += len;
+ if (start_i < 0) start_i = 0;
+ } else {
+ if (start_i > len) start_i = len;
+ }
+
+ // SpiderMonkey and KJS treat the case where no delete count is
+ // given differently from when an undefined delete count is given.
+ // This does not follow ECMA-262, but we do the same for
+ // compatibility.
+ var del_count = 0;
+ if (num_arguments > 1) {
+ del_count = TO_INTEGER(delete_count);
+ if (del_count < 0) del_count = 0;
+ if (del_count > len - start_i) del_count = len - start_i;
+ } else {
+ del_count = len - start_i;
+ }
+
+ var deleted_elements = [];
+ deleted_elements.length = del_count;
+
+ // Number of elements to add.
+ var num_additional_args = 0;
+ if (num_arguments > 2) {
+ num_additional_args = num_arguments - 2;
+ }
+
+ var use_simple_splice = true;
+
+ if (IS_ARRAY(this) && num_additional_args !== del_count) {
+ // If we are only deleting/moving a few things near the end of the
+ // array then the simple version is going to be faster, because it
+ // doesn't touch most of the array.
+ var estimated_non_hole_elements = %EstimateNumberOfElements(this);
+ if (len > 20 && (estimated_non_hole_elements >> 2) < (len - start_i)) {
+ use_simple_splice = false;
+ }
+ }
+
+ if (use_simple_splice) {
+ SimpleSlice(this, start_i, del_count, len, deleted_elements);
+ SimpleMove(this, start_i, del_count, len, num_additional_args);
+ } else {
+ SmartSlice(this, start_i, del_count, len, deleted_elements);
+ SmartMove(this, start_i, del_count, len, num_additional_args);
+ }
+
+ // Insert the arguments into the resulting array in
+ // place of the deleted elements.
+ var i = start_i;
+ var arguments_index = 2;
+ var arguments_length = %_ArgumentsLength();
+ while (arguments_index < arguments_length) {
+ this[i++] = %_Arguments(arguments_index++);
+ }
+ this.length = len - del_count + num_additional_args;
+
+ // Return the deleted elements.
+ return deleted_elements;
+};
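A short usage sketch of the two compatibility deviations noted in the comments above:

    var a = ['a', 'b', 'c', 'd'];
    a.splice();     // => undefined (SpiderMonkey/KJS compatibility), not []
    a.splice(2);    // => ['c', 'd'] -- a missing delete count removes through
                    //    the end, leaving a as ['a', 'b']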
+
+
+function ArraySort(comparefn) {
+ // Standard in-place HeapSort algorithm.
+
+ function Compare(x,y) {
+ if (IS_UNDEFINED(x)) {
+ if (IS_UNDEFINED(y)) return 0;
+ return 1;
+ }
+ if (IS_UNDEFINED(y)) return -1;
+
+ if (IS_FUNCTION(comparefn)) {
+ return comparefn.call(null, x, y);
+ }
+ x = ToString(x);
+ y = ToString(y);
+ if (x == y) return 0;
+ else return x < y ? -1 : 1;
+ };
+
+ var old_length = ToUint32(this.length);
+
+ %RemoveArrayHoles(this);
+
+ var length = ToUint32(this.length);
+
+ // Bottom-up max-heap construction.
+ for (var i = 1; i < length; ++i) {
+ var child_index = i;
+ while (child_index > 0) {
+ var parent_index = ((child_index + 1) >> 1) - 1;
+ var parent_value = this[parent_index], child_value = this[child_index];
+ if (Compare(parent_value, child_value) < 0) {
+ this[parent_index] = child_value;
+ this[child_index] = parent_value;
+ } else {
+ break;
+ }
+ child_index = parent_index;
+ }
+ }
+
+ // Extract element and create sorted array.
+ for (var i = length - 1; i > 0; --i) {
+ // Put the max element at the back of the array.
+ var t0 = this[0]; this[0] = this[i]; this[i] = t0;
+ // Sift down the new top element.
+ var parent_index = 0;
+ while (true) {
+ var child_index = ((parent_index + 1) << 1) - 1;
+ if (child_index >= i) break;
+ var child1_value = this[child_index];
+ var child2_value = this[child_index + 1];
+ var parent_value = this[parent_index];
+ if (child_index + 1 >= i || Compare(child1_value, child2_value) > 0) {
+ if (Compare(parent_value, child1_value) > 0) break;
+ this[child_index] = parent_value;
+ this[parent_index] = child1_value;
+ parent_index = child_index;
+ } else {
+ if (Compare(parent_value, child2_value) > 0) break;
+ this[child_index + 1] = parent_value;
+ this[parent_index] = child2_value;
+ parent_index = child_index + 1;
+ }
+ }
+ }
+
+ // We only changed the length of the this object (in
+ // RemoveArrayHoles) if it was an array. We are not allowed to set
+ // the length of the this object if it is not an array because this
+ // might introduce a new length property.
+ if (IS_ARRAY(this)) {
+ this.length = old_length;
+ }
+
+ return this;
+};
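A usage sketch of the Compare function above: without a comparefn, elements are compared as strings, and undefined always sorts last.

    [10, 2, 1].sort();          // => [1, 10, 2]   ("10" < "2" as strings)
    [2, undefined, 1].sort();   // => [1, 2, undefined]
    [10, 2, 1].sort(function(x, y) { return x - y; });   // => [1, 2, 10]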
+
+
+// The following functions cannot be made efficient on sparse arrays while
+// preserving the semantics, since the calls to the receiver function can add
+// or delete elements from the array.
+
+function ArrayFilter(f, receiver) {
+ if (!IS_FUNCTION(f)) {
+ throw MakeTypeError('called_non_callable', [ f ]);
+ }
+ // Pull out the length so that modifications to the length in the
+ // loop will not affect the looping.
+ var length = this.length;
+ var result = [];
+ for (var i = 0; i < length; i++) {
+ var current = this[i];
+ if (!IS_UNDEFINED(current) || i in this) {
+ if (f.call(receiver, current, i, this)) result.push(current);
+ }
+ }
+ return result;
+};
+
+
+function ArrayForEach(f, receiver) {
+ if (!IS_FUNCTION(f)) {
+ throw MakeTypeError('called_non_callable', [ f ]);
+ }
+ // Pull out the length so that modifications to the length in the
+ // loop will not affect the looping.
+ var length = this.length;
+ for (var i = 0; i < length; i++) {
+ var current = this[i];
+ if (!IS_UNDEFINED(current) || i in this) {
+ f.call(receiver, current, i, this);
+ }
+ }
+};
+
+
+// Executes the function once for each element present in the
+// array until it finds one where callback returns true.
+function ArraySome(f, receiver) {
+ if (!IS_FUNCTION(f)) {
+ throw MakeTypeError('called_non_callable', [ f ]);
+ }
+ // Pull out the length so that modifications to the length in the
+ // loop will not affect the looping.
+ var length = this.length;
+ for (var i = 0; i < length; i++) {
+ var current = this[i];
+ if (!IS_UNDEFINED(current) || i in this) {
+ if (f.call(receiver, current, i, this)) return true;
+ }
+ }
+ return false;
+};
+
+
+function ArrayEvery(f, receiver) {
+ if (!IS_FUNCTION(f)) {
+ throw MakeTypeError('called_non_callable', [ f ]);
+ }
+ // Pull out the length so that modifications to the length in the
+ // loop will not affect the looping.
+ var length = this.length;
+ for (var i = 0; i < length; i++) {
+ var current = this[i];
+ if (!IS_UNDEFINED(current) || i in this) {
+ if (!f.call(receiver, current, i, this)) return false;
+ }
+ }
+
+ return true;
+};
+
+
+function ArrayMap(f, receiver) {
+ if (!IS_FUNCTION(f)) {
+ throw MakeTypeError('called_non_callable', [ f ]);
+ }
+ // Pull out the length so that modifications to the length in the
+ // loop will not affect the looping.
+ var length = this.length;
+ var result = new $Array(length);
+ for (var i = 0; i < length; i++) {
+ var current = this[i];
+ if (!IS_UNDEFINED(current) || i in this) {
+ result[i] = f.call(receiver, current, i, this);
+ }
+ }
+ return result;
+};
+
+
+function ArrayIndexOf(element, index) {
+ var length = this.length;
+ if (index == null) {
+ index = 0;
+ } else {
+ index = TO_INTEGER(index);
+ // If index is negative, index from the end of the array.
+ if (index < 0) index = length + index;
+ // If index is still negative, search the entire array.
+ if (index < 0) index = 0;
+ }
+ // Lookup through the array.
+ for (var i = index; i < length; i++) {
+ var current = this[i];
+ if (!IS_UNDEFINED(current) || i in this) {
+ if (current === element) return i;
+ }
+ }
+ return -1;
+};
+
+
+function ArrayLastIndexOf(element, index) {
+ var length = this.length;
+ if (index == null) {
+ index = length - 1;
+ } else {
+ index = TO_INTEGER(index);
+ // If index is negative, index from end of the array.
+ if (index < 0) index = length + index;
+ // If index is still negative, do not search the array.
+ if (index < 0) index = -1;
+ else if (index >= length) index = length - 1;
+ }
+ // Lookup through the array.
+ for (var i = index; i >= 0; i--) {
+ var current = this[i];
+ if (!IS_UNDEFINED(current) || i in this) {
+ if (current === element) return i;
+ }
+ }
+ return -1;
+};
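Both searches above accept a negative start index that counts back from the end; a quick illustration:

    var a = ['a', 'b', 'a', 'b'];
    a.indexOf('b', -2);       // => 3  (forward search starts at index 2)
    a.lastIndexOf('b', -2);   // => 1  (backward search starts at index 2)
    a.indexOf('z');           // => -1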
+
+
+// -------------------------------------------------------------------
+
+function InstallProperties(prototype, attributes, properties) {
+ for (var key in properties) {
+ %AddProperty(prototype, key, properties[key], attributes);
+ }
+};
+
+
+function UpdateFunctionLengths(lengths) {
+ for (var key in lengths) {
+ %FunctionSetLength(this[key], lengths[key]);
+ }
+};
+
+
+// -------------------------------------------------------------------
+
+function SetupArray() {
+ // Setup non-enumerable properties of the Array.prototype object.
+ InstallProperties($Array.prototype, DONT_ENUM, {
+ constructor: $Array,
+ toString: ArrayToString,
+ toLocaleString: ArrayToLocaleString,
+ join: ArrayJoin,
+ pop: ArrayPop,
+ push: ArrayPush,
+ concat: ArrayConcat,
+ reverse: ArrayReverse,
+ shift: ArrayShift,
+ unshift: ArrayUnshift,
+ slice: ArraySlice,
+ splice: ArraySplice,
+ sort: ArraySort,
+ filter: ArrayFilter,
+ forEach: ArrayForEach,
+ some: ArraySome,
+ every: ArrayEvery,
+ map: ArrayMap,
+ indexOf: ArrayIndexOf,
+ lastIndexOf: ArrayLastIndexOf
+ });
+
+ // Manipulate the length of some of the functions to meet
+ // expectations set by ECMA-262 or Mozilla.
+ UpdateFunctionLengths({
+ ArrayFilter: 1,
+ ArrayForEach: 1,
+ ArraySome: 1,
+ ArrayEvery: 1,
+ ArrayMap: 1,
+ ArrayIndexOf: 1,
+ ArrayLastIndexOf: 1,
+ ArrayPush: 1
+ });
+};
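Once SetupArray has run, the adjusted lengths from UpdateFunctionLengths are observable from script; for example:

    Array.prototype.push.length;      // => 1, although push accepts any number of arguments
    Array.prototype.indexOf.length;   // => 1, the optional fromIndex is not counted
    Array.prototype.splice.length;    // => 2, its natural length (start, delete_count)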
+
+
+SetupArray();
diff --git a/src/assembler-arm-inl.h b/src/assembler-arm-inl.h
new file mode 100644
index 0000000..4d9a46c
--- /dev/null
+++ b/src/assembler-arm-inl.h
@@ -0,0 +1,220 @@
+// Copyright 2007-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ASSEMBLER_ARM_INL_H_
+#define V8_ASSEMBLER_ARM_INL_H_
+
+#include "assembler-arm.h"
+#include "cpu.h"
+
+
+namespace v8 { namespace internal {
+
+Condition NegateCondition(Condition cc) {
+ ASSERT(cc != al);
+ return static_cast<Condition>(cc ^ ne);
+}
+
+
+void RelocInfo::apply(int delta) {
+ // We do not use pc relative addressing on ARM, so there is nothing to do.
+}
+
+
+Address RelocInfo::target_address() {
+ ASSERT(is_code_target(rmode_));
+ return Assembler::target_address_at(pc_);
+}
+
+
+void RelocInfo::set_target_address(Address target) {
+ ASSERT(is_code_target(rmode_));
+ Assembler::set_target_address_at(pc_, target);
+}
+
+
+Object* RelocInfo::target_object() {
+ ASSERT(is_code_target(rmode_) || rmode_ == embedded_object);
+ return reinterpret_cast<Object*>(Assembler::target_address_at(pc_));
+}
+
+
+Object** RelocInfo::target_object_address() {
+ ASSERT(is_code_target(rmode_) || rmode_ == embedded_object);
+ return reinterpret_cast<Object**>(Assembler::target_address_address_at(pc_));
+}
+
+
+void RelocInfo::set_target_object(Object* target) {
+ ASSERT(is_code_target(rmode_) || rmode_ == embedded_object);
+ Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
+}
+
+
+Address* RelocInfo::target_reference_address() {
+ ASSERT(rmode_ == external_reference);
+ return reinterpret_cast<Address*>(pc_);
+}
+
+
+Address RelocInfo::call_address() {
+ ASSERT(is_call_instruction());
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+
+void RelocInfo::set_call_address(Address target) {
+ ASSERT(is_call_instruction());
+ UNIMPLEMENTED();
+}
+
+
+Object* RelocInfo::call_object() {
+ ASSERT(is_call_instruction());
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+
+Object** RelocInfo::call_object_address() {
+ ASSERT(is_call_instruction());
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+
+void RelocInfo::set_call_object(Object* target) {
+ ASSERT(is_call_instruction());
+ UNIMPLEMENTED();
+}
+
+
+bool RelocInfo::is_call_instruction() {
+ UNIMPLEMENTED();
+ return false;
+}
+
+
+Operand::Operand(int32_t immediate, RelocMode rmode) {
+ rm_ = no_reg;
+ imm32_ = immediate;
+ rmode_ = rmode;
+}
+
+
+Operand::Operand(const char* s) {
+ rm_ = no_reg;
+ imm32_ = reinterpret_cast<int32_t>(s);
+ rmode_ = embedded_string;
+}
+
+
+Operand::Operand(const ExternalReference& f) {
+ rm_ = no_reg;
+ imm32_ = reinterpret_cast<int32_t>(f.address());
+ rmode_ = external_reference;
+}
+
+
+Operand::Operand(Object** opp) {
+ rm_ = no_reg;
+ imm32_ = reinterpret_cast<int32_t>(opp);
+ rmode_ = no_reloc;
+}
+
+
+Operand::Operand(Context** cpp) {
+ rm_ = no_reg;
+ imm32_ = reinterpret_cast<int32_t>(cpp);
+ rmode_ = no_reloc;
+}
+
+
+Operand::Operand(Smi* value) {
+ rm_ = no_reg;
+ imm32_ = reinterpret_cast<intptr_t>(value);
+ rmode_ = no_reloc;
+}
+
+
+Operand::Operand(Register rm) {
+ rm_ = rm;
+ rs_ = no_reg;
+ shift_op_ = LSL;
+ shift_imm_ = 0;
+}
+
+
+void Assembler::CheckBuffer() {
+ if (buffer_space() <= kGap) {
+ GrowBuffer();
+ }
+ if (pc_offset() > next_buffer_check_) {
+ CheckConstPool(false, true);
+ }
+}
+
+
+void Assembler::emit(Instr x) {
+ CheckBuffer();
+ *reinterpret_cast<Instr*>(pc_) = x;
+ pc_ += kInstrSize;
+}
+
+
+Address Assembler::target_address_address_at(Address pc) {
+ Instr instr = Memory::int32_at(pc);
+ // Verify that the instruction at pc is a ldr<cond> <Rd>, [pc +/- offset_12].
+ ASSERT((instr & 0x0f7f0000) == 0x051f0000);
+ int offset = instr & 0xfff; // offset_12 is unsigned
+ if ((instr & (1 << 23)) == 0) offset = -offset; // U bit defines offset sign
+ // Verify that the constant pool comes after the instruction referencing it.
+ ASSERT(offset >= -4);
+ return pc + offset + 8;
+}
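As a rough JavaScript re-expression of the decoding above (the function name and sample values are illustrative only): the instruction must be a pc-relative ldr, offset_12 gives the magnitude, the U bit gives the sign, and reading pc on ARM yields the instruction address plus 8.

    function targetAddressAddressAt(pc, instr) {
      if ((instr & 0x0f7f0000) !== 0x051f0000) throw new Error('not ldr rd, [pc, #off]');
      var offset = instr & 0xfff;                       // offset_12 is unsigned
      if ((instr & (1 << 23)) === 0) offset = -offset;  // U bit defines the sign
      return pc + offset + 8;
    }
    // 0xe59f0004 encodes "ldr r0, [pc, #4]": the constant pool slot is at pc + 4 + 8.
    targetAddressAddressAt(0x1000, 0xe59f0004);         // => 0x100c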
+
+
+Address Assembler::target_address_at(Address pc) {
+ return Memory::Address_at(target_address_address_at(pc));
+}
+
+
+void Assembler::set_target_address_at(Address pc, Address target) {
+ Memory::Address_at(target_address_address_at(pc)) = target;
+ // Intuitively, we would think it is necessary to flush the instruction cache
+ // after patching a target address in the code as follows:
+ // CPU::FlushICache(pc, sizeof(target));
+ // However, on ARM, no instruction was actually patched by the assignment
+ // above; the target address is not part of an instruction, it is patched in
+ // the constant pool and is read via a data access; the instruction accessing
+ // this address in the constant pool remains unchanged.
+}
+
+} } // namespace v8::internal
+
+#endif // V8_ASSEMBLER_ARM_INL_H_
diff --git a/src/assembler-arm.cc b/src/assembler-arm.cc
new file mode 100644
index 0000000..4287376
--- /dev/null
+++ b/src/assembler-arm.cc
@@ -0,0 +1,1477 @@
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "assembler-arm-inl.h"
+
+namespace v8 { namespace internal {
+
+DEFINE_bool(debug_code, false,
+ "generate extra code (comments, assertions) for debugging");
+
+
+// -----------------------------------------------------------------------------
+// Implementation of Register and CRegister
+
+Register no_reg = { -1 };
+
+Register r0 = { 0 };
+Register r1 = { 1 };
+Register r2 = { 2 };
+Register r3 = { 3 };
+Register r4 = { 4 };
+Register r5 = { 5 };
+Register r6 = { 6 };
+Register r7 = { 7 };
+Register r8 = { 8 };
+Register r9 = { 9 };
+Register r10 = { 10 };
+Register fp = { 11 };
+Register ip = { 12 };
+Register sp = { 13 };
+Register lr = { 14 };
+Register pc = { 15 };
+
+
+CRegister no_creg = { -1 };
+
+CRegister cr0 = { 0 };
+CRegister cr1 = { 1 };
+CRegister cr2 = { 2 };
+CRegister cr3 = { 3 };
+CRegister cr4 = { 4 };
+CRegister cr5 = { 5 };
+CRegister cr6 = { 6 };
+CRegister cr7 = { 7 };
+CRegister cr8 = { 8 };
+CRegister cr9 = { 9 };
+CRegister cr10 = { 10 };
+CRegister cr11 = { 11 };
+CRegister cr12 = { 12 };
+CRegister cr13 = { 13 };
+CRegister cr14 = { 14 };
+CRegister cr15 = { 15 };
+
+
+// In order to determine the pc store offset, we execute a small code sequence.
+// See ARM Architecture Reference Manual section A-2.4.3
+// Note that 'str pc, [sp]' and 'stmia sp, {pc}' were using different offsets
+// under the QEMU emulator (now fixed), so we are careful to test the actual
+// instruction we are interested in (stmia).
+int PcStoreOffset() {
+#if !defined(__arm__)
+ // Building an ARM emulator based target. The emulator is wired for 8 byte
+ // pc offsets as is the default in the spec.
+ static int pc_store_offset = 8;
+#elif defined(__arm__) && !defined(__thumb__)
+ // __arm__ may be defined in thumb mode.
+ static int pc_store_offset = -1;
+ asm volatile(
+ "sub sp, sp, #4 \n\t"
+ "sub r1, pc, #4 \n\t"
+ "stmia sp, {pc} \n\t"
+ "ldr r0, [sp] \n\t"
+ "add sp, sp, #4 \n\t"
+ "sub %0, r0, r1 \n\t"
+ : "=r" (pc_store_offset) : : "r0", "r1", "memory");
+#elif defined(__thumb__)
+ static int pc_store_offset = -1;
+ asm volatile(
+ "@ Enter ARM Mode \n\t"
+ "adr r2, 1f \n\t"
+ "bx r2 \n\t"
+ ".ALIGN 4 \n\t"
+ ".ARM \n"
+ "1: sub sp, sp, #4 \n\t"
+ "sub r1, pc, #4 \n\t"
+ "stmia sp, {pc} \n\t"
+ "ldr r0, [sp] \n\t"
+ "add sp, sp, #4 \n\t"
+ "sub %0, r0, r1 \n"
+ "@ Enter THUMB Mode\n\t"
+ "adr r2, 2f+1 \n\t"
+ "bx r2 \n\t"
+ ".THUMB \n"
+ "2: \n\t"
+ : "=r" (pc_store_offset) : : "r0", "r1", "r2", "memory");
+#else
+#error unsupported architecture
+#endif
+ ASSERT(pc_store_offset == 8 || pc_store_offset == 12);
+ return pc_store_offset;
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of RelocInfo
+
+const int RelocInfo::kApplyMask = 0;
+
+
+void RelocInfo::patch_code(byte* instructions, int instruction_count) {
+ // Patch the code at the current address with the supplied instructions.
+ UNIMPLEMENTED();
+}
+
+
+// Patch the code at the current PC with a call to the target address.
+// Additional guard int3 instructions can be added if required.
+void RelocInfo::patch_code_with_call(Address target, int guard_bytes) {
+ // Patch the code at the current address with a call to the target.
+ UNIMPLEMENTED();
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of Operand and MemOperand
+// See assembler-arm-inl.h for inlined constructors
+
+Operand::Operand(Handle<Object> handle) {
+ rm_ = no_reg;
+ // Verify all Objects referred by code are NOT in new space.
+ Object* obj = *handle;
+ ASSERT(!Heap::InNewSpace(obj));
+ if (obj->IsHeapObject()) {
+ imm32_ = reinterpret_cast<intptr_t>(handle.location());
+ rmode_ = embedded_object;
+ } else {
+ // no relocation needed
+ imm32_ = reinterpret_cast<intptr_t>(obj);
+ rmode_ = no_reloc;
+ }
+}
+
+
+Operand::Operand(Register rm, ShiftOp shift_op, int shift_imm) {
+ ASSERT(is_uint5(shift_imm));
+ ASSERT(shift_op != ROR || shift_imm != 0); // use RRX if you mean it
+ rm_ = rm;
+ rs_ = no_reg;
+ shift_op_ = shift_op;
+ shift_imm_ = shift_imm & 31;
+ if (shift_op == RRX) {
+ // encoded as ROR with shift_imm == 0
+ ASSERT(shift_imm == 0);
+ shift_op_ = ROR;
+ shift_imm_ = 0;
+ }
+}
+
+
+Operand::Operand(Register rm, ShiftOp shift_op, Register rs) {
+ ASSERT(shift_op != RRX);
+ rm_ = rm;
+ rs_ = no_reg;
+ shift_op_ = shift_op;
+ rs_ = rs;
+}
+
+
+MemOperand::MemOperand(Register rn, int32_t offset, AddrMode am) {
+ rn_ = rn;
+ rm_ = no_reg;
+ offset_ = offset;
+ am_ = am;
+}
+
+MemOperand::MemOperand(Register rn, Register rm, AddrMode am) {
+ rn_ = rn;
+ rm_ = rm;
+ shift_op_ = LSL;
+ shift_imm_ = 0;
+ am_ = am;
+}
+
+
+MemOperand::MemOperand(Register rn, Register rm,
+ ShiftOp shift_op, int shift_imm, AddrMode am) {
+ ASSERT(is_uint5(shift_imm));
+ rn_ = rn;
+ rm_ = rm;
+ shift_op_ = shift_op;
+ shift_imm_ = shift_imm & 31;
+ am_ = am;
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of Assembler
+
+// Instruction encoding bits
+enum {
+ H = 1 << 5, // halfword (or byte)
+ S6 = 1 << 6, // signed (or unsigned)
+ L = 1 << 20, // load (or store)
+ S = 1 << 20, // set condition code (or leave unchanged)
+ W = 1 << 21, // writeback base register (or leave unchanged)
+ A = 1 << 21, // accumulate in multiply instruction (or not)
+ B = 1 << 22, // unsigned byte (or word)
+ N = 1 << 22, // long (or short)
+ U = 1 << 23, // positive (or negative) offset/index
+ P = 1 << 24, // offset/pre-indexed addressing (or post-indexed addressing)
+ I = 1 << 25, // immediate shifter operand (or not)
+
+ B4 = 1 << 4,
+ B5 = 1 << 5,
+ B7 = 1 << 7,
+ B8 = 1 << 8,
+ B12 = 1 << 12,
+ B16 = 1 << 16,
+ B20 = 1 << 20,
+ B21 = 1 << 21,
+ B22 = 1 << 22,
+ B23 = 1 << 23,
+ B24 = 1 << 24,
+ B25 = 1 << 25,
+ B26 = 1 << 26,
+ B27 = 1 << 27,
+
+ // Instruction bit masks
+ CondMask = 15 << 28,
+ OpCodeMask = 15 << 21, // in data-processing instructions
+ Imm24Mask = (1 << 24) - 1,
+ Off12Mask = (1 << 12) - 1,
+ // Reserved condition
+ nv = 15 << 28
+};
+
+
+// spare_buffer_
+static const int kMinimalBufferSize = 4*KB;
+static byte* spare_buffer_ = NULL;
+
+Assembler::Assembler(void* buffer, int buffer_size) {
+ if (buffer == NULL) {
+ // do our own buffer management
+ if (buffer_size <= kMinimalBufferSize) {
+ buffer_size = kMinimalBufferSize;
+
+ if (spare_buffer_ != NULL) {
+ buffer = spare_buffer_;
+ spare_buffer_ = NULL;
+ }
+ }
+ if (buffer == NULL) {
+ buffer_ = NewArray<byte>(buffer_size);
+ } else {
+ buffer_ = static_cast<byte*>(buffer);
+ }
+ buffer_size_ = buffer_size;
+ own_buffer_ = true;
+
+ } else {
+ // use externally provided buffer instead
+ ASSERT(buffer_size > 0);
+ buffer_ = static_cast<byte*>(buffer);
+ buffer_size_ = buffer_size;
+ own_buffer_ = false;
+ }
+
+ // setup buffer pointers
+ ASSERT(buffer_ != NULL);
+ pc_ = buffer_;
+ reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
+ num_prinfo_ = 0;
+ next_buffer_check_ = 0;
+ no_const_pool_before_ = 0;
+ last_const_pool_end_ = 0;
+ last_bound_pos_ = 0;
+ last_position_ = kNoPosition;
+ last_position_is_statement_ = false;
+}
+
+
+Assembler::~Assembler() {
+ if (own_buffer_) {
+ if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
+ spare_buffer_ = buffer_;
+ } else {
+ DeleteArray(buffer_);
+ }
+ }
+}
+
+
+void Assembler::GetCode(CodeDesc* desc) {
+ // finalize code
+ if (unbound_label_.is_linked())
+ bind_to(&unbound_label_, binding_pos_);
+
+ // emit constant pool if necessary
+ CheckConstPool(true, false);
+ ASSERT(num_prinfo_ == 0);
+
+ // setup desc
+ desc->buffer = buffer_;
+ desc->buffer_size = buffer_size_;
+ desc->instr_size = pc_offset();
+ desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+}
+
+
+void Assembler::Align(int m) {
+ ASSERT(m >= 4 && IsPowerOf2(m));
+ while ((pc_offset() & (m - 1)) != 0) {
+ nop();
+ }
+}
+
+
+// Labels refer to positions in the (to be) generated code.
+// There are bound, linked, and unused labels.
+//
+// Bound labels refer to known positions in the already
+// generated code. pos() is the position the label refers to.
+//
+// Linked labels refer to unknown positions in the code
+// to be generated; pos() is the position of the last
+// instruction using the label.
+
+
+// The link chain is terminated by a negative code position (must be aligned)
+const int kEndOfChain = -4;
+
+
+int Assembler::target_at(int pos) {
+ Instr instr = instr_at(pos);
+ ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx imm24
+ int imm26 = ((instr & Imm24Mask) << 8) >> 6;
+ if ((instr & CondMask) == nv && (instr & B24) != 0)
+ // blx uses bit 24 to encode bit 2 of imm26
+ imm26 += 2;
+
+ return pos + 8 + imm26;
+}
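A minimal JavaScript sketch of the imm24 decoding above, ignoring the blx special case (names and sample values are illustrative): the 24-bit field is sign-extended, scaled to a byte offset, and taken relative to pc + 8.

    function branchTargetAt(pos, instr) {
      var imm26 = ((instr & 0x00ffffff) << 8) >> 6;   // sign-extend imm24, then * 4
      return pos + 8 + imm26;
    }
    // 0xeafffffe is "b ." (branch to self): imm24 = -2, i.e. -8 bytes from pc + 8.
    branchTargetAt(0x40, 0xeafffffe);                 // => 0x40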
+
+
+void Assembler::target_at_put(int pos, int target_pos) {
+ int imm26 = target_pos - pos - 8;
+ Instr instr = instr_at(pos);
+ ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx imm24
+ if ((instr & CondMask) == nv) {
+ // blx uses bit 24 to encode bit 2 of imm26
+ ASSERT((imm26 & 1) == 0);
+ instr = (instr & ~(B24 | Imm24Mask)) | ((imm26 & 2) >> 1)*B24;
+ } else {
+ ASSERT((imm26 & 3) == 0);
+ instr &= ~Imm24Mask;
+ }
+ int imm24 = imm26 >> 2;
+ ASSERT(is_int24(imm24));
+ instr_at_put(pos, instr | (imm24 & Imm24Mask));
+}
+
+
+void Assembler::print(Label* L) {
+ if (L->is_unused()) {
+ PrintF("unused label\n");
+ } else if (L->is_bound()) {
+ PrintF("bound label to %d\n", L->pos());
+ } else if (L->is_linked()) {
+ Label l = *L;
+ PrintF("unbound label");
+ while (l.is_linked()) {
+ PrintF("@ %d ", l.pos());
+ Instr instr = instr_at(l.pos());
+ ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx
+ int cond = instr & CondMask;
+ const char* b;
+ const char* c;
+ if (cond == nv) {
+ b = "blx";
+ c = "";
+ } else {
+ if ((instr & B24) != 0)
+ b = "bl";
+ else
+ b = "b";
+
+ switch (cond) {
+ case eq: c = "eq"; break;
+ case ne: c = "ne"; break;
+ case hs: c = "hs"; break;
+ case lo: c = "lo"; break;
+ case mi: c = "mi"; break;
+ case pl: c = "pl"; break;
+ case vs: c = "vs"; break;
+ case vc: c = "vc"; break;
+ case hi: c = "hi"; break;
+ case ls: c = "ls"; break;
+ case ge: c = "ge"; break;
+ case lt: c = "lt"; break;
+ case gt: c = "gt"; break;
+ case le: c = "le"; break;
+ case al: c = ""; break;
+ default:
+ c = "";
+ UNREACHABLE();
+ }
+ }
+ PrintF("%s%s\n", b, c);
+ next(&l);
+ }
+ } else {
+ PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
+ }
+}
+
+
+DEFINE_bool(eliminate_jumps, true, "eliminate jumps to jumps in assembly code");
+DEFINE_bool(print_jump_elimination, false,
+ "print elimination of jumps to jumps in assembly code");
+
+void Assembler::bind_to(Label* L, int pos) {
+ ASSERT(0 <= pos && pos <= pc_offset()); // must have a valid binding position
+ while (L->is_linked()) {
+ int fixup_pos = L->pos();
+ next(L); // call next before overwriting link with target at fixup_pos
+ target_at_put(fixup_pos, pos);
+ }
+ L->bind_to(pos);
+
+ // do not eliminate jump instructions before the last bound position
+ if (pos > last_bound_pos_)
+ last_bound_pos_ = pos;
+}
+
+
+void Assembler::link_to(Label* L, Label* appendix) {
+ if (appendix->is_linked()) {
+ if (L->is_linked()) {
+ // append appendix to L's list
+ int fixup_pos;
+ int link = L->pos();
+ do {
+ fixup_pos = link;
+ link = target_at(fixup_pos);
+ } while (link > 0);
+ ASSERT(link == kEndOfChain);
+ target_at_put(fixup_pos, appendix->pos());
+ } else {
+ // L is empty, simply use appendix
+ *L = *appendix;
+ }
+ }
+ appendix->Unuse(); // appendix should not be used anymore
+}
+
+
+void Assembler::bind(Label* L) {
+ ASSERT(!L->is_bound()); // label can only be bound once
+ if (FLAG_eliminate_jumps) {
+ // Resolve unbound label.
+ if (unbound_label_.is_linked()) {
+ // Unbound label exists => link it with L if same binding
+ // position, otherwise fix it.
+ if (binding_pos_ == pc_offset()) {
+ // Link it to L's list.
+ link_to(L, &unbound_label_);
+ } else {
+ // Otherwise bind unbound label.
+ ASSERT(binding_pos_ < pc_offset());
+ bind_to(&unbound_label_, binding_pos_);
+ }
+ }
+ ASSERT(!unbound_label_.is_linked());
+ // Try to eliminate jumps to next instruction.
+ Instr instr;
+ // Do not remove an already bound jump target.
+ while (last_bound_pos_ < pc_offset() &&
+ reloc_info_writer.last_pc() <= pc_ - kInstrSize &&
+ L->is_linked() && L->pos() == pc_offset() - kInstrSize &&
+ (((instr = instr_at(L->pos())) & CondMask) != nv && // not blx
+ (instr & 15*B24) == 10*B24)) { // b<cond>, but not bl<cond>
+ // Previous instruction is b<cond> jumping immediately after it
+ // => eliminate it
+ if (FLAG_print_jump_elimination)
+ PrintF("@ %d jump to next eliminated\n", L->pos());
+ // Remove first entry from label list.
+ next(L);
+ // Eliminate instruction (set code pointers back).
+ pc_ -= kInstrSize;
+ // Make sure not to skip relocation information when rewinding.
+ ASSERT(reloc_info_writer.last_pc() <= pc_);
+ }
+ // delay fixup of L => store it as unbound label
+ unbound_label_ = *L;
+ binding_pos_ = pc_offset();
+ L->Unuse();
+ }
+ bind_to(L, pc_offset());
+}
+
+
+void Assembler::next(Label* L) {
+ ASSERT(L->is_linked());
+ int link = target_at(L->pos());
+ if (link > 0) {
+ L->link_to(link);
+ } else {
+ ASSERT(link == kEndOfChain);
+ L->Unuse();
+ }
+}
+
+
+// Low-level code emission routines depending on the addressing mode
+static bool fits_shifter(uint32_t imm32,
+ uint32_t* rotate_imm,
+ uint32_t* immed_8,
+ Instr* instr) {
+ // imm32 must be unsigned
+ for (int rot = 0; rot < 16; rot++) {
+ uint32_t imm8 = (imm32 << 2*rot) | (imm32 >> (32 - 2*rot));
+ if ((imm8 <= 0xff)) {
+ *rotate_imm = rot;
+ *immed_8 = imm8;
+ return true;
+ }
+ }
+ // if the opcode is mov or mvn and if ~imm32 fits, change the opcode
+ if (instr != NULL && (*instr & 0xd*B21) == 0xd*B21) {
+ if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
+ *instr ^= 0x2*B21;
+ return true;
+ }
+ }
+ return false;
+}
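The test above asks whether imm32 is an 8-bit value rotated right by an even amount, which is what an ARM data-processing immediate can encode. A compact JavaScript sketch of just that check (omitting the mov/mvn fallback), with illustrative names:

    function fitsShifter(imm32) {
      for (var rot = 0; rot < 16; rot++) {
        var imm8 = ((imm32 << 2 * rot) | (imm32 >>> (32 - 2 * rot))) >>> 0;
        if (imm8 <= 0xff) return { rotate_imm: rot, immed_8: imm8 };
      }
      return null;  // must be loaded via the constant pool and ip instead
    }
    fitsShifter(0xff000000);   // => { rotate_imm: 4, immed_8: 0xff }
    fitsShifter(0x12345678);   // => null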
+
+
+void Assembler::addrmod1(Instr instr,
+ Register rn,
+ Register rd,
+ const Operand& x) {
+ CheckBuffer();
+ ASSERT((instr & ~(CondMask | OpCodeMask | S)) == 0);
+ if (!x.rm_.is_valid()) {
+ // immediate
+ uint32_t rotate_imm;
+ uint32_t immed_8;
+ if ((x.rmode_ != no_reloc && x.rmode_ != external_reference) ||
+ !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
+ // The immediate operand cannot be encoded as a shifter operand, so load
+ // it first to register ip and change the original instruction to use ip.
+ // However, if the original instruction is a 'mov rd, x' (not setting the
+ // condition code), then replace it with a 'ldr rd, [pc]'
+ RecordRelocInfo(x.rmode_, x.imm32_);
+ ASSERT(!rn.is(ip)); // rn should never be ip, or will be trashed
+ Condition cond = static_cast<Condition>(instr & CondMask);
+ if ((instr & ~CondMask) == 13*B21) { // mov, S not set
+ ldr(rd, MemOperand(pc, 0), cond);
+ } else {
+ ldr(ip, MemOperand(pc, 0), cond);
+ addrmod1(instr, rn, rd, Operand(ip));
+ }
+ return;
+ }
+ instr |= I | rotate_imm*B8 | immed_8;
+ } else if (!x.rs_.is_valid()) {
+ // immediate shift
+ instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
+ } else {
+ // register shift
+ ASSERT(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc));
+ instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code();
+ }
+ emit(instr | rn.code()*B16 | rd.code()*B12);
+ if (rn.is(pc) || x.rm_.is(pc))
+ // block constant pool emission for one instruction after reading pc
+ BlockConstPoolBefore(pc_offset() + kInstrSize);
+}
+
+
+void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
+ ASSERT((instr & ~(CondMask | B | L)) == B26);
+ int am = x.am_;
+ if (!x.rm_.is_valid()) {
+ // immediate offset
+ int offset_12 = x.offset_;
+ if (offset_12 < 0) {
+ offset_12 = -offset_12;
+ am ^= U;
+ }
+ if (!is_uint12(offset_12)) {
+ // immediate offset cannot be encoded, load it first to register ip
+ // rn (and rd in a load) should never be ip, or will be trashed
+ ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
+ mov(ip, Operand(x.offset_), LeaveCC,
+ static_cast<Condition>(instr & CondMask));
+ addrmod2(instr, rd, MemOperand(x.rn_, ip, x.am_));
+ return;
+ }
+ ASSERT(offset_12 >= 0); // no masking needed
+ instr |= offset_12;
+ } else {
+ // register offset (shift_imm_ and shift_op_ are 0) or scaled register
+ // offset; the constructors make sure that both shift_imm_ and
+ // shift_op_ are initialized
+ ASSERT(!x.rm_.is(pc));
+ instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
+ }
+ ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback
+ emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
+}
+
+
+void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
+ ASSERT((instr & ~(CondMask | L | S6 | H)) == (B4 | B7));
+ ASSERT(x.rn_.is_valid());
+ int am = x.am_;
+ if (!x.rm_.is_valid()) {
+ // immediate offset
+ int offset_8 = x.offset_;
+ if (offset_8 < 0) {
+ offset_8 = -offset_8;
+ am ^= U;
+ }
+ if (!is_uint8(offset_8)) {
+ // immediate offset cannot be encoded, load it first to register ip
+ // rn (and rd in a load) should never be ip, or will be trashed
+ ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
+ mov(ip, Operand(x.offset_), LeaveCC,
+ static_cast<Condition>(instr & CondMask));
+ addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
+ return;
+ }
+ ASSERT(offset_8 >= 0); // no masking needed
+ instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf);
+ } else if (x.shift_imm_ != 0) {
+ // scaled register offset not supported, load index first
+ // rn (and rd in a load) should never be ip, or will be trashed
+ ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
+ mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
+ static_cast<Condition>(instr & CondMask));
+ addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
+ return;
+ } else {
+ // register offset
+ ASSERT((am & (P|W)) == P || !x.rm_.is(pc)); // no pc index with writeback
+ instr |= x.rm_.code();
+ }
+ ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback
+ emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
+}
+
+
+void Assembler::addrmod4(Instr instr, Register rn, RegList rl) {
+ ASSERT((instr & ~(CondMask | P | U | W | L)) == B27);
+ ASSERT(rl != 0);
+ ASSERT(!rn.is(pc));
+ emit(instr | rn.code()*B16 | rl);
+}
+
+
+void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
+ // unindexed addressing is not encoded by this function
+ ASSERT((instr & ~(CondMask | P | U | N | W | L)) == (B27 | B26));
+ ASSERT(x.rn_.is_valid() && !x.rm_.is_valid());
+ int am = x.am_;
+ int offset_8 = x.offset_;
+ ASSERT((offset_8 & 3) == 0); // offset must be an aligned word offset
+ offset_8 >>= 2;
+ if (offset_8 < 0) {
+ offset_8 = -offset_8;
+ am ^= U;
+ }
+ ASSERT(is_uint8(offset_8)); // unsigned word offset must fit in a byte
+ ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback
+
+ // post-indexed addressing requires W == 1; different than in addrmod2/3
+ if ((am & P) == 0)
+ am |= W;
+
+ ASSERT(offset_8 >= 0); // no masking needed
+ emit(instr | am | x.rn_.code()*B16 | crd.code()*B12 | offset_8);
+}
+
+
+int Assembler::branch_offset(Label* L, Condition cond) {
+ // if we emit an unconditional jump/call and if the current position is the
+ // target of the unbound label, we can change the binding position of the
+// unbound label, thereby eliminating an unnecessary jump
+ bool can_eliminate = false;
+ if (cond == al && FLAG_eliminate_jumps &&
+ unbound_label_.is_linked() && binding_pos_ == pc_offset()) {
+ can_eliminate = true;
+ if (FLAG_print_jump_elimination) {
+ PrintF("eliminated jumps/calls to %d from ", binding_pos_);
+ print(&unbound_label_);
+ }
+ }
+ int target_pos;
+ if (L->is_bound()) {
+ target_pos = L->pos();
+ if (can_eliminate)
+ binding_pos_ = target_pos;
+ } else {
+ if (can_eliminate)
+ link_to(L, &unbound_label_); // may modify L's link
+ if (L->is_linked())
+ target_pos = L->pos(); // L's link
+ else
+ target_pos = kEndOfChain;
+ L->link_to(pc_offset());
+ }
+
+ // Block the emission of the constant pool, since the branch instruction must
+ // be emitted at the pc offset recorded by the label
+ BlockConstPoolBefore(pc_offset() + kInstrSize);
+
+ return target_pos - pc_offset() - 8;
+}
+
+
+// Branch instructions
+void Assembler::b(int branch_offset, Condition cond) {
+ ASSERT((branch_offset & 3) == 0);
+ int imm24 = branch_offset >> 2;
+ ASSERT(is_int24(imm24));
+ emit(cond | B27 | B25 | (imm24 & Imm24Mask));
+
+ if (cond == al)
+ // dead code is a good location to emit the constant pool
+ CheckConstPool(false, false);
+}
+
+
+void Assembler::bl(int branch_offset, Condition cond) {
+ ASSERT((branch_offset & 3) == 0);
+ int imm24 = branch_offset >> 2;
+ ASSERT(is_int24(imm24));
+ emit(cond | B27 | B25 | B24 | (imm24 & Imm24Mask));
+}
+
+
+void Assembler::blx(int branch_offset) { // v5 and above
+ ASSERT((branch_offset & 1) == 0);
+ int h = ((branch_offset & 2) >> 1)*B24;
+ int imm24 = branch_offset >> 2;
+ ASSERT(is_int24(imm24));
+ emit(15 << 28 | B27 | B25 | h | (imm24 & Imm24Mask));
+}
+
+
+void Assembler::blx(Register target, Condition cond) { // v5 and above
+ ASSERT(!target.is(pc));
+ emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | 3*B4 | target.code());
+}
+
+
+void Assembler::bx(Register target, Condition cond) { // v5 and above, plus v4t
+ ASSERT(!target.is(pc)); // use of pc is actually allowed, but discouraged
+ emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | B4 | target.code());
+}
+
+
+// Data-processing instructions
+void Assembler::and_(Register dst, Register src1, const Operand& src2,
+ SBit s, Condition cond) {
+ addrmod1(cond | 0*B21 | s, src1, dst, src2);
+}
+
+
+void Assembler::eor(Register dst, Register src1, const Operand& src2,
+ SBit s, Condition cond) {
+ addrmod1(cond | 1*B21 | s, src1, dst, src2);
+}
+
+
+void Assembler::sub(Register dst, Register src1, const Operand& src2,
+ SBit s, Condition cond) {
+ addrmod1(cond | 2*B21 | s, src1, dst, src2);
+}
+
+
+void Assembler::rsb(Register dst, Register src1, const Operand& src2,
+ SBit s, Condition cond) {
+ addrmod1(cond | 3*B21 | s, src1, dst, src2);
+}
+
+
+void Assembler::add(Register dst, Register src1, const Operand& src2,
+ SBit s, Condition cond) {
+ addrmod1(cond | 4*B21 | s, src1, dst, src2);
+}
+
+
+void Assembler::adc(Register dst, Register src1, const Operand& src2,
+ SBit s, Condition cond) {
+ addrmod1(cond | 5*B21 | s, src1, dst, src2);
+}
+
+
+void Assembler::sbc(Register dst, Register src1, const Operand& src2,
+ SBit s, Condition cond) {
+ addrmod1(cond | 6*B21 | s, src1, dst, src2);
+}
+
+
+void Assembler::rsc(Register dst, Register src1, const Operand& src2,
+ SBit s, Condition cond) {
+ addrmod1(cond | 7*B21 | s, src1, dst, src2);
+}
+
+
+void Assembler::tst(Register src1, const Operand& src2, Condition cond) {
+ addrmod1(cond | 8*B21 | S, src1, r0, src2);
+}
+
+
+void Assembler::teq(Register src1, const Operand& src2, Condition cond) {
+ addrmod1(cond | 9*B21 | S, src1, r0, src2);
+}
+
+
+void Assembler::cmp(Register src1, const Operand& src2, Condition cond) {
+ addrmod1(cond | 10*B21 | S, src1, r0, src2);
+}
+
+
+void Assembler::cmn(Register src1, const Operand& src2, Condition cond) {
+ addrmod1(cond | 11*B21 | S, src1, r0, src2);
+}
+
+
+void Assembler::orr(Register dst, Register src1, const Operand& src2,
+ SBit s, Condition cond) {
+ addrmod1(cond | 12*B21 | s, src1, dst, src2);
+}
+
+
+void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
+ addrmod1(cond | 13*B21 | s, r0, dst, src);
+}
+
+
+void Assembler::bic(Register dst, Register src1, const Operand& src2,
+ SBit s, Condition cond) {
+ addrmod1(cond | 14*B21 | s, src1, dst, src2);
+}
+
+
+void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) {
+ addrmod1(cond | 15*B21 | s, r0, dst, src);
+}
+
+
+// Multiply instructions
+void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
+ SBit s, Condition cond) {
+ ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
+ ASSERT(!dst.is(src1));
+ emit(cond | A | s | dst.code()*B16 | srcA.code()*B12 |
+ src2.code()*B8 | B7 | B4 | src1.code());
+}
+
+
+void Assembler::mul(Register dst, Register src1, Register src2,
+ SBit s, Condition cond) {
+ ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
+ ASSERT(!dst.is(src1));
+ emit(cond | s | dst.code()*B16 | src2.code()*B8 | B7 | B4 | src1.code());
+}
+
+
+void Assembler::smlal(Register dstL,
+ Register dstH,
+ Register src1,
+ Register src2,
+ SBit s,
+ Condition cond) {
+ ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
+ ASSERT(!dstL.is(dstH) && !dstH.is(src1) && !src1.is(dstL));
+ emit(cond | B23 | B22 | A | s | dstH.code()*B16 | dstL.code()*B12 |
+ src2.code()*B8 | B7 | B4 | src1.code());
+}
+
+
+void Assembler::smull(Register dstL,
+ Register dstH,
+ Register src1,
+ Register src2,
+ SBit s,
+ Condition cond) {
+ ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
+ ASSERT(!dstL.is(dstH) && !dstH.is(src1) && !src1.is(dstL));
+ emit(cond | B23 | B22 | s | dstH.code()*B16 | dstL.code()*B12 |
+ src2.code()*B8 | B7 | B4 | src1.code());
+}
+
+
+void Assembler::umlal(Register dstL,
+ Register dstH,
+ Register src1,
+ Register src2,
+ SBit s,
+ Condition cond) {
+ ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
+ ASSERT(!dstL.is(dstH) && !dstH.is(src1) && !src1.is(dstL));
+ emit(cond | B23 | A | s | dstH.code()*B16 | dstL.code()*B12 |
+ src2.code()*B8 | B7 | B4 | src1.code());
+}
+
+
+void Assembler::umull(Register dstL,
+ Register dstH,
+ Register src1,
+ Register src2,
+ SBit s,
+ Condition cond) {
+ ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
+ ASSERT(!dstL.is(dstH) && !dstH.is(src1) && !src1.is(dstL));
+ emit(cond | B23 | s | dstH.code()*B16 | dstL.code()*B12 |
+ src2.code()*B8 | B7 | B4 | src1.code());
+}
+
+
+// Miscellaneous arithmetic instructions
+void Assembler::clz(Register dst, Register src, Condition cond) {
+ // v5 and above.
+ ASSERT(!dst.is(pc) && !src.is(pc));
+ emit(cond | B24 | B22 | B21 | 15*B16 | dst.code()*B12 |
+ 15*B8 | B4 | src.code());
+}
+
+
+// Status register access instructions
+void Assembler::mrs(Register dst, SRegister s, Condition cond) {
+ ASSERT(!dst.is(pc));
+ emit(cond | B24 | s | 15*B16 | dst.code()*B12);
+}
+
+
+void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
+ Condition cond) {
+ ASSERT(fields >= B16 && fields < B20); // at least one field set
+ Instr instr;
+ if (!src.rm_.is_valid()) {
+ // immediate
+ uint32_t rotate_imm;
+ uint32_t immed_8;
+ if ((src.rmode_ != no_reloc && src.rmode_ != external_reference) ||
+ !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
+ // immediate operand cannot be encoded, load it first to register ip
+ RecordRelocInfo(src.rmode_, src.imm32_);
+ ldr(ip, MemOperand(pc, 0), cond);
+ msr(fields, Operand(ip), cond);
+ return;
+ }
+ instr = I | rotate_imm*B8 | immed_8;
+ } else {
+ ASSERT(!src.rs_.is_valid() && src.shift_imm_ == 0); // only rm allowed
+ instr = src.rm_.code();
+ }
+ emit(cond | instr | B24 | B21 | fields | 15*B12);
+}
+
+
+// Load/Store instructions
+void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
+ addrmod2(cond | B26 | L, dst, src);
+}
+
+
+void Assembler::str(Register src, const MemOperand& dst, Condition cond) {
+ addrmod2(cond | B26, src, dst);
+}
+
+
+void Assembler::ldrb(Register dst, const MemOperand& src, Condition cond) {
+ addrmod2(cond | B26 | B | L, dst, src);
+}
+
+
+void Assembler::strb(Register src, const MemOperand& dst, Condition cond) {
+ addrmod2(cond | B26 | B, src, dst);
+}
+
+
+void Assembler::ldrh(Register dst, const MemOperand& src, Condition cond) {
+ addrmod3(cond | L | B7 | H | B4, dst, src);
+}
+
+
+void Assembler::strh(Register src, const MemOperand& dst, Condition cond) {
+ addrmod3(cond | B7 | H | B4, src, dst);
+}
+
+
+void Assembler::ldrsb(Register dst, const MemOperand& src, Condition cond) {
+ addrmod3(cond | L | B7 | S6 | B4, dst, src);
+}
+
+
+void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) {
+ addrmod3(cond | L | B7 | S6 | H | B4, dst, src);
+}
+
+
+// Load/Store multiple instructions
+void Assembler::ldm(BlockAddrMode am,
+ Register base,
+ RegList dst,
+ Condition cond) {
+ // ABI stack constraint: ldmxx base, {..sp..} base != sp is not restartable
+ ASSERT(base.is(sp) || (dst & sp.bit()) == 0);
+
+ addrmod4(cond | B27 | am | L, base, dst);
+
+ // emit the constant pool after a function return implemented by ldm ..{..pc}
+ if (cond == al && (dst & pc.bit()) != 0) {
+ // There is a slight chance that the ldm instruction was actually a call,
+ // in which case it would be wrong to return into the constant pool; we
+ // recognize this case by checking if the emission of the pool was blocked
+ // at the pc of the ldm instruction by a mov lr, pc instruction; if this is
+ // the case, we emit a jump over the pool.
+ CheckConstPool(true, no_const_pool_before_ == pc_offset() - kInstrSize);
+ }
+}
+
+
+void Assembler::stm(BlockAddrMode am,
+ Register base,
+ RegList src,
+ Condition cond) {
+ addrmod4(cond | B27 | am, base, src);
+}
+
+
+// Semaphore instructions
+void Assembler::swp(Register dst, Register src, Register base, Condition cond) {
+ ASSERT(!dst.is(pc) && !src.is(pc) && !base.is(pc));
+ ASSERT(!dst.is(base) && !src.is(base));
+ emit(cond | P | base.code()*B16 | dst.code()*B12 |
+ B7 | B4 | src.code());
+}
+
+
+void Assembler::swpb(Register dst,
+ Register src,
+ Register base,
+ Condition cond) {
+ ASSERT(!dst.is(pc) && !src.is(pc) && !base.is(pc));
+ ASSERT(!dst.is(base) && !src.is(base));
+ emit(cond | P | B | base.code()*B16 | dst.code()*B12 |
+ B7 | B4 | src.code());
+}
+
+
+// Exception-generating instructions and debugging support
+void Assembler::stop(const char* msg) {
+ emit(15 << 28 | ((intptr_t) msg));
+}
+
+
+void Assembler::bkpt(uint32_t imm16) { // v5 and above
+ ASSERT(is_uint16(imm16));
+ emit(al | B24 | B21 | (imm16 >> 4)*B8 | 7*B4 | (imm16 & 0xf));
+}
+
+
+void Assembler::swi(uint32_t imm24, Condition cond) {
+ ASSERT(is_uint24(imm24));
+ emit(cond | 15*B24 | imm24);
+}
+
+
+// Coprocessor instructions
+void Assembler::cdp(Coprocessor coproc,
+ int opcode_1,
+ CRegister crd,
+ CRegister crn,
+ CRegister crm,
+ int opcode_2,
+ Condition cond) {
+ ASSERT(is_uint4(opcode_1) && is_uint3(opcode_2));
+ emit(cond | B27 | B26 | B25 | (opcode_1 & 15)*B20 | crn.code()*B16 |
+ crd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | crm.code());
+}
+
+
+void Assembler::cdp2(Coprocessor coproc,
+ int opcode_1,
+ CRegister crd,
+ CRegister crn,
+ CRegister crm,
+ int opcode_2) { // v5 and above
+ cdp(coproc, opcode_1, crd, crn, crm, opcode_2, static_cast<Condition>(nv));
+}
+
+
+void Assembler::mcr(Coprocessor coproc,
+ int opcode_1,
+ Register rd,
+ CRegister crn,
+ CRegister crm,
+ int opcode_2,
+ Condition cond) {
+ ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
+ emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | crn.code()*B16 |
+ rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
+}
+
+
+void Assembler::mcr2(Coprocessor coproc,
+ int opcode_1,
+ Register rd,
+ CRegister crn,
+ CRegister crm,
+ int opcode_2) { // v5 and above
+ mcr(coproc, opcode_1, rd, crn, crm, opcode_2, static_cast<Condition>(nv));
+}
+
+
+void Assembler::mrc(Coprocessor coproc,
+ int opcode_1,
+ Register rd,
+ CRegister crn,
+ CRegister crm,
+ int opcode_2,
+ Condition cond) {
+ ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
+ emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | L | crn.code()*B16 |
+ rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
+}
+
+
+void Assembler::mrc2(Coprocessor coproc,
+ int opcode_1,
+ Register rd,
+ CRegister crn,
+ CRegister crm,
+ int opcode_2) { // v5 and above
+ mrc(coproc, opcode_1, rd, crn, crm, opcode_2, static_cast<Condition>(nv));
+}
+
+
+void Assembler::ldc(Coprocessor coproc,
+ CRegister crd,
+ const MemOperand& src,
+ LFlag l,
+ Condition cond) {
+ addrmod5(cond | B27 | B26 | l | L | coproc*B8, crd, src);
+}
+
+
+void Assembler::ldc(Coprocessor coproc,
+ CRegister crd,
+ Register rn,
+ int option,
+ LFlag l,
+ Condition cond) {
+ // unindexed addressing
+ ASSERT(is_uint8(option));
+ emit(cond | B27 | B26 | U | l | L | rn.code()*B16 | crd.code()*B12 |
+ coproc*B8 | (option & 255));
+}
+
+
+void Assembler::ldc2(Coprocessor coproc,
+ CRegister crd,
+ const MemOperand& src,
+ LFlag l) { // v5 and above
+ ldc(coproc, crd, src, l, static_cast<Condition>(nv));
+}
+
+
+void Assembler::ldc2(Coprocessor coproc,
+ CRegister crd,
+ Register rn,
+ int option,
+ LFlag l) { // v5 and above
+ ldc(coproc, crd, rn, option, l, static_cast<Condition>(nv));
+}
+
+
+void Assembler::stc(Coprocessor coproc,
+ CRegister crd,
+ const MemOperand& dst,
+ LFlag l,
+ Condition cond) {
+ addrmod5(cond | B27 | B26 | l | coproc*B8, crd, dst);
+}
+
+
+void Assembler::stc(Coprocessor coproc,
+ CRegister crd,
+ Register rn,
+ int option,
+ LFlag l,
+ Condition cond) {
+ // unindexed addressing
+ ASSERT(is_uint8(option));
+ emit(cond | B27 | B26 | U | l | rn.code()*B16 | crd.code()*B12 |
+ coproc*B8 | (option & 255));
+}
+
+
+void Assembler::stc2(Coprocessor coproc,
+                     CRegister crd,
+ const MemOperand& dst,
+ LFlag l) { // v5 and above
+ stc(coproc, crd, dst, l, static_cast<Condition>(nv));
+}
+
+
+void Assembler::stc2(Coprocessor coproc,
+ CRegister crd,
+ Register rn,
+ int option,
+ LFlag l) { // v5 and above
+ stc(coproc, crd, rn, option, l, static_cast<Condition>(nv));
+}
+
+
+// Pseudo instructions
+void Assembler::lea(Register dst,
+ const MemOperand& x,
+ SBit s,
+ Condition cond) {
+ int am = x.am_;
+ if (!x.rm_.is_valid()) {
+ // immediate offset
+ if ((am & P) == 0) // post indexing
+ mov(dst, Operand(x.rn_), s, cond);
+ else if ((am & U) == 0) // negative indexing
+ sub(dst, x.rn_, Operand(x.offset_), s, cond);
+ else
+ add(dst, x.rn_, Operand(x.offset_), s, cond);
+ } else {
+    // Register offset (shift_imm_ and shift_op_ are 0) or scaled
+    // register offset; the constructors make sure that both shift_imm_
+    // and shift_op_ are initialized.
+ ASSERT(!x.rm_.is(pc));
+ if ((am & P) == 0) // post indexing
+ mov(dst, Operand(x.rn_), s, cond);
+ else if ((am & U) == 0) // negative indexing
+ sub(dst, x.rn_, Operand(x.rm_, x.shift_op_, x.shift_imm_), s, cond);
+ else
+ add(dst, x.rn_, Operand(x.rm_, x.shift_op_, x.shift_imm_), s, cond);
+ }
+}
+
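+// Illustrative sketch (not part of the original source) of how lea() above
+// resolves a MemOperand into plain arithmetic on the base register. "masm"
+// stands for some Assembler instance; registers and offsets are arbitrary.
+//
+//   masm.lea(r0, MemOperand(fp, 8));             // emits add r0, fp, #8
+//   masm.lea(r0, MemOperand(fp, 8, NegOffset));  // emits sub r0, fp, #8
+//   masm.lea(r0, MemOperand(fp, 8, PostIndex));  // emits mov r0, fp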
+
+// Debugging
+void Assembler::RecordComment(const char* msg) {
+ if (FLAG_debug_code) {
+ CheckBuffer();
+ RecordRelocInfo(comment, reinterpret_cast<intptr_t>(msg));
+ }
+}
+
+
+void Assembler::RecordPosition(int pos) {
+ if (pos == kNoPosition) return;
+  ASSERT(pos >= 0);
+ if (pos == last_position_) return;
+ CheckBuffer();
+ RecordRelocInfo(position, pos);
+ last_position_ = pos;
+ last_position_is_statement_ = false;
+}
+
+
+void Assembler::RecordStatementPosition(int pos) {
+ if (pos == last_position_) return;
+ CheckBuffer();
+ RecordRelocInfo(statement_position, pos);
+ last_position_ = pos;
+ last_position_is_statement_ = true;
+}
+
+
+void Assembler::GrowBuffer() {
+ if (!own_buffer_) FATAL("external code buffer is too small");
+
+ // compute new buffer size
+ CodeDesc desc; // the new buffer
+ if (buffer_size_ < 4*KB) {
+ desc.buffer_size = 4*KB;
+ } else if (buffer_size_ < 1*MB) {
+ desc.buffer_size = 2*buffer_size_;
+ } else {
+ desc.buffer_size = buffer_size_ + 1*MB;
+ }
+ CHECK_GT(desc.buffer_size, 0); // no overflow
+
+ // setup new buffer
+ desc.buffer = NewArray<byte>(desc.buffer_size);
+
+ desc.instr_size = pc_offset();
+ desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+
+ // copy the data
+ int pc_delta = desc.buffer - buffer_;
+ int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
+ memmove(desc.buffer, buffer_, desc.instr_size);
+ memmove(reloc_info_writer.pos() + rc_delta,
+ reloc_info_writer.pos(), desc.reloc_size);
+
+ // switch buffers
+ DeleteArray(buffer_);
+ buffer_ = desc.buffer;
+ buffer_size_ = desc.buffer_size;
+ pc_ += pc_delta;
+ reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
+ reloc_info_writer.last_pc() + pc_delta);
+
+  // none of our relocation types are pc-relative pointing outside the code
+  // buffer, nor pc-absolute pointing inside the code buffer, so there is no
+  // need to relocate any emitted relocation entries
+
+ // relocate pending relocation entries
+ for (int i = 0; i < num_prinfo_; i++) {
+ RelocInfo& rinfo = prinfo_[i];
+ ASSERT(rinfo.rmode() != comment && rinfo.rmode() != position);
+ rinfo.set_pc(rinfo.pc() + pc_delta);
+ }
+}
+
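+// Note on the sizing policy above (worked numbers, for illustration only):
+// a buffer that starts at the minimal size grows as 4KB, 8KB, 16KB, ...,
+// 512KB, 1MB, and from 1MB onwards in 1MB steps (2MB, 3MB, ...).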
+
+void Assembler::RecordRelocInfo(RelocMode rmode, intptr_t data) {
+ RelocInfo rinfo(pc_, rmode, data); // we do not try to reuse pool constants
+ if (rmode >= comment && rmode <= statement_position) {
+ // adjust code for new modes
+ ASSERT(is_comment(rmode) || is_position(rmode));
+ // these modes do not need an entry in the constant pool
+ } else {
+ ASSERT(num_prinfo_ < kMaxNumPRInfo);
+ prinfo_[num_prinfo_++] = rinfo;
+ // Make sure the constant pool is not emitted in place of the next
+ // instruction for which we just recorded relocation info
+ BlockConstPoolBefore(pc_offset() + kInstrSize);
+ }
+ if (rinfo.rmode() != no_reloc) {
+ ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
+ reloc_info_writer.Write(&rinfo);
+ }
+}
+
+
+void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
+ // Calculate the offset of the next check. It will be overwritten
+ // when a const pool is generated or when const pools are being
+ // blocked for a specific range.
+ next_buffer_check_ = pc_offset() + kCheckConstInterval;
+
+ // There is nothing to do if there are no pending relocation info entries
+ if (num_prinfo_ == 0) return;
+
+ // We emit a constant pool at regular intervals of about kDistBetweenPools
+ // or when requested by parameter force_emit (e.g. after each function).
+ // We prefer not to emit a jump unless the max distance is reached or if we
+ // are running low on slots, which can happen if a lot of constants are being
+ // emitted (e.g. --debug-code and many static references).
+ int dist = pc_offset() - last_const_pool_end_;
+ if (!force_emit && dist < kMaxDistBetweenPools &&
+ (require_jump || dist < kDistBetweenPools) &&
+ // TODO(1236125): Cleanup the "magic" number below. We know that
+ // the code generation will test every kCheckConstIntervalInst.
+ // Thus we are safe as long as we generate less than 7 constant
+ // entries per instruction.
+ (num_prinfo_ < (kMaxNumPRInfo - (7 * kCheckConstIntervalInst)))) {
+ return;
+ }
+
+ // If we did not return by now, we need to emit the constant pool soon.
+
+ // However, some small sequences of instructions must not be broken up by the
+ // insertion of a constant pool; such sequences are protected by setting
+ // no_const_pool_before_, which is checked here. Also, recursive calls to
+ // CheckConstPool are blocked by no_const_pool_before_.
+ if (pc_offset() < no_const_pool_before_) {
+ // Emission is currently blocked; make sure we try again as soon as possible
+ next_buffer_check_ = no_const_pool_before_;
+
+ // Something is wrong if emission is forced and blocked at the same time
+ ASSERT(!force_emit);
+ return;
+ }
+
+ int jump_instr = require_jump ? kInstrSize : 0;
+
+  // Check that the code buffer is large enough before emitting the constant
+  // pool and relocation information (this includes the jump over the pool and
+  // the constant pool marker).
+ int max_needed_space =
+ jump_instr + kInstrSize + num_prinfo_*(kInstrSize + kMaxRelocSize);
+ while (buffer_space() <= (max_needed_space + kGap)) GrowBuffer();
+
+ // Block recursive calls to CheckConstPool
+ BlockConstPoolBefore(pc_offset() + jump_instr + kInstrSize +
+ num_prinfo_*kInstrSize);
+ // Don't bother to check for the emit calls below.
+ next_buffer_check_ = no_const_pool_before_;
+
+ // Emit jump over constant pool if necessary
+ Label after_pool;
+ if (require_jump) b(&after_pool);
+
+ RecordComment("[ Constant Pool");
+
+ // Put down constant pool marker
+ // "Undefined instruction" as specified by A3.1 Instruction set encoding
+ emit(0x03000000 | num_prinfo_);
+
+ // Emit constant pool entries
+ for (int i = 0; i < num_prinfo_; i++) {
+ RelocInfo& rinfo = prinfo_[i];
+ ASSERT(rinfo.rmode() != comment && rinfo.rmode() != position &&
+ rinfo.rmode() != statement_position);
+ Instr instr = instr_at(rinfo.pc());
+ // Instruction to patch must be a ldr/str [pc, #offset]
+ // P and U set, B and W clear, Rn == pc, offset12 still 0
+ ASSERT((instr & (7*B25 | P | U | B | W | 15*B16 | Off12Mask)) ==
+ (2*B25 | P | U | pc.code()*B16));
+ int delta = pc_ - rinfo.pc() - 8;
+ ASSERT(delta >= -4); // instr could be ldr pc, [pc, #-4] followed by targ32
+ if (delta < 0) {
+ instr &= ~U;
+ delta = -delta;
+ }
+ ASSERT(is_uint12(delta));
+ instr_at_put(rinfo.pc(), instr + delta);
+ emit(rinfo.data());
+ }
+ num_prinfo_ = 0;
+ last_const_pool_end_ = pc_offset();
+
+ RecordComment("]");
+
+ if (after_pool.is_linked()) {
+ bind(&after_pool);
+ }
+
+ // Since a constant pool was just emitted, move the check offset forward by
+ // the standard interval.
+ next_buffer_check_ = pc_offset() + kCheckConstInterval;
+}
+
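+// For reference, an illustrative sketch (not from the original source) of the
+// instruction stream produced above when require_jump is true and two
+// constants are pending:
+//
+//   b after_pool        ; jump over the pool
+//   <marker>            ; 0x03000002: "undefined instruction" marker + count
+//   <constant 0>        ; data word, reached by a patched ldr rX, [pc, #off]
+//   <constant 1>        ; data word, reached by a patched ldr rX, [pc, #off]
+// after_pool:
+//   ...                 ; normal code resumes here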
+
+} } // namespace v8::internal
diff --git a/src/assembler-arm.h b/src/assembler-arm.h
new file mode 100644
index 0000000..46580e2
--- /dev/null
+++ b/src/assembler-arm.h
@@ -0,0 +1,730 @@
+// Copyright 2007-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// A light-weight ARM Assembler
+// Generates user mode instructions for the ARM architecture up to version 5
+
+#ifndef V8_ASSEMBLER_ARM_H_
+#define V8_ASSEMBLER_ARM_H_
+
+#include "assembler.h"
+
+namespace v8 { namespace internal {
+
+// CPU Registers.
+//
+// 1) We would prefer to use an enum, but enum values are assignment-
+// compatible with int, which has caused code-generation bugs.
+//
+// 2) We would prefer to use a class instead of a struct but we don't like
+// the register initialization to depend on the particular initialization
+// order (which appears to be different on OS X, Linux, and Windows for the
+// installed versions of C++ we tried). Using a struct permits C-style
+// "initialization". Also, the Register objects cannot be const as this
+// forces initialization stubs in MSVC, making us dependent on initialization
+// order.
+//
+// 3) By not using an enum, we are possibly preventing the compiler from
+// doing certain constant folds, which may significantly reduce the
+// code generated for some assembly instructions (because they boil down
+// to a few constants). If this is a problem, we could change the code
+// such that we use an enum in optimized mode, and the struct in debug
+// mode. This way we get the compile-time error checking in debug mode
+// and best performance in optimized code.
+//
+// Core register
+struct Register {
+ bool is_valid() const { return 0 <= code_ && code_ < 16; }
+ bool is(Register reg) const { return code_ == reg.code_; }
+ int code() const {
+ ASSERT(is_valid());
+ return code_;
+ }
+ int bit() const {
+ ASSERT(is_valid());
+ return 1 << code_;
+ }
+
+ // (unfortunately we can't make this private in a struct)
+ int code_;
+};
+
+
+extern Register no_reg;
+extern Register r0;
+extern Register r1;
+extern Register r2;
+extern Register r3;
+extern Register r4;
+extern Register r5;
+extern Register r6;
+extern Register r7;
+extern Register r8;
+extern Register r9;
+extern Register r10;
+extern Register fp;
+extern Register ip;
+extern Register sp;
+extern Register lr;
+extern Register pc;
+
+
+// Coprocessor register
+struct CRegister {
+ bool is_valid() const { return 0 <= code_ && code_ < 16; }
+ bool is(CRegister creg) const { return code_ == creg.code_; }
+ int code() const {
+ ASSERT(is_valid());
+ return code_;
+ }
+ int bit() const {
+ ASSERT(is_valid());
+ return 1 << code_;
+ }
+
+ // (unfortunately we can't make this private in a struct)
+ int code_;
+};
+
+
+extern CRegister no_creg;
+extern CRegister cr0;
+extern CRegister cr1;
+extern CRegister cr2;
+extern CRegister cr3;
+extern CRegister cr4;
+extern CRegister cr5;
+extern CRegister cr6;
+extern CRegister cr7;
+extern CRegister cr8;
+extern CRegister cr9;
+extern CRegister cr10;
+extern CRegister cr11;
+extern CRegister cr12;
+extern CRegister cr13;
+extern CRegister cr14;
+extern CRegister cr15;
+
+
+// Coprocessor number
+enum Coprocessor {
+ p0 = 0,
+ p1 = 1,
+ p2 = 2,
+ p3 = 3,
+ p4 = 4,
+ p5 = 5,
+ p6 = 6,
+ p7 = 7,
+ p8 = 8,
+ p9 = 9,
+ p10 = 10,
+ p11 = 11,
+ p12 = 12,
+ p13 = 13,
+ p14 = 14,
+ p15 = 15
+};
+
+
+// Condition field in instructions
+enum Condition {
+ eq = 0 << 28,
+ ne = 1 << 28,
+ cs = 2 << 28,
+ hs = 2 << 28,
+ cc = 3 << 28,
+ lo = 3 << 28,
+ mi = 4 << 28,
+ pl = 5 << 28,
+ vs = 6 << 28,
+ vc = 7 << 28,
+ hi = 8 << 28,
+ ls = 9 << 28,
+ ge = 10 << 28,
+ lt = 11 << 28,
+ gt = 12 << 28,
+ le = 13 << 28,
+ al = 14 << 28
+};
+
+
+// Returns the equivalent of !cc.
+INLINE(Condition NegateCondition(Condition cc));
+
+
+// The pc store offset may be 8 or 12 depending on the processor implementation.
+int PcStoreOffset();
+
+
+// -----------------------------------------------------------------------------
+// Addressing modes and instruction variants
+
+// Shifter operand shift operation
+enum ShiftOp {
+ LSL = 0 << 5,
+ LSR = 1 << 5,
+ ASR = 2 << 5,
+ ROR = 3 << 5,
+ RRX = -1
+};
+
+
+// Condition code updating mode
+enum SBit {
+ SetCC = 1 << 20, // set condition code
+ LeaveCC = 0 << 20 // leave condition code unchanged
+};
+
+
+// Status register selection
+enum SRegister {
+ CPSR = 0 << 22,
+ SPSR = 1 << 22
+};
+
+
+// Status register fields
+enum SRegisterField {
+ CPSR_c = CPSR | 1 << 16,
+ CPSR_x = CPSR | 1 << 17,
+ CPSR_s = CPSR | 1 << 18,
+ CPSR_f = CPSR | 1 << 19,
+ SPSR_c = SPSR | 1 << 16,
+ SPSR_x = SPSR | 1 << 17,
+ SPSR_s = SPSR | 1 << 18,
+ SPSR_f = SPSR | 1 << 19
+};
+
+// Status register field mask (or'ed SRegisterField enum values)
+typedef uint32_t SRegisterFieldMask;
+
+
+// Memory operand addressing mode
+enum AddrMode {
+ // bit encoding P U W
+ Offset = (8|4|0) << 21, // offset (without writeback to base)
+ PreIndex = (8|4|1) << 21, // pre-indexed addressing with writeback
+ PostIndex = (0|4|0) << 21, // post-indexed addressing with writeback
+ NegOffset = (8|0|0) << 21, // negative offset (without writeback to base)
+ NegPreIndex = (8|0|1) << 21, // negative pre-indexed with writeback
+ NegPostIndex = (0|0|0) << 21 // negative post-indexed with writeback
+};
+
+
+// Load/store multiple addressing mode
+enum BlockAddrMode {
+ // bit encoding P U W
+ da = (0|0|0) << 21, // decrement after
+ ia = (0|4|0) << 21, // increment after
+ db = (8|0|0) << 21, // decrement before
+ ib = (8|4|0) << 21, // increment before
+ da_w = (0|0|1) << 21, // decrement after with writeback to base
+ ia_w = (0|4|1) << 21, // increment after with writeback to base
+ db_w = (8|0|1) << 21, // decrement before with writeback to base
+ ib_w = (8|4|1) << 21 // increment before with writeback to base
+};
+
+
+// Coprocessor load/store operand size
+enum LFlag {
+ Long = 1 << 22, // long load/store coprocessor
+ Short = 0 << 22 // short load/store coprocessor
+};
+
+
+// -----------------------------------------------------------------------------
+// Machine instruction Operands
+
+// Class Operand represents a shifter operand in data processing instructions
+class Operand BASE_EMBEDDED {
+ public:
+ // immediate
+ INLINE(explicit Operand(int32_t immediate, RelocMode rmode = no_reloc));
+ INLINE(explicit Operand(const ExternalReference& f));
+ INLINE(explicit Operand(const char* s));
+ INLINE(explicit Operand(Object** opp));
+ INLINE(explicit Operand(Context** cpp));
+ explicit Operand(Handle<Object> handle);
+ INLINE(explicit Operand(Smi* value));
+
+ // rm
+ INLINE(explicit Operand(Register rm));
+
+ // rm <shift_op> shift_imm
+ explicit Operand(Register rm, ShiftOp shift_op, int shift_imm);
+
+ // rm <shift_op> rs
+ explicit Operand(Register rm, ShiftOp shift_op, Register rs);
+
+ private:
+ Register rm_;
+ Register rs_;
+ ShiftOp shift_op_;
+ int shift_imm_; // valid if rm_ != no_reg && rs_ == no_reg
+ int32_t imm32_; // valid if rm_ == no_reg
+ RelocMode rmode_;
+
+ friend class Assembler;
+};
+
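+// Illustrative uses of the Operand constructors above (not part of the
+// original comment; "masm" is assumed to be an Assembler, declared below):
+//
+//   masm.add(r0, r1, Operand(100));         // immediate:  add r0, r1, #100
+//   masm.add(r0, r1, Operand(r2, LSL, 2));  // scaled register:  r2 lsl #2
+//   masm.mov(r0, Operand(r2, LSR, r3));     // register-specified shift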
+
+// Class MemOperand represents a memory operand in load and store instructions
+class MemOperand BASE_EMBEDDED {
+ public:
+ // [rn +/- offset] Offset/NegOffset
+ // [rn +/- offset]! PreIndex/NegPreIndex
+ // [rn], +/- offset PostIndex/NegPostIndex
+  // offset is any signed 32-bit value; the offset is first loaded into the ip
+  // register if it does not fit the addressing mode (12-bit unsigned magnitude
+  // plus a sign bit)
+ explicit MemOperand(Register rn, int32_t offset = 0, AddrMode am = Offset);
+
+ // [rn +/- rm] Offset/NegOffset
+ // [rn +/- rm]! PreIndex/NegPreIndex
+ // [rn], +/- rm PostIndex/NegPostIndex
+ explicit MemOperand(Register rn, Register rm, AddrMode am = Offset);
+
+ // [rn +/- rm <shift_op> shift_imm] Offset/NegOffset
+ // [rn +/- rm <shift_op> shift_imm]! PreIndex/NegPreIndex
+ // [rn], +/- rm <shift_op> shift_imm PostIndex/NegPostIndex
+ explicit MemOperand(Register rn, Register rm,
+ ShiftOp shift_op, int shift_imm, AddrMode am = Offset);
+
+ private:
+ Register rn_; // base
+ Register rm_; // register offset
+ int32_t offset_; // valid if rm_ == no_reg
+ ShiftOp shift_op_;
+ int shift_imm_; // valid if rm_ != no_reg && rs_ == no_reg
+ AddrMode am_; // bits P, U, and W
+
+ friend class Assembler;
+};
+
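+// Illustrative uses of the three MemOperand forms above (not part of the
+// original comment; "masm" is assumed to be an Assembler, declared below):
+//
+//   masm.ldr(r0, MemOperand(fp, 8));               // ldr r0, [fp, #+8]
+//   masm.str(r0, MemOperand(sp, 4, NegPreIndex));  // str r0, [sp, #-4]!
+//   masm.ldr(r0, MemOperand(r1, r2, LSL, 2));      // ldr r0, [r1, +r2, lsl #2]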
+
+typedef int32_t Instr;
+
+
+class Assembler : public Malloced {
+ public:
+ // Create an assembler. Instructions and relocation information are emitted
+ // into a buffer, with the instructions starting from the beginning and the
+ // relocation information starting from the end of the buffer. See CodeDesc
+ // for a detailed comment on the layout (globals.h).
+ //
+ // If the provided buffer is NULL, the assembler allocates and grows its own
+ // buffer, and buffer_size determines the initial buffer size. The buffer is
+ // owned by the assembler and deallocated upon destruction of the assembler.
+ //
+ // If the provided buffer is not NULL, the assembler uses the provided buffer
+ // for code generation and assumes its size to be buffer_size. If the buffer
+ // is too small, a fatal error occurs. No deallocation of the buffer is done
+ // upon destruction of the assembler.
+ Assembler(void* buffer, int buffer_size);
+ ~Assembler();
+
+ // GetCode emits any pending (non-emitted) code and fills the descriptor
+ // desc. GetCode() is idempotent; it returns the same result if no other
+  // Assembler functions are invoked in between GetCode() calls.
+ void GetCode(CodeDesc* desc);
+
+ // Label operations & relative jumps (PPUM Appendix D)
+ //
+ // Takes a branch opcode (cc) and a label (L) and generates
+ // either a backward branch or a forward branch and links it
+ // to the label fixup chain. Usage:
+ //
+ // Label L; // unbound label
+ // j(cc, &L); // forward branch to unbound label
+ // bind(&L); // bind label to the current pc
+ // j(cc, &L); // backward branch to bound label
+ // bind(&L); // illegal: a label may be bound only once
+ //
+ // Note: The same Label can be used for forward and backward branches
+ // but it may be bound only once.
+
+ void bind(Label* L); // binds an unbound label L to the current code position
+
+ // Returns the branch offset to the given label from the current code position
+ // Links the label to the current position if it is still unbound
+ // Manages the jump elimination optimization if necessary
+ int branch_offset(Label* L, Condition cond);
+
+ // Return the address in the constant pool of the code target address used by
+ // the branch/call instruction at pc.
+ INLINE(static Address target_address_address_at(Address pc));
+
+ // Read/Modify the code target address in the branch/call instruction at pc.
+ INLINE(static Address target_address_at(Address pc));
+ INLINE(static void set_target_address_at(Address pc, Address target));
+
+ // Distance between the instruction referring to the address of the call
+ // target (ldr pc, [target addr in const pool]) and the return address
+ static const int kTargetAddrToReturnAddrDist = sizeof(Instr);
+
+
+ // ---------------------------------------------------------------------------
+ // Code generation
+
+ // Insert the smallest number of nop instructions
+ // possible to align the pc offset to a multiple
+ // of m. m must be a power of 2 (>= 4).
+ void Align(int m);
+
+ // Branch instructions
+ void b(int branch_offset, Condition cond = al);
+ void bl(int branch_offset, Condition cond = al);
+ void blx(int branch_offset); // v5 and above
+ void blx(Register target, Condition cond = al); // v5 and above
+ void bx(Register target, Condition cond = al); // v5 and above, plus v4t
+
+ // Convenience branch instructions using labels
+ void b(Label* L, Condition cond = al) { b(branch_offset(L, cond), cond); }
+ void b(Condition cond, Label* L) { b(branch_offset(L, cond), cond); }
+ void bl(Label* L, Condition cond = al) { bl(branch_offset(L, cond), cond); }
+ void bl(Condition cond, Label* L) { bl(branch_offset(L, cond), cond); }
+ void blx(Label* L) { blx(branch_offset(L, al)); } // v5 and above
+
+ // Data-processing instructions
+ void and_(Register dst, Register src1, const Operand& src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void eor(Register dst, Register src1, const Operand& src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void sub(Register dst, Register src1, const Operand& src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void rsb(Register dst, Register src1, const Operand& src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void add(Register dst, Register src1, const Operand& src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void adc(Register dst, Register src1, const Operand& src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void sbc(Register dst, Register src1, const Operand& src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void rsc(Register dst, Register src1, const Operand& src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void tst(Register src1, const Operand& src2, Condition cond = al);
+
+ void teq(Register src1, const Operand& src2, Condition cond = al);
+
+ void cmp(Register src1, const Operand& src2, Condition cond = al);
+
+ void cmn(Register src1, const Operand& src2, Condition cond = al);
+
+ void orr(Register dst, Register src1, const Operand& src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void mov(Register dst, const Operand& src,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void bic(Register dst, Register src1, const Operand& src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void mvn(Register dst, const Operand& src,
+ SBit s = LeaveCC, Condition cond = al);
+
+ // Multiply instructions
+
+ void mla(Register dst, Register src1, Register src2, Register srcA,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void mul(Register dst, Register src1, Register src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void smlal(Register dstL, Register dstH, Register src1, Register src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void smull(Register dstL, Register dstH, Register src1, Register src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void umlal(Register dstL, Register dstH, Register src1, Register src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void umull(Register dstL, Register dstH, Register src1, Register src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ // Miscellaneous arithmetic instructions
+
+ void clz(Register dst, Register src, Condition cond = al); // v5 and above
+
+ // Status register access instructions
+
+ void mrs(Register dst, SRegister s, Condition cond = al);
+ void msr(SRegisterFieldMask fields, const Operand& src, Condition cond = al);
+
+ // Load/Store instructions
+ void ldr(Register dst, const MemOperand& src, Condition cond = al);
+ void str(Register src, const MemOperand& dst, Condition cond = al);
+ void ldrb(Register dst, const MemOperand& src, Condition cond = al);
+ void strb(Register src, const MemOperand& dst, Condition cond = al);
+ void ldrh(Register dst, const MemOperand& src, Condition cond = al);
+ void strh(Register src, const MemOperand& dst, Condition cond = al);
+ void ldrsb(Register dst, const MemOperand& src, Condition cond = al);
+ void ldrsh(Register dst, const MemOperand& src, Condition cond = al);
+
+ // Load/Store multiple instructions
+ void ldm(BlockAddrMode am, Register base, RegList dst, Condition cond = al);
+ void stm(BlockAddrMode am, Register base, RegList src, Condition cond = al);
+
+ // Semaphore instructions
+ void swp(Register dst, Register src, Register base, Condition cond = al);
+ void swpb(Register dst, Register src, Register base, Condition cond = al);
+
+ // Exception-generating instructions and debugging support
+ void stop(const char* msg);
+ void untested(const char* msg);
+ void unimplemented(const char* msg);
+ void unreachable(const char* msg);
+
+ void bkpt(uint32_t imm16); // v5 and above
+ void swi(uint32_t imm24, Condition cond = al);
+ // To generate a breakpoint on ARM Linux you can use swi(0x9f0001).
+ // For some reason stepi or cont will not work in gdb until you have done:
+ // set $pc = $pc + 4
+ inline void int3() { swi(0x9f0001); }
+
+ // Coprocessor instructions
+
+ void cdp(Coprocessor coproc, int opcode_1,
+ CRegister crd, CRegister crn, CRegister crm,
+ int opcode_2, Condition cond = al);
+
+ void cdp2(Coprocessor coproc, int opcode_1,
+ CRegister crd, CRegister crn, CRegister crm,
+ int opcode_2); // v5 and above
+
+ void mcr(Coprocessor coproc, int opcode_1,
+ Register rd, CRegister crn, CRegister crm,
+ int opcode_2 = 0, Condition cond = al);
+
+ void mcr2(Coprocessor coproc, int opcode_1,
+ Register rd, CRegister crn, CRegister crm,
+ int opcode_2 = 0); // v5 and above
+
+ void mrc(Coprocessor coproc, int opcode_1,
+ Register rd, CRegister crn, CRegister crm,
+ int opcode_2 = 0, Condition cond = al);
+
+ void mrc2(Coprocessor coproc, int opcode_1,
+ Register rd, CRegister crn, CRegister crm,
+ int opcode_2 = 0); // v5 and above
+
+ void ldc(Coprocessor coproc, CRegister crd, const MemOperand& src,
+ LFlag l = Short, Condition cond = al);
+ void ldc(Coprocessor coproc, CRegister crd, Register base, int option,
+ LFlag l = Short, Condition cond = al);
+
+ void ldc2(Coprocessor coproc, CRegister crd, const MemOperand& src,
+ LFlag l = Short); // v5 and above
+ void ldc2(Coprocessor coproc, CRegister crd, Register base, int option,
+ LFlag l = Short); // v5 and above
+
+ void stc(Coprocessor coproc, CRegister crd, const MemOperand& dst,
+ LFlag l = Short, Condition cond = al);
+ void stc(Coprocessor coproc, CRegister crd, Register base, int option,
+ LFlag l = Short, Condition cond = al);
+
+ void stc2(Coprocessor coproc, CRegister crd, const MemOperand& dst,
+ LFlag l = Short); // v5 and above
+ void stc2(Coprocessor coproc, CRegister crd, Register base, int option,
+ LFlag l = Short); // v5 and above
+
+ // Pseudo instructions
+ void nop() { mov(r0, Operand(r0)); }
+
+ void push(Register src, Condition cond = al) {
+ str(src, MemOperand(sp, 4, NegPreIndex), cond);
+ }
+
+ void pop(Register dst, Condition cond = al) {
+ ldr(dst, MemOperand(sp, 4, PostIndex), cond);
+ }
+
+ // Load effective address of memory operand x into register dst
+ void lea(Register dst, const MemOperand& x,
+ SBit s = LeaveCC, Condition cond = al);
+
+ // Jump unconditionally to given label.
+ void jmp(Label* L) { b(L, al); }
+
+
+ // Debugging
+
+ // Record a comment relocation entry that can be used by a disassembler.
+ // Use --debug_code to enable.
+ void RecordComment(const char* msg);
+
+ void RecordPosition(int pos);
+ void RecordStatementPosition(int pos);
+
+ int pc_offset() const { return pc_ - buffer_; }
+ int last_position() const { return last_position_; }
+ bool last_position_is_statement() const {
+ return last_position_is_statement_;
+ }
+
+ protected:
+ int buffer_space() const { return reloc_info_writer.pos() - pc_; }
+
+ // Read/patch instructions
+ Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
+ void instr_at_put(byte* pc, Instr instr) {
+ *reinterpret_cast<Instr*>(pc) = instr;
+ }
+ Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
+ void instr_at_put(int pos, Instr instr) {
+ *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
+ }
+
+ // Decode branch instruction at pos and return branch target pos
+ int target_at(int pos);
+
+ // Patch branch instruction at pos to branch to given branch target pos
+ void target_at_put(int pos, int target_pos);
+
+ private:
+ // Code buffer:
+ // The buffer into which code and relocation info are generated.
+ byte* buffer_;
+ int buffer_size_;
+ // True if the assembler owns the buffer, false if buffer is external.
+ bool own_buffer_;
+
+ // Buffer size and constant pool distance are checked together at regular
+ // intervals of kBufferCheckInterval emitted bytes
+ static const int kBufferCheckInterval = 1*KB/2;
+ int next_buffer_check_; // pc offset of next buffer check
+
+ // Code generation
+ static const int kInstrSize = sizeof(Instr); // signed size
+ // The relocation writer's position is at least kGap bytes below the end of
+ // the generated instructions. This is so that multi-instruction sequences do
+ // not have to check for overflow. The same is true for writes of large
+ // relocation info entries.
+ static const int kGap = 32;
+ byte* pc_; // the program counter; moves forward
+
+ // Constant pool generation
+ // Pools are emitted in the instruction stream, preferably after unconditional
+ // jumps or after returns from functions (in dead code locations).
+ // If a long code sequence does not contain unconditional jumps, it is
+ // necessary to emit the constant pool before the pool gets too far from the
+ // location it is accessed from. In this case, we emit a jump over the emitted
+ // constant pool.
+  // Constants in the pool may be addresses of functions that get relocated;
+  // if so, a relocation info entry is associated with the constant pool entry.
+
+ // Repeated checking whether the constant pool should be emitted is rather
+ // expensive. By default we only check again once a number of instructions
+ // has been generated. That also means that the sizing of the buffers is not
+ // an exact science, and that we rely on some slop to not overrun buffers.
+ static const int kCheckConstIntervalInst = 32;
+ static const int kCheckConstInterval = kCheckConstIntervalInst * kInstrSize;
+
+
+ // Pools are emitted after function return and in dead code at (more or less)
+ // regular intervals of kDistBetweenPools bytes
+ static const int kDistBetweenPools = 1*KB;
+
+ // Constants in pools are accessed via pc relative addressing, which can
+ // reach +/-4KB thereby defining a maximum distance between the instruction
+ // and the accessed constant. We satisfy this constraint by limiting the
+ // distance between pools.
+ static const int kMaxDistBetweenPools = 4*KB - 2*kBufferCheckInterval;
+
+ // Emission of the constant pool may be blocked in some code sequences
+ int no_const_pool_before_; // block emission before this pc offset
+
+ // Keep track of the last emitted pool to guarantee a maximal distance
+ int last_const_pool_end_; // pc offset following the last constant pool
+
+ // Relocation info generation
+ // Each relocation is encoded as a variable size value
+ static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
+ RelocInfoWriter reloc_info_writer;
+ // Relocation info records are also used during code generation as temporary
+ // containers for constants and code target addresses until they are emitted
+ // to the constant pool. These pending relocation info records are temporarily
+ // stored in a separate buffer until a constant pool is emitted.
+ // If every instruction in a long sequence is accessing the pool, we need one
+ // pending relocation entry per instruction.
+ static const int kMaxNumPRInfo = kMaxDistBetweenPools/kInstrSize;
+ RelocInfo prinfo_[kMaxNumPRInfo]; // the buffer of pending relocation info
+ int num_prinfo_; // number of pending reloc info entries in the buffer
+
+ // Jump-to-jump elimination:
+  // The last label to be bound to binding_pos_, if unbound.
+ Label unbound_label_;
+  // The position to which unbound_label_ has to be bound, if present.
+ int binding_pos_;
+ // The position before which jumps cannot be eliminated.
+ int last_bound_pos_;
+
+ // source position information
+ int last_position_;
+ bool last_position_is_statement_;
+
+ // Code emission
+ inline void CheckBuffer();
+ void GrowBuffer();
+ inline void emit(Instr x);
+
+ // Instruction generation
+ void addrmod1(Instr instr, Register rn, Register rd, const Operand& x);
+ void addrmod2(Instr instr, Register rd, const MemOperand& x);
+ void addrmod3(Instr instr, Register rd, const MemOperand& x);
+ void addrmod4(Instr instr, Register rn, RegList rl);
+ void addrmod5(Instr instr, CRegister crd, const MemOperand& x);
+
+ // Labels
+ void print(Label* L);
+ void bind_to(Label* L, int pos);
+ void link_to(Label* L, Label* appendix);
+ void next(Label* L);
+
+ // Record reloc info for current pc_
+ void RecordRelocInfo(RelocMode rmode, intptr_t data = 0);
+
+ // Check if is time to emit a constant pool for pending reloc info entries
+ void CheckConstPool(bool force_emit, bool require_jump);
+
+ // Block the emission of the constant pool before pc_offset
+ void BlockConstPoolBefore(int pc_offset) {
+ if (no_const_pool_before_ < pc_offset) no_const_pool_before_ = pc_offset;
+ }
+};
+
+} } // namespace v8::internal
+
+#endif // V8_ASSEMBLER_ARM_H_
diff --git a/src/assembler-ia32-inl.h b/src/assembler-ia32-inl.h
new file mode 100644
index 0000000..6b71056
--- /dev/null
+++ b/src/assembler-ia32-inl.h
@@ -0,0 +1,242 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license has been
+// modified significantly by Google Inc.
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+
+// A light-weight IA32 Assembler.
+
+#ifndef V8_ASSEMBLER_IA32_INL_H_
+#define V8_ASSEMBLER_IA32_INL_H_
+
+#include "cpu.h"
+
+namespace v8 { namespace internal {
+
+Condition NegateCondition(Condition cc) {
+ return static_cast<Condition>(cc ^ 1);
+}
+
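+// The IA-32 condition codes are laid out so that the low bit flips the sense
+// of a test, hence the xor with 1 above. For example, in the standard Intel
+// encoding "zero" is 0x4 and "not zero" is 0x5, "below" is 0x2 and "above or
+// equal" is 0x3 (names refer to the Intel encoding, not necessarily to the
+// enum spelling used by this assembler).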
+
+// The modes possibly affected by apply must be in kApplyMask.
+void RelocInfo::apply(int delta) {
+ if (rmode_ == runtime_entry || is_code_target(rmode_)) {
+ int32_t* p = reinterpret_cast<int32_t*>(pc_);
+ *p -= delta; // relocate entry
+ } else if (rmode_ == js_return && is_call_instruction()) {
+ // Special handling of js_return when a break point is set (call
+ // instruction has been inserted).
+ int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
+ *p -= delta; // relocate entry
+ }
+}
+
+
+Address RelocInfo::target_address() {
+ ASSERT(is_code_target(rmode_) || rmode_ == runtime_entry);
+ return Assembler::target_address_at(pc_);
+}
+
+
+void RelocInfo::set_target_address(Address target) {
+ ASSERT(is_code_target(rmode_) || rmode_ == runtime_entry);
+ Assembler::set_target_address_at(pc_, target);
+}
+
+
+Object* RelocInfo::target_object() {
+ ASSERT(is_code_target(rmode_) || rmode_ == embedded_object);
+ return *reinterpret_cast<Object**>(pc_);
+}
+
+
+Object** RelocInfo::target_object_address() {
+ ASSERT(is_code_target(rmode_) || rmode_ == embedded_object);
+ return reinterpret_cast<Object**>(pc_);
+}
+
+
+void RelocInfo::set_target_object(Object* target) {
+ ASSERT(is_code_target(rmode_) || rmode_ == embedded_object);
+ *reinterpret_cast<Object**>(pc_) = target;
+}
+
+
+Address* RelocInfo::target_reference_address() {
+ ASSERT(rmode_ == external_reference);
+ return reinterpret_cast<Address*>(pc_);
+}
+
+
+Address RelocInfo::call_address() {
+ ASSERT(is_call_instruction());
+ return Assembler::target_address_at(pc_ + 1);
+}
+
+
+void RelocInfo::set_call_address(Address target) {
+ ASSERT(is_call_instruction());
+ Assembler::set_target_address_at(pc_ + 1, target);
+}
+
+
+Object* RelocInfo::call_object() {
+ ASSERT(is_call_instruction());
+ return *call_object_address();
+}
+
+
+Object** RelocInfo::call_object_address() {
+ ASSERT(is_call_instruction());
+ return reinterpret_cast<Object**>(pc_ + 1);
+}
+
+
+void RelocInfo::set_call_object(Object* target) {
+ ASSERT(is_call_instruction());
+ *call_object_address() = target;
+}
+
+
+bool RelocInfo::is_call_instruction() {
+ return *pc_ == 0xE8;
+}
+
+
+Immediate::Immediate(int x) {
+ x_ = x;
+ rmode_ = no_reloc;
+}
+
+
+Immediate::Immediate(const ExternalReference& ext) {
+ x_ = reinterpret_cast<int32_t>(ext.address());
+ rmode_ = external_reference;
+}
+
+Immediate::Immediate(const char* s) {
+ x_ = reinterpret_cast<int32_t>(s);
+ rmode_ = embedded_string;
+}
+
+
+Immediate::Immediate(Handle<Object> handle) {
+  // Verify all Objects referred to by code are NOT in new space.
+ Object* obj = *handle;
+ ASSERT(!Heap::InNewSpace(obj));
+ if (obj->IsHeapObject()) {
+ x_ = reinterpret_cast<intptr_t>(handle.location());
+ rmode_ = embedded_object;
+ } else {
+ // no relocation needed
+ x_ = reinterpret_cast<intptr_t>(obj);
+ rmode_ = no_reloc;
+ }
+}
+
+
+Immediate::Immediate(Smi* value) {
+ x_ = reinterpret_cast<intptr_t>(value);
+ rmode_ = no_reloc;
+}
+
+
+void Assembler::emit(uint32_t x) {
+ *reinterpret_cast<uint32_t*>(pc_) = x;
+ pc_ += sizeof(uint32_t);
+}
+
+
+void Assembler::emit(Handle<Object> handle) {
+  // Verify all Objects referred to by code are NOT in new space.
+ Object* obj = *handle;
+ ASSERT(!Heap::InNewSpace(obj));
+ if (obj->IsHeapObject()) {
+ emit(reinterpret_cast<intptr_t>(handle.location()), embedded_object);
+ } else {
+ // no relocation needed
+ emit(reinterpret_cast<intptr_t>(obj));
+ }
+}
+
+
+void Assembler::emit(uint32_t x, RelocMode rmode) {
+ if (rmode != no_reloc) RecordRelocInfo(rmode);
+ emit(x);
+}
+
+
+void Assembler::emit(const Immediate& x) {
+ if (x.rmode_ != no_reloc) RecordRelocInfo(x.rmode_);
+ emit(x.x_);
+}
+
+
+Address Assembler::target_address_at(Address pc) {
+ return pc + sizeof(int32_t) + *reinterpret_cast<int32_t*>(pc);
+}
+
+void Assembler::set_target_address_at(Address pc, Address target) {
+ int32_t* p = reinterpret_cast<int32_t*>(pc);
+ *p = target - (pc + sizeof(int32_t));
+ CPU::FlushICache(p, sizeof(int32_t));
+}
+
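+// Worked example for the two functions above (addresses are made up): if the
+// 32-bit operand of a call lives at pc == 0x1000 and contains 0x20, then the
+// target is 0x1000 + 4 + 0x20 == 0x1024; the stored value is relative to the
+// end of the operand, matching the IA-32 call/jmp rel32 encodings.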
+void Operand::set_modrm(int mod, Register rm) {  // reg == 0
+ ASSERT((mod & -4) == 0);
+ buf_[0] = mod << 6 | rm.code();
+ len_ = 1;
+}
+
+
+void Operand::set_dispr(int32_t disp, RelocMode rmode) {
+ ASSERT(len_ == 1 || len_ == 2);
+ *reinterpret_cast<int32_t*>(&buf_[len_]) = disp;
+ len_ += sizeof(int32_t);
+ rmode_ = rmode;
+}
+
+Operand::Operand(Register reg) {
+ // reg
+ set_modrm(3, reg);
+}
+
+
+Operand::Operand(int32_t disp, RelocMode rmode) {
+ // [disp/r]
+ set_modrm(0, ebp);
+ set_dispr(disp, rmode);
+}
+
+} } // namespace v8::internal
+
+#endif // V8_ASSEMBLER_IA32_INL_H_
diff --git a/src/assembler-ia32.cc b/src/assembler-ia32.cc
new file mode 100644
index 0000000..a8b521e
--- /dev/null
+++ b/src/assembler-ia32.cc
@@ -0,0 +1,2092 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the
+// distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+// OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license has been modified
+// significantly by Google Inc.
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+
+#include "v8.h"
+
+#include "disassembler.h"
+#include "macro-assembler.h"
+#include "serialize.h"
+
+namespace v8 { namespace internal {
+
+DEFINE_bool(debug_code, false,
+ "generate extra code (comments, assertions) for debugging");
+DEFINE_bool(emit_branch_hints, false, "emit branch hints");
+
+// -----------------------------------------------------------------------------
+// Implementation of Register
+
+Register eax = { 0 };
+Register ecx = { 1 };
+Register edx = { 2 };
+Register ebx = { 3 };
+Register esp = { 4 };
+Register ebp = { 5 };
+Register esi = { 6 };
+Register edi = { 7 };
+Register no_reg = { -1 };
+
+XMMRegister xmm0 = { 0 };
+XMMRegister xmm1 = { 1 };
+XMMRegister xmm2 = { 2 };
+XMMRegister xmm3 = { 3 };
+XMMRegister xmm4 = { 4 };
+XMMRegister xmm5 = { 5 };
+XMMRegister xmm6 = { 6 };
+XMMRegister xmm7 = { 7 };
+
+
+// -----------------------------------------------------------------------------
+// Implementation of CpuFeatures
+
+// Safe default is no features.
+uint32_t CpuFeatures::supported_ = 0;
+uint32_t CpuFeatures::enabled_ = 0;
+
+
+typedef int (*F0)();
+
+// The Probe method needs executable memory, so it uses Heap::CreateCode.
+// Allocation failure is silent and leads to a safe default.
+void CpuFeatures::Probe() {
+ supported_ = 0;
+ if (Serializer::enabled()) return; // No features if we might serialize.
+ Assembler assm(NULL, 0);
+ Label done;
+#define __ assm.
+ // Save old esp, since we are going to modify the stack.
+ __ push(ebp);
+ __ pushfd();
+ __ push(ecx);
+ __ push(edx);
+ __ push(ebx);
+ __ mov(ebp, Operand(esp));
+ // If we can modify bit 21 of the EFLAGS register, then CPUID is supported.
+ __ pushfd();
+ __ pop(eax);
+ __ mov(edx, Operand(eax));
+ __ xor_(eax, 0x200000); // Flip bit 21.
+ __ push(eax);
+ __ popfd();
+ __ pushfd();
+ __ pop(eax);
+ __ xor_(eax, Operand(edx)); // Different if CPUID is supported.
+ __ j(zero, &done);
+ // Invoke CPUID with 1 in eax to get feature information in edx.
+ __ mov(eax, 1);
+ // Temporarily force CPUID support, since we know it is safe here.
+ supported_ = (1 << CPUID);
+ { Scope fscope(CPUID);
+ __ cpuid();
+ }
+ supported_ = 0;
+ // Return result in eax.
+ __ mov(eax, Operand(edx));
+ __ bind(&done);
+ __ mov(esp, Operand(ebp));
+ __ pop(ebx);
+ __ pop(edx);
+ __ pop(ecx);
+ __ popfd();
+ __ pop(ebp);
+ __ ret(0);
+#undef __
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Object* code = Heap::CreateCode(desc, NULL, Code::ComputeFlags(Code::STUB));
+ if (!code->IsCode()) return;
+ F0 f = FUNCTION_CAST<F0>(Code::cast(code)->entry());
+ uint32_t res = f();
+ supported_ = (res | (1 << CPUID));
+}
+
+
+// -----------------------------------------------------------------------------
+// A Displacement describes the 32-bit immediate field of an instruction which
+// may be used together with a Label in order to refer to a yet unknown code
+// position. Displacements stored in the instruction stream are used to
+// describe the instruction and to chain a list of instructions using the same
+// Label.
+// A Displacement contains two fields:
+//
+// next field: position of next displacement in the chain (0 = end of list)
+// type field: instruction type
+//
+// A next value of null (0) indicates the end of a chain (note that there can
+// be no displacement at position zero, because there is always at least one
+// instruction byte before the displacement).
+//
+// Displacement data_ field layout
+//
+// |31.....1|.......0|
+// |  next  |  type  |
+
+class Displacement BASE_EMBEDDED {
+ private:
+ enum Type {
+ UNCONDITIONAL_JUMP,
+ OTHER
+ };
+
+ int data_;
+
+ class TypeField: public BitField<Type, 0, 1> {};
+ class NextField: public BitField<int, 1, 32-1> {};
+
+ void init(Label* L, Type type) {
+ ASSERT(!L->is_bound());
+ int next = 0;
+ if (L->is_linked()) {
+ next = L->pos();
+ ASSERT(next > 0); // Displacements must be at positions > 0
+ }
+ // Ensure that we _never_ overflow the next field.
+ ASSERT(NextField::is_valid(Assembler::kMaximalBufferSize));
+ data_ = NextField::encode(next) | TypeField::encode(type);
+ }
+
+ int data() const { return data_; }
+ Type type() const { return TypeField::decode(data_); }
+ void next(Label* L) const {
+ int n = NextField::decode(data_);
+ n > 0 ? L->link_to(n) : L->Unuse();
+ }
+ void link_to(Label* L) { init(L, type()); }
+
+ explicit Displacement(int data) { data_ = data; }
+
+ Displacement(Label* L, Type type) { init(L, type); }
+
+ void print() {
+ PrintF("%s (%x) ", (type() == UNCONDITIONAL_JUMP ? "jmp" : "[other]"),
+ NextField::decode(data_));
+ }
+
+ friend class Assembler;
+ friend class MacroAssembler;
+};
+
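+// Worked example of the data_ layout above (positions are made up): a
+// displacement of type UNCONDITIONAL_JUMP whose next link is at position 12
+// is encoded as (12 << 1) | 0 == 24; with type OTHER it is (12 << 1) | 1 ==
+// 25. A data_ value whose next field is 0 therefore terminates its chain.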
+
+// TODO(1236137): Stop using macros here. The reason for using them is
+// to avoid declaring the Displacement class in the .h file and having
+// functions on the assembler that return them. Maybe that's not a
+// big issue?
+#define disp_at(L) \
+ Displacement(long_at((L)->pos()))
+
+#define disp_at_put(L, disp) \
+ long_at_put((L)->pos(), (disp).data())
+
+#define emit_disp(L, type) { \
+ Displacement disp((L), (type)); \
+ (L)->link_to(pc_offset()); \
+ emit(static_cast<int>(disp.data())); \
+ }
+
+
+// -----------------------------------------------------------------------------
+// Implementation of RelocInfo
+
+
+const int RelocInfo::kApplyMask =
+ RelocInfo::kCodeTargetMask | 1 << runtime_entry | 1 << js_return;
+
+
+void RelocInfo::patch_code(byte* instructions, int instruction_count) {
+ // Patch the code at the current address with the supplied instructions.
+ for (int i = 0; i < instruction_count; i++) {
+ *(pc_ + i) = *(instructions + i);
+ }
+}
+
+
+// Patch the code at the current PC with a call to the target address.
+// Additional guard int3 instructions can be added if required.
+void RelocInfo::patch_code_with_call(Address target, int guard_bytes) {
+ // Call instruction takes up 5 bytes and int3 takes up one byte.
+ int code_size = 5 + guard_bytes;
+
+ // Patch the code.
+ CodePatcher patcher(pc_, code_size);
+ patcher.masm()->call(target, no_reloc);
+
+ // Add the requested number of int3 instructions after the call.
+ for (int i = 0; i < guard_bytes; i++) {
+ patcher.masm()->int3();
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of Operand
+
+Operand::Operand(Register base, int32_t disp, RelocMode rmode) {
+ // [base + disp/r]
+ if (disp == 0 && rmode == no_reloc && !base.is(ebp)) {
+ // [base]
+ set_modrm(0, base);
+ if (base.is(esp)) set_sib(times_1, esp, base);
+ } else if (is_int8(disp) && rmode == no_reloc) {
+ // [base + disp8]
+ set_modrm(1, base);
+ if (base.is(esp)) set_sib(times_1, esp, base);
+ set_disp8(disp);
+ } else {
+ // [base + disp/r]
+ set_modrm(2, base);
+ if (base.is(esp)) set_sib(times_1, esp, base);
+ set_dispr(disp, rmode);
+ }
+}
+
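+// Worked examples for the constructor above (the reg bits of the ModRM byte
+// are filled in later by emit_operand; times_1 is assumed to encode as scale
+// bits 00):
+//   Operand(ebp, 8)  ->  modrm 0x45, disp8 0x08    i.e. [ebp+8]
+//   Operand(esp, 0)  ->  modrm 0x04, sib 0x24      i.e. [esp]
+//   Operand(ebx, 0)  ->  modrm 0x03                i.e. [ebx]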
+
+Operand::Operand(Register base,
+ Register index,
+ ScaleFactor scale,
+ int32_t disp,
+ RelocMode rmode) {
+ ASSERT(!index.is(esp)); // illegal addressing mode
+ // [base + index*scale + disp/r]
+ if (disp == 0 && rmode == no_reloc && !base.is(ebp)) {
+ // [base + index*scale]
+ set_modrm(0, esp);
+ set_sib(scale, index, base);
+ } else if (is_int8(disp) && rmode == no_reloc) {
+ // [base + index*scale + disp8]
+ set_modrm(1, esp);
+ set_sib(scale, index, base);
+ set_disp8(disp);
+ } else {
+ // [base + index*scale + disp/r]
+ set_modrm(2, esp);
+ set_sib(scale, index, base);
+ set_dispr(disp, rmode);
+ }
+}
+
+
+Operand::Operand(Register index,
+ ScaleFactor scale,
+ int32_t disp,
+ RelocMode rmode) {
+ ASSERT(!index.is(esp)); // illegal addressing mode
+ // [index*scale + disp/r]
+ set_modrm(0, esp);
+ set_sib(scale, index, ebp);
+ set_dispr(disp, rmode);
+}
+
+
+void Operand::set_sib(ScaleFactor scale, Register index, Register base) {
+ ASSERT(len_ == 1);
+ ASSERT((scale & -4) == 0);
+ buf_[1] = scale << 6 | index.code() << 3 | base.code();
+ len_ = 2;
+}
+
+
+void Operand::set_disp8(int8_t disp) {
+ ASSERT(len_ == 1 || len_ == 2);
+ *reinterpret_cast<int8_t*>(&buf_[len_++]) = disp;
+}
+
+
+void Operand::set_reg(Register reg) const {
+ ASSERT(len_ > 0);
+ buf_[0] = (buf_[0] & ~0x38) | static_cast<byte>(reg.code() << 3);
+}
+
+
+bool Operand::is_reg(Register reg) const {
+ return ((buf_[0] & 0xF8) == 0xC0) // addressing mode is register only.
+ && ((buf_[0] & 0x07) == reg.code()); // register codes match.
+}
+
+// -----------------------------------------------------------------------------
+// Implementation of Assembler
+
+// Emit a single byte. Must always be inlined.
+#define EMIT(x) \
+ *pc_++ = (x)
+
+
+// Spare buffer of kMinimalBufferSize, kept so that short-lived assemblers can
+// reuse one allocation instead of allocating a fresh buffer every time.
+static byte* spare_buffer_ = NULL;
+
+Assembler::Assembler(void* buffer, int buffer_size) {
+ if (buffer == NULL) {
+ // do our own buffer management
+ if (buffer_size <= kMinimalBufferSize) {
+ buffer_size = kMinimalBufferSize;
+
+ if (spare_buffer_ != NULL) {
+ buffer = spare_buffer_;
+ spare_buffer_ = NULL;
+ }
+ }
+ if (buffer == NULL) {
+ buffer_ = NewArray<byte>(buffer_size);
+ } else {
+ buffer_ = static_cast<byte*>(buffer);
+ }
+ buffer_size_ = buffer_size;
+ own_buffer_ = true;
+
+ } else {
+ // use externally provided buffer instead
+ ASSERT(buffer_size > 0);
+ buffer_ = static_cast<byte*>(buffer);
+ buffer_size_ = buffer_size;
+ own_buffer_ = false;
+ }
+
+ // Clear the buffer in debug mode unless it was provided by the
+ // caller in which case we can't be sure it's okay to overwrite
+ // existing code in it; see CodePatcher::CodePatcher(...).
+ if (kDebug && own_buffer_) {
+ memset(buffer_, 0xCC, buffer_size); // int3
+ }
+
+ // setup buffer pointers
+ ASSERT(buffer_ != NULL);
+ pc_ = buffer_;
+ reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
+
+ last_pc_ = NULL;
+ last_bound_pos_ = 0;
+ last_position_ = kNoPosition;
+ last_position_is_statement_ = false;
+}
+
+
+Assembler::~Assembler() {
+ if (own_buffer_) {
+ if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
+ spare_buffer_ = buffer_;
+ } else {
+ DeleteArray(buffer_);
+ }
+ }
+}
+
+
+void Assembler::GetCode(CodeDesc* desc) {
+ // finalize code
+ if (unbound_label_.is_linked())
+ bind_to(&unbound_label_, binding_pos_);
+
+ // (at this point overflow() may be true, but the gap ensures that
+ // we are still not overlapping instructions and relocation info)
+ ASSERT(pc_ <= reloc_info_writer.pos()); // no overlap
+ // setup desc
+ desc->buffer = buffer_;
+ desc->buffer_size = buffer_size_;
+ desc->instr_size = pc_offset();
+ desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+
+ Counters::reloc_info_size.Increment(desc->reloc_size);
+}
+
+
+void Assembler::Align(int m) {
+ ASSERT(IsPowerOf2(m));
+ while ((pc_offset() & (m - 1)) != 0) {
+ nop();
+ }
+}
+
+
+void Assembler::cpuid() {
+ ASSERT(CpuFeatures::IsEnabled(CpuFeatures::CPUID));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x0F);
+ EMIT(0xA2);
+}
+
+
+void Assembler::pushad() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x60);
+}
+
+
+void Assembler::popad() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x61);
+}
+
+
+void Assembler::pushfd() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x9C);
+}
+
+
+void Assembler::popfd() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x9D);
+}
+
+
+void Assembler::push(const Immediate& x) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ if (x.is_int8()) {
+ EMIT(0x6a);
+ EMIT(x.x_);
+ } else {
+ EMIT(0x68);
+ emit(x);
+ }
+}
+
+
+DEFINE_bool(push_pop_elimination, true,
+ "eliminate redundant push/pops in assembly code");
+DEFINE_bool(print_push_pop_elimination, false,
+ "print elimination of redundant push/pops in assembly code");
+
+void Assembler::push(Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x50 | src.code());
+}
+
+
+void Assembler::push(const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xFF);
+ emit_operand(esi, src);
+}
+
+
+void Assembler::pop(Register dst) {
+ ASSERT(reloc_info_writer.last_pc() != NULL);
+ if (FLAG_push_pop_elimination && (reloc_info_writer.last_pc() <= last_pc_)) {
+ // (last_pc_ != NULL) is rolled into the above check
+ // If a last_pc_ is set, we need to make sure that there has not been any
+ // relocation information generated between the last instruction and this
+ // pop instruction.
+ byte instr = last_pc_[0];
+ if (instr == (0x50 | dst.code())) {
+ pc_ = last_pc_;
+ last_pc_ = NULL;
+ if (FLAG_print_push_pop_elimination) {
+ PrintF("%d push/pop (same reg) eliminated\n", pc_offset());
+ }
+ return;
+ } else if (instr == 0xff) { // push of an operand, convert to a move
+ byte op1 = last_pc_[1];
+ // Check if the operation is really a push
+ if ((op1 & 0x38) == (6 << 3)) {
+ op1 = (op1 & ~0x38) | static_cast<byte>(dst.code() << 3);
+ last_pc_[0] = 0x8b;
+ last_pc_[1] = op1;
+ last_pc_ = NULL;
+ if (FLAG_print_push_pop_elimination) {
+ PrintF("%d push/pop (op->reg) eliminated\n", pc_offset());
+ }
+ return;
+ }
+ } else if ((instr == 0x89) &&
+ (last_pc_[1] == 0x04) &&
+ (last_pc_[2] == 0x24)) {
+ // 0x71283c 396 890424 mov [esp],eax
+ // 0x71283f 399 58 pop eax
+ if (dst.is(eax)) {
+ // change to
+ // 0x710fac 216 83c404 add esp,0x4
+ last_pc_[0] = 0x83;
+ last_pc_[1] = 0xc4;
+ last_pc_[2] = 0x04;
+ last_pc_ = NULL;
+ if (FLAG_print_push_pop_elimination) {
+ PrintF("%d push/pop (mov-pop) eliminated\n", pc_offset());
+ }
+ return;
+ }
+ }
+ // Other potential patterns for peephole:
+ // 0x712716 102 890424 mov [esp], eax
+ // 0x712719 105 8b1424 mov edx, [esp]
+ }
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x58 | dst.code());
+}
+
+
+void Assembler::pop(const Operand& dst) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x8F);
+ emit_operand(eax, dst);
+}
+
+
+void Assembler::mov_b(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x8A);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::mov_b(const Operand& dst, int8_t imm8) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xC6);
+ emit_operand(eax, dst);
+ EMIT(imm8);
+}
+
+
+void Assembler::mov_b(const Operand& dst, Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x88);
+ emit_operand(src, dst);
+}
+
+
+void Assembler::mov_w(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x66);
+ EMIT(0x8B);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::mov_w(const Operand& dst, Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x66);
+ EMIT(0x89);
+ emit_operand(src, dst);
+}
+
+
+void Assembler::mov(Register dst, int32_t imm32) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xB8 | dst.code());
+ emit(imm32);
+}
+
+
+void Assembler::mov(Register dst, Handle<Object> handle) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xB8 | dst.code());
+ emit(handle);
+}
+
+
+void Assembler::mov(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x8B);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::mov(const Operand& dst, const Immediate& x) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xC7);
+ emit_operand(eax, dst);
+ emit(x);
+}
+
+
+void Assembler::mov(const Operand& dst, Handle<Object> handle) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xC7);
+ emit_operand(eax, dst);
+ emit(handle);
+}
+
+
+void Assembler::mov(const Operand& dst, Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x89);
+ emit_operand(src, dst);
+}
+
+
+void Assembler::movsx_b(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x0F);
+ EMIT(0xBE);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::movsx_w(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x0F);
+ EMIT(0xBF);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::movzx_b(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x0F);
+ EMIT(0xB6);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::movzx_w(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x0F);
+ EMIT(0xB7);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::cmov(Condition cc, Register dst, int32_t imm32) {
+ ASSERT(CpuFeatures::IsEnabled(CpuFeatures::CMOV));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ UNIMPLEMENTED();
+ USE(cc);
+ USE(dst);
+ USE(imm32);
+}
+
+
+void Assembler::cmov(Condition cc, Register dst, Handle<Object> handle) {
+ ASSERT(CpuFeatures::IsEnabled(CpuFeatures::CMOV));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ UNIMPLEMENTED();
+ USE(cc);
+ USE(dst);
+ USE(handle);
+}
+
+
+void Assembler::cmov(Condition cc, Register dst, const Operand& src) {
+ ASSERT(CpuFeatures::IsEnabled(CpuFeatures::CMOV));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ UNIMPLEMENTED();
+ USE(cc);
+ USE(dst);
+ USE(src);
+}
+
+
+void Assembler::adc(Register dst, int32_t imm32) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_arith(2, Operand(dst), Immediate(imm32));
+}
+
+
+void Assembler::adc(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x13);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::add(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x03);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::add(const Operand& dst, const Immediate& x) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_arith(0, dst, x);
+}
+
+
+void Assembler::and_(Register dst, int32_t imm32) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_arith(4, Operand(dst), Immediate(imm32));
+}
+
+
+void Assembler::and_(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x23);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::and_(const Operand& dst, const Immediate& x) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_arith(4, dst, x);
+}
+
+
+void Assembler::and_(const Operand& dst, Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x21);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::cmp(Register reg, int32_t imm32) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_arith(7, Operand(reg), Immediate(imm32));
+}
+
+
+void Assembler::cmp(Register reg, Handle<Object> handle) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_arith(7, Operand(reg), Immediate(handle));
+}
+
+
+void Assembler::cmp(Register reg, const Operand& op) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x3B);
+ emit_operand(reg, op);
+}
+
+
+void Assembler::cmp(const Operand& op, const Immediate& imm) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_arith(7, op, imm);
+}
+
+
+void Assembler::dec_b(Register dst) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xFE);
+ EMIT(0xC8 | dst.code());
+}
+
+
+void Assembler::dec(Register dst) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x48 | dst.code());
+}
+
+
+void Assembler::dec(const Operand& dst) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xFF);
+ emit_operand(ecx, dst);
+}
+
+
+void Assembler::cdq() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x99);
+}
+
+
+void Assembler::idiv(Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xF7);
+ EMIT(0xF8 | src.code());
+}
+
+
+void Assembler::imul(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x0F);
+ EMIT(0xAF);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::imul(Register dst, Register src, int32_t imm32) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ if (is_int8(imm32)) {
+ EMIT(0x6B);
+ EMIT(0xC0 | dst.code() << 3 | src.code());
+ EMIT(imm32);
+ } else {
+ EMIT(0x69);
+ EMIT(0xC0 | dst.code() << 3 | src.code());
+ emit(imm32);
+ }
+}
+
+
+void Assembler::inc(Register dst) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x40 | dst.code());
+}
+
+
+void Assembler::inc(const Operand& dst) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xFF);
+ emit_operand(eax, dst);
+}
+
+
+void Assembler::lea(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x8D);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::mul(Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xF7);
+ EMIT(0xE0 | src.code());
+}
+
+
+void Assembler::neg(Register dst) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xF7);
+ EMIT(0xD8 | dst.code());
+}
+
+
+void Assembler::not_(Register dst) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xF7);
+ EMIT(0xD0 | dst.code());
+}
+
+
+void Assembler::or_(Register dst, int32_t imm32) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_arith(1, Operand(dst), Immediate(imm32));
+}
+
+
+void Assembler::or_(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x0B);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::or_(const Operand& dst, const Immediate& x) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_arith(1, dst, x);
+}
+
+
+void Assembler::or_(const Operand& dst, Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x09);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::rcl(Register dst, uint8_t imm8) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ ASSERT(is_uint5(imm8)); // illegal shift count
+ if (imm8 == 1) {
+ EMIT(0xD1);
+ EMIT(0xD0 | dst.code());
+ } else {
+ EMIT(0xC1);
+ EMIT(0xD0 | dst.code());
+ EMIT(imm8);
+ }
+}
+
+
+void Assembler::sar(Register dst, uint8_t imm8) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ ASSERT(is_uint5(imm8)); // illegal shift count
+ if (imm8 == 1) {
+ EMIT(0xD1);
+ EMIT(0xF8 | dst.code());
+ } else {
+ EMIT(0xC1);
+ EMIT(0xF8 | dst.code());
+ EMIT(imm8);
+ }
+}
+
+
+void Assembler::sar(Register dst) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xD3);
+ EMIT(0xF8 | dst.code());
+}
+
+
+void Assembler::sbb(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x1B);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::shld(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x0F);
+ EMIT(0xA5);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::shl(Register dst, uint8_t imm8) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ ASSERT(is_uint5(imm8)); // illegal shift count
+ if (imm8 == 1) {
+ EMIT(0xD1);
+ EMIT(0xE0 | dst.code());
+ } else {
+ EMIT(0xC1);
+ EMIT(0xE0 | dst.code());
+ EMIT(imm8);
+ }
+}
+
+
+void Assembler::shl(Register dst) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xD3);
+ EMIT(0xE0 | dst.code());
+}
+
+
+void Assembler::shrd(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x0F);
+ EMIT(0xAD);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::shr(Register dst, uint8_t imm8) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ ASSERT(is_uint5(imm8)); // illegal shift count
+ EMIT(0xC1);
+ EMIT(0xE8 | dst.code());
+ EMIT(imm8);
+}
+
+
+void Assembler::shr(Register dst) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xD3);
+ EMIT(0xE8 | dst.code());
+}
+
+
+void Assembler::sub(const Operand& dst, const Immediate& x) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_arith(5, dst, x);
+}
+
+
+void Assembler::sub(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x2B);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::sub(const Operand& dst, Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x29);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::test(Register reg, const Immediate& imm) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ // Only use test against byte for registers that have a byte
+ // variant: eax, ebx, ecx, and edx.
+ if (imm.rmode_ == no_reloc && is_uint8(imm.x_) && reg.code() < 4) {
+ uint8_t imm8 = imm.x_;
+ if (reg.is(eax)) {
+ EMIT(0xA8);
+ EMIT(imm8);
+ } else {
+ emit_arith_b(0xF6, 0xC0, reg, imm8);
+ }
+ } else {
+ // This is not using emit_arith because test doesn't support
+ // sign-extension of 8-bit operands.
+ if (reg.is(eax)) {
+ EMIT(0xA9);
+ } else {
+ EMIT(0xF7);
+ EMIT(0xC0 | reg.code());
+ }
+ emit(imm);
+ }
+}
+
+
+void Assembler::test(Register reg, const Operand& op) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x85);
+ emit_operand(reg, op);
+}
+
+
+void Assembler::test(const Operand& op, const Immediate& imm) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xF7);
+ emit_operand(eax, op);
+ emit(imm);
+}
+
+
+void Assembler::xor_(Register dst, int32_t imm32) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_arith(6, Operand(dst), Immediate(imm32));
+}
+
+
+void Assembler::xor_(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x33);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::xor_(const Operand& src, Register dst) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x31);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::xor_(const Operand& dst, const Immediate& x) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_arith(6, dst, x);
+}
+
+
+void Assembler::bts(const Operand& dst, Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x0F);
+ EMIT(0xAB);
+ emit_operand(src, dst);
+}
+
+
+void Assembler::hlt() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xF4);
+}
+
+
+void Assembler::int3() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xCC);
+}
+
+
+void Assembler::nop() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x90);
+}
+
+
+void Assembler::rdtsc() {
+ ASSERT(CpuFeatures::IsEnabled(CpuFeatures::RDTSC));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x0F);
+ EMIT(0x31);
+}
+
+
+void Assembler::ret(int imm16) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ ASSERT(is_uint16(imm16));
+ if (imm16 == 0) {
+ EMIT(0xC3);
+ } else {
+ EMIT(0xC2);
+ EMIT(imm16 & 0xFF);
+ EMIT((imm16 >> 8) & 0xFF);
+ }
+}
+
+
+void Assembler::leave() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xC9);
+}
+
+
+// Labels refer to positions in the (to be) generated code.
+// There are bound, linked, and unused labels.
+//
+// Bound labels refer to known positions in the already
+// generated code. pos() is the position the label refers to.
+//
+// Linked labels refer to unknown positions in the code
+// to be generated; pos() is the position of the 32-bit
+// displacement of the last instruction using the label.
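+//
+// For example (a sketch, with `masm` a hypothetical Assembler):
+//   Label L;          // unused: neither referenced nor bound
+//   masm.jmp(&L);     // linked: L records the position of the jmp's disp32
+//   masm.bind(&L);    // bound: L->pos() is now a known code position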
+
+
+void Assembler::print(Label* L) {
+ if (L->is_unused()) {
+ PrintF("unused label\n");
+ } else if (L->is_bound()) {
+ PrintF("bound label to %d\n", L->pos());
+ } else if (L->is_linked()) {
+ Label l = *L;
+ PrintF("unbound label");
+ while (l.is_linked()) {
+ Displacement disp = disp_at(&l);
+ PrintF("@ %d ", l.pos());
+ disp.print();
+ PrintF("\n");
+ disp.next(&l);
+ }
+ } else {
+ PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
+ }
+}
+
+
+DEFINE_bool(eliminate_jumps, true, "eliminate jumps to jumps in assembly code");
+DEFINE_bool(print_jump_elimination, false,
+ "print elimination of jumps to jumps in assembly code");
+
+void Assembler::bind_to(Label* L, int pos) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = NULL;
+ ASSERT(0 <= pos && pos <= pc_offset()); // must have a valid binding position
+ while (L->is_linked()) {
+ Displacement disp = disp_at(L);
+ int fixup_pos = L->pos();
+ if (disp.type() == Displacement::UNCONDITIONAL_JUMP) {
+ ASSERT(byte_at(fixup_pos - 1) == 0xE9); // jmp expected
+ }
+ int imm32 = pos - (fixup_pos + sizeof(int32_t));
+ long_at_put(fixup_pos, imm32);
+ disp.next(L);
+ }
+ L->bind_to(pos);
+
+ // do not eliminate jump instructions before the last bound position
+ if (pos > last_bound_pos_)
+ last_bound_pos_ = pos;
+}
+
+
+void Assembler::link_to(Label* L, Label* appendix) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = NULL;
+ if (appendix->is_linked()) {
+ if (L->is_linked()) {
+ // append appendix to L's list
+ Label p;
+ Label q = *L;
+ do {
+ p = q;
+ Displacement disp = disp_at(&q);
+ disp.next(&q);
+ } while (q.is_linked());
+ Displacement disp = disp_at(&p);
+ disp.link_to(appendix);
+ disp_at_put(&p, disp);
+ p.Unuse(); // to avoid assertion failure in ~Label
+ } else {
+ // L is empty, simply use appendix
+ *L = *appendix;
+ }
+ }
+ appendix->Unuse(); // appendix should not be used anymore
+}
+
+
+void Assembler::bind(Label* L) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = NULL;
+ ASSERT(!L->is_bound()); // label can only be bound once
+ if (FLAG_eliminate_jumps) {
+ // Resolve unbound label.
+ if (unbound_label_.is_linked()) {
+ // Unbound label exists => link it with L if same binding
+ // position, otherwise fix it.
+ if (binding_pos_ == pc_offset()) {
+ // Link it to L's list.
+ link_to(L, &unbound_label_);
+ } else {
+ // Otherwise bind unbound label.
+ ASSERT(binding_pos_ < pc_offset());
+ bind_to(&unbound_label_, binding_pos_);
+ }
+ }
+ ASSERT(!unbound_label_.is_linked());
+ // try to eliminate jumps to next instruction
+ const int absolute_jump_size = 5;
+ // Do not remove an already bound jump target.
+ while (last_bound_pos_ < pc_offset() &&
+ reloc_info_writer.last_pc() <= pc_ - absolute_jump_size &&
+ L->is_linked() &&
+ (L->pos() + static_cast<int>(sizeof(int32_t)) == pc_offset()) &&
+ (disp_at(L).type() == Displacement::UNCONDITIONAL_JUMP)) {
+ // The previous instruction is a jump whose target is the position
+ // immediately after it => eliminate it.
+ // jmp expected.
+ ASSERT(byte_at(pc_offset() - absolute_jump_size) == 0xE9);
+ if (FLAG_print_jump_elimination) {
+ PrintF("@ %d jump to next eliminated\n", L->pos());
+ }
+ // Remove first entry from label list.
+ Displacement disp = disp_at(L);
+ disp.next(L);
+ // Eliminate instruction (set code pointers back).
+ pc_ -= absolute_jump_size;
+ // Make sure not to skip relocation information when rewinding.
+ ASSERT(reloc_info_writer.last_pc() <= pc_);
+ }
+ // Delay fixup of L => store it as unbound label.
+ unbound_label_ = *L;
+ binding_pos_ = pc_offset();
+ L->Unuse();
+ }
+ bind_to(L, pc_offset());
+}
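+
+// A sketch of the case eliminated in bind() above:
+//   jmp L     ; a 5-byte 0xE9 jump whose target is the very next instruction
+//   L: ...    ; bind(&L) rewinds pc_ by absolute_jump_size, removing the jmp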
+
+
+void Assembler::call(Label* L) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ if (L->is_bound()) {
+ const int long_size = 5;
+ int offs = L->pos() - pc_offset();
+ ASSERT(offs <= 0);
+ // 1110 1000 #32-bit disp
+ EMIT(0xE8);
+ emit(offs - long_size);
+ } else {
+ // 1110 1000 #32-bit disp
+ EMIT(0xE8);
+ emit_disp(L, Displacement::OTHER);
+ }
+}
+
+
+void Assembler::call(byte* entry, RelocMode rmode) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ ASSERT(!is_code_target(rmode));
+ EMIT(0xE8);
+ emit(entry - (pc_ + sizeof(int32_t)), rmode);
+}
+
+
+void Assembler::call(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xFF);
+ emit_operand(edx, adr);
+}
+
+
+void Assembler::call(Handle<Code> code, RelocMode rmode) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ ASSERT(is_code_target(rmode));
+ EMIT(0xE8);
+ emit(reinterpret_cast<intptr_t>(code.location()), rmode);
+}
+
+
+void Assembler::jmp(Label* L) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ if (L->is_bound()) {
+ const int short_size = 2;
+ const int long_size = 5;
+ int offs = L->pos() - pc_offset();
+ ASSERT(offs <= 0);
+ if (is_int8(offs - short_size)) {
+ // 1110 1011 #8-bit disp
+ EMIT(0xEB);
+ EMIT((offs - short_size) & 0xFF);
+ } else {
+ // 1110 1001 #32-bit disp
+ EMIT(0xE9);
+ emit(offs - long_size);
+ }
+ } else {
+ if (FLAG_eliminate_jumps &&
+ unbound_label_.is_linked() &&
+ binding_pos_ == pc_offset()) {
+ // Current position is target of jumps
+ if (FLAG_print_jump_elimination) {
+ PrintF("eliminated jumps/calls to %d from ", binding_pos_);
+ print(&unbound_label_);
+ }
+ link_to(L, &unbound_label_);
+ }
+ // 1110 1001 #32-bit disp
+ EMIT(0xE9);
+ emit_disp(L, Displacement::UNCONDITIONAL_JUMP);
+ }
+}
+
+
+void Assembler::jmp(byte* entry, RelocMode rmode) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ ASSERT(!is_code_target(rmode));
+ EMIT(0xE9);
+ emit(entry - (pc_ + sizeof(int32_t)), rmode);
+}
+
+
+void Assembler::jmp(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xFF);
+ emit_operand(esp, adr);
+}
+
+
+void Assembler::jmp(Handle<Code> code, RelocMode rmode) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ ASSERT(is_code_target(rmode));
+ EMIT(0xE9);
+ emit(reinterpret_cast<intptr_t>(code.location()), rmode);
+}
+
+
+void Assembler::j(Condition cc, Label* L, Hint hint) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ ASSERT(0 <= cc && cc < 16);
+ if (FLAG_emit_branch_hints && hint != no_hint) EMIT(hint);
+ if (L->is_bound()) {
+ const int short_size = 2;
+ const int long_size = 6;
+ int offs = L->pos() - pc_offset();
+ ASSERT(offs <= 0);
+ if (is_int8(offs - short_size)) {
+ // 0111 tttn #8-bit disp
+ EMIT(0x70 | cc);
+ EMIT((offs - short_size) & 0xFF);
+ } else {
+ // 0000 1111 1000 tttn #32-bit disp
+ EMIT(0x0F);
+ EMIT(0x80 | cc);
+ emit(offs - long_size);
+ }
+ } else {
+ // 0000 1111 1000 tttn #32-bit disp
+ // Note: we could eliminate conditional jumps to this jump if the
+ // condition is the same; however, that seems to be a rather unlikely case.
+ EMIT(0x0F);
+ EMIT(0x80 | cc);
+ emit_disp(L, Displacement::OTHER);
+ }
+}
+
+
+void Assembler::j(Condition cc, byte* entry, RelocMode rmode, Hint hint) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ ASSERT((0 <= cc) && (cc < 16));
+ if (FLAG_emit_branch_hints && hint != no_hint) EMIT(hint);
+ // 0000 1111 1000 tttn #32-bit disp
+ EMIT(0x0F);
+ EMIT(0x80 | cc);
+ emit(entry - (pc_ + sizeof(int32_t)), rmode);
+}
+
+
+void Assembler::j(Condition cc, Handle<Code> code, Hint hint) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ if (FLAG_emit_branch_hints && hint != no_hint) EMIT(hint);
+ // 0000 1111 1000 tttn #32-bit disp
+ EMIT(0x0F);
+ EMIT(0x80 | cc);
+ emit(reinterpret_cast<intptr_t>(code.location()), code_target);
+}
+
+
+// FPU instructions
+
+
+void Assembler::fld(int i) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_farith(0xD9, 0xC0, i);
+}
+
+
+void Assembler::fld1() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xD9);
+ EMIT(0xE8);
+}
+
+
+void Assembler::fldz() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xD9);
+ EMIT(0xEE);
+}
+
+
+void Assembler::fld_s(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xD9);
+ emit_operand(eax, adr);
+}
+
+
+void Assembler::fld_d(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xDD);
+ emit_operand(eax, adr);
+}
+
+
+void Assembler::fstp_s(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xD9);
+ emit_operand(ebx, adr);
+}
+
+
+void Assembler::fstp_d(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xDD);
+ emit_operand(ebx, adr);
+}
+
+
+void Assembler::fild_s(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xDB);
+ emit_operand(eax, adr);
+}
+
+
+void Assembler::fild_d(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xDF);
+ emit_operand(ebp, adr);
+}
+
+
+void Assembler::fistp_s(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xDB);
+ emit_operand(ebx, adr);
+}
+
+
+void Assembler::fist_s(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xDB);
+ emit_operand(edx, adr);
+}
+
+
+void Assembler::fistp_d(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xDF);
+ emit_operand(edi, adr);
+}
+
+
+void Assembler::fabs() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xD9);
+ EMIT(0xE1);
+}
+
+
+void Assembler::fchs() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xD9);
+ EMIT(0xE0);
+}
+
+
+void Assembler::fadd(int i) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_farith(0xDC, 0xC0, i);
+}
+
+
+void Assembler::fsub(int i) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_farith(0xDC, 0xE8, i);
+}
+
+
+void Assembler::fisub_s(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xDA);
+ emit_operand(esp, adr);
+}
+
+
+void Assembler::fmul(int i) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_farith(0xDC, 0xC8, i);
+}
+
+
+void Assembler::fdiv(int i) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_farith(0xDC, 0xF8, i);
+}
+
+
+void Assembler::faddp(int i) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_farith(0xDE, 0xC0, i);
+}
+
+
+void Assembler::fsubp(int i) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_farith(0xDE, 0xE8, i);
+}
+
+
+void Assembler::fsubrp(int i) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_farith(0xDE, 0xE0, i);
+}
+
+
+void Assembler::fmulp(int i) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_farith(0xDE, 0xC8, i);
+}
+
+
+void Assembler::fdivp(int i) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_farith(0xDE, 0xF8, i);
+}
+
+
+void Assembler::fprem() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xD9);
+ EMIT(0xF8);
+}
+
+
+void Assembler::fprem1() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xD9);
+ EMIT(0xF5);
+}
+
+
+void Assembler::fxch(int i) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_farith(0xD9, 0xC8, i);
+}
+
+
+void Assembler::fincstp() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xD9);
+ EMIT(0xF7);
+}
+
+
+void Assembler::ffree(int i) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_farith(0xDD, 0xC0, i);
+}
+
+
+void Assembler::ftst() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xD9);
+ EMIT(0xE4);
+}
+
+
+void Assembler::fucomp(int i) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_farith(0xDD, 0xE8, i);
+}
+
+
+void Assembler::fucompp() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xDA);
+ EMIT(0xE9);
+}
+
+
+void Assembler::fcompp() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xDE);
+ EMIT(0xD9);
+}
+
+
+void Assembler::fnstsw_ax() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xDF);
+ EMIT(0xE0);
+}
+
+
+void Assembler::fwait() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x9B);
+}
+
+
+void Assembler::frndint() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xD9);
+ EMIT(0xFC);
+}
+
+
+void Assembler::sahf() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x9E);
+}
+
+
+void Assembler::cvttss2si(Register dst, const Operand& src) {
+ ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xF3);
+ EMIT(0x0F);
+ EMIT(0x2C);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::cvttsd2si(Register dst, const Operand& src) {
+ ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xF2);
+ EMIT(0x0F);
+ EMIT(0x2C);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::cvtsi2sd(XMMRegister dst, const Operand& src) {
+ ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xF2);
+ EMIT(0x0F);
+ EMIT(0x2A);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::addsd(XMMRegister dst, XMMRegister src) {
+ ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xF2);
+ EMIT(0x0F);
+ EMIT(0x58);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
+ ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xF2);
+ EMIT(0x0F);
+ EMIT(0x59);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::subsd(XMMRegister dst, XMMRegister src) {
+ ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xF2);
+ EMIT(0x0F);
+ EMIT(0x5C);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::divsd(XMMRegister dst, XMMRegister src) {
+ ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xF2);
+ EMIT(0x0F);
+ EMIT(0x5E);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::movdbl(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ movsd(dst, src);
+}
+
+
+void Assembler::movdbl(const Operand& dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ movsd(dst, src);
+}
+
+
+void Assembler::movsd(const Operand& dst, XMMRegister src ) {
+ ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xF2); // double
+ EMIT(0x0F);
+ EMIT(0x11); // store
+ emit_sse_operand(src, dst);
+}
+
+
+void Assembler::movsd(XMMRegister dst, const Operand& src) {
+ ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xF2); // double
+ EMIT(0x0F);
+ EMIT(0x10); // load
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
+ Register ireg = { reg.code() };
+ emit_operand(ireg, adr);
+}
+
+
+void Assembler::emit_sse_operand(XMMRegister dst, XMMRegister src) {
+ EMIT(0xC0 | dst.code() << 3 | src.code());
+}
+
+
+void Assembler::Print() {
+ Disassembler::Decode(stdout, buffer_, pc_);
+}
+
+
+void Assembler::RecordJSReturn() {
+ EnsureSpace ensure_space(this);
+ RecordRelocInfo(js_return);
+}
+
+
+void Assembler::RecordComment(const char* msg) {
+ if (FLAG_debug_code) {
+ EnsureSpace ensure_space(this);
+ RecordRelocInfo(comment, reinterpret_cast<intptr_t>(msg));
+ }
+}
+
+
+void Assembler::RecordPosition(int pos) {
+ if (pos == kNoPosition) return;
+ ASSERT(pos >= 0);
+ if (pos == last_position_) return;
+ EnsureSpace ensure_space(this);
+ RecordRelocInfo(position, pos);
+ last_position_ = pos;
+ last_position_is_statement_ = false;
+}
+
+
+void Assembler::RecordStatementPosition(int pos) {
+ if (pos == kNoPosition) return;
+ ASSERT(pos >= 0);
+ if (pos == last_position_) return;
+ EnsureSpace ensure_space(this);
+ RecordRelocInfo(statement_position, pos);
+ last_position_ = pos;
+ last_position_is_statement_ = true;
+}
+
+
+void Assembler::GrowBuffer() {
+ ASSERT(overflow()); // should not call this otherwise
+ if (!own_buffer_) FATAL("external code buffer is too small");
+
+ // compute new buffer size
+ CodeDesc desc; // the new buffer
+ if (buffer_size_ < 4*KB) {
+ desc.buffer_size = 4*KB;
+ } else {
+ desc.buffer_size = 2*buffer_size_;
+ }
+ // Some internal data structures overflow for very large buffers,
+ // so kMaximalBufferSize must be kept small enough to avoid this.
+ if ((desc.buffer_size > kMaximalBufferSize) ||
+ (desc.buffer_size > Heap::OldGenerationSize())) {
+ V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
+ }
+
+ // setup new buffer
+ desc.buffer = NewArray<byte>(desc.buffer_size);
+ desc.instr_size = pc_offset();
+ desc.reloc_size = (buffer_ + buffer_size_) - (reloc_info_writer.pos());
+
+ // Clear the buffer in debug mode. Use 'int3' instructions to make
+ // sure we trap if we ever run uninitialized code.
+ if (kDebug) {
+ memset(desc.buffer, 0xCC, desc.buffer_size);
+ }
+
+ // copy the data
+ int pc_delta = desc.buffer - buffer_;
+ int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
+ memmove(desc.buffer, buffer_, desc.instr_size);
+ memmove(rc_delta + reloc_info_writer.pos(),
+ reloc_info_writer.pos(), desc.reloc_size);
+
+ // switch buffers
+ if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
+ spare_buffer_ = buffer_;
+ } else {
+ DeleteArray(buffer_);
+ }
+ buffer_ = desc.buffer;
+ buffer_size_ = desc.buffer_size;
+ pc_ += pc_delta;
+ if (last_pc_ != NULL) {
+ last_pc_ += pc_delta;
+ }
+ reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
+ reloc_info_writer.last_pc() + pc_delta);
+
+ // relocate runtime entries
+ for (RelocIterator it(desc); !it.done(); it.next()) {
+ RelocMode rmode = it.rinfo()->rmode();
+ if (rmode == runtime_entry) {
+ int32_t* p = reinterpret_cast<int32_t*>(it.rinfo()->pc());
+ *p -= pc_delta; // relocate entry
+ }
+ }
+
+ ASSERT(!overflow());
+}
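+
+// Growth sketch: an assembler-owned buffer starts at the 4*KB minimum and is
+// doubled on each overflow (4KB -> 8KB -> 16KB -> ...), subject to the
+// kMaximalBufferSize and old-generation size checks above.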
+
+
+void Assembler::emit_arith_b(int op1, int op2, Register dst, int imm8) {
+ ASSERT(is_uint8(op1) && is_uint8(op2)); // wrong opcode
+ ASSERT(is_uint8(imm8));
+ ASSERT((op1 & 0x01) == 0); // should be 8bit operation
+ EMIT(op1);
+ EMIT(op2 | dst.code());
+ EMIT(imm8);
+}
+
+
+void Assembler::emit_arith(int sel, Operand dst, const Immediate& x) {
+ ASSERT((0 <= sel) && (sel <= 7));
+ Register ireg = { sel };
+ if (x.is_int8()) {
+ EMIT(0x83); // using a sign-extended 8-bit immediate.
+ emit_operand(ireg, dst);
+ EMIT(x.x_ & 0xFF);
+ } else if (dst.is_reg(eax)) {
+ EMIT((sel << 3) | 0x05); // short form if the destination is eax.
+ emit(x);
+ } else {
+ EMIT(0x81); // using a literal 32-bit immediate.
+ emit_operand(ireg, dst);
+ emit(x);
+ }
+}
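+
+// For example (a sketch): add(Operand(ebx), Immediate(1)) selects the 0x83
+// sign-extended 8-bit form; add(Operand(eax), Immediate(0x12345)) uses the
+// short eax-only encoding ((sel << 3) | 0x05); any other 32-bit immediate
+// falls back to the generic 0x81 form.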
+
+
+void Assembler::emit_operand(Register reg, const Operand& adr) {
+ adr.set_reg(reg);
+ memmove(pc_, adr.buf_, adr.len_);
+ pc_ += adr.len_;
+ if (adr.len_ >= sizeof(int32_t) && adr.rmode_ != no_reloc) {
+ pc_ -= sizeof(int32_t); // pc_ must be *at* disp32
+ RecordRelocInfo(adr.rmode_);
+ pc_ += sizeof(int32_t);
+ }
+}
+
+
+void Assembler::emit_operand(const Operand& adr, Register reg) {
+ adr.set_reg(reg);
+ memmove(pc_, adr.buf_, adr.len_);
+ pc_ += adr.len_;
+ if (adr.len_ >= sizeof(int32_t) && adr.rmode_ != no_reloc) {
+ pc_ -= sizeof(int32_t); // pc_ must be *at* disp32
+ RecordRelocInfo(adr.rmode_);
+ pc_ += sizeof(int32_t);
+ }
+}
+
+
+void Assembler::emit_farith(int b1, int b2, int i) {
+ ASSERT(is_uint8(b1) && is_uint8(b2)); // wrong opcode
+ ASSERT(0 <= i && i < 8); // illegal stack offset
+ EMIT(b1);
+ EMIT(b2 + i);
+}
+
+
+void Assembler::RecordRelocInfo(RelocMode rmode, intptr_t data) {
+ ASSERT(rmode != no_reloc);
+ RelocInfo rinfo(pc_, rmode, data);
+ reloc_info_writer.Write(&rinfo);
+}
+
+
+} } // namespace v8::internal
diff --git a/src/assembler-ia32.h b/src/assembler-ia32.h
new file mode 100644
index 0000000..e41b9c7
--- /dev/null
+++ b/src/assembler-ia32.h
@@ -0,0 +1,749 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license has been
+// modified significantly by Google Inc.
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+
+// A light-weight IA32 Assembler.
+
+#ifndef V8_ASSEMBLER_IA32_H_
+#define V8_ASSEMBLER_IA32_H_
+
+namespace v8 { namespace internal {
+
+// CPU Registers.
+//
+// 1) We would prefer to use an enum, but enum values are assignment-
+// compatible with int, which has caused code-generation bugs.
+//
+// 2) We would prefer to use a class instead of a struct but we don't like
+// the register initialization to depend on the particular initialization
+// order (which appears to be different on OS X, Linux, and Windows for the
+// installed versions of C++ we tried). Using a struct permits C-style
+// "initialization". Also, the Register objects cannot be const as this
+// forces initialization stubs in MSVC, making us dependent on initialization
+// order.
+//
+// 3) By not using an enum, we are possibly preventing the compiler from
+// doing certain constant folds, which may significantly reduce the
+// code generated for some assembly instructions (because they boil down
+// to a few constants). If this is a problem, we could change the code
+// such that we use an enum in optimized mode, and the struct in debug
+// mode. This way we get the compile-time error checking in debug mode
+// and best performance in optimized code.
+//
+struct Register {
+ bool is_valid() const { return 0 <= code_ && code_ < 8; }
+ bool is(Register reg) const { return code_ == reg.code_; }
+ int code() const {
+ ASSERT(is_valid());
+ return code_;
+ }
+ int bit() const {
+ ASSERT(is_valid());
+ return 1 << code_;
+ }
+
+ // (unfortunately we can't make this private in a struct)
+ int code_;
+};
+
+extern Register eax;
+extern Register ecx;
+extern Register edx;
+extern Register ebx;
+extern Register esp;
+extern Register ebp;
+extern Register esi;
+extern Register edi;
+extern Register no_reg;
+
+
+struct XMMRegister {
+ // Currently only xmm0 and xmm1 are considered valid.
+ bool is_valid() const { return 0 <= code_ && code_ < 2; }
+ int code() const {
+ ASSERT(is_valid());
+ return code_;
+ }
+
+ int code_;
+};
+
+extern XMMRegister xmm0;
+extern XMMRegister xmm1;
+extern XMMRegister xmm2;
+extern XMMRegister xmm3;
+extern XMMRegister xmm4;
+extern XMMRegister xmm5;
+extern XMMRegister xmm6;
+extern XMMRegister xmm7;
+
+enum Condition {
+ // any value < 0 is considered no_condition
+ no_condition = -1,
+
+ overflow = 0,
+ no_overflow = 1,
+ below = 2,
+ above_equal = 3,
+ equal = 4,
+ not_equal = 5,
+ below_equal = 6,
+ above = 7,
+ sign = 8,
+ not_sign = 9,
+ parity_even = 10,
+ parity_odd = 11,
+ less = 12,
+ greater_equal = 13,
+ less_equal = 14,
+ greater = 15,
+
+ // aliases
+ zero = equal,
+ not_zero = not_equal,
+ negative = sign,
+ positive = not_sign
+};
+
+
+// Returns the equivalent of !cc.
+// Negation of the default no_condition (-1) results in a non-default
+// no_condition value (-2). As long as tests for no_condition check
+// for condition < 0, this will work as expected.
+inline Condition NegateCondition(Condition cc);
+
+// Corresponds to transposing the operands of a comparison.
+inline Condition ReverseCondition(Condition cc) {
+ switch (cc) {
+ case below:
+ return above;
+ case above:
+ return below;
+ case above_equal:
+ return below_equal;
+ case below_equal:
+ return above_equal;
+ case less:
+ return greater;
+ case greater:
+ return less;
+ case greater_equal:
+ return less_equal;
+ case less_equal:
+ return greater_equal;
+ default:
+ return cc;
+ };
+}
+
+enum Hint {
+ no_hint = 0,
+ not_taken = 0x2e,
+ taken = 0x3e
+};
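+
+// For example (a sketch): j(less, &L, taken) emits the 0x3E hint prefix byte
+// before the conditional-jump opcode when FLAG_emit_branch_hints is set.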
+
+
+// -----------------------------------------------------------------------------
+// Machine instruction Immediates
+
+class Immediate BASE_EMBEDDED {
+ public:
+ inline explicit Immediate(int x);
+ inline explicit Immediate(const char* s);
+ inline explicit Immediate(const ExternalReference& ext);
+ inline explicit Immediate(Handle<Object> handle);
+ inline explicit Immediate(Smi* value);
+
+ bool is_zero() const { return x_ == 0 && rmode_ == no_reloc; }
+ bool is_int8() const { return -128 <= x_ && x_ < 128 && rmode_ == no_reloc; }
+
+ private:
+ int x_;
+ RelocMode rmode_;
+
+ friend class Assembler;
+};
+
+
+// -----------------------------------------------------------------------------
+// Machine instruction Operands
+
+enum ScaleFactor {
+ times_1 = 0,
+ times_2 = 1,
+ times_4 = 2,
+ times_8 = 3
+};
+
+
+class Operand BASE_EMBEDDED {
+ public:
+ // reg
+ INLINE(explicit Operand(Register reg));
+
+ // [disp/r]
+ INLINE(explicit Operand(int32_t disp, RelocMode rmode));
+ // A displacement-only operand must always be relocated.
+
+ // [base + disp/r]
+ explicit Operand(Register base, int32_t disp, RelocMode rmode = no_reloc);
+
+ // [base + index*scale + disp/r]
+ explicit Operand(Register base,
+ Register index,
+ ScaleFactor scale,
+ int32_t disp,
+ RelocMode rmode = no_reloc);
+
+ // [index*scale + disp/r]
+ explicit Operand(Register index,
+ ScaleFactor scale,
+ int32_t disp,
+ RelocMode rmode = no_reloc);
+
+ static Operand StaticVariable(const ExternalReference& ext) {
+ return Operand(reinterpret_cast<int32_t>(ext.address()),
+ external_reference);
+ }
+
+ static Operand StaticArray(Register index,
+ ScaleFactor scale,
+ const ExternalReference& arr) {
+ return Operand(index, scale, reinterpret_cast<int32_t>(arr.address()),
+ external_reference);
+ }
+
+ // Returns true if this Operand is a wrapper for the specified register.
+ bool is_reg(Register reg) const;
+
+ private:
+ // Mutable because reg in ModR/M byte is set by Assembler via set_reg().
+ mutable byte buf_[6];
+ // The number of bytes in buf_.
+ unsigned int len_;
+ // Only valid if len_ > 4.
+ RelocMode rmode_;
+
+ inline void set_modrm(int mod, // reg == 0
+ Register rm);
+ inline void set_sib(ScaleFactor scale, Register index, Register base);
+ inline void set_disp8(int8_t disp);
+ inline void set_dispr(int32_t disp, RelocMode rmode);
+ inline void set_reg(Register reg) const;
+
+ friend class Assembler;
+};
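+
+// Usage sketches (ext_ref stands for a hypothetical ExternalReference):
+//   Operand(ebp, 8)                    // [ebp + 8]
+//   Operand(ebx, ecx, times_4, 0)      // [ebx + ecx*4]
+//   Operand::StaticVariable(ext_ref)   // absolute address, always relocated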
+
+
+// CpuFeatures keeps track of which features are supported by the target CPU.
+// Supported features must be enabled by a Scope before use.
+// Example:
+// if (CpuFeatures::IsSupported(SSE2)) {
+// CpuFeatures::Scope fscope(SSE2);
+// // Generate SSE2 floating point code.
+// } else {
+// // Generate standard x87 floating point code.
+// }
+class CpuFeatures : public AllStatic {
+ public:
+ // Feature flags bit positions. They are mostly based on the CPUID spec.
+ // (We assign CPUID itself to one of the currently reserved bits --
+ // feel free to change this if needed.)
+ enum Feature { SSE2 = 26, CMOV = 15, RDTSC = 4, CPUID = 10 };
+ // Detect features of the target CPU. Set safe defaults if the serializer
+ // is enabled (snapshots must be portable).
+ static void Probe();
+ // Check whether a feature is supported by the target CPU.
+ static bool IsSupported(Feature f) { return supported_ & (1 << f); }
+ // Check whether a feature is currently enabled.
+ static bool IsEnabled(Feature f) { return enabled_ & (1 << f); }
+ // Enable a specified feature within a scope.
+ class Scope BASE_EMBEDDED {
+#ifdef DEBUG
+ public:
+ explicit Scope(Feature f) {
+ ASSERT(CpuFeatures::IsSupported(f));
+ old_enabled_ = CpuFeatures::enabled_;
+ CpuFeatures::enabled_ |= (1 << f);
+ }
+ ~Scope() { CpuFeatures::enabled_ = old_enabled_; }
+ private:
+ uint32_t old_enabled_;
+#else
+ public:
+ explicit Scope(Feature f) {}
+#endif
+ };
+ private:
+ static uint32_t supported_;
+ static uint32_t enabled_;
+};
+
+
+class Assembler : public Malloced {
+ private:
+ // The relocation writer's position is kGap bytes below the end of
+ // the generated instructions. This leaves enough space for the
+ // longest possible ia32 instruction (17 bytes as of 9/26/06) and
+ // allows for a single, fast space check per instruction.
+ static const int kGap = 32;
+
+ public:
+ // Create an assembler. Instructions and relocation information are emitted
+ // into a buffer, with the instructions starting from the beginning and the
+ // relocation information starting from the end of the buffer. See CodeDesc
+ // for a detailed comment on the layout (globals.h).
+ //
+ // If the provided buffer is NULL, the assembler allocates and grows its own
+ // buffer, and buffer_size determines the initial buffer size. The buffer is
+ // owned by the assembler and deallocated upon destruction of the assembler.
+ //
+ // If the provided buffer is not NULL, the assembler uses the provided buffer
+ // for code generation and assumes its size to be buffer_size. If the buffer
+ // is too small, a fatal error occurs. No deallocation of the buffer is done
+ // upon destruction of the assembler.
+ Assembler(void* buffer, int buffer_size);
+ ~Assembler();
+
+ // GetCode emits any pending (non-emitted) code and fills the descriptor
+ // desc. GetCode() is idempotent; it returns the same result if no other
+ // Assembler functions are invoked in between GetCode() calls.
+ void GetCode(CodeDesc* desc);
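+ //
+ // A minimal usage sketch (assuming the surrounding v8::internal setup):
+ //   Assembler masm(NULL, Assembler::kMinimalBufferSize);
+ //   masm.mov(eax, 42);
+ //   masm.ret(0);
+ //   CodeDesc desc;
+ //   masm.GetCode(&desc);  // desc now describes instructions and reloc info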
+
+ // Read/Modify the code target in the branch/call instruction at pc.
+ inline static Address target_address_at(Address pc);
+ inline static void set_target_address_at(Address pc, Address target);
+
+ // Distance between the address of the code target in the call instruction
+ // and the return address
+ static const int kTargetAddrToReturnAddrDist = kPointerSize;
+
+
+ // ---------------------------------------------------------------------------
+ // Code generation
+ //
+ // - function names correspond one-to-one to ia32 instruction mnemonics
+ // - unless specified otherwise, instructions operate on 32-bit operands
+ // - instructions on 8-bit (byte) operands/registers have a trailing '_b'
+ // - instructions on 16-bit (word) operands/registers have a trailing '_w'
+ // - naming conflicts with C++ keywords are resolved via a trailing '_'
+
+ // NOTE ON INTERFACE: Currently, the interface is not very consistent
+ // in the sense that some operations (e.g. mov()) can be called in more
+ // than one way to generate the same instruction: The Register argument
+ // can in some cases be replaced with an Operand(Register) argument.
+ // This should be cleaned up and made more orthogonal. The question
+ // is: should we always use Operands instead of Registers where an
+ // Operand is possible, or should we have a Register (overloaded) form
+ // instead? We must be careful to make sure that the selected instruction
+ // is obvious from the parameters to avoid hard-to-find code generation
+ // bugs.
+
+ // Insert the smallest number of nop instructions
+ // possible to align the pc offset to a multiple
+ // of m. m must be a power of 2.
+ void Align(int m);
+
+ // Stack
+ void pushad();
+ void popad();
+
+ void pushfd();
+ void popfd();
+
+ void push(const Immediate& x);
+ void push(Register src);
+ void push(const Operand& src);
+
+ void pop(Register dst);
+ void pop(const Operand& dst);
+
+ // Moves
+ void mov_b(Register dst, const Operand& src);
+ void mov_b(const Operand& dst, int8_t imm8);
+ void mov_b(const Operand& dst, Register src);
+
+ void mov_w(Register dst, const Operand& src);
+ void mov_w(const Operand& dst, Register src);
+
+ void mov(Register dst, int32_t imm32);
+ void mov(Register dst, Handle<Object> handle);
+ void mov(Register dst, const Operand& src);
+ void mov(const Operand& dst, const Immediate& x);
+ void mov(const Operand& dst, Handle<Object> handle);
+ void mov(const Operand& dst, Register src);
+
+ void movsx_b(Register dst, const Operand& src);
+
+ void movsx_w(Register dst, const Operand& src);
+
+ void movzx_b(Register dst, const Operand& src);
+
+ void movzx_w(Register dst, const Operand& src);
+
+ // Conditional moves
+ void cmov(Condition cc, Register dst, int32_t imm32);
+ void cmov(Condition cc, Register dst, Handle<Object> handle);
+ void cmov(Condition cc, Register dst, const Operand& src);
+
+ // Arithmetics
+ void adc(Register dst, int32_t imm32);
+ void adc(Register dst, const Operand& src);
+
+ void add(Register dst, const Operand& src);
+ void add(const Operand& dst, const Immediate& x);
+
+ void and_(Register dst, int32_t imm32);
+ void and_(Register dst, const Operand& src);
+ void and_(const Operand& src, Register dst);
+ void and_(const Operand& dst, const Immediate& x);
+
+ void cmp(Register reg, int32_t imm32);
+ void cmp(Register reg, Handle<Object> handle);
+ void cmp(Register reg, const Operand& op);
+ void cmp(const Operand& op, const Immediate& imm);
+
+ void dec_b(Register dst);
+
+ void dec(Register dst);
+ void dec(const Operand& dst);
+
+ void cdq();
+
+ void idiv(Register src);
+
+ void imul(Register dst, const Operand& src);
+ void imul(Register dst, Register src, int32_t imm32);
+
+ void inc(Register dst);
+ void inc(const Operand& dst);
+
+ void lea(Register dst, const Operand& src);
+
+ void mul(Register src);
+
+ void neg(Register dst);
+
+ void not_(Register dst);
+
+ void or_(Register dst, int32_t imm32);
+ void or_(Register dst, const Operand& src);
+ void or_(const Operand& dst, Register src);
+ void or_(const Operand& dst, const Immediate& x);
+
+ void rcl(Register dst, uint8_t imm8);
+
+ void sar(Register dst, uint8_t imm8);
+ void sar(Register dst);
+
+ void sbb(Register dst, const Operand& src);
+
+ void shld(Register dst, const Operand& src);
+
+ void shl(Register dst, uint8_t imm8);
+ void shl(Register dst);
+
+ void shrd(Register dst, const Operand& src);
+
+ void shr(Register dst, uint8_t imm8);
+ void shr(Register dst);
+
+ void sub(const Operand& dst, const Immediate& x);
+ void sub(Register dst, const Operand& src);
+ void sub(const Operand& dst, Register src);
+
+ void test(Register reg, const Immediate& imm);
+ void test(Register reg, const Operand& op);
+ void test(const Operand& op, const Immediate& imm);
+
+ void xor_(Register dst, int32_t imm32);
+ void xor_(Register dst, const Operand& src);
+ void xor_(const Operand& src, Register dst);
+ void xor_(const Operand& dst, const Immediate& x);
+
+ // Bit operations.
+ void bts(const Operand& dst, Register src);
+
+ // Miscellaneous
+ void hlt();
+ void int3();
+ void nop();
+ void rdtsc();
+ void ret(int imm16);
+ void leave();
+
+ // Label operations & relative jumps (PPUM Appendix D)
+ //
+ // Takes a branch opcode (cc) and a label (L) and generates
+ // either a backward branch or a forward branch and links it
+ // to the label fixup chain. Usage:
+ //
+ // Label L; // unbound label
+ // j(cc, &L); // forward branch to unbound label
+ // bind(&L); // bind label to the current pc
+ // j(cc, &L); // backward branch to bound label
+ // bind(&L); // illegal: a label may be bound only once
+ //
+ // Note: The same Label can be used for forward and backward branches
+ // but it may be bound only once.
+
+ void bind(Label* L); // binds an unbound label L to the current code position
+
+ // Calls
+ void call(Label* L);
+ void call(byte* entry, RelocMode rmode);
+ void call(const Operand& adr);
+ void call(Handle<Code> code, RelocMode rmode);
+
+ // Jumps
+ void jmp(Label* L); // unconditional jump to L
+ void jmp(byte* entry, RelocMode rmode);
+ void jmp(const Operand& adr);
+ void jmp(Handle<Code> code, RelocMode rmode);
+
+ // Conditional jumps
+ void j(Condition cc, Label* L, Hint hint = no_hint);
+ void j(Condition cc, byte* entry, RelocMode rmode, Hint hint = no_hint);
+ void j(Condition cc, Handle<Code> code, Hint hint = no_hint);
+
+ // Floating-point operations
+ void fld(int i);
+
+ void fld1();
+ void fldz();
+
+ void fld_s(const Operand& adr);
+ void fld_d(const Operand& adr);
+
+ void fstp_s(const Operand& adr);
+ void fstp_d(const Operand& adr);
+
+ void fild_s(const Operand& adr);
+ void fild_d(const Operand& adr);
+
+ void fist_s(const Operand& adr);
+
+ void fistp_s(const Operand& adr);
+ void fistp_d(const Operand& adr);
+
+ void fabs();
+ void fchs();
+
+ void fadd(int i);
+ void fsub(int i);
+ void fmul(int i);
+ void fdiv(int i);
+
+ void fisub_s(const Operand& adr);
+
+ void faddp(int i = 1);
+ void fsubp(int i = 1);
+ void fsubrp(int i = 1);
+ void fmulp(int i = 1);
+ void fdivp(int i = 1);
+ void fprem();
+ void fprem1();
+
+ void fxch(int i = 1);
+ void fincstp();
+ void ffree(int i = 0);
+
+ void ftst();
+ void fucomp(int i);
+ void fucompp();
+ void fcompp();
+ void fnstsw_ax();
+ void fwait();
+
+ void frndint();
+
+ void sahf();
+
+ void cpuid();
+
+ // SSE2 instructions
+ void cvttss2si(Register dst, const Operand& src);
+ void cvttsd2si(Register dst, const Operand& src);
+
+ void cvtsi2sd(XMMRegister dst, const Operand& src);
+
+ void addsd(XMMRegister dst, XMMRegister src);
+ void subsd(XMMRegister dst, XMMRegister src);
+ void mulsd(XMMRegister dst, XMMRegister src);
+ void divsd(XMMRegister dst, XMMRegister src);
+
+ // Use either movsd or movlpd.
+ void movdbl(XMMRegister dst, const Operand& src);
+ void movdbl(const Operand& dst, XMMRegister src);
+
+ // Debugging
+ void Print();
+
+ // Check the code size generated from label to here.
+ int SizeOfCodeGeneratedSince(Label* l) { return pc_offset() - l->pos(); }
+
+ // Mark address of the ExitJSFrame code.
+ void RecordJSReturn();
+
+ // Record a comment relocation entry that can be used by a disassembler.
+ // Use --debug_code to enable.
+ void RecordComment(const char* msg);
+
+ void RecordPosition(int pos);
+ void RecordStatementPosition(int pos);
+
+ int pc_offset() const { return pc_ - buffer_; }
+ int last_position() const { return last_position_; }
+ bool last_position_is_statement() const {
+ return last_position_is_statement_;
+ }
+
+ // Check whether there are fewer than kGap bytes available in the buffer.
+ // If so, we need to grow the buffer before emitting
+ // an instruction or relocation information.
+ inline bool overflow() const { return pc_ >= reloc_info_writer.pos() - kGap; }
+
+ // Get the number of bytes available in the buffer.
+ inline int available_space() const { return reloc_info_writer.pos() - pc_; }
+
+ // Avoid overflows for displacements etc.
+ static const int kMaximalBufferSize = 512*MB;
+ static const int kMinimalBufferSize = 4*KB;
+
+ protected:
+ void movsd(XMMRegister dst, const Operand& src);
+ void movsd(const Operand& dst, XMMRegister src);
+
+ void emit_sse_operand(XMMRegister reg, const Operand& adr);
+ void emit_sse_operand(XMMRegister dst, XMMRegister src);
+
+
+ private:
+ // Code buffer:
+ // The buffer into which code and relocation info are generated.
+ byte* buffer_;
+ int buffer_size_;
+ // True if the assembler owns the buffer, false if buffer is external.
+ bool own_buffer_;
+
+ // code generation
+ byte* pc_; // the program counter; moves forward
+ RelocInfoWriter reloc_info_writer;
+
+ // push-pop elimination
+ byte* last_pc_;
+
+ // Jump-to-jump elimination:
+ // The last label to be bound to binding_pos_, if unbound.
+ Label unbound_label_;
+ // The position to which unbound_label_ has to be bound, if present.
+ int binding_pos_;
+ // The position before which jumps cannot be eliminated.
+ int last_bound_pos_;
+
+ // source position information
+ int last_position_;
+ bool last_position_is_statement_;
+
+ byte* addr_at(int pos) { return buffer_ + pos; }
+ byte byte_at(int pos) { return buffer_[pos]; }
+ uint32_t long_at(int pos) {
+ return *reinterpret_cast<uint32_t*>(addr_at(pos));
+ }
+ void long_at_put(int pos, uint32_t x) {
+ *reinterpret_cast<uint32_t*>(addr_at(pos)) = x;
+ }
+
+ // code emission
+ void GrowBuffer();
+ inline void emit(uint32_t x);
+ inline void emit(Handle<Object> handle);
+ inline void emit(uint32_t x, RelocMode rmode);
+ inline void emit(const Immediate& x);
+
+ // instruction generation
+ void emit_arith_b(int op1, int op2, Register dst, int imm8);
+
+ // Emit a basic arithmetic instruction (i.e. first byte of the family is 0x81)
+ // with a given destination expression and an immediate operand. It attempts
+ // to use the shortest encoding possible.
+ // sel specifies the /n in the modrm byte (see the Intel PRM).
+ void emit_arith(int sel, Operand dst, const Immediate& x);
+
+ void emit_operand(Register reg, const Operand& adr);
+ void emit_operand(const Operand& adr, Register reg);
+
+ void emit_farith(int b1, int b2, int i);
+
+ // labels
+ void print(Label* L);
+ void bind_to(Label* L, int pos);
+ void link_to(Label* L, Label* appendix);
+
+ // record reloc info for current pc_
+ void RecordRelocInfo(RelocMode rmode, intptr_t data = 0);
+
+ friend class CodePatcher;
+ friend class EnsureSpace;
+};
+
+
+// Helper class that ensures that there is enough space for generating
+// instructions and relocation information. The constructor makes
+// sure that there is enough space and (in debug mode) the destructor
+// checks that we did not generate too much.
+class EnsureSpace BASE_EMBEDDED {
+ public:
+ explicit EnsureSpace(Assembler* assembler) : assembler_(assembler) {
+ if (assembler_->overflow()) assembler_->GrowBuffer();
+#ifdef DEBUG
+ space_before_ = assembler_->available_space();
+#endif
+ }
+
+#ifdef DEBUG
+ ~EnsureSpace() {
+ int bytes_generated = space_before_ - assembler_->available_space();
+ ASSERT(bytes_generated < assembler_->kGap);
+ }
+#endif
+
+ private:
+ Assembler* assembler_;
+#ifdef DEBUG
+ int space_before_;
+#endif
+};
+
+} } // namespace v8::internal
+
+#endif // V8_ASSEMBLER_IA32_H_
diff --git a/src/assembler.cc b/src/assembler.cc
new file mode 100644
index 0000000..d407007
--- /dev/null
+++ b/src/assembler.cc
@@ -0,0 +1,574 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license has been
+// modified significantly by Google Inc.
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+
+#include "v8.h"
+
+#include "arguments.h"
+#include "execution.h"
+#include "ic-inl.h"
+#include "factory.h"
+#include "runtime.h"
+#include "serialize.h"
+#include "stub-cache.h"
+
+namespace v8 { namespace internal {
+
+
+// -----------------------------------------------------------------------------
+// Implementation of Label
+
+int Label::pos() const {
+ if (pos_ < 0) return -pos_ - 1;
+ if (pos_ > 0) return pos_ - 1;
+ UNREACHABLE();
+ return 0;
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of RelocInfoWriter and RelocIterator
+//
+// Encoding
+//
+// The most common modes are given single-byte encodings. Also, it is
+// easy to identify the type of reloc info and skip unwanted modes in
+// an iteration.
+//
+// The encoding relies on the fact that there are fewer than 14
+// different relocation modes.
+//
+// embedded_object: [6 bits pc delta] 00
+//
+// code_target: [6 bits pc delta] 01
+//
+// position: [6 bits pc delta] 10,
+// [7 bits signed data delta] 0
+//
+// statement_position: [6 bits pc delta] 10,
+// [7 bits signed data delta] 1
+//
+// any nondata mode: 00 [4 bits rmode] 11,
+// 00 [6 bits pc delta]
+//
+// pc-jump: 00 1111 11,
+// 00 [6 bits pc delta]
+//
+// pc-jump: 01 1111 11,
+// (variable length) 7 - 26 bit pc delta, written in chunks of 7
+// bits, the lowest 7 bits written first.
+//
+// data-jump + pos: 00 1110 11,
+// signed int, lowest byte written first
+//
+// data-jump + st.pos: 01 1110 11,
+// signed int, lowest byte written first
+//
+// data-jump + comm.: 10 1110 11,
+// signed int, lowest byte written first
+//
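+// A worked example (derived from the tag constants below): an embedded
+// object with a pc delta of 12 is written as the single tagged byte
+// (12 << 2) | 00 = 0x30, while a code target with a pc delta of 5 becomes
+// (5 << 2) | 01 = 0x15. Deltas that do not fit in the 6 small bits are
+// preceded by a variable length pc-jump as described above.
+//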
+const int kMaxRelocModes = 14;
+
+const int kTagBits = 2;
+const int kTagMask = (1 << kTagBits) - 1;
+const int kExtraTagBits = 4;
+const int kPositionTypeTagBits = 1;
+const int kSmallDataBits = kBitsPerByte - kPositionTypeTagBits;
+
+const int kEmbeddedObjectTag = 0;
+const int kCodeTargetTag = 1;
+const int kPositionTag = 2;
+const int kDefaultTag = 3;
+
+const int kPCJumpTag = (1 << kExtraTagBits) - 1;
+
+const int kSmallPCDeltaBits = kBitsPerByte - kTagBits;
+const int kSmallPCDeltaMask = (1 << kSmallPCDeltaBits) - 1;
+
+const int kVariableLengthPCJumpTopTag = 1;
+const int kChunkBits = 7;
+const int kChunkMask = (1 << kChunkBits) - 1;
+const int kLastChunkTagBits = 1;
+const int kLastChunkTagMask = 1;
+const int kLastChunkTag = 1;
+
+
+const int kDataJumpTag = kPCJumpTag - 1;
+
+const int kNonstatementPositionTag = 0;
+const int kStatementPositionTag = 1;
+const int kCommentTag = 2;
+
+
+uint32_t RelocInfoWriter::WriteVariableLengthPCJump(uint32_t pc_delta) {
+ // Return the pc_delta if it fits in kSmallPCDeltaBits bits.
+ // Otherwise write a variable length PC jump for the bits that do
+ // not fit in the kSmallPCDeltaBits bits.
+ if (is_uintn(pc_delta, kSmallPCDeltaBits)) return pc_delta;
+ WriteExtraTag(kPCJumpTag, kVariableLengthPCJumpTopTag);
+ uint32_t pc_jump = pc_delta >> kSmallPCDeltaBits;
+ ASSERT(pc_jump > 0);
+ // Write kChunkBits size chunks of the pc_jump.
+ for (; pc_jump > 0; pc_jump = pc_jump >> kChunkBits) {
+ byte b = pc_jump & kChunkMask;
+ *--pos_ = b << kLastChunkTagBits;
+ }
+ // Tag the last chunk so it can be identified.
+ *pos_ = *pos_ | kLastChunkTag;
+ // Return the remaining kSmallPCDeltaBits of the pc_delta.
+ return pc_delta & kSmallPCDeltaMask;
+}
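+
+// For example (following the encoding above): a pc_delta of 4660 (0x1234)
+// does not fit in the 6 small bits. The writer emits the extra tag byte,
+// then a single chunk byte ((4660 >> 6) << 1) | 1 = 0x91, and returns the
+// remaining small delta 4660 & 0x3F = 52 for the caller to tag.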
+
+
+void RelocInfoWriter::WriteTaggedPC(uint32_t pc_delta, int tag) {
+ // Write a byte of tagged pc-delta, possibly preceded by var. length pc-jump.
+ pc_delta = WriteVariableLengthPCJump(pc_delta);
+ *--pos_ = pc_delta << kTagBits | tag;
+}
+
+
+void RelocInfoWriter::WriteTaggedData(int32_t data_delta, int tag) {
+ *--pos_ = data_delta << kPositionTypeTagBits | tag;
+}
+
+
+void RelocInfoWriter::WriteExtraTag(int extra_tag, int top_tag) {
+ *--pos_ = top_tag << (kTagBits + kExtraTagBits) |
+ extra_tag << kTagBits |
+ kDefaultTag;
+}
+
+
+void RelocInfoWriter::WriteExtraTaggedPC(uint32_t pc_delta, int extra_tag) {
+ // Write two-byte tagged pc-delta, possibly preceded by var. length pc-jump.
+ pc_delta = WriteVariableLengthPCJump(pc_delta);
+ WriteExtraTag(extra_tag, 0);
+ *--pos_ = pc_delta;
+}
+
+
+void RelocInfoWriter::WriteExtraTaggedData(int32_t data_delta, int top_tag) {
+ WriteExtraTag(kDataJumpTag, top_tag);
+ for (int i = 0; i < kIntSize; i++) {
+ *--pos_ = data_delta;
+ data_delta = ArithmeticShiftRight(data_delta, kBitsPerByte);
+ }
+}
+
+
+void RelocInfoWriter::Write(const RelocInfo* rinfo) {
+#ifdef DEBUG
+ byte* begin_pos = pos_;
+#endif
+ Counters::reloc_info_count.Increment();
+ ASSERT(rinfo->pc() - last_pc_ >= 0);
+ ASSERT(reloc_mode_count < kMaxRelocModes);
+ // Use unsigned delta-encoding for pc.
+ uint32_t pc_delta = rinfo->pc() - last_pc_;
+ RelocMode rmode = rinfo->rmode();
+
+ // The two most common modes are given small tags, and usually fit in a byte.
+ if (rmode == embedded_object) {
+ WriteTaggedPC(pc_delta, kEmbeddedObjectTag);
+ } else if (rmode == code_target) {
+ WriteTaggedPC(pc_delta, kCodeTargetTag);
+ } else if (rmode == position || rmode == statement_position) {
+ // Use signed delta-encoding for data.
+ int32_t data_delta = rinfo->data() - last_data_;
+ int pos_type_tag = rmode == position ? kNonstatementPositionTag
+ : kStatementPositionTag;
+ // Check if data is small enough to fit in a tagged byte.
+ if (is_intn(data_delta, kSmallDataBits)) {
+ WriteTaggedPC(pc_delta, kPositionTag);
+ WriteTaggedData(data_delta, pos_type_tag);
+ last_data_ = rinfo->data();
+ } else {
+ // Otherwise, use costly encoding.
+ WriteExtraTaggedPC(pc_delta, kPCJumpTag);
+ WriteExtraTaggedData(data_delta, pos_type_tag);
+ last_data_ = rinfo->data();
+ }
+ } else if (rmode == comment) {
+ // Comments are normally not generated, so we use the costly encoding.
+ WriteExtraTaggedPC(pc_delta, kPCJumpTag);
+ WriteExtraTaggedData(rinfo->data() - last_data_, kCommentTag);
+ last_data_ = rinfo->data();
+ } else {
+ // For all other modes we simply use the mode as the extra tag.
+ // None of these modes need a data component.
+ ASSERT(rmode < kPCJumpTag && rmode < kDataJumpTag);
+ WriteExtraTaggedPC(pc_delta, rmode);
+ }
+ last_pc_ = rinfo->pc();
+#ifdef DEBUG
+ ASSERT(begin_pos - pos_ <= kMaxSize);
+#endif
+}
+
+
+inline int RelocIterator::AdvanceGetTag() {
+ return *--pos_ & kTagMask;
+}
+
+
+inline int RelocIterator::GetExtraTag() {
+ return (*pos_ >> kTagBits) & ((1 << kExtraTagBits) - 1);
+}
+
+
+inline int RelocIterator::GetTopTag() {
+ return *pos_ >> (kTagBits + kExtraTagBits);
+}
+
+
+inline void RelocIterator::ReadTaggedPC() {
+ rinfo_.pc_ += *pos_ >> kTagBits;
+}
+
+
+inline void RelocIterator::AdvanceReadPC() {
+ rinfo_.pc_ += *--pos_;
+}
+
+
+void RelocIterator::AdvanceReadData() {
+ int32_t x = 0;
+ for (int i = 0; i < kIntSize; i++) {
+ x |= *--pos_ << i * kBitsPerByte;
+ }
+ rinfo_.data_ += x;
+}
+
+
+void RelocIterator::AdvanceReadVariableLengthPCJump() {
+ // Read the 32-kSmallPCDeltaBits most significant bits of the
+ // pc jump in kChunkBits bit chunks and shift them into place.
+ // Stop when the last chunk is encountered.
+ uint32_t pc_jump = 0;
+ for (int i = 0; i < kIntSize; i++) {
+ byte pc_jump_part = *--pos_;
+ pc_jump |= (pc_jump_part >> kLastChunkTagBits) << i * kChunkBits;
+ if ((pc_jump_part & kLastChunkTagMask) == 1) break;
+ }
+ // The least significant kSmallPCDeltaBits bits will be added
+ // later.
+ rinfo_.pc_ += pc_jump << kSmallPCDeltaBits;
+}
+
+
+inline int RelocIterator::GetPositionTypeTag() {
+ return *pos_ & ((1 << kPositionTypeTagBits) - 1);
+}
+
+
+inline void RelocIterator::ReadTaggedData() {
+ int8_t signed_b = *pos_;
+ rinfo_.data_ += ArithmeticShiftRight(signed_b, kPositionTypeTagBits);
+}
+
+
+inline RelocMode RelocIterator::DebugInfoModeFromTag(int tag) {
+ if (tag == kStatementPositionTag) {
+ return statement_position;
+ } else if (tag == kNonstatementPositionTag) {
+ return position;
+ } else {
+ ASSERT(tag == kCommentTag);
+ return comment;
+ }
+}
+
+
+void RelocIterator::next() {
+ ASSERT(!done());
+ // Basically, do the opposite of RelocInfoWriter::Write.
+ // Reading of data is as far as possible avoided for unwanted modes,
+ // but we must always update the pc.
+ //
+ // We exit this loop by returning when we find a mode we want.
+ while (pos_ > end_) {
+ int tag = AdvanceGetTag();
+ if (tag == kEmbeddedObjectTag) {
+ ReadTaggedPC();
+ if (SetMode(embedded_object)) return;
+ } else if (tag == kCodeTargetTag) {
+ ReadTaggedPC();
+ if (*(reinterpret_cast<int**>(rinfo_.pc())) ==
+ reinterpret_cast<int*>(0x61)) {
+ tag = 0;
+ }
+ if (SetMode(code_target)) return;
+ } else if (tag == kPositionTag) {
+ ReadTaggedPC();
+ Advance();
+ // Check if we want source positions.
+ if (mode_mask_ & RelocInfo::kPositionMask) {
+ // Check if we want this type of source position.
+ if (SetMode(DebugInfoModeFromTag(GetPositionTypeTag()))) {
+ // Finally read the data before returning.
+ ReadTaggedData();
+ return;
+ }
+ }
+ } else {
+ ASSERT(tag == kDefaultTag);
+ int extra_tag = GetExtraTag();
+ if (extra_tag == kPCJumpTag) {
+ int top_tag = GetTopTag();
+ if (top_tag == kVariableLengthPCJumpTopTag) {
+ AdvanceReadVariableLengthPCJump();
+ } else {
+ AdvanceReadPC();
+ }
+ } else if (extra_tag == kDataJumpTag) {
+ // Check if we want debug modes (the only ones with data).
+ if (mode_mask_ & RelocInfo::kDebugMask) {
+ int top_tag = GetTopTag();
+ AdvanceReadData();
+ if (SetMode(DebugInfoModeFromTag(top_tag))) return;
+ } else {
+ // Otherwise, just skip over the data.
+ Advance(kIntSize);
+ }
+ } else {
+ AdvanceReadPC();
+ if (SetMode(static_cast<RelocMode>(extra_tag))) return;
+ }
+ }
+ }
+ done_ = true;
+}
+
+
+RelocIterator::RelocIterator(Code* code, int mode_mask) {
+ rinfo_.pc_ = code->instruction_start();
+ rinfo_.data_ = 0;
+ // relocation info is read backwards
+ pos_ = code->relocation_start() + code->relocation_size();
+ end_ = code->relocation_start();
+ done_ = false;
+ mode_mask_ = mode_mask;
+ if (mode_mask_ == 0) pos_ = end_;
+ next();
+}
+
+
+RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask) {
+ rinfo_.pc_ = desc.buffer;
+ rinfo_.data_ = 0;
+ // relocation info is read backwards
+ pos_ = desc.buffer + desc.buffer_size;
+ end_ = pos_ - desc.reloc_size;
+ done_ = false;
+ mode_mask_ = mode_mask;
+ if (mode_mask_ == 0) pos_ = end_;
+ next();
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of RelocInfo
+
+
+#ifdef DEBUG
+const char* RelocInfo::RelocModeName(RelocMode rmode) {
+ switch (rmode) {
+ case no_reloc:
+ return "no reloc";
+ case embedded_object:
+ return "embedded object";
+ case embedded_string:
+ return "embedded string";
+ case js_construct_call:
+ return "code target (js construct call)";
+ case exit_js_frame:
+ return "code target (exit js frame)";
+ case code_target_context:
+ return "code target (context)";
+ case code_target:
+ return "code target";
+ case runtime_entry:
+ return "runtime entry";
+ case js_return:
+ return "js return";
+ case comment:
+ return "comment";
+ case position:
+ return "position";
+ case statement_position:
+ return "statement position";
+ case external_reference:
+ return "external reference";
+ case reloc_mode_count:
+ UNREACHABLE();
+ return "reloc_mode_count";
+ }
+ return "unknown relocation type";
+}
+
+
+void RelocInfo::Print() {
+ PrintF("%p %s", pc_, RelocModeName(rmode_));
+ if (rmode_ == comment) {
+ PrintF(" (%s)", data_);
+ } else if (rmode_ == embedded_object) {
+ PrintF(" (");
+ target_object()->ShortPrint();
+ PrintF(")");
+ } else if (rmode_ == external_reference) {
+ ExternalReferenceEncoder ref_encoder;
+ PrintF(" (%s) (%p)",
+ ref_encoder.NameOfAddress(*target_reference_address()),
+ *target_reference_address());
+ } else if (is_code_target(rmode_)) {
+ Code* code = Debug::GetCodeTarget(target_address());
+ PrintF(" (%s) (%p)", Code::Kind2String(code->kind()), target_address());
+ } else if (is_position(rmode_)) {
+ PrintF(" (%d)", data());
+ }
+
+ PrintF("\n");
+}
+
+
+void RelocInfo::Verify() {
+ switch (rmode_) {
+ case embedded_object:
+ Object::VerifyPointer(target_object());
+ break;
+ case js_construct_call:
+ case exit_js_frame:
+ case code_target_context:
+ case code_target: {
+ // convert inline target address to code object
+ Address addr = target_address();
+ ASSERT(addr != NULL);
+ // Check that we can find the right code object.
+ HeapObject* code = HeapObject::FromAddress(addr - Code::kHeaderSize);
+ Object* found = Heap::FindCodeObject(addr);
+ ASSERT(found->IsCode());
+ ASSERT(code->address() == HeapObject::cast(found)->address());
+ break;
+ }
+ case embedded_string:
+ case runtime_entry:
+ case js_return:
+ case comment:
+ case position:
+ case statement_position:
+ case external_reference:
+ case no_reloc:
+ break;
+ case reloc_mode_count:
+ UNREACHABLE();
+ break;
+ }
+}
+#endif // DEBUG
+
+
+// -----------------------------------------------------------------------------
+// Implementation of ExternalReference
+
+ExternalReference::ExternalReference(Builtins::CFunctionId id)
+ : address_(Builtins::c_function_address(id)) {}
+
+
+ExternalReference::ExternalReference(Builtins::Name name)
+ : address_(Builtins::builtin_address(name)) {}
+
+
+ExternalReference::ExternalReference(Runtime::FunctionId id)
+ : address_(Runtime::FunctionForId(id)->entry) {}
+
+
+ExternalReference::ExternalReference(Runtime::Function* f)
+ : address_(f->entry) {}
+
+
+ExternalReference::ExternalReference(const IC_Utility& ic_utility)
+ : address_(ic_utility.address()) {}
+
+
+ExternalReference::ExternalReference(const Debug_Address& debug_address)
+ : address_(debug_address.address()) {}
+
+
+ExternalReference::ExternalReference(StatsCounter* counter)
+ : address_(reinterpret_cast<Address>(counter->GetInternalPointer())) {}
+
+
+ExternalReference::ExternalReference(Top::AddressId id)
+ : address_(Top::get_address_from_id(id)) {}
+
+
+ExternalReference::ExternalReference(const SCTableReference& table_ref)
+ : address_(table_ref.address()) {}
+
+
+ExternalReference ExternalReference::builtin_passed_function() {
+ return ExternalReference(&Builtins::builtin_passed_function);
+}
+
+ExternalReference ExternalReference::the_hole_value_location() {
+ return ExternalReference(Factory::the_hole_value().location());
+}
+
+
+ExternalReference ExternalReference::address_of_stack_guard_limit() {
+ return ExternalReference(StackGuard::address_of_jslimit());
+}
+
+
+ExternalReference ExternalReference::debug_break() {
+ return ExternalReference(FUNCTION_ADDR(Debug::Break));
+}
+
+
+ExternalReference ExternalReference::new_space_start() {
+ return ExternalReference(Heap::NewSpaceStart());
+}
+
+ExternalReference ExternalReference::new_space_allocation_top_address() {
+ return ExternalReference(Heap::NewSpaceAllocationTopAddress());
+}
+
+ExternalReference ExternalReference::new_space_allocation_limit_address() {
+ return ExternalReference(Heap::NewSpaceAllocationLimitAddress());
+}
+
+ExternalReference ExternalReference::debug_step_in_fp_address() {
+ return ExternalReference(Debug::step_in_fp_addr());
+}
+
+} } // namespace v8::internal
diff --git a/src/assembler.h b/src/assembler.h
new file mode 100644
index 0000000..972b8e1
--- /dev/null
+++ b/src/assembler.h
@@ -0,0 +1,478 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license has been
+// modified significantly by Google Inc.
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+
+#ifndef V8_ASSEMBLER_H_
+#define V8_ASSEMBLER_H_
+
+#include "runtime.h"
+#include "top.h"
+#include "zone-inl.h"
+
+namespace v8 { namespace internal {
+
+
+// -----------------------------------------------------------------------------
+// Labels represent pc locations; they are typically jump or call targets.
+// After declaration, a label can be freely used to denote a known or (yet)
+// unknown pc location. Assembler::bind() is used to bind a label to the
+// current pc. A label can be bound only once.
+
+class Label : public ZoneObject { // LabelShadows are dynamically allocated.
+ public:
+ INLINE(Label()) { Unuse(); }
+ INLINE(~Label()) { ASSERT(!is_linked()); }
+
+ INLINE(void Unuse()) { pos_ = 0; }
+
+ INLINE(bool is_bound() const) { return pos_ < 0; }
+ INLINE(bool is_unused() const) { return pos_ == 0; }
+ INLINE(bool is_linked() const) { return pos_ > 0; }
+
+ // Returns the position of bound or linked labels. Cannot be used
+ // for unused labels.
+ int pos() const;
+
+ private:
+ // pos_ encodes both the binding state (via its sign)
+ // and the binding position (via its value) of a label.
+ //
+ // pos_ < 0 bound label, pos() returns the jump target position
+ // pos_ == 0 unused label
+ // pos_ > 0 linked label, pos() returns the last reference position
+ int pos_;
+
+ void bind_to(int pos) {
+ pos_ = -pos - 1;
+ ASSERT(is_bound());
+ }
+ void link_to(int pos) {
+ pos_ = pos + 1;
+ ASSERT(is_linked());
+ }
+
+ friend class Assembler;
+ friend class Displacement;
+ friend class LabelShadow;
+};
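+
+
+// Typical use (a minimal sketch, assuming an Assembler* named masm and the
+// platform assembler's jmp(Label*) and bind(Label*) entry points):
+//
+//   Label done;
+//   masm->jmp(&done);   // forward jump: links the label
+//   ...                 // code that is jumped over
+//   masm->bind(&done);  // binds the label and resolves the linked jump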
+
+
+// A LabelShadow is a label that temporarily shadows another label. It
+// is used to catch linking and binding of labels in certain scopes,
+// e.g. try blocks. LabelShadows are themselves labels which can be
+// used (only) once they are no longer shadowing.
+class LabelShadow: public Label {
+ public:
+ explicit LabelShadow(Label* shadowed) {
+ ASSERT(shadowed != NULL);
+ shadowed_ = shadowed;
+ shadowed_pos_ = shadowed->pos_;
+ shadowed->Unuse();
+#ifdef DEBUG
+ is_shadowing_ = true;
+#endif
+ }
+
+ ~LabelShadow() {
+ ASSERT(!is_shadowing_);
+ }
+
+ void StopShadowing() {
+ ASSERT(is_shadowing_ && is_unused());
+ pos_ = shadowed_->pos_;
+ shadowed_->pos_ = shadowed_pos_;
+#ifdef DEBUG
+ is_shadowing_ = false;
+#endif
+ }
+
+ Label* shadowed() const { return shadowed_; }
+
+ private:
+ Label* shadowed_;
+ int shadowed_pos_;
+#ifdef DEBUG
+ bool is_shadowing_;
+#endif
+};
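+
+
+// Typical use (sketch): shadow a break or continue target while generating a
+// protected region such as a try block, then restore it afterwards.
+//
+//   LabelShadow shadow(original);  // original starts the region unused
+//   ...                            // references to original are caught here
+//   shadow.StopShadowing();        // original gets its old state back; the
+//                                  // shadow now holds the captured references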
+
+
+// -----------------------------------------------------------------------------
+// Relocation information
+
+// The constant kNoPosition is used with the collecting of source positions
+// in the relocation information. Two types of source positions are collected:
+// "position" (RelocMode position) and "statement position" (RelocMode
+// statement_position). The "position" is collected at places in the source
+// code which are of interest when making stack traces, to pin-point the
+// source location of a stack frame as closely as possible. The "statement
+// position" is collected at the beginning of each statement, and is used to
+// indicate possible break locations. kNoPosition indicates an
+// invalid/uninitialized position value.
+static const int kNoPosition = -1;
+
+
+enum RelocMode {
+ // Please note the order is important (see is_code_target).
+ js_construct_call, // code target that is a JavaScript construct call.
+ exit_js_frame, // code target that is an exit JavaScript frame stub.
+ code_target_context, // code target used for contextual loads.
+ code_target, // code target which is not any of the above.
+ embedded_object,
+ embedded_string,
+
+ // Everything after runtime_entry (inclusive) is not GC'ed.
+ runtime_entry,
+ js_return, // Marks start of the ExitJSFrame code.
+ comment,
+ position, // See comment for kNoPosition above.
+ statement_position, // See comment for kNoPosition above.
+ external_reference, // The address of an external C++ function.
+ // add more as needed
+ no_reloc, // never recorded
+
+ // Pseudo-types
+ reloc_mode_count,
+ last_code_enum = code_target
+};
+
+
+inline int RelocMask(RelocMode mode) {
+ return 1 << mode;
+}
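+
+
+// For example (sketch), a mask selecting only source positions can be built
+// with RelocMask and passed to a RelocIterator (declared below):
+//
+//   int mask = RelocMask(position) | RelocMask(statement_position);
+//   for (RelocIterator it(code, mask); !it.done(); it.next()) {
+//     // it.rinfo()->data() holds the source position
+//   }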
+
+
+inline bool is_js_construct_call(RelocMode mode) {
+ return mode == js_construct_call;
+}
+
+
+inline bool is_exit_js_frame(RelocMode mode) {
+ return mode == exit_js_frame;
+}
+
+
+inline bool is_code_target(RelocMode mode) {
+ return mode <= last_code_enum;
+}
+
+
+inline bool is_js_return(RelocMode mode) {
+ return mode == js_return;
+}
+
+
+inline bool is_comment(RelocMode mode) {
+ return mode == comment;
+}
+
+
+inline bool is_position(RelocMode mode) {
+ return mode == position || mode == statement_position;
+}
+
+
+inline bool is_statement_position(RelocMode mode) {
+ return mode == statement_position;
+}
+
+inline bool is_external_reference(RelocMode mode) {
+ return mode == external_reference;
+}
+
+// Relocation information consists of the address (pc) of the datum
+// to which the relocation information applies, the relocation mode
+// (rmode), and an optional data field. The relocation mode may be
+// "descriptive" and not indicate a need for relocation, but simply
+// describe a property of the datum. Such rmodes are useful for GC
+// and nice disassembly output.
+
+class RelocInfo BASE_EMBEDDED {
+ public:
+ RelocInfo() {}
+ RelocInfo(byte* pc, RelocMode rmode, intptr_t data)
+ : pc_(pc), rmode_(rmode), data_(data) {
+ }
+
+ // Accessors
+ byte* pc() const { return pc_; }
+ void set_pc(byte* pc) { pc_ = pc; }
+ RelocMode rmode() const { return rmode_; }
+ intptr_t data() const { return data_; }
+
+ // Apply a relocation by delta bytes
+ INLINE(void apply(int delta));
+
+ // Read/modify the code target in the branch/call instruction this relocation
+ // applies to; can only be called if this->is_code_target(rmode_)
+ INLINE(Address target_address());
+ INLINE(void set_target_address(Address target));
+ INLINE(Object* target_object());
+ INLINE(Object** target_object_address());
+ INLINE(void set_target_object(Object* target));
+
+ // Read/modify the reference in the instruction this relocation
+ // applies to; can only be called if rmode_ is external_reference
+ INLINE(Address* target_reference_address());
+
+ // Read/modify the address of a call instruction. This is used to relocate
+ // the break points where straight-line code is patched with a call
+ // instruction.
+ INLINE(Address call_address());
+ INLINE(void set_call_address(Address target));
+ INLINE(Object* call_object());
+ INLINE(Object** call_object_address());
+ INLINE(void set_call_object(Object* target));
+
+ // Patch the code with some other code.
+ void patch_code(byte* instructions, int instruction_count);
+
+ // Patch the code with a call.
+ void patch_code_with_call(Address target, int guard_bytes);
+ INLINE(bool is_call_instruction());
+
+#ifdef DEBUG
+ // Debugging
+ void Print();
+ void Verify();
+ static const char* RelocModeName(RelocMode rmode);
+#endif
+
+ static const int kCodeTargetMask = (1 << (last_code_enum + 1)) - 1;
+ static const int kPositionMask = 1 << position | 1 << statement_position;
+ static const int kDebugMask = kPositionMask | 1 << comment;
+ static const int kApplyMask; // Modes affected by apply. Depends on arch.
+
+ private:
+ // On ARM, note that pc_ is the address of the constant pool entry
+ // to be relocated and not the address of the instruction
+ // referencing the constant pool entry (except when rmode_ ==
+ // comment).
+ byte* pc_;
+ RelocMode rmode_;
+ intptr_t data_;
+ friend class RelocIterator;
+};
+
+
+// RelocInfoWriter serializes a stream of relocation info. It writes towards
+// lower addresses.
+class RelocInfoWriter BASE_EMBEDDED {
+ public:
+ RelocInfoWriter() : pos_(NULL), last_pc_(NULL), last_data_(0) {}
+ RelocInfoWriter(byte* pos, byte* pc) : pos_(pos), last_pc_(pc),
+ last_data_(0) {}
+
+ byte* pos() const { return pos_; }
+ byte* last_pc() const { return last_pc_; }
+
+ void Write(const RelocInfo* rinfo);
+
+ // Update the state of the stream after the reloc info buffer
+ // and/or the code has been moved while the stream is active.
+ void Reposition(byte* pos, byte* pc) {
+ pos_ = pos;
+ last_pc_ = pc;
+ }
+
+ // Max size (bytes) of a written RelocInfo.
+ static const int kMaxSize = 12;
+
+ private:
+ inline uint32_t WriteVariableLengthPCJump(uint32_t pc_delta);
+ inline void WriteTaggedPC(uint32_t pc_delta, int tag);
+ inline void WriteExtraTaggedPC(uint32_t pc_delta, int extra_tag);
+ inline void WriteExtraTaggedData(int32_t data_delta, int top_tag);
+ inline void WriteTaggedData(int32_t data_delta, int tag);
+ inline void WriteExtraTag(int extra_tag, int top_tag);
+
+ byte* pos_;
+ byte* last_pc_;
+ intptr_t last_data_;
+ DISALLOW_EVIL_CONSTRUCTORS(RelocInfoWriter);
+};
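+
+
+// Typical use (sketch; buffer, buffer_size and pc are hypothetical locals of
+// the code generator): the writer is positioned at the end of the buffer and
+// fed one RelocInfo per recorded item, writing towards lower addresses.
+//
+//   RelocInfoWriter writer(buffer + buffer_size, buffer);
+//   RelocInfo rinfo(pc, code_target, 0);
+//   writer.Write(&rinfo);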
+
+
+// A RelocIterator iterates over relocation information.
+// Typical use:
+//
+// for (RelocIterator it(code); !it.done(); it.next()) {
+// // do something with it.rinfo() here
+// }
+//
+// A mask can be specified to skip unwanted modes.
+class RelocIterator: public Malloced {
+ public:
+ // Create a new iterator positioned at
+ // the beginning of the reloc info.
+ // Relocation information with mode k is included in the
+ // iteration iff bit k of mode_mask is set.
+ explicit RelocIterator(Code* code, int mode_mask = -1);
+ explicit RelocIterator(const CodeDesc& desc, int mode_mask = -1);
+
+ // Iteration
+ bool done() const { return done_; }
+ void next();
+
+ // Returns a pointer that is valid until the next call to next().
+ RelocInfo* rinfo() {
+ ASSERT(!done());
+ return &rinfo_;
+ }
+
+ private:
+ // Advance* moves the position before/after reading.
+ // *Read* reads from current byte(s) into rinfo_.
+ // *Get* just reads and returns info on current byte.
+ void Advance(int bytes = 1) { pos_ -= bytes; }
+ int AdvanceGetTag();
+ int GetExtraTag();
+ int GetTopTag();
+ void ReadTaggedPC();
+ void AdvanceReadPC();
+ void AdvanceReadData();
+ void AdvanceReadVariableLengthPCJump();
+ int GetPositionTypeTag();
+ void ReadTaggedData();
+
+ static RelocMode DebugInfoModeFromTag(int tag);
+
+ // If the given mode is wanted, set it in rinfo_ and return true.
+ // Else return false. Used for efficiently skipping unwanted modes.
+ bool SetMode(RelocMode mode) {
+ return (mode_mask_ & 1 << mode) ? (rinfo_.rmode_ = mode, true) : false;
+ }
+
+ byte* pos_;
+ byte* end_;
+ RelocInfo rinfo_;
+ bool done_;
+ int mode_mask_;
+ DISALLOW_EVIL_CONSTRUCTORS(RelocIterator);
+};
+
+
+// -----------------------------------------------------------------------------
+// External references
+
+class IC_Utility;
+class Debug_Address;
+class SCTableReference;
+
+// An ExternalReference represents a C++ address referenced from the generated
+// code. All references to C++ functions and addresses must be encapsulated
+// in an ExternalReference instance. This is done in order to track the
+// origin of all external references in the code.
+class ExternalReference BASE_EMBEDDED {
+ public:
+ explicit ExternalReference(Builtins::CFunctionId id);
+
+ explicit ExternalReference(Builtins::Name name);
+
+ explicit ExternalReference(Runtime::FunctionId id);
+
+ explicit ExternalReference(Runtime::Function* f);
+
+ explicit ExternalReference(const IC_Utility& ic_utility);
+
+ explicit ExternalReference(const Debug_Address& debug_address);
+
+ explicit ExternalReference(StatsCounter* counter);
+
+ explicit ExternalReference(Top::AddressId id);
+
+ explicit ExternalReference(const SCTableReference& table_ref);
+
+ // One-of-a-kind references. These references are not part of a general
+ // pattern. This means that they have to be added to the
+ // ExternalReferenceTable in serialize.cc manually.
+
+ static ExternalReference builtin_passed_function();
+
+ // Static variable Factory::the_hole_value().location()
+ static ExternalReference the_hole_value_location();
+
+ // Static variable StackGuard::address_of_jslimit()
+ static ExternalReference address_of_stack_guard_limit();
+
+ // Function Debug::Break()
+ static ExternalReference debug_break();
+
+ // Static variable Heap::NewSpaceStart()
+ static ExternalReference new_space_start();
+
+ // Used for fast allocation in generated code.
+ static ExternalReference new_space_allocation_top_address();
+ static ExternalReference new_space_allocation_limit_address();
+
+ // Used to check if single stepping is enabled in generated code.
+ static ExternalReference debug_step_in_fp_address();
+
+ Address address() const { return address_; }
+
+ private:
+ explicit ExternalReference(void* address)
+ : address_(reinterpret_cast<Address>(address)) {}
+
+ Address address_;
+};
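+
+
+// Typical use (sketch): wrap the C++ address once and hand only the raw
+// Address to the code generator.
+//
+//   ExternalReference stack_limit =
+//       ExternalReference::address_of_stack_guard_limit();
+//   Address raw_address = stack_limit.address();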
+
+
+// -----------------------------------------------------------------------------
+// Utility functions
+
+// Move these into inline file?
+
+static inline bool is_intn(int x, int n) {
+ return -(1 << (n-1)) <= x && x < (1 << (n-1));
+}
+
+static inline bool is_int24(int x) { return is_intn(x, 24); }
+static inline bool is_int8(int x) { return is_intn(x, 8); }
+
+static inline bool is_uintn(int x, int n) {
+ return (x & -(1 << n)) == 0;
+}
+
+static inline bool is_uint3(int x) { return is_uintn(x, 3); }
+static inline bool is_uint4(int x) { return is_uintn(x, 4); }
+static inline bool is_uint5(int x) { return is_uintn(x, 5); }
+static inline bool is_uint8(int x) { return is_uintn(x, 8); }
+static inline bool is_uint12(int x) { return is_uintn(x, 12); }
+static inline bool is_uint16(int x) { return is_uintn(x, 16); }
+static inline bool is_uint24(int x) { return is_uintn(x, 24); }
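+
+// For example, is_int8(x) holds exactly for -128 <= x <= 127, and
+// is_uint8(x) holds exactly for 0 <= x <= 255.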
+
+} } // namespace v8::internal
+
+#endif // V8_ASSEMBLER_H_
diff --git a/src/ast.cc b/src/ast.cc
new file mode 100644
index 0000000..232bfad
--- /dev/null
+++ b/src/ast.cc
@@ -0,0 +1,184 @@
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "ast.h"
+#include "scopes.h"
+
+namespace v8 { namespace internal {
+
+
+VariableProxySentinel VariableProxySentinel::this_proxy_(true);
+VariableProxySentinel VariableProxySentinel::identifier_proxy_(false);
+ValidLeftHandSideSentinel ValidLeftHandSideSentinel::instance_;
+Property Property::this_property_(VariableProxySentinel::this_proxy(), NULL, 0);
+Call Call::sentinel_(NULL, NULL, false, 0);
+
+
+// ----------------------------------------------------------------------------
+// All the Accept member functions for each syntax tree node type.
+
+#define DECL_ACCEPT(type) \
+ void type::Accept(Visitor* v) { \
+ if (v->CheckStackOverflow()) return; \
+ v->Visit##type(this); \
+ }
+NODE_LIST(DECL_ACCEPT)
+#undef DECL_ACCEPT
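+
+// For example, the member function generated for Block is:
+//
+//   void Block::Accept(Visitor* v) {
+//     if (v->CheckStackOverflow()) return;
+//     v->VisitBlock(this);
+//   }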
+
+
+// ----------------------------------------------------------------------------
+// Implementation of other node functionality.
+
+VariableProxy::VariableProxy(Handle<String> name,
+ bool is_this,
+ bool inside_with)
+ : name_(name),
+ var_(NULL),
+ is_this_(is_this),
+ inside_with_(inside_with) {
+ // names must be canonicalized for fast equality checks
+ ASSERT(name->IsSymbol());
+ // at least one access, otherwise no need for a VariableProxy
+ var_uses_.RecordAccess(1);
+}
+
+
+VariableProxy::VariableProxy(bool is_this)
+ : is_this_(is_this) {
+}
+
+
+void VariableProxy::BindTo(Variable* var) {
+ ASSERT(var_ == NULL); // must be bound only once
+ ASSERT(var != NULL); // must bind
+ ASSERT((is_this() && var->is_this()) || name_.is_identical_to(var->name()));
+ // Ideally CONST-ness should match. However, this is very hard to achieve
+ // because we don't know the exact semantics of conflicting (const and
+ // non-const) multiple variable declarations, const vars introduced via
+ // eval() etc. Const-ness and variable declarations are a complete mess
+ // in JS. Sigh...
+ // ASSERT(var->mode() == Variable::CONST || !is_const());
+ var_ = var;
+ var->var_uses()->RecordUses(&var_uses_);
+ var->obj_uses()->RecordUses(&obj_uses_);
+}
+
+
+#ifdef DEBUG
+
+const char* LoopStatement::OperatorString() const {
+ switch (type()) {
+ case DO_LOOP: return "DO";
+ case FOR_LOOP: return "FOR";
+ case WHILE_LOOP: return "WHILE";
+ }
+ return NULL;
+}
+
+#endif // DEBUG
+
+
+Token::Value Assignment::binary_op() const {
+ switch (op_) {
+ case Token::ASSIGN_BIT_OR: return Token::BIT_OR;
+ case Token::ASSIGN_BIT_XOR: return Token::BIT_XOR;
+ case Token::ASSIGN_BIT_AND: return Token::BIT_AND;
+ case Token::ASSIGN_SHL: return Token::SHL;
+ case Token::ASSIGN_SAR: return Token::SAR;
+ case Token::ASSIGN_SHR: return Token::SHR;
+ case Token::ASSIGN_ADD: return Token::ADD;
+ case Token::ASSIGN_SUB: return Token::SUB;
+ case Token::ASSIGN_MUL: return Token::MUL;
+ case Token::ASSIGN_DIV: return Token::DIV;
+ case Token::ASSIGN_MOD: return Token::MOD;
+ default: UNREACHABLE();
+ }
+ return Token::ILLEGAL;
+}
+
+
+bool FunctionLiteral::AllowsLazyCompilation() {
+ return scope()->AllowsLazyCompilation();
+}
+
+
+ObjectLiteral::Property::Property(Literal* key, Expression* value) {
+ key_ = key;
+ value_ = value;
+ Object* k = *key->handle();
+ if (k->IsSymbol() && Heap::Proto_symbol()->Equals(String::cast(k))) {
+ kind_ = PROTOTYPE;
+ } else {
+ kind_ = value_->AsLiteral() == NULL ? COMPUTED : CONSTANT;
+ }
+}
+
+
+ObjectLiteral::Property::Property(bool is_getter, FunctionLiteral* value) {
+ key_ = new Literal(value->name());
+ value_ = value;
+ kind_ = is_getter ? GETTER : SETTER;
+}
+
+
+
+void LabelCollector::AddLabel(Label* label) {
+ // Add the label to the collector, but discard duplicates.
+ int length = labels_->length();
+ for (int i = 0; i < length; i++) {
+ if (labels_->at(i) == label) return;
+ }
+ labels_->Add(label);
+}
+
+
+// ----------------------------------------------------------------------------
+// Implementation of Visitor
+
+
+void Visitor::VisitStatements(ZoneList<Statement*>* statements) {
+ for (int i = 0; i < statements->length(); i++) {
+ Visit(statements->at(i));
+ }
+}
+
+
+void Visitor::VisitExpressions(ZoneList<Expression*>* expressions) {
+ for (int i = 0; i < expressions->length(); i++) {
+ // The variable statement visiting code may pass NULL expressions
+ // to this code. Maybe this should be handled by introducing an
+ // undefined expression or literal? Revisit this code if this
+ // changes.
+ Expression* expression = expressions->at(i);
+ if (expression != NULL) Visit(expression);
+ }
+}
+
+
+} } // namespace v8::internal
diff --git a/src/ast.h b/src/ast.h
new file mode 100644
index 0000000..48b5dc7
--- /dev/null
+++ b/src/ast.h
@@ -0,0 +1,1233 @@
+// Copyright 2006-2008 Google Inc. All Rights Reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_AST_H_
+#define V8_AST_H_
+
+#include "execution.h"
+#include "factory.h"
+#include "runtime.h"
+#include "token.h"
+#include "variables.h"
+#include "macro-assembler.h"
+
+namespace v8 { namespace internal {
+
+// The abstract syntax tree is an intermediate, light-weight
+// representation of the parsed JavaScript code suitable for
+// compilation to native code.
+
+// Nodes are allocated in a separate zone, which allows faster
+// allocation and constant-time deallocation of the entire syntax
+// tree.
+
+
+// ----------------------------------------------------------------------------
+// Nodes of the abstract syntax tree. Only concrete classes are
+// enumerated here.
+
+#define NODE_LIST(V) \
+ V(Block) \
+ V(Declaration) \
+ V(ExpressionStatement) \
+ V(EmptyStatement) \
+ V(IfStatement) \
+ V(ContinueStatement) \
+ V(BreakStatement) \
+ V(ReturnStatement) \
+ V(WithEnterStatement) \
+ V(WithExitStatement) \
+ V(SwitchStatement) \
+ V(LoopStatement) \
+ V(ForInStatement) \
+ V(TryCatch) \
+ V(TryFinally) \
+ V(DebuggerStatement) \
+ V(FunctionLiteral) \
+ V(FunctionBoilerplateLiteral) \
+ V(Conditional) \
+ V(Slot) \
+ V(VariableProxy) \
+ V(Literal) \
+ V(RegExpLiteral) \
+ V(ObjectLiteral) \
+ V(ArrayLiteral) \
+ V(Assignment) \
+ V(Throw) \
+ V(Property) \
+ V(Call) \
+ V(CallNew) \
+ V(CallRuntime) \
+ V(UnaryOperation) \
+ V(CountOperation) \
+ V(BinaryOperation) \
+ V(CompareOperation) \
+ V(ThisFunction)
+
+
+#define DEF_FORWARD_DECLARATION(type) class type;
+NODE_LIST(DEF_FORWARD_DECLARATION)
+#undef DEF_FORWARD_DECLARATION
+
+
+// Typedef only introduced to avoid unreadable code.
+// Please do appreciate the required space in "> >".
+typedef ZoneList<Handle<String> > ZoneStringList;
+
+
+class Node: public ZoneObject {
+ public:
+ Node(): statement_pos_(kNoPosition) { }
+ virtual ~Node() { }
+ virtual void Accept(Visitor* v) = 0;
+
+ // Type testing & conversion.
+ virtual Statement* AsStatement() { return NULL; }
+ virtual ExpressionStatement* AsExpressionStatement() { return NULL; }
+ virtual EmptyStatement* AsEmptyStatement() { return NULL; }
+ virtual Expression* AsExpression() { return NULL; }
+ virtual Literal* AsLiteral() { return NULL; }
+ virtual Slot* AsSlot() { return NULL; }
+ virtual VariableProxy* AsVariableProxy() { return NULL; }
+ virtual Property* AsProperty() { return NULL; }
+ virtual Call* AsCall() { return NULL; }
+ virtual LabelCollector* AsLabelCollector() { return NULL; }
+ virtual BreakableStatement* AsBreakableStatement() { return NULL; }
+ virtual IterationStatement* AsIterationStatement() { return NULL; }
+ virtual UnaryOperation* AsUnaryOperation() { return NULL; }
+ virtual BinaryOperation* AsBinaryOperation() { return NULL; }
+ virtual Assignment* AsAssignment() { return NULL; }
+ virtual FunctionLiteral* AsFunctionLiteral() { return NULL; }
+
+ void set_statement_pos(int statement_pos) { statement_pos_ = statement_pos; }
+ int statement_pos() const { return statement_pos_; }
+
+ private:
+ int statement_pos_;
+};
+
+
+class Statement: public Node {
+ public:
+ virtual Statement* AsStatement() { return this; }
+ virtual ReturnStatement* AsReturnStatement() { return NULL; }
+
+ bool IsEmpty() { return AsEmptyStatement() != NULL; }
+};
+
+
+class Expression: public Node {
+ public:
+ virtual Expression* AsExpression() { return this; }
+
+ virtual bool IsValidLeftHandSide() { return false; }
+
+ // Mark the expression as being compiled as an expression
+ // statement. This is used to transform postfix increments to
+ // (faster) prefix increments.
+ virtual void MarkAsStatement() { /* do nothing */ }
+};
+
+
+/**
+ * A sentinel used during pre parsing that represents some expression
+ * that is a valid left hand side without having to actually build
+ * the expression.
+ */
+class ValidLeftHandSideSentinel: public Expression {
+ public:
+ virtual bool IsValidLeftHandSide() { return true; }
+ virtual void Accept(Visitor* v) { UNREACHABLE(); }
+ static ValidLeftHandSideSentinel* instance() { return &instance_; }
+ private:
+ static ValidLeftHandSideSentinel instance_;
+};
+
+
+class BreakableStatement: public Statement {
+ public:
+ enum Type {
+ TARGET_FOR_ANONYMOUS,
+ TARGET_FOR_NAMED_ONLY
+ };
+
+ // The labels associated with this statement. May be NULL;
+ // if non-NULL, it is guaranteed to contain at least one entry.
+ ZoneStringList* labels() const { return labels_; }
+
+ // Type testing & conversion.
+ virtual BreakableStatement* AsBreakableStatement() { return this; }
+
+ // Code generation
+ Label* break_target() { return &break_target_; }
+
+ // Used during code generation for restoring the stack when a
+ // break/continue crosses a statement that keeps stuff on the stack.
+ int break_stack_height() { return break_stack_height_; }
+ void set_break_stack_height(int height) { break_stack_height_ = height; }
+
+ // Testers.
+ bool is_target_for_anonymous() const { return type_ == TARGET_FOR_ANONYMOUS; }
+
+ protected:
+ BreakableStatement(ZoneStringList* labels, Type type)
+ : labels_(labels), type_(type) {
+ ASSERT(labels == NULL || labels->length() > 0);
+ }
+
+ private:
+ ZoneStringList* labels_;
+ Type type_;
+ Label break_target_;
+ int break_stack_height_;
+};
+
+
+class Block: public BreakableStatement {
+ public:
+ Block(ZoneStringList* labels, int capacity, bool is_initializer_block)
+ : BreakableStatement(labels, TARGET_FOR_NAMED_ONLY),
+ statements_(capacity),
+ is_initializer_block_(is_initializer_block) { }
+
+ virtual void Accept(Visitor* v);
+
+ void AddStatement(Statement* statement) { statements_.Add(statement); }
+
+ ZoneList<Statement*>* statements() { return &statements_; }
+ bool is_initializer_block() const { return is_initializer_block_; }
+
+ private:
+ ZoneList<Statement*> statements_;
+ bool is_initializer_block_;
+};
+
+
+class Declaration: public Node {
+ public:
+ Declaration(VariableProxy* proxy, Variable::Mode mode, FunctionLiteral* fun)
+ : proxy_(proxy),
+ mode_(mode),
+ fun_(fun) {
+ ASSERT(mode == Variable::VAR || mode == Variable::CONST);
+ // At the moment there are no "const functions" in JavaScript...
+ ASSERT(fun == NULL || mode == Variable::VAR);
+ }
+
+ virtual void Accept(Visitor* v);
+
+ VariableProxy* proxy() const { return proxy_; }
+ Variable::Mode mode() const { return mode_; }
+ FunctionLiteral* fun() const { return fun_; } // may be NULL
+
+ private:
+ VariableProxy* proxy_;
+ Variable::Mode mode_;
+ FunctionLiteral* fun_;
+};
+
+
+class IterationStatement: public BreakableStatement {
+ public:
+ // Type testing & conversion.
+ virtual IterationStatement* AsIterationStatement() { return this; }
+
+ Statement* body() const { return body_; }
+
+ // Code generation
+ Label* continue_target() { return &continue_target_; }
+
+ protected:
+ explicit IterationStatement(ZoneStringList* labels)
+ : BreakableStatement(labels, TARGET_FOR_ANONYMOUS), body_(NULL) { }
+
+ void Initialize(Statement* body) {
+ body_ = body;
+ }
+
+ private:
+ Statement* body_;
+ Label continue_target_;
+};
+
+
+class LoopStatement: public IterationStatement {
+ public:
+ enum Type { DO_LOOP, FOR_LOOP, WHILE_LOOP };
+
+ LoopStatement(ZoneStringList* labels, Type type)
+ : IterationStatement(labels), type_(type), init_(NULL),
+ cond_(NULL), next_(NULL) { }
+
+ void Initialize(Statement* init,
+ Expression* cond,
+ Statement* next,
+ Statement* body) {
+ ASSERT(init == NULL || type_ == FOR_LOOP);
+ ASSERT(next == NULL || type_ == FOR_LOOP);
+ IterationStatement::Initialize(body);
+ init_ = init;
+ cond_ = cond;
+ next_ = next;
+ }
+
+ virtual void Accept(Visitor* v);
+
+ Type type() const { return type_; }
+ Statement* init() const { return init_; }
+ Expression* cond() const { return cond_; }
+ Statement* next() const { return next_; }
+
+#ifdef DEBUG
+ const char* OperatorString() const;
+#endif
+
+ private:
+ Type type_;
+ Statement* init_;
+ Expression* cond_;
+ Statement* next_;
+};
+
+
+class ForInStatement: public IterationStatement {
+ public:
+ explicit ForInStatement(ZoneStringList* labels)
+ : IterationStatement(labels), each_(NULL), enumerable_(NULL) { }
+
+ void Initialize(Expression* each, Expression* enumerable, Statement* body) {
+ IterationStatement::Initialize(body);
+ each_ = each;
+ enumerable_ = enumerable;
+ }
+
+ virtual void Accept(Visitor* v);
+
+ Expression* each() const { return each_; }
+ Expression* enumerable() const { return enumerable_; }
+
+ private:
+ Expression* each_;
+ Expression* enumerable_;
+};
+
+
+class ExpressionStatement: public Statement {
+ public:
+ explicit ExpressionStatement(Expression* expression)
+ : expression_(expression) { }
+
+ virtual void Accept(Visitor* v);
+
+ // Type testing & conversion.
+ virtual ExpressionStatement* AsExpressionStatement() { return this; }
+
+ void set_expression(Expression* e) { expression_ = e; }
+ Expression* expression() { return expression_; }
+
+ private:
+ Expression* expression_;
+};
+
+
+class ContinueStatement: public Statement {
+ public:
+ explicit ContinueStatement(IterationStatement* target)
+ : target_(target) { }
+
+ virtual void Accept(Visitor* v);
+
+ IterationStatement* target() const { return target_; }
+
+ private:
+ IterationStatement* target_;
+};
+
+
+class BreakStatement: public Statement {
+ public:
+ explicit BreakStatement(BreakableStatement* target)
+ : target_(target) { }
+
+ virtual void Accept(Visitor* v);
+
+ BreakableStatement* target() const { return target_; }
+
+ private:
+ BreakableStatement* target_;
+};
+
+
+class ReturnStatement: public Statement {
+ public:
+ explicit ReturnStatement(Expression* expression)
+ : expression_(expression) { }
+
+ virtual void Accept(Visitor* v);
+
+ // Type testing & conversion.
+ virtual ReturnStatement* AsReturnStatement() { return this; }
+
+ Expression* expression() { return expression_; }
+
+ private:
+ Expression* expression_;
+};
+
+
+class WithEnterStatement: public Statement {
+ public:
+ explicit WithEnterStatement(Expression* expression)
+ : expression_(expression) { }
+
+ virtual void Accept(Visitor* v);
+
+ Expression* expression() const { return expression_; }
+
+ private:
+ Expression* expression_;
+};
+
+
+class WithExitStatement: public Statement {
+ public:
+ WithExitStatement() { }
+
+ virtual void Accept(Visitor* v);
+};
+
+
+class CaseClause: public ZoneObject {
+ public:
+ CaseClause(Expression* label, ZoneList<Statement*>* statements)
+ : label_(label), statements_(statements) { }
+
+ bool is_default() const { return label_ == NULL; }
+ Expression* label() const {
+ CHECK(!is_default());
+ return label_;
+ }
+ ZoneList<Statement*>* statements() const { return statements_; }
+
+ private:
+ Expression* label_;
+ ZoneList<Statement*>* statements_;
+};
+
+
+class SwitchStatement: public BreakableStatement {
+ public:
+ explicit SwitchStatement(ZoneStringList* labels)
+ : BreakableStatement(labels, TARGET_FOR_ANONYMOUS),
+ tag_(NULL), cases_(NULL) { }
+
+ void Initialize(Expression* tag, ZoneList<CaseClause*>* cases) {
+ tag_ = tag;
+ cases_ = cases;
+ }
+
+ virtual void Accept(Visitor* v);
+
+ Expression* tag() const { return tag_; }
+ ZoneList<CaseClause*>* cases() const { return cases_; }
+
+ private:
+ Expression* tag_;
+ ZoneList<CaseClause*>* cases_;
+};
+
+
+// If-statements always have non-null references to their then- and
+// else-parts. When parsing if-statements with no explicit else-part,
+// the parser implicitly creates an empty statement. Use the
+// HasThenStatement() and HasElseStatement() functions to check if a
+// given if-statement has a then- or an else-part containing code.
+class IfStatement: public Statement {
+ public:
+ IfStatement(Expression* condition,
+ Statement* then_statement,
+ Statement* else_statement)
+ : condition_(condition),
+ then_statement_(then_statement),
+ else_statement_(else_statement) { }
+
+ virtual void Accept(Visitor* v);
+
+ bool HasThenStatement() const { return !then_statement()->IsEmpty(); }
+ bool HasElseStatement() const { return !else_statement()->IsEmpty(); }
+
+ Expression* condition() const { return condition_; }
+ Statement* then_statement() const { return then_statement_; }
+ Statement* else_statement() const { return else_statement_; }
+
+ private:
+ Expression* condition_;
+ Statement* then_statement_;
+ Statement* else_statement_;
+};
+
+
+// NOTE: LabelCollectors are represented as nodes to fit in the target
+// stack in the compiler; this should probably be reworked.
+class LabelCollector: public Node {
+ public:
+ explicit LabelCollector(ZoneList<Label*>* labels) : labels_(labels) { }
+
+ // Adds a label to the collector. The collector stores a pointer, not
+ // a copy, of the label to make binding work, so make sure not to
+ // pass in references to labels that live on the stack.
+ void AddLabel(Label* label);
+
+ // Virtual behaviour. LabelCollectors are never part of the AST.
+ virtual void Accept(Visitor* v) { UNREACHABLE(); }
+ virtual LabelCollector* AsLabelCollector() { return this; }
+
+ ZoneList<Label*>* labels() { return labels_; }
+
+ private:
+ ZoneList<Label*>* labels_;
+};
+
+
+class TryStatement: public Statement {
+ public:
+ explicit TryStatement(Block* try_block)
+ : try_block_(try_block), escaping_labels_(NULL) { }
+
+ void set_escaping_labels(ZoneList<Label*>* labels) {
+ escaping_labels_ = labels;
+ }
+
+ Block* try_block() const { return try_block_; }
+ ZoneList<Label*>* escaping_labels() const { return escaping_labels_; }
+
+ private:
+ Block* try_block_;
+ ZoneList<Label*>* escaping_labels_;
+};
+
+
+class TryCatch: public TryStatement {
+ public:
+ TryCatch(Block* try_block, Expression* catch_var, Block* catch_block)
+ : TryStatement(try_block),
+ catch_var_(catch_var),
+ catch_block_(catch_block) {
+ ASSERT(catch_var->AsVariableProxy() != NULL);
+ }
+
+ virtual void Accept(Visitor* v);
+
+ Expression* catch_var() const { return catch_var_; }
+ Block* catch_block() const { return catch_block_; }
+
+ private:
+ Expression* catch_var_;
+ Block* catch_block_;
+};
+
+
+class TryFinally: public TryStatement {
+ public:
+ TryFinally(Block* try_block, Expression* finally_var, Block* finally_block)
+ : TryStatement(try_block),
+ finally_var_(finally_var),
+ finally_block_(finally_block) { }
+
+ virtual void Accept(Visitor* v);
+
+ // If the finally block is non-trivial it may be problematic to have
+ // extra stuff on the expression stack while evaluating it. The
+ // finally variable is used to hold the state instead of storing it
+ // on the stack. It may be NULL in which case the state is stored on
+ // the stack.
+ Expression* finally_var() const { return finally_var_; }
+
+ Block* finally_block() const { return finally_block_; }
+
+ private:
+ Expression* finally_var_;
+ Block* finally_block_;
+};
+
+
+class DebuggerStatement: public Statement {
+ public:
+ virtual void Accept(Visitor* v);
+};
+
+
+class EmptyStatement: public Statement {
+ public:
+ virtual void Accept(Visitor* v);
+
+ // Type testing & conversion.
+ virtual EmptyStatement* AsEmptyStatement() { return this; }
+};
+
+
+class Literal: public Expression {
+ public:
+ explicit Literal(Handle<Object> handle) : handle_(handle) { }
+
+ virtual void Accept(Visitor* v);
+
+ // Type testing & conversion.
+ virtual Literal* AsLiteral() { return this; }
+
+ // Check if this literal is identical to the other literal.
+ bool IsIdenticalTo(const Literal* other) const {
+ return handle_.is_identical_to(other->handle_);
+ }
+
+ // Identity testers.
+ bool IsNull() const { return handle_.is_identical_to(Factory::null_value()); }
+ bool IsTrue() const { return handle_.is_identical_to(Factory::true_value()); }
+ bool IsFalse() const {
+ return handle_.is_identical_to(Factory::false_value());
+ }
+
+ Handle<Object> handle() const { return handle_; }
+
+ private:
+ Handle<Object> handle_;
+};
+
+
+// Base class for literals that need space in the corresponding JSFunction.
+class MaterializedLiteral: public Expression {
+ public:
+ explicit MaterializedLiteral(int literal_index)
+ : literal_index_(literal_index) {}
+ int literal_index() { return literal_index_; }
+ private:
+ int literal_index_;
+};
+
+
+// An object literal has a boilerplate object that is used
+// for minimizing the work when constructing it at runtime.
+class ObjectLiteral: public MaterializedLiteral {
+ public:
+ // Property is used for passing information
+ // about an object literal's properties from the parser
+ // to the code generator.
+ class Property: public ZoneObject {
+ public:
+
+ enum Kind {
+ CONSTANT, // Property with constant value (at compile time).
+ COMPUTED, // Property with computed value (at execution time).
+ GETTER, SETTER, // Property is an accessor function.
+ PROTOTYPE // Property is __proto__.
+ };
+
+ Property(Literal* key, Expression* value);
+ Property(bool is_getter, FunctionLiteral* value);
+
+ Literal* key() { return key_; }
+ Expression* value() { return value_; }
+ Kind kind() { return kind_; }
+
+ private:
+ Literal* key_;
+ Expression* value_;
+ Kind kind_;
+ };
+
+ ObjectLiteral(Handle<FixedArray> constant_properties,
+ Expression* result,
+ ZoneList<Property*>* properties,
+ int literal_index)
+ : MaterializedLiteral(literal_index),
+ constant_properties_(constant_properties),
+ result_(result),
+ properties_(properties) {
+ }
+
+ virtual void Accept(Visitor* v);
+
+ Handle<FixedArray> constant_properties() const {
+ return constant_properties_;
+ }
+ Expression* result() const { return result_; }
+ ZoneList<Property*>* properties() const { return properties_; }
+
+ private:
+ Handle<FixedArray> constant_properties_;
+ Expression* result_;
+ ZoneList<Property*>* properties_;
+};
+
+
+// Node for capturing a regexp literal.
+class RegExpLiteral: public MaterializedLiteral {
+ public:
+ RegExpLiteral(Handle<String> pattern,
+ Handle<String> flags,
+ int literal_index)
+ : MaterializedLiteral(literal_index),
+ pattern_(pattern),
+ flags_(flags) {}
+
+ virtual void Accept(Visitor* v);
+
+ Handle<String> pattern() const { return pattern_; }
+ Handle<String> flags() const { return flags_; }
+
+ private:
+ Handle<String> pattern_;
+ Handle<String> flags_;
+};
+
+
+// An array literal has a literals object that is used
+// for minimizing the work when constructing it at runtime.
+class ArrayLiteral: public Expression {
+ public:
+ ArrayLiteral(Handle<FixedArray> literals,
+ Expression* result,
+ ZoneList<Expression*>* values)
+ : literals_(literals), result_(result), values_(values) {
+ }
+
+ virtual void Accept(Visitor* v);
+
+ Handle<FixedArray> literals() const { return literals_; }
+ Expression* result() const { return result_; }
+ ZoneList<Expression*>* values() const { return values_; }
+
+ private:
+ Handle<FixedArray> literals_;
+ Expression* result_;
+ ZoneList<Expression*>* values_;
+};
+
+
+class VariableProxy: public Expression {
+ public:
+ virtual void Accept(Visitor* v);
+
+ // Type testing & conversion
+ virtual Property* AsProperty() {
+ return var_ == NULL ? NULL : var_->AsProperty();
+ }
+ virtual VariableProxy* AsVariableProxy() { return this; }
+
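+ // Note: AsVariable is non-virtual and tolerates a NULL receiver,
+ // presumably so callers can write
+ // expr->AsVariableProxy()->AsVariable() without a separate NULL
+ // check (this relies on non-standard behavior of invoking a member
+ // function through a NULL pointer).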
+ Variable* AsVariable() {
+ return this == NULL || var_ == NULL ? NULL : var_->AsVariable();
+ }
+ virtual bool IsValidLeftHandSide() {
+ return var_ == NULL ? true : var_->IsValidLeftHandSide();
+ }
+ bool IsVariable(Handle<String> n) {
+ return !is_this() && name().is_identical_to(n);
+ }
+
+ // The name of the proxied variable. The special "this" variable
+ // must never be treated as an ordinary variable with the name
+ // "this".
+ Handle<String> name() const { return name_; }
+ Variable* var() const { return var_; }
+ UseCount* var_uses() { return &var_uses_; }
+ UseCount* obj_uses() { return &obj_uses_; }
+ bool is_this() const { return is_this_; }
+ bool inside_with() const { return inside_with_; }
+
+ // Bind this proxy to the variable var.
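+ // (Proxies are created unresolved, with var_ == NULL, and are
+ // presumably resolved later during scope analysis; note that Scope
+ // is declared a friend below.)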
+ void BindTo(Variable* var);
+
+ protected:
+ Handle<String> name_;
+ Variable* var_; // resolved variable, or NULL
+ bool is_this_;
+ bool inside_with_;
+
+ // VariableProxy usage info.
+ UseCount var_uses_; // uses of the variable value
+ UseCount obj_uses_; // uses of the object the variable points to
+
+ VariableProxy(Handle<String> name, bool is_this, bool inside_with);
+ explicit VariableProxy(bool is_this);
+
+ friend class Scope;
+};
+
+
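+// Illustrative note (an assumption, not stated here): the sentinel
+// proxies below presumably play the same role as Property::this_property()
+// and Call::sentinel() further down, i.e. they stand in for real AST
+// nodes during preparsing.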
+class VariableProxySentinel: public VariableProxy {
+ public:
+ virtual bool IsValidLeftHandSide() { return !is_this(); }
+ static VariableProxySentinel* this_proxy() { return &this_proxy_; }
+ static VariableProxySentinel* identifier_proxy() {
+ return &identifier_proxy_;
+ }
+
+ private:
+ explicit VariableProxySentinel(bool is_this) : VariableProxy(is_this) { }
+ static VariableProxySentinel this_proxy_;
+ static VariableProxySentinel identifier_proxy_;
+};
+
+
+class Slot: public Expression {
+ public:
+ enum Type {
+ // A slot in the parameter section on the stack. index() is
+ // the parameter index, counting left-to-right, starting at 0.
+ PARAMETER,
+
+ // A slot in the local section on the stack. index() is
+ // the variable index in the stack frame, starting at 0.
+ LOCAL,
+
+ // An indexed slot in a heap context. index() is the
+ // variable index in the context object on the heap,
+ // starting at 0. var()->scope() is the corresponding
+ // scope.
+ CONTEXT,
+
+ // A named slot in a heap context. var()->name() is the
+ // variable name in the context object on the heap,
+ // with lookup starting at the current context. index()
+ // is invalid.
+ LOOKUP,
+
+ // A property in the global object. var()->name() is
+ // the property name.
+ GLOBAL
+ };
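+
+ // Illustrative example (a sketch, not authoritative): in
+ //   function f(x) { var y = 0; return x + y; }
+ // 'x' would typically get a PARAMETER slot and 'y' a LOCAL slot.
+ // A variable captured by a nested closure is a candidate for a
+ // CONTEXT slot, variables that must be resolved dynamically (e.g.
+ // under 'with' or 'eval') use LOOKUP, and a top-level 'var' ends
+ // up as a GLOBAL property on the global object.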
+
+ Slot(Variable* var, Type type, int index)
+ : var_(var), type_(type), index_(index) {
+ ASSERT(var != NULL);
+ }
+
+ virtual void Accept(Visitor* v);
+
+ // Type testing & conversion
+ virtual Slot* AsSlot() { return this; }
+
+ // Accessors
+ Variable* var() const { return var_; }
+ Type type() const { return type_; }
+ int index() const { return index_; }
+
+ private:
+ Variable* var_;
+ Type type_;
+ int index_;
+};
+
+
+class Property: public Expression {
+ public:
+ Property(Expression* obj, Expression* key, int pos)
+ : obj_(obj), key_(key), pos_(pos) { }
+
+ virtual void Accept(Visitor* v);
+
+ // Type testing & conversion
+ virtual Property* AsProperty() { return this; }
+
+ virtual bool IsValidLeftHandSide() { return true; }
+
+ Expression* obj() const { return obj_; }
+ Expression* key() const { return key_; }
+ int position() const { return pos_; }
+
+ // Returns the singleton Property node representing a property
+ // access on 'this'. Used during preparsing.
+ static Property* this_property() { return &this_property_; }
+
+ private:
+ Expression* obj_;
+ Expression* key_;
+ int pos_;
+
+ // Dummy property used during preparsing
+ static Property this_property_;
+};
+
+
+class Call: public Expression {
+ public:
+ Call(Expression* expression,
+ ZoneList<Expression*>* arguments,
+ bool is_eval,
+ int pos)
+ : expression_(expression),
+ arguments_(arguments),
+ is_eval_(is_eval),
+ pos_(pos) { }
+
+ virtual void Accept(Visitor* v);
+
+ // Type testing and conversion.
+ virtual Call* AsCall() { return this; }
+
+ Expression* expression() const { return expression_; }
+ ZoneList<Expression*>* arguments() const { return arguments_; }
+ bool is_eval() { return is_eval_; }
+ int position() { return pos_; }
+
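+ // Dummy call presumably used during preparsing, analogous to
+ // Property::this_property() above (an assumption; not documented
+ // here).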
+ static Call* sentinel() { return &sentinel_; }
+
+ private:
+ Expression* expression_;
+ ZoneList<Expression*>* arguments_;
+ bool is_eval_;
+ int pos_;
+
+ static Call sentinel_;
+};
+
+
+class CallNew: public Call {
+ public:
+ CallNew(Expression* expression, ZoneList<Expression*>* arguments, int pos)
+ : Call(expression, arguments, false, pos) { }
+
+ virtual void Accept(Visitor* v);
+};
+
+
+// The CallRuntime class does not represent any official JavaScript
+// language construct. Instead it is used to call a C or JS function
+// with a set of arguments. This is used from the builtins that are
+// implemented in JavaScript (see "v8natives.js").
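+//
+// Illustrative example (the '%' natives syntax and the function name
+// are assumptions, not documented here): a builtin call written as
+//   %Foo(a, b)
+// would presumably be parsed into a CallRuntime node whose name() is
+// "Foo" and whose arguments() are the expressions a and b.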
+class CallRuntime: public Expression {
+ public:
+ CallRuntime(Handle<String> name,
+ Runtime::Function* function,
+ ZoneList<Expression*>* arguments)
+ : name_(name), function_(function), arguments_(arguments) { }
+
+ virtual void Accept(Visitor* v);
+
+ Handle<String> name() const { return name_; }
+ Runtime::Function* function() const { return function_; }
+ ZoneList<Expression*>* arguments() const { return arguments_; }
+
+ private:
+ Handle<String> name_;
+ Runtime::Function* function_;
+ ZoneList<Expression*>* arguments_;
+};
+
+
+class UnaryOperation: public Expression {
+ public:
+ UnaryOperation(Token::Value op, Expression* expression)
+ : op_(op), expression_(expression) {
+ ASSERT(Token::IsUnaryOp(op));
+ }
+
+ virtual void Accept(Visitor* v);
+
+ // Type testing & conversion
+ virtual UnaryOperation* AsUnaryOperation() { return this; }
+
+ Token::Value op() const { return op_; }
+ Expression* expression() const { return expression_; }
+
+ private:
+ Token::Value op_;
+ Expression* expression_;
+};
+
+
+class BinaryOperation: public Expression {
+ public:
+ BinaryOperation(Token::Value op, Expression* left, Expression* right)
+ : op_(op), left_(left), right_(right) {
+ ASSERT(Token::IsBinaryOp(op));
+ }
+
+ virtual void Accept(Visitor* v);
+
+ // Type testing & conversion
+ virtual BinaryOperation* AsBinaryOperation() { return this; }
+
+ // True iff the result can be safely overwritten (to avoid allocation).
+ // False for operations that can return one of their operands.
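+ // For example, 'a || b' and 'a, b' evaluate to one of their
+ // operands and so must not be overwritten, whereas 'a + b'
+ // produces a fresh value that can safely be reused.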
+ bool ResultOverwriteAllowed() {
+ switch (op_) {
+ case Token::COMMA:
+ case Token::OR:
+ case Token::AND:
+ return false;
+ case Token::BIT_OR:
+ case Token::BIT_XOR:
+ case Token::BIT_AND:
+ case Token::SHL:
+ case Token::SAR:
+ case Token::SHR:
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV:
+ case Token::MOD:
+ return true;
+ default:
+ UNREACHABLE();
+ }
+ return false;
+ }
+
+ Token::Value op() const { return op_; }
+ Expression* left() const { return left_; }
+ Expression* right() const { return right_; }
+
+ private:
+ Token::Value op_;
+ Expression* left_;
+ Expression* right_;
+};
+
+
+class CountOperation: public Expression {
+ public:
+ CountOperation(bool is_prefix, Token::Value op, Expression* expression)
+ : is_prefix_(is_prefix), op_(op), expression_(expression) {
+ ASSERT(Token::IsCountOp(op));
+ }
+
+ virtual void Accept(Visitor* v);
+
+ bool is_prefix() const { return is_prefix_; }
+ bool is_postfix() const { return !is_prefix_; }
+ Token::Value op() const { return op_; }
+ Expression* expression() const { return expression_; }
+
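+ // When the operation is used as a statement its value is not
+ // needed, so a postfix operation can be treated as a prefix one
+ // (presumably to avoid preserving the original operand value).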
+ virtual void MarkAsStatement() { is_prefix_ = true; }
+
+ private:
+ bool is_prefix_;
+ Token::Value op_;
+ Expression* expression_;
+};
+
+
+class CompareOperation: public Expression {
+ public:
+ CompareOperation(Token::Value op, Expression* left, Expression* right)
+ : op_(op), left_(left), right_(right) {
+ ASSERT(Token::IsCompareOp(op));
+ }
+
+ virtual void Accept(Visitor* v);
+
+ Token::Value op() const { return op_; }
+ Expression* left() const { return left_; }
+ Expression* right() const { return right_; }
+
+ private:
+ Token::Value op_;
+ Expression* left_;
+ Expression* right_;
+};
+
+
+class Conditional: public Expression {
+ public:
+ Conditional(Expression* condition,
+ Expression* then_expression,
+ Expression* else_expression)
+ : condition_(condition),
+ then_expression_(then_expression),
+ else_expression_(else_expression) { }
+
+ virtual void Accept(Visitor* v);
+
+ Expression* condition() const { return condition_; }
+ Expression* then_expression() const { return then_expression_; }
+ Expression* else_expression() const { return else_expression_; }
+
+ private:
+ Expression* condition_;
+ Expression* then_expression_;
+ Expression* else_expression_;
+};
+
+
+class Assignment: public Expression {
+ public:
+ Assignment(Token::Value op, Expression* target, Expression* value, int pos)
+ : op_(op), target_(target), value_(value), pos_(pos) {
+ ASSERT(Token::IsAssignmentOp(op));
+ }
+
+ virtual void Accept(Visitor* v);
+ virtual Assignment* AsAssignment() { return this; }