xorl %eax, %eax

CVE-2010-2962: Linux kernel i915 GEM Memory Corruption

with 3 comments

Kees Cook discovered and reported this vulnerability. The susceptible code can be found at drivers/gpu/drm/i915/i915_gem.c where Intel’s i915 GEM (Graphics Execution Manager) driver resides. The first vulnerable routine is the IOCTL handler for reading data from the object referenced by a handle, which is this:

int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
                     struct drm_file *file_priv)
{
        struct drm_i915_gem_pread *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
        int ret;
      ...
        if (args->offset > obj->size || args->size > obj->size ||
            args->offset + args->size > obj->size) {
                drm_gem_object_unreference_unlocked(obj);
                return -EINVAL;
        }

        if (i915_gem_object_needs_bit17_swizzle(obj)) {
                ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
        } else {
                ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
                if (ret != 0)
                        ret = i915_gem_shmem_pread_slow(dev, obj, args,
                                                        file_priv);
        }

        drm_gem_object_unreference_unlocked(obj);

        return ret;
}

So, the first ‘if’ clause checks a number of things. Those are:
– whether the arguments’ offset is greater than the object’s size
– whether the arguments’ size is greater than the object’s size
– whether the arguments’ offset plus size exceed the size of the object
If any of these is true, it will call drm_gem_object_unreference_unlocked() on the processed object ‘obj’ and return -EINVAL. This is a simple inline function located at include/drm/drmP.h that decrements the reference counter of the given object using kref_put() from lib/kref.c and frees the object’s memory using drm_gem_object_free_unlocked() like this:

static inline void
drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
{
        if (obj != NULL)
                kref_put(&obj->refcount, drm_gem_object_free_unlocked);
}

After that check, the code will invoke i915_gem_object_needs_bit17_swizzle() on that object. This function will check for the ‘I915_BIT_6_SWIZZLE_9_10_17’ flag and it will either call i915_gem_shmem_pread_slow() or i915_gem_shmem_pread_fast(). Here is a snippet of the latter routine…

static int
i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
                          struct drm_i915_gem_pread *args,
                          struct drm_file *file_priv)
{
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        ssize_t remain;
        loff_t offset, page_base;
        char __user *user_data;
        int page_offset, page_length;
        int ret;

        user_data = (char __user *) (uintptr_t) args->data_ptr;
        remain = args->size;
       ...
        while (remain > 0) {
       ...
                ret = fast_shmem_read(obj_priv->pages,
                                      page_base, page_offset,
                                      user_data, page_length);
       ...
        return ret;
}

So, it will eventually call fast_shmem_read() passing user controlled data to it. This is a simple copy function that utilizes __copy_to_user_inatomic() to perform the actual copy operation.

static inline int
fast_shmem_read(struct page **pages,
                loff_t page_base, int page_offset,
                char __user *data,
                int length)
{
        char __iomem *vaddr;
        int unwritten;

        vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
        if (vaddr == NULL)
                return -ENOMEM;
        unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length);
        kunmap_atomic(vaddr, KM_USER0);

        if (unwritten)
                return -EFAULT;

        return 0;
}

Back to i915_gem_pread_ioctl() we can now move on the i915_gem_shmem_pread_slow() function which is this:

static int
i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
                          struct drm_i915_gem_pread *args,
                          struct drm_file *file_priv)
{
       ...
        uint64_t data_ptr = args->data_ptr;
        int do_bit17_swizzling;

        remain = args->size;
       ...
        user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
       ...
        pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
                                      num_pages, 1, 0, user_pages, NULL);
       ...
                if (do_bit17_swizzling) {
                        slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
                                              shmem_page_offset,
                                              user_pages[data_page_index],
                                              data_page_offset,
                                              page_length,
                                              1);
                } else {
                        slow_shmem_copy(user_pages[data_page_index],
                                        data_page_offset,
                                        obj_priv->pages[shmem_page_index],
                                        shmem_page_offset,
                                        page_length);
                }
       ...
	return ret;
}

Here, the user controlled data are obtained using get_user_pages() of mm/memory.c and then copied using either slow_shmem_bit17_copy() or slow_shmem_copy(). You might have noticed that in both routines called by i915_gem_pread_ioctl() there is no check that the user controlled ‘args->data_ptr’ and ‘args->size’ are within user-space limits.
Because of this, a user could make ‘args->data_ptr’ point to some kernel memory and consequently force the above copy operations to write data to arbitrary kernel memory instead of a userspace buffer. To fix this, i915_gem_pread_ioctl() was changed to include an access_ok() check before moving on like this:

        if (args->offset > obj->size || args->size > obj->size ||
            args->offset + args->size > obj->size) {
-               drm_gem_object_unreference_unlocked(obj);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto err;
+       }
+
+       if (!access_ok(VERIFY_WRITE,
+                      (char __user *)(uintptr_t)args->data_ptr,
+                      args->size)) {
+               ret = -EFAULT;
+               goto err;
        }

Also, the early drm_gem_object_unreference_unlocked() call was removed and a jump to ‘err’ was added, since the end of this routine was given an ‘err’ label and now does just this:

 
+err:
        drm_gem_object_unreference_unlocked(obj);
-
        return ret;
 }

A similar vulnerability was present at i915_gem_pwrite_ioctl() which is the equivalent write IOCTL handler too. Here is that code…

int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
{
        struct drm_i915_gem_pwrite *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
        int ret = 0;
      ...
        if (args->offset > obj->size || args->size > obj->size ||
            args->offset + args->size > obj->size) {
                drm_gem_object_unreference_unlocked(obj);
                return -EINVAL;
        }
      ...
        if (obj_priv->phys_obj)
                ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
        else if (obj_priv->tiling_mode == I915_TILING_NONE &&
                 dev->gtt_total != 0 &&
                 obj->write_domain != I915_GEM_DOMAIN_CPU) {
                ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
                if (ret == -EFAULT) {
                        ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
                                                       file_priv);
                }
        } else if (i915_gem_object_needs_bit17_swizzle(obj)) {
                ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file_priv);
        } else {
                ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
                if (ret == -EFAULT) {
                        ret = i915_gem_shmem_pwrite_slow(dev, obj, args,
                                                         file_priv);
                }
        }
      ...
	return ret;
}

This means that there is no check on the user-derived addresses, and the routine could result in calling i915_gem_phys_pwrite(), i915_gem_gtt_pwrite_fast(), i915_gem_gtt_pwrite_slow(), i915_gem_shmem_pwrite_slow() or i915_gem_shmem_pwrite_fast(). Almost all of them are vulnerable to the previous kind of vulnerability, leading to arbitrary memory corruption. The first one, i915_gem_phys_pwrite():

static int
i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
                     struct drm_i915_gem_pwrite *args,
                     struct drm_file *file_priv)
{
       ...
        char __user *user_data;

        user_data = (char __user *) (uintptr_t) args->data_ptr;
       ...
        ret = copy_from_user(obj_addr, user_data, args->size);
       ...
        return 0;
}

The second one, i915_gem_gtt_pwrite_fast():

static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
                         struct drm_i915_gem_pwrite *args,
                         struct drm_file *file_priv)
{
       ...
        user_data = (char __user *) (uintptr_t) args->data_ptr;
        remain = args->size;
        if (!access_ok(VERIFY_READ, user_data, remain))
                return -EFAULT;
       ...
                ret = fast_user_write (dev_priv->mm.gtt_mapping, page_base,
                                       page_offset, user_data, page_length);
       ...
        return ret;
}

As you can see, this one isn’t exploitable since it checks ‘user_data’s location before executing the fast_user_write() operation on it. The next routine that might be invoked is i915_gem_gtt_pwrite_slow() which is shown below.

static int
i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
                         struct drm_i915_gem_pwrite *args,
                         struct drm_file *file_priv)
{
       ...
        user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
       ...
        pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
                                      num_pages, 0, 0, user_pages, NULL);
       ...
                slow_kernel_write(dev_priv->mm.gtt_mapping,
                                  gtt_page_base, gtt_page_offset,
                                  user_pages[data_page_index],
                                  data_page_offset,
                                  page_length);
       ...
	return ret;
}

That has no check either.
Next we have i915_gem_shmem_pwrite_slow() which is this:

static int
i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
                           struct drm_i915_gem_pwrite *args,
                           struct drm_file *file_priv)
{
      ...
        uint64_t data_ptr = args->data_ptr;
      ...
        user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
      ...
        pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
                                      num_pages, 0, 0, user_pages, NULL);
      ...
                if (do_bit17_swizzling) {
                        slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
                                              shmem_page_offset,
                                              user_pages[data_page_index],
                                              data_page_offset,
                                              page_length,
                                              0);
                } else {
                        slow_shmem_copy(obj_priv->pages[shmem_page_index],
                                        shmem_page_offset,
                                        user_pages[data_page_index],
                                        data_page_offset,
                                        page_length);
                }
       ...
	return ret;
}

The final one is the i915_gem_shmem_pwrite_fast() which you can see here:

static int
i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
                           struct drm_i915_gem_pwrite *args,
                           struct drm_file *file_priv)
{
       ...
        user_data = (char __user *) (uintptr_t) args->data_ptr;
       ...
                ret = fast_shmem_write(obj_priv->pages,
                                       page_base, page_offset,
                                       user_data, page_length); 
       ...
	return ret;
}

So, the write IOCTL handling routine was patched in a similar manner. But firstly, the access_ok() check of i915_gem_gtt_pwrite_fast() was removed…

        user_data = (char __user *) (uintptr_t) args->data_ptr;
        remain = args->size;
-       if (!access_ok(VERIFY_READ, user_data, remain))
-               return -EFAULT;

since it was now moved to i915_gem_pwrite_ioctl()…

        if (args->offset > obj->size || args->size > obj->size ||
            args->offset + args->size > obj->size) {
-               drm_gem_object_unreference_unlocked(obj);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto err;
+       }
+
+       if (!access_ok(VERIFY_READ,
+                      (char __user *)(uintptr_t)args->data_ptr,
+                      args->size)) {
+               ret = -EFAULT;
+               goto err;
        }

again, a new ‘err’ label was added to avoid the redundant drm_gem_object_unreference_unlocked() call.

 
+err:
        drm_gem_object_unreference_unlocked(obj);
-
        return ret;
 }

Written by xorl

October 16, 2010 at 21:23

Posted in bugs, linux

3 Responses

Subscribe to comments with RSS.

  1. Hello,
    Thanks for this nice article.
    I don’t understand why calling get_user_pages without access_ok is a vulnerability ?
    Regards,

    Frank

    Frank

    September 20, 2011 at 19:43

  2. The problem was that there was no check that the provided pointers were within userland, get_user_pages() does not check for such cases and consequently, the subsequent routines could be tricked into using kernel-land pointers as user-space ones.

    xorl

    September 21, 2011 at 20:17

  3. It seems that calling a get_user_pages with a kernel land pointer instead of an userland pointer just returns a -EFAULT. Am I wrong ?

    Frank

    September 22, 2011 at 12:52


Leave a Reply

Fill in your details below or click an icon to log in:

WordPress.com Logo

You are commenting using your WordPress.com account. Log Out / Change )

Twitter picture

You are commenting using your Twitter account. Log Out / Change )

Facebook photo

You are commenting using your Facebook account. Log Out / Change )

Google+ photo

You are commenting using your Google+ account. Log Out / Change )

Connecting to %s