# This is a BitKeeper generated patch for the following project:
# Project Name: Linux kernel tree
# This patch format is intended for GNU patch command version 2.5 or higher.
# This patch includes the following deltas:
#	           ChangeSet	1.641   -> 1.642  
#	drivers/char/agp/agpgart_be-sis.c	1.1     -> 1.2     drivers/char/agp/sis.c (moved)
#	drivers/char/agp/agpgart_fe.c	1.13    -> 1.14    drivers/char/agp/frontend.c (moved)
#	drivers/char/agp/agpgart_be.c	1.35    -> 1.36    drivers/char/agp/agp.c (moved)
#	drivers/char/agp/Makefile	1.5     -> 1.6    
#	drivers/char/agp/agpgart_be-sworks.c	1.1     -> 1.2     drivers/char/agp/sworks.c (moved)
#	drivers/char/agp/agpgart_be-i8x0.c	1.2     -> 1.3     drivers/char/agp/i8x0.c (moved)
#	drivers/char/agp/agpgart_be-i810.c	1.1     -> 1.2     drivers/char/agp/i810.c (moved)
#	drivers/char/agp/agpgart_be-amd.c	1.1     -> 1.2     drivers/char/agp/amd.c (moved)
#	drivers/char/agp/agpgart_be-i460.c	1.1     -> 1.2     drivers/char/agp/i460.c (moved)
#	drivers/char/agp/agpgart_be-ali.c	1.1     -> 1.2     drivers/char/agp/ali.c (moved)
#	drivers/char/agp/agpgart_be-hp.c	1.1     -> 1.2     drivers/char/agp/hp.c (moved)
#	drivers/char/agp/agpgart_be-via.c	1.1     -> 1.2     drivers/char/agp/via.c (moved)
#
# The following is the BitKeeper ChangeSet Log
# --------------------------------------------
# 02/07/15	greg@kroah.com	1.642
# agpgart: renamed the agp files to make more sense
# --------------------------------------------
#
diff -Nru a/drivers/char/agp/Makefile b/drivers/char/agp/Makefile
--- a/drivers/char/agp/Makefile	Wed Jul 17 11:29:31 2002
+++ b/drivers/char/agp/Makefile	Wed Jul 17 11:29:31 2002
@@ -3,19 +3,19 @@
 # space ioctl interface to use agp memory.  It also adds a kernel interface
 # that other drivers could use to manipulate agp memory.
 
-export-objs := agpgart_be.o
+export-objs := agp.o
 
-agpgart-y := agpgart_fe.o agpgart_be.o
+agpgart-y := agp.o frontend.o
 
-agpgart-$(CONFIG_AGP_INTEL)	+=	agpgart_be-i8x0.o
-agpgart-$(CONFIG_AGP_I810)	+=	agpgart_be-i810.o
-agpgart-$(CONFIG_AGP_VIA)	+=	agpgart_be-via.o
-agpgart-$(CONFIG_AGP_AMD)	+=	agpgart_be-amd.o
-agpgart-$(CONFIG_AGP_SIS)	+=	agpgart_be-sis.o
-agpgart-$(CONFIG_AGP_ALI)	+=	agpgart_be-ali.o
-agpgart-$(CONFIG_AGP_SWORKS)	+=	agpgart_be-sworks.o
-agpgart-$(CONFIG_AGP_I460)	+=	agpgart_be-i460.o
-agpgart-$(CONFIG_AGP_HP_ZX1)	+=	agpgart_be-hp.o
+agpgart-$(CONFIG_AGP_INTEL)	+= i8x0.o
+agpgart-$(CONFIG_AGP_I810)	+= i810.o
+agpgart-$(CONFIG_AGP_VIA)	+= via.o
+agpgart-$(CONFIG_AGP_AMD)	+= amd.o
+agpgart-$(CONFIG_AGP_SIS)	+= sis.o
+agpgart-$(CONFIG_AGP_ALI)	+= ali.o
+agpgart-$(CONFIG_AGP_SWORKS)	+= sworks.o
+agpgart-$(CONFIG_AGP_I460)	+= i460.o
+agpgart-$(CONFIG_AGP_HP_ZX1)	+= hp.o
 agpgart-objs := $(agpgart-y)
 
 obj-$(CONFIG_AGP) += agpgart.o
diff -Nru a/drivers/char/agp/agp.c b/drivers/char/agp/agp.c
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/drivers/char/agp/agp.c	Wed Jul 17 11:29:31 2002
@@ -0,0 +1,1662 @@
+/*
+ * AGPGART module version 0.99
+ * Copyright (C) 1999 Jeff Hartmann
+ * Copyright (C) 1999 Precision Insight, Inc.
+ * Copyright (C) 1999 Xi Graphics, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, 
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE 
+ * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * TODO: 
+ * - Allocate more than order 0 pages to avoid too much linear map splitting.
+ */
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/pagemap.h>
+#include <linux/miscdevice.h>
+#include <linux/pm.h>
+#include <linux/agp_backend.h>
+#include "agp.h"
+
+MODULE_AUTHOR("Jeff Hartmann <jhartmann@precisioninsight.com>");
+MODULE_PARM(agp_try_unsupported, "1i");
+MODULE_LICENSE("GPL and additional rights");
+EXPORT_SYMBOL(agp_free_memory);
+EXPORT_SYMBOL(agp_allocate_memory);
+EXPORT_SYMBOL(agp_copy_info);
+EXPORT_SYMBOL(agp_bind_memory);
+EXPORT_SYMBOL(agp_unbind_memory);
+EXPORT_SYMBOL(agp_enable);
+EXPORT_SYMBOL(agp_backend_acquire);
+EXPORT_SYMBOL(agp_backend_release);
+
+struct agp_bridge_data agp_bridge = { type: NOT_SUPPORTED };
+static int agp_try_unsupported __initdata = 0;
+
+int agp_backend_acquire(void)
+{
+	if (agp_bridge.type == NOT_SUPPORTED)
+		return -EINVAL;
+
+	atomic_inc(&agp_bridge.agp_in_use);
+
+	if (atomic_read(&agp_bridge.agp_in_use) != 1) {
+		atomic_dec(&agp_bridge.agp_in_use);
+		return -EBUSY;
+	}
+	MOD_INC_USE_COUNT;
+	return 0;
+}
+
+void agp_backend_release(void)
+{
+	if (agp_bridge.type == NOT_SUPPORTED)
+		return;
+
+	atomic_dec(&agp_bridge.agp_in_use);
+	MOD_DEC_USE_COUNT;
+}
+
+/* 
+ * Generic routines for handling agp_memory structures -
+ * They use the basic page allocation routines to do the
+ * brunt of the work.
+ */
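+
+/*
+ * A typical client sequence, for illustration only (this sketch is not
+ * part of the original change; error handling omitted):
+ *
+ *	agp_memory *mem;
+ *
+ *	agp_backend_acquire();
+ *	mem = agp_allocate_memory(16, 0);	(16 pages, type 0)
+ *	agp_bind_memory(mem, pg_start);
+ *	... use the aperture ...
+ *	agp_unbind_memory(mem);
+ *	agp_free_memory(mem);
+ *	agp_backend_release();
+ */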
+
+
+void agp_free_key(int key)
+{
+
+	if (key < 0)
+		return;
+
+	if (key < MAXKEY)
+		clear_bit(key, agp_bridge.key_list);
+}
+
+static int agp_get_key(void)
+{
+	int bit;
+
+	bit = find_first_zero_bit(agp_bridge.key_list, MAXKEY);
+	if (bit < MAXKEY) {
+		set_bit(bit, agp_bridge.key_list);
+		return bit;
+	}
+	return -1;
+}
+
+agp_memory *agp_create_memory(int scratch_pages)
+{
+	agp_memory *new;
+
+	new = kmalloc(sizeof(agp_memory), GFP_KERNEL);
+
+	if (new == NULL)
+		return NULL;
+
+	memset(new, 0, sizeof(agp_memory));
+	new->key = agp_get_key();
+
+	if (new->key < 0) {
+		kfree(new);
+		return NULL;
+	}
+	new->memory = vmalloc(PAGE_SIZE * scratch_pages);
+
+	if (new->memory == NULL) {
+		agp_free_key(new->key);
+		kfree(new);
+		return NULL;
+	}
+	new->num_scratch_pages = scratch_pages;
+	return new;
+}
+
+void agp_free_memory(agp_memory * curr)
+{
+	int i;
+
+	if ((agp_bridge.type == NOT_SUPPORTED) || (curr == NULL))
+		return;
+
+	if (curr->is_bound == TRUE)
+		agp_unbind_memory(curr);
+
+	if (curr->type != 0) {
+		agp_bridge.free_by_type(curr);
+		return;
+	}
+	if (curr->page_count != 0) {
+		for (i = 0; i < curr->page_count; i++) {
+			curr->memory[i] &= ~(0x00000fff);
+			agp_bridge.agp_destroy_page(phys_to_virt(curr->memory[i]));
+		}
+	}
+	agp_free_key(curr->key);
+	vfree(curr->memory);
+	kfree(curr);
+	MOD_DEC_USE_COUNT;
+}
+
+#define ENTRIES_PER_PAGE		(PAGE_SIZE / sizeof(unsigned long))
+
+agp_memory *agp_allocate_memory(size_t page_count, u32 type)
+{
+	int scratch_pages;
+	agp_memory *new;
+	int i;
+
+	if (agp_bridge.type == NOT_SUPPORTED)
+		return NULL;
+
+	if ((atomic_read(&agp_bridge.current_memory_agp) + page_count) >
+	    agp_bridge.max_memory_agp) {
+		return NULL;
+	}
+
+	if (type != 0) {
+		new = agp_bridge.alloc_by_type(page_count, type);
+		return new;
+	}
+	/* We always increase the module count, since agp_free_memory()
+	 * auto-decrements it.
+	 */
+
+	MOD_INC_USE_COUNT;
+
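+	/* Ceiling division: new->memory[] holds one unsigned long per page,
+	 * so with 4K pages and 4-byte longs (ENTRIES_PER_PAGE == 1024) a
+	 * request for 1..1024 pages needs one scratch page of table space,
+	 * 1025..2048 needs two, and so on.
+	 */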
+	scratch_pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;
+
+	new = agp_create_memory(scratch_pages);
+
+	if (new == NULL) {
+		MOD_DEC_USE_COUNT;
+		return NULL;
+	}
+
+	for (i = 0; i < page_count; i++) {
+		void *addr = agp_bridge.agp_alloc_page();
+
+		if (addr == NULL) {
+			/* Free this structure */
+			agp_free_memory(new);
+			return NULL;
+		}
+		new->memory[i] = agp_bridge.mask_memory(virt_to_phys(addr), type);
+		new->page_count++;
+	}
+
+	flush_agp_mappings();
+
+	return new;
+}
+
+/* End - Generic routines for handling agp_memory structures */
+
+static int agp_return_size(void)
+{
+	int current_size;
+	void *temp;
+
+	temp = agp_bridge.current_size;
+
+	switch (agp_bridge.size_type) {
+	case U8_APER_SIZE:
+		current_size = A_SIZE_8(temp)->size;
+		break;
+	case U16_APER_SIZE:
+		current_size = A_SIZE_16(temp)->size;
+		break;
+	case U32_APER_SIZE:
+		current_size = A_SIZE_32(temp)->size;
+		break;
+	case LVL2_APER_SIZE:
+		current_size = A_SIZE_LVL2(temp)->size;
+		break;
+	case FIXED_APER_SIZE:
+		current_size = A_SIZE_FIX(temp)->size;
+		break;
+	default:
+		current_size = 0;
+		break;
+	}
+
+	return current_size;
+}
+
+/* Routine to copy over information structure */
+
+void agp_copy_info(agp_kern_info * info)
+{
+	unsigned long page_mask = 0;
+	int i;
+
+	memset(info, 0, sizeof(agp_kern_info));
+	if (agp_bridge.type == NOT_SUPPORTED) {
+		info->chipset = agp_bridge.type;
+		return;
+	}
+	info->version.major = agp_bridge.version->major;
+	info->version.minor = agp_bridge.version->minor;
+	info->device = agp_bridge.dev;
+	info->chipset = agp_bridge.type;
+	info->mode = agp_bridge.mode;
+	info->aper_base = agp_bridge.gart_bus_addr;
+	info->aper_size = agp_return_size();
+	info->max_memory = agp_bridge.max_memory_agp;
+	info->current_memory = atomic_read(&agp_bridge.current_memory_agp);
+	info->cant_use_aperture = agp_bridge.cant_use_aperture;
+
+	for(i = 0; i < agp_bridge.num_of_masks; i++)
+		page_mask |= agp_bridge.mask_memory(page_mask, i);
+
+	info->page_mask = ~page_mask;
+}
+
+/* End - Routine to copy over information structure */
+
+/*
+ * Routines for handling swapping of agp_memory into the GATT -
+ * These routines take agp_memory and insert them into the GATT.
+ * They call device specific routines to actually write to the GATT.
+ */
+
+int agp_bind_memory(agp_memory * curr, off_t pg_start)
+{
+	int ret_val;
+
+	if ((agp_bridge.type == NOT_SUPPORTED) ||
+	    (curr == NULL) || (curr->is_bound == TRUE)) {
+		return -EINVAL;
+	}
+	if (curr->is_flushed == FALSE) {
+		CACHE_FLUSH();
+		curr->is_flushed = TRUE;
+	}
+	ret_val = agp_bridge.insert_memory(curr, pg_start, curr->type);
+
+	if (ret_val != 0)
+		return ret_val;
+
+	curr->is_bound = TRUE;
+	curr->pg_start = pg_start;
+	return 0;
+}
+
+int agp_unbind_memory(agp_memory * curr)
+{
+	int ret_val;
+
+	if ((agp_bridge.type == NOT_SUPPORTED) || (curr == NULL))
+		return -EINVAL;
+
+	if (curr->is_bound != TRUE)
+		return -EINVAL;
+
+	ret_val = agp_bridge.remove_memory(curr, curr->pg_start, curr->type);
+
+	if (ret_val != 0)
+		return ret_val;
+
+	curr->is_bound = FALSE;
+	curr->pg_start = 0;
+	return 0;
+}
+
+/* End - Routines for handling swapping of agp_memory into the GATT */
+
+/* 
+ * Driver routines - start
+ * Currently this module supports the following chipsets:
+ * i810, i815, 440lx, 440bx, 440gx, i830, i840, i845, i850, i860, via vp3,
+ * via mvp3, via kx133, via kt133, amd irongate, amd 761, amd 762, ALi M1541,
+ * and generic support for the SiS chipsets.
+ */
+
+/* Generic Agp routines - Start */
+
+void agp_generic_agp_enable(u32 mode)
+{
+	struct pci_dev *device = NULL;
+	u32 command, scratch; 
+	u8 cap_ptr;
+
+	pci_read_config_dword(agp_bridge.dev, agp_bridge.capndx + 4, &command);
+
+	/*
+	 * PASS1: go through all devices that claim to be
+	 *        AGP devices and collect their data.
+	 */
+
+
+	pci_for_each_dev(device) {
+		cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP);
+		if (cap_ptr != 0x00) {
+			/*
+			 * OK, here we have an AGP device. Disable impossible
+			 * settings, and adjust the request queue to the minimum.
+			 */
+
+			pci_read_config_dword(device, cap_ptr + 4, &scratch);
+
+			/* adjust RQ depth */
+			command = ((command & ~0xff000000) |
+			     min_t(u32, (mode & 0xff000000),
+				 min_t(u32, (command & 0xff000000),
+				     (scratch & 0xff000000))));
+
+			/* disable SBA if it's not supported */
+			if (!((command & 0x00000200) &&
+			      (scratch & 0x00000200) &&
+			      (mode & 0x00000200)))
+				command &= ~0x00000200;
+
+			/* disable FW if it's not supported */
+			if (!((command & 0x00000010) &&
+			      (scratch & 0x00000010) &&
+			      (mode & 0x00000010)))
+				command &= ~0x00000010;
+
+			if (!((command & 4) &&
+			      (scratch & 4) &&
+			      (mode & 4)))
+				command &= ~0x00000004;
+
+			if (!((command & 2) &&
+			      (scratch & 2) &&
+			      (mode & 2)))
+				command &= ~0x00000002;
+
+			if (!((command & 1) &&
+			      (scratch & 1) &&
+			      (mode & 1)))
+				command &= ~0x00000001;
+		}
+	}
+	/*
+	 * PASS2: Figure out the 4X/2X/1X setting and enable the
+	 *        target (our motherboard chipset).
+	 */
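+
+	/*
+	 * Worked example with illustrative values: if the target advertises
+	 * 4X|2X|1X but a card only advertises 2X|1X and the caller asked for
+	 * 4X|2X|1X, PASS1 above leaves the rate bits at 2X|1X; the code below
+	 * then keeps the highest common rate (2X) and finally sets the AGP
+	 * enable bit (0x100).
+	 */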
+
+	if (command & 4)
+		command &= ~3;	/* 4X */
+
+	if (command & 2)
+		command &= ~5;	/* 2X */
+
+	if (command & 1)
+		command &= ~6;	/* 1X */
+
+	command |= 0x00000100;
+
+	pci_write_config_dword(agp_bridge.dev,
+			       agp_bridge.capndx + 8,
+			       command);
+
+	/*
+	 * PASS3: Go through all AGP devices and update the
+	 *        command registers.
+	 */
+
+	pci_for_each_dev(device) {
+		cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP);
+		if (cap_ptr != 0x00)
+			pci_write_config_dword(device, cap_ptr + 8, command);
+	}
+}
+
+int agp_generic_create_gatt_table(void)
+{
+	char *table;
+	char *table_end;
+	int size;
+	int page_order;
+	int num_entries;
+	int i;
+	void *temp;
+	struct page *page;
+
+	/* The generic routines can't handle 2 level gatt's */
+	if (agp_bridge.size_type == LVL2_APER_SIZE) {
+		return -EINVAL;
+	}
+
+	table = NULL;
+	i = agp_bridge.aperture_size_idx;
+	temp = agp_bridge.current_size;
+	size = page_order = num_entries = 0;
+
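+	/* If allocating the table for the current aperture size fails, the
+	 * loop below steps to the next (smaller) aperture size and retries,
+	 * until an allocation succeeds or all sizes have been tried (the
+	 * chipset drivers list their aperture sizes largest first).
+	 */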
+	if (agp_bridge.size_type != FIXED_APER_SIZE) {
+		do {
+			switch (agp_bridge.size_type) {
+			case U8_APER_SIZE:
+				size = A_SIZE_8(temp)->size;
+				page_order =
+				    A_SIZE_8(temp)->page_order;
+				num_entries =
+				    A_SIZE_8(temp)->num_entries;
+				break;
+			case U16_APER_SIZE:
+				size = A_SIZE_16(temp)->size;
+				page_order = A_SIZE_16(temp)->page_order;
+				num_entries = A_SIZE_16(temp)->num_entries;
+				break;
+			case U32_APER_SIZE:
+				size = A_SIZE_32(temp)->size;
+				page_order = A_SIZE_32(temp)->page_order;
+				num_entries = A_SIZE_32(temp)->num_entries;
+				break;
+				/* This case will never really happen. */
+			case FIXED_APER_SIZE:
+			case LVL2_APER_SIZE:
+			default:
+				size = page_order = num_entries = 0;
+				break;
+			}
+
+			table = (char *) __get_free_pages(GFP_KERNEL,
+							  page_order);
+
+			if (table == NULL) {
+				i++;
+				switch (agp_bridge.size_type) {
+				case U8_APER_SIZE:
+					agp_bridge.current_size = A_IDX8();
+					break;
+				case U16_APER_SIZE:
+					agp_bridge.current_size = A_IDX16();
+					break;
+				case U32_APER_SIZE:
+					agp_bridge.current_size = A_IDX32();
+					break;
+					/* This case will never really 
+					 * happen. 
+					 */
+				case FIXED_APER_SIZE:
+				case LVL2_APER_SIZE:
+				default:
+					agp_bridge.current_size =
+					    agp_bridge.current_size;
+					break;
+				}
+				temp = agp_bridge.current_size;	
+			} else {
+				agp_bridge.aperture_size_idx = i;
+			}
+		} while ((table == NULL) &&
+			 (i < agp_bridge.num_aperture_sizes));
+	} else {
+		size = ((struct aper_size_info_fixed *) temp)->size;
+		page_order = ((struct aper_size_info_fixed *) temp)->page_order;
+		num_entries = ((struct aper_size_info_fixed *) temp)->num_entries;
+		table = (char *) __get_free_pages(GFP_KERNEL, page_order);
+	}
+
+	if (table == NULL)
+		return -ENOMEM;
+
+	table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
+
+	for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
+		SetPageReserved(page);
+
+	agp_bridge.gatt_table_real = (unsigned long *) table;
+	CACHE_FLUSH();
+	agp_bridge.gatt_table = ioremap_nocache(virt_to_phys(table),
+					(PAGE_SIZE * (1 << page_order)));
+	CACHE_FLUSH();
+
+	if (agp_bridge.gatt_table == NULL) {
+		for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
+			ClearPageReserved(page);
+
+		free_pages((unsigned long) table, page_order);
+
+		return -ENOMEM;
+	}
+	agp_bridge.gatt_bus_addr = virt_to_phys(agp_bridge.gatt_table_real);
+
+	for (i = 0; i < num_entries; i++)
+		agp_bridge.gatt_table[i] = (unsigned long) agp_bridge.scratch_page;
+
+	return 0;
+}
+
+int agp_generic_suspend(void)
+{
+	return 0;
+}
+
+void agp_generic_resume(void)
+{
+	return;
+}
+
+int agp_generic_free_gatt_table(void)
+{
+	int page_order;
+	char *table, *table_end;
+	void *temp;
+	struct page *page;
+
+	temp = agp_bridge.current_size;
+
+	switch (agp_bridge.size_type) {
+	case U8_APER_SIZE:
+		page_order = A_SIZE_8(temp)->page_order;
+		break;
+	case U16_APER_SIZE:
+		page_order = A_SIZE_16(temp)->page_order;
+		break;
+	case U32_APER_SIZE:
+		page_order = A_SIZE_32(temp)->page_order;
+		break;
+	case FIXED_APER_SIZE:
+		page_order = A_SIZE_FIX(temp)->page_order;
+		break;
+	case LVL2_APER_SIZE:
+		/* The generic routines can't deal with 2 level gatt's */
+		return -EINVAL;
+		break;
+	default:
+		page_order = 0;
+		break;
+	}
+
+	/* Do not worry about freeing memory, because if this is
+	 * called, then all agp memory is deallocated and removed
+	 * from the table.
+	 */
+
+	iounmap(agp_bridge.gatt_table);
+	table = (char *) agp_bridge.gatt_table_real;
+	table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
+
+	for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
+		ClearPageReserved(page);
+
+	free_pages((unsigned long) agp_bridge.gatt_table_real, page_order);
+	return 0;
+}
+
+int agp_generic_insert_memory(agp_memory * mem, off_t pg_start, int type)
+{
+	int i, j, num_entries;
+	void *temp;
+
+	temp = agp_bridge.current_size;
+
+	switch (agp_bridge.size_type) {
+	case U8_APER_SIZE:
+		num_entries = A_SIZE_8(temp)->num_entries;
+		break;
+	case U16_APER_SIZE:
+		num_entries = A_SIZE_16(temp)->num_entries;
+		break;
+	case U32_APER_SIZE:
+		num_entries = A_SIZE_32(temp)->num_entries;
+		break;
+	case FIXED_APER_SIZE:
+		num_entries = A_SIZE_FIX(temp)->num_entries;
+		break;
+	case LVL2_APER_SIZE:
+		/* The generic routines can't deal with 2 level gatt's */
+		return -EINVAL;
+		break;
+	default:
+		num_entries = 0;
+		break;
+	}
+
+	if (type != 0 || mem->type != 0) {
+		/* The generic routines know nothing of memory types */
+		return -EINVAL;
+	}
+
+	if ((pg_start + mem->page_count) > num_entries)
+		return -EINVAL;
+
+	j = pg_start;
+
+	while (j < (pg_start + mem->page_count)) {
+		if (!PGE_EMPTY(agp_bridge.gatt_table[j])) {
+			return -EBUSY;
+		}
+		j++;
+	}
+
+	if (mem->is_flushed == FALSE) {
+		CACHE_FLUSH();
+		mem->is_flushed = TRUE;
+	}
+
+	for (i = 0, j = pg_start; i < mem->page_count; i++, j++)
+		agp_bridge.gatt_table[j] = mem->memory[i];
+
+	agp_bridge.tlb_flush(mem);
+	return 0;
+}
+
+int agp_generic_remove_memory(agp_memory * mem, off_t pg_start, int type)
+{
+	int i;
+
+	if (type != 0 || mem->type != 0) {
+		/* The generic routines know nothing of memory types */
+		return -EINVAL;
+	}
+	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
+		agp_bridge.gatt_table[i] =
+		    (unsigned long) agp_bridge.scratch_page;
+	}
+
+	agp_bridge.tlb_flush(mem);
+	return 0;
+}
+
+agp_memory *agp_generic_alloc_by_type(size_t page_count, int type)
+{
+	return NULL;
+}
+
+void agp_generic_free_by_type(agp_memory * curr)
+{
+	if (curr->memory != NULL)
+		vfree(curr->memory);
+
+	agp_free_key(curr->key);
+	kfree(curr);
+}
+
+/* 
+ * Basic Page Allocation Routines -
+ * These routines handle page allocation
+ * and by default they reserve the allocated 
+ * memory.  They also handle incrementing the
+ * current_memory_agp value, which is checked
+ * against a maximum value.
+ */
+
+void *agp_generic_alloc_page(void)
+{
+	struct page * page;
+	
+	page = alloc_page(GFP_KERNEL);
+	if (page == NULL)
+		return 0;
+
+	map_page_into_agp(page);
+
+	get_page(page);
+	SetPageLocked(page);
+	atomic_inc(&agp_bridge.current_memory_agp);
+	return page_address(page);
+}
+
+void agp_generic_destroy_page(void *addr)
+{
+	struct page *page;
+
+	if (addr == NULL)
+		return;
+
+	page = virt_to_page(addr);
+	unmap_page_from_agp(page);
+	put_page(page);
+	unlock_page(page);
+	free_page((unsigned long)addr);
+	atomic_dec(&agp_bridge.current_memory_agp);
+}
+
+/* End Basic Page Allocation Routines */
+
+void agp_enable(u32 mode)
+{
+	if (agp_bridge.type == NOT_SUPPORTED)
+		return;
+	agp_bridge.agp_enable(mode);
+}
+
+/* End - Generic Agp routines */
+
+
+/* per-chipset initialization data.
+ * note -- all chipsets for a single vendor MUST be grouped together
+ */
+static struct {
+	unsigned short device_id; /* first, to make table easier to read */
+	unsigned short vendor_id;
+	enum chipset_type chipset;
+	const char *vendor_name;
+	const char *chipset_name;
+	int (*chipset_setup) (struct pci_dev *pdev);
+} agp_bridge_info[] __initdata = {
+
+#ifdef CONFIG_AGP_ALI
+	{
+		device_id:	PCI_DEVICE_ID_AL_M1541_0,
+		vendor_id:	PCI_VENDOR_ID_AL,
+		chipset:	ALI_M1541,
+		vendor_name:	"Ali",
+		chipset_name:	"M1541",
+		chipset_setup:	ali_generic_setup,
+	},
+	{
+		device_id:	PCI_DEVICE_ID_AL_M1621_0,
+		vendor_id:	PCI_VENDOR_ID_AL,
+		chipset:	ALI_M1621,
+		vendor_name:	"Ali",
+		chipset_name:	"M1621",
+		chipset_setup:	ali_generic_setup,
+	},
+	{
+		device_id:	PCI_DEVICE_ID_AL_M1631_0,
+		vendor_id:	PCI_VENDOR_ID_AL,
+		chipset:	ALI_M1631,
+		vendor_name:	"Ali",
+		chipset_name:	"M1631",
+		chipset_setup:	ali_generic_setup,
+	},
+	{
+		device_id:	PCI_DEVICE_ID_AL_M1632_0,
+		vendor_id:	PCI_VENDOR_ID_AL,
+		chipset:	ALI_M1632,
+		vendor_name:	"Ali",
+		chipset_name:	"M1632",
+		chipset_setup:	ali_generic_setup,
+	},
+	{
+		device_id:	PCI_DEVICE_ID_AL_M1641_0,
+		vendor_id:	PCI_VENDOR_ID_AL,
+		chipset:	ALI_M1641,
+		vendor_name:	"Ali",
+		chipset_name:	"M1641",
+		chipset_setup:	ali_generic_setup,
+	},
+	{
+		device_id:	PCI_DEVICE_ID_AL_M1644_0,
+		vendor_id:	PCI_VENDOR_ID_AL,
+		chipset:	ALI_M1644,
+		vendor_name:	"Ali",
+		chipset_name:	"M1644",
+		chipset_setup:	ali_generic_setup,
+	},
+	{
+		device_id:	PCI_DEVICE_ID_AL_M1647_0,
+		vendor_id:	PCI_VENDOR_ID_AL,
+		chipset:	ALI_M1647,
+		vendor_name:	"Ali",
+		chipset_name:	"M1647",
+		chipset_setup:	ali_generic_setup,
+	},
+	{
+		device_id:	PCI_DEVICE_ID_AL_M1651_0,
+		vendor_id:	PCI_VENDOR_ID_AL,
+		chipset:	ALI_M1651,
+		vendor_name:	"Ali",
+		chipset_name:	"M1651",
+		chipset_setup:	ali_generic_setup,
+	},
+	{
+		device_id:	0,
+		vendor_id:	PCI_VENDOR_ID_AL,
+		chipset:	ALI_GENERIC,
+		vendor_name:	"Ali",
+		chipset_name:	"Generic",
+		chipset_setup:	ali_generic_setup,
+	},
+#endif /* CONFIG_AGP_ALI */
+
+#ifdef CONFIG_AGP_AMD
+	{
+		device_id:	PCI_DEVICE_ID_AMD_IRONGATE_0,
+		vendor_id:	PCI_VENDOR_ID_AMD,
+		chipset:	AMD_IRONGATE,
+		vendor_name:	"AMD",
+		chipset_name:	"Irongate",
+		chipset_setup:	amd_irongate_setup,
+	},
+	{
+		device_id:	PCI_DEVICE_ID_AMD_761_0,
+		vendor_id:	PCI_VENDOR_ID_AMD,
+		chipset:	AMD_761,
+		vendor_name:	"AMD",
+		chipset_name:	"761",
+		chipset_setup:	amd_irongate_setup,
+	},
+	{
+		device_id:	PCI_DEVICE_ID_AMD_762_0,
+		vendor_id:	PCI_VENDOR_ID_AMD,
+		chipset:	AMD_762,
+		vendor_name:	"AMD",
+		chipset_name:	"760MP",
+		chipset_setup:	amd_irongate_setup,
+	},
+	{
+		device_id:	0,
+		vendor_id:	PCI_VENDOR_ID_AMD,
+		chipset:	AMD_GENERIC,
+		vendor_name:	"AMD",
+		chipset_name:	"Generic",
+		chipset_setup:	amd_irongate_setup,
+	},
+#endif /* CONFIG_AGP_AMD */
+
+#ifdef CONFIG_AGP_INTEL
+	{
+		device_id:	PCI_DEVICE_ID_INTEL_82443LX_0,
+		vendor_id:	PCI_VENDOR_ID_INTEL,
+		chipset:	INTEL_LX,
+		vendor_name:	"Intel",
+		chipset_name:	"440LX",
+		chipset_setup:	intel_generic_setup
+	},
+	{
+		device_id:	PCI_DEVICE_ID_INTEL_82443BX_0,
+		vendor_id:	PCI_VENDOR_ID_INTEL,
+		chipset:	INTEL_BX,
+		vendor_name:	"Intel",
+		chipset_name:	"440BX",
+		chipset_setup:	intel_generic_setup
+	},
+	{
+		device_id:	PCI_DEVICE_ID_INTEL_82443GX_0,
+		vendor_id:	PCI_VENDOR_ID_INTEL,
+		chipset:	INTEL_GX,
+		vendor_name:	"Intel",
+		chipset_name:	"440GX",
+		chipset_setup:	intel_generic_setup
+	},
+	{
+		device_id:	PCI_DEVICE_ID_INTEL_815_0,
+		vendor_id:	PCI_VENDOR_ID_INTEL,
+		chipset:	INTEL_I815,
+		vendor_name:	"Intel",
+		chipset_name:	"i815",
+		chipset_setup:	intel_815_setup
+	},
+	{
+		device_id:	PCI_DEVICE_ID_INTEL_820_0,
+		vendor_id:	PCI_VENDOR_ID_INTEL,
+		chipset:	INTEL_I820,
+		vendor_name:	"Intel",
+		chipset_name:	"i820",
+		chipset_setup:	intel_820_setup
+	},
+	{
+		device_id:	PCI_DEVICE_ID_INTEL_820_UP_0,
+		vendor_id:	PCI_VENDOR_ID_INTEL,
+		chipset:	INTEL_I820,
+		vendor_name:	"Intel",
+		chipset_name:	"i820",
+		chipset_setup:	intel_820_setup
+	},
+	{
+		device_id:	PCI_DEVICE_ID_INTEL_830_M_0,
+		vendor_id:	PCI_VENDOR_ID_INTEL,
+		chipset:	INTEL_I830_M,
+		vendor_name:	"Intel",
+		chipset_name:	"i830M",
+		chipset_setup:	intel_830mp_setup
+	},
+	{
+		device_id:	PCI_DEVICE_ID_INTEL_845_G_0,
+		vendor_id:	PCI_VENDOR_ID_INTEL,
+		chipset:	INTEL_I845_G,
+		vendor_name:	"Intel",
+		chipset_name:	"i845G",
+		chipset_setup:	intel_830mp_setup
+	},
+	{
+		device_id:	PCI_DEVICE_ID_INTEL_840_0,
+		vendor_id:	PCI_VENDOR_ID_INTEL,
+		chipset:	INTEL_I840,
+		vendor_name:	"Intel",
+		chipset_name:	"i840",
+		chipset_setup:	intel_840_setup
+	},
+	{
+		device_id:	PCI_DEVICE_ID_INTEL_845_0,
+		vendor_id:	PCI_VENDOR_ID_INTEL,
+		chipset:	INTEL_I845,
+		vendor_name:	"Intel",
+		chipset_name:	"i845",
+		chipset_setup:	intel_845_setup
+	},
+	{
+		device_id:	PCI_DEVICE_ID_INTEL_850_0,
+		vendor_id:	PCI_VENDOR_ID_INTEL,
+		chipset:	INTEL_I850,
+		vendor_name:	"Intel",
+		chipset_name:	"i850",
+		chipset_setup:	intel_850_setup
+	},
+	{
+		device_id:	PCI_DEVICE_ID_INTEL_860_0,
+		vendor_id:	PCI_VENDOR_ID_INTEL,
+		chipset:	INTEL_I860,
+		vendor_name:	"Intel",
+		chipset_name:	"i860",
+		chipset_setup:	intel_860_setup
+	},
+	{
+		device_id:	0,
+		vendor_id:	PCI_VENDOR_ID_INTEL,
+		chipset:	INTEL_GENERIC,
+		vendor_name:	"Intel",
+		chipset_name:	"Generic",
+		chipset_setup:	intel_generic_setup
+	},
+
+#endif /* CONFIG_AGP_INTEL */
+
+#ifdef CONFIG_AGP_SIS
+	{
+		device_id:	PCI_DEVICE_ID_SI_740,
+		vendor_id:	PCI_VENDOR_ID_SI,
+		chipset:	SIS_GENERIC,
+		vendor_name:	"SiS",
+		chipset_name:	"740",
+		chipset_setup:	sis_generic_setup
+	},
+	{
+		device_id:	PCI_DEVICE_ID_SI_650,
+		vendor_id:	PCI_VENDOR_ID_SI,
+		chipset:	SIS_GENERIC,
+		vendor_name:	"SiS",
+		chipset_name:	"650",
+		chipset_setup:	sis_generic_setup
+	},
+	{
+		device_id:	PCI_DEVICE_ID_SI_645,
+		vendor_id:	PCI_VENDOR_ID_SI,
+		chipset:	SIS_GENERIC,
+		vendor_name:	"SiS",
+		chipset_name:	"645",
+		chipset_setup:	sis_generic_setup
+	},
+	{
+		device_id:	PCI_DEVICE_ID_SI_735,
+		vendor_id:	PCI_VENDOR_ID_SI,
+		chipset:	SIS_GENERIC,
+		vendor_name:	"SiS",
+		chipset_name:	"735",
+		chipset_setup:	sis_generic_setup
+	},
+	{
+		device_id:	PCI_DEVICE_ID_SI_745,
+		vendor_id:	PCI_VENDOR_ID_SI,
+		chipset:	SIS_GENERIC,
+		vendor_name:	"SiS",
+		chipset_name:	"745",
+		chipset_setup:	sis_generic_setup
+	},
+	{
+		device_id:	PCI_DEVICE_ID_SI_730,
+		vendor_id:	PCI_VENDOR_ID_SI,
+		chipset:	SIS_GENERIC,
+		vendor_name:	"SiS",
+		chipset_name:	"730",
+		chipset_setup:	sis_generic_setup
+	},
+	{
+		device_id:	PCI_DEVICE_ID_SI_630,
+		vendor_id:	PCI_VENDOR_ID_SI,
+		chipset:	SIS_GENERIC,
+		vendor_name:	"SiS",
+		chipset_name:	"630",
+		chipset_setup:	sis_generic_setup
+	},
+	{
+		device_id:	PCI_DEVICE_ID_SI_540,
+		vendor_id:	PCI_VENDOR_ID_SI,
+		chipset:	SIS_GENERIC,
+		vendor_name:	"SiS",
+		chipset_name:	"540",
+		chipset_setup:	sis_generic_setup
+	},
+	{
+		device_id:	PCI_DEVICE_ID_SI_620,
+		vendor_id:	PCI_VENDOR_ID_SI,
+		chipset:	SIS_GENERIC,
+		vendor_name:	"SiS",
+		chipset_name:	"620",
+		chipset_setup:	sis_generic_setup
+	},
+	{
+		device_id:	PCI_DEVICE_ID_SI_530,
+		vendor_id:	PCI_VENDOR_ID_SI,
+		chipset:	SIS_GENERIC,
+		vendor_name:	"SiS",
+		chipset_name:	"530",
+		chipset_setup:	sis_generic_setup
+	},
+	{
+		device_id:	PCI_DEVICE_ID_SI_550,
+		vendor_id:	PCI_VENDOR_ID_SI,
+		chipset:	SIS_GENERIC,
+		vendor_name:	"SiS",
+		chipset_name:	"550",
+		chipset_setup:	sis_generic_setup
+	},
+	{
+		device_id:	0,
+		vendor_id:	PCI_VENDOR_ID_SI,
+		chipset:	SIS_GENERIC,
+		vendor_name:	"SiS",
+		chipset_name:	"Generic",
+		chipset_setup:	sis_generic_setup
+	},
+#endif /* CONFIG_AGP_SIS */
+
+#ifdef CONFIG_AGP_VIA
+	{
+		device_id:	PCI_DEVICE_ID_VIA_8501_0,
+		vendor_id:	PCI_VENDOR_ID_VIA,
+		chipset:	VIA_MVP4,
+		vendor_name:	"Via",
+		chipset_name:	"MVP4",
+		chipset_setup:	via_generic_setup
+	},
+	{
+		device_id:	PCI_DEVICE_ID_VIA_82C597_0,
+		vendor_id:	PCI_VENDOR_ID_VIA,
+		chipset:	VIA_VP3,
+		vendor_name:	"Via",
+		chipset_name:	"VP3",
+		chipset_setup:	via_generic_setup
+	},
+	{
+		device_id:	PCI_DEVICE_ID_VIA_82C598_0,
+		vendor_id:	PCI_VENDOR_ID_VIA,
+		chipset:	VIA_MVP3,
+		vendor_name:	"Via",
+		chipset_name:	"MVP3",
+		chipset_setup:	via_generic_setup
+	},
+	{
+		device_id:	PCI_DEVICE_ID_VIA_82C691_0,
+		vendor_id:	PCI_VENDOR_ID_VIA,
+		chipset:	VIA_APOLLO_PRO,
+		vendor_name:	"Via",
+		chipset_name:	"Apollo Pro",
+		chipset_setup:	via_generic_setup
+	},
+	{
+		device_id:	PCI_DEVICE_ID_VIA_8371_0,
+		vendor_id:	PCI_VENDOR_ID_VIA,
+		chipset:	VIA_APOLLO_KX133,
+		vendor_name:	"Via",
+		chipset_name:	"Apollo Pro KX133",
+		chipset_setup:	via_generic_setup
+	},
+	{
+		device_id:	PCI_DEVICE_ID_VIA_8363_0,
+		vendor_id:	PCI_VENDOR_ID_VIA,
+		chipset:	VIA_APOLLO_KT133,
+		vendor_name:	"Via",
+		chipset_name:	"Apollo Pro KT133",
+		chipset_setup:	via_generic_setup
+	},
+	{
+		device_id:	PCI_DEVICE_ID_VIA_8367_0,
+		vendor_id:	PCI_VENDOR_ID_VIA,
+		chipset:	VIA_APOLLO_KT133,
+		vendor_name:	"Via",
+		chipset_name:	"Apollo Pro KT266",
+		chipset_setup:	via_generic_setup
+	},
+	{
+		device_id:	0,
+		vendor_id:	PCI_VENDOR_ID_VIA,
+		chipset:	VIA_GENERIC,
+		vendor_name:	"Via",
+		chipset_name:	"Generic",
+		chipset_setup:	via_generic_setup
+	},
+#endif /* CONFIG_AGP_VIA */
+
+#ifdef CONFIG_AGP_HP_ZX1
+	{
+		device_id:	PCI_DEVICE_ID_HP_ZX1_LBA,
+		vendor_id:	PCI_VENDOR_ID_HP,
+		chipset:	HP_ZX1,
+		vendor_name:	"HP",
+		chipset_name:	"ZX1",
+		chipset_setup:	hp_zx1_setup
+	},
+#endif
+
+	{ }, /* dummy final entry, always present */
+};
+
+
+/* scan table above for supported devices */
+static int __init agp_lookup_host_bridge (struct pci_dev *pdev)
+{
+	int i;
+	
+	for (i = 0; i < ARRAY_SIZE (agp_bridge_info); i++)
+		if (pdev->vendor == agp_bridge_info[i].vendor_id)
+			break;
+
+	if (i >= ARRAY_SIZE (agp_bridge_info)) {
+		printk (KERN_DEBUG PFX "unsupported bridge\n");
+		return -ENODEV;
+	}
+
+	while ((i < ARRAY_SIZE (agp_bridge_info)) &&
+	       (agp_bridge_info[i].vendor_id == pdev->vendor)) {
+		if (pdev->device == agp_bridge_info[i].device_id) {
+#ifdef CONFIG_AGP_ALI
+			if (pdev->device == PCI_DEVICE_ID_AL_M1621_0) {
+				u8 hidden_1621_id;
+
+				pci_read_config_byte(pdev, 0xFB, &hidden_1621_id);
+				switch (hidden_1621_id) {
+				case 0x31:
+					agp_bridge_info[i].chipset_name="M1631";
+					break;
+				case 0x32:
+					agp_bridge_info[i].chipset_name="M1632";
+					break;
+				case 0x41:
+					agp_bridge_info[i].chipset_name="M1641";
+					break;
+				case 0x43:
+					break;
+				case 0x47:
+					agp_bridge_info[i].chipset_name="M1647";
+					break;
+				case 0x51:
+					agp_bridge_info[i].chipset_name="M1651";
+					break;
+				default:
+					break;
+				}
+			}
+#endif
+
+			printk (KERN_INFO PFX "Detected %s %s chipset\n",
+				agp_bridge_info[i].vendor_name,
+				agp_bridge_info[i].chipset_name);
+			agp_bridge.type = agp_bridge_info[i].chipset;
+			return agp_bridge_info[i].chipset_setup (pdev);
+		}
+		
+		i++;
+	}
+
+	i--; /* point to vendor generic entry (device_id == 0) */
+
+	/* try init anyway, if user requests it AND
+	 * there is a 'generic' bridge entry for this vendor */
+	if (agp_try_unsupported && agp_bridge_info[i].device_id == 0) {
+		printk(KERN_WARNING PFX "Trying generic %s routines"
+		       " for device id: %04x\n",
+		       agp_bridge_info[i].vendor_name, pdev->device);
+		agp_bridge.type = agp_bridge_info[i].chipset;
+		return agp_bridge_info[i].chipset_setup (pdev);
+	}
+
+	printk(KERN_ERR PFX "Unsupported %s chipset (device id: %04x),"
+	       " you might want to try agp_try_unsupported=1.\n",
+	       agp_bridge_info[i].vendor_name, pdev->device);
+	return -ENODEV;
+}
+
+
+/* Supported Device Scanning routine */
+
+static int __init agp_find_supported_device(struct pci_dev *dev)
+{
+	u8 cap_ptr = 0x00;
+
+	agp_bridge.dev = dev;
+
+	/* Need to test for I810 here */
+#ifdef CONFIG_AGP_I810
+	if (dev->vendor == PCI_VENDOR_ID_INTEL) {
+		struct pci_dev *i810_dev;
+
+		switch (dev->device) {
+		case PCI_DEVICE_ID_INTEL_810_0:
+			i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL,
+					       PCI_DEVICE_ID_INTEL_810_1,
+						   NULL);
+			if (i810_dev == NULL) {
+				printk(KERN_ERR PFX "Detected an Intel i810,"
+				       " but could not find the secondary"
+				       " device.\n");
+				return -ENODEV;
+			}
+			printk(KERN_INFO PFX "Detected an Intel "
+			       "i810 Chipset.\n");
+			agp_bridge.type = INTEL_I810;
+			return intel_i810_setup (i810_dev);
+
+		case PCI_DEVICE_ID_INTEL_810_DC100_0:
+			i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL,
+					 PCI_DEVICE_ID_INTEL_810_DC100_1,
+						   NULL);
+			if (i810_dev == NULL) {
+				printk(KERN_ERR PFX "Detected an Intel i810 "
+				       "DC100, but could not find the "
+				       "secondary device.\n");
+				return -ENODEV;
+			}
+			printk(KERN_INFO PFX "Detected an Intel i810 "
+			       "DC100 Chipset.\n");
+			agp_bridge.type = INTEL_I810;
+			return intel_i810_setup(i810_dev);
+
+		case PCI_DEVICE_ID_INTEL_810_E_0:
+			i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL,
+					     PCI_DEVICE_ID_INTEL_810_E_1,
+						   NULL);
+			if (i810_dev == NULL) {
+				printk(KERN_ERR PFX "Detected an Intel i810 E"
+				    ", but could not find the secondary "
+				       "device.\n");
+				return -ENODEV;
+			}
+			printk(KERN_INFO PFX "Detected an Intel i810 E "
+			       "Chipset.\n");
+			agp_bridge.type = INTEL_I810;
+			return intel_i810_setup(i810_dev);
+
+		case PCI_DEVICE_ID_INTEL_815_0:
+			/* The i815 can operate either as an i810 style
+			 * integrated device, or as an AGP4X motherboard.
+			 *
+			 * This only addresses the first mode:
+			 */
+			i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL,
+						   PCI_DEVICE_ID_INTEL_815_1,
+						   NULL);
+			if (i810_dev == NULL) {
+				printk(KERN_ERR PFX "Detected an Intel i815, "
+					"but could not find the secondary "
+					"device. Assuming a non-integrated "
+					"video card.\n");
+				break;
+			}
+			printk(KERN_INFO PFX "Detected an Intel i815 "
+				"Chipset.\n");
+			agp_bridge.type = INTEL_I810;
+			return intel_i810_setup(i810_dev);
+
+		case PCI_DEVICE_ID_INTEL_845_G_0:
+			i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL,
+					PCI_DEVICE_ID_INTEL_845_G_1, NULL);
+			if(i810_dev && PCI_FUNC(i810_dev->devfn) != 0) {
+				i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL,
+					PCI_DEVICE_ID_INTEL_845_G_1, i810_dev);
+			}
+
+			if (i810_dev == NULL) {
+				/* 
+				 * We probably have an i845MP chipset
+				 * with an external graphics
+				 * card. It will be initialized later 
+				 */
+				agp_bridge.type = INTEL_I845_G;
+				break;
+			}
+			printk(KERN_INFO PFX "Detected an Intel "
+				   "845G Chipset.\n");
+			agp_bridge.type = INTEL_I810;
+			return intel_i830_setup(i810_dev);
+		   
+		case PCI_DEVICE_ID_INTEL_830_M_0:
+			i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL,
+						   PCI_DEVICE_ID_INTEL_830_M_1,
+						   NULL);
+			if(i810_dev && PCI_FUNC(i810_dev->devfn) != 0) {
+				i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL,
+							   PCI_DEVICE_ID_INTEL_830_M_1,
+							   i810_dev);
+			}
+
+			if (i810_dev == NULL) {
+				/* Intel 830MP with external graphic card */
+				/* It will be initialized later */
+				agp_bridge.type = INTEL_I830_M;
+				break;
+			}
+			printk(KERN_INFO PFX "Detected an Intel "
+				   "830M Chipset.\n");
+			agp_bridge.type = INTEL_I810;
+			return intel_i830_setup(i810_dev);
+		default:
+			break;
+		}
+	}
+#endif /* CONFIG_AGP_I810 */
+
+#ifdef CONFIG_AGP_SWORKS
+	/* Everything is on function 1 here, so we hardcode function 1 */
+	if (dev->vendor == PCI_VENDOR_ID_SERVERWORKS) {
+		struct pci_dev *bridge_dev;
+
+		bridge_dev = pci_find_slot ((unsigned int)dev->bus->number, 
+					    PCI_DEVFN(0, 1));
+		if(bridge_dev == NULL) {
+			printk(KERN_INFO PFX "Detected a Serverworks "
+			       "Chipset, but could not find the secondary "
+			       "device.\n");
+			return -ENODEV;
+		}
+
+		switch (dev->device) {
+		case PCI_DEVICE_ID_SERVERWORKS_HE:
+			agp_bridge.type = SVWRKS_HE;
+			return serverworks_setup(bridge_dev);
+
+		case PCI_DEVICE_ID_SERVERWORKS_LE:
+		case 0x0007:
+			agp_bridge.type = SVWRKS_LE;
+			return serverworks_setup(bridge_dev);
+
+		default:
+			if(agp_try_unsupported) {
+				agp_bridge.type = SVWRKS_GENERIC;
+				return serverworks_setup(bridge_dev);
+			}
+			break;
+		}
+	}
+
+#endif /* CONFIG_AGP_SWORKS */
+
+#ifdef CONFIG_AGP_HP_ZX1
+	if (dev->vendor == PCI_VENDOR_ID_HP) {
+		/* ZX1 LBAs can be either PCI or AGP bridges */
+		if (pci_find_capability(dev, PCI_CAP_ID_AGP)) {
+			printk(KERN_INFO PFX "Detected HP ZX1 AGP "
+			       "chipset at %s\n", dev->slot_name);
+			agp_bridge.type = HP_ZX1;
+			agp_bridge.dev = dev;
+			return hp_zx1_setup(dev);
+		}
+		return -ENODEV;
+	}
+#endif /* CONFIG_AGP_HP_ZX1 */
+
+	/* find capndx */
+	cap_ptr = pci_find_capability(dev, PCI_CAP_ID_AGP);
+	if (cap_ptr == 0x00)
+		return -ENODEV;
+	agp_bridge.capndx = cap_ptr;
+
+	/* Fill in the mode register */
+	pci_read_config_dword(agp_bridge.dev,
+			      agp_bridge.capndx + 4,
+			      &agp_bridge.mode);
+
+	/* probe for known chipsets */
+	return agp_lookup_host_bridge (dev);
+}
+
+struct agp_max_table {
+	int mem;
+	int agp;
+};
+
+static struct agp_max_table maxes_table[9] __initdata =
+{
+	{0, 0},
+	{32, 4},
+	{64, 28},
+	{128, 96},
+	{256, 204},
+	{512, 440},
+	{1024, 942},
+	{2048, 1920},
+	{4096, 3932}
+};
+
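+/*
+ * agp_find_max() interpolates linearly between rows of maxes_table to pick
+ * how much main memory (in MB) may be used for AGP.  Worked example with
+ * illustrative numbers: on a 384MB machine, rows (256,204) and (512,440)
+ * give 204 + (384-256)*(440-204)/(512-256) = 322MB, which is then
+ * converted to pages.
+ */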
+static int __init agp_find_max (void)
+{
+	long memory, index, result;
+
+	memory = virt_to_phys(high_memory) >> 20;
+	index = 1;
+
+	while ((memory > maxes_table[index].mem) &&
+	       (index < 8)) {
+		index++;
+	}
+
+	result = maxes_table[index - 1].agp +
+	   ( (memory - maxes_table[index - 1].mem)  *
+	     (maxes_table[index].agp - maxes_table[index - 1].agp)) /
+	   (maxes_table[index].mem - maxes_table[index - 1].mem);
+
+	printk(KERN_INFO PFX "Maximum main memory to use "
+	       "for agp memory: %ldM\n", result);
+	result = result << (20 - PAGE_SHIFT);
+	return result;
+}
+
+#define AGPGART_VERSION_MAJOR 0
+#define AGPGART_VERSION_MINOR 99
+
+static struct agp_version agp_current_version =
+{
+	major:	AGPGART_VERSION_MAJOR,
+	minor:	AGPGART_VERSION_MINOR,
+};
+
+static int __init agp_backend_initialize(struct pci_dev *dev)
+{
+	int size_value, rc, got_gatt=0, got_keylist=0;
+
+	memset(&agp_bridge, 0, sizeof(struct agp_bridge_data));
+	agp_bridge.type = NOT_SUPPORTED;
+	agp_bridge.max_memory_agp = agp_find_max();
+	agp_bridge.version = &agp_current_version;
+
+	rc = agp_find_supported_device(dev);
+	if (rc) {
+		/* not KERN_ERR because error msg should have already printed */
+		printk(KERN_DEBUG PFX "no supported devices found.\n");
+		return rc;
+	}
+
+	if (agp_bridge.needs_scratch_page == TRUE) {
+		void *addr;
+		addr = agp_bridge.agp_alloc_page();
+
+		if (addr == NULL) {
+			printk(KERN_ERR PFX "unable to get memory for "
+			       "scratch page.\n");
+			return -ENOMEM;
+		}
+		agp_bridge.scratch_page = virt_to_phys(addr);
+		agp_bridge.scratch_page =
+		    agp_bridge.mask_memory(agp_bridge.scratch_page, 0);
+	}
+
+	size_value = agp_bridge.fetch_size();
+
+	if (size_value == 0) {
+		printk(KERN_ERR PFX "unable to determine aperture size.\n");
+		rc = -EINVAL;
+		goto err_out;
+	}
+	if (agp_bridge.create_gatt_table()) {
+		printk(KERN_ERR PFX "unable to get memory for graphics "
+		       "translation table.\n");
+		rc = -ENOMEM;
+		goto err_out;
+	}
+	got_gatt = 1;
+	
+	agp_bridge.key_list = vmalloc(PAGE_SIZE * 4);
+	if (agp_bridge.key_list == NULL) {
+		printk(KERN_ERR PFX "error allocating memory for key lists.\n");
+		rc = -ENOMEM;
+		goto err_out;
+	}
+	got_keylist = 1;
+	
+	/* FIXME vmalloc'd memory not guaranteed contiguous */
+	memset(agp_bridge.key_list, 0, PAGE_SIZE * 4);
+
+	if (agp_bridge.configure()) {
+		printk(KERN_ERR PFX "error configuring host chipset.\n");
+		rc = -EINVAL;
+		goto err_out;
+	}
+
+	printk(KERN_INFO PFX "AGP aperture is %dM @ 0x%lx\n",
+	       size_value, agp_bridge.gart_bus_addr);
+
+	return 0;
+
+err_out:
+	if (agp_bridge.needs_scratch_page == TRUE) {
+		agp_bridge.scratch_page &= ~(0x00000fff);
+		agp_bridge.agp_destroy_page(phys_to_virt(agp_bridge.scratch_page));
+	}
+	if (got_gatt)
+		agp_bridge.free_gatt_table();
+	if (got_keylist)
+		vfree(agp_bridge.key_list);
+	return rc;
+}
+
+
+/* cannot be __exit because it could be called from __init code */
+static void agp_backend_cleanup(void)
+{
+	agp_bridge.cleanup();
+	agp_bridge.free_gatt_table();
+	vfree(agp_bridge.key_list);
+
+	if (agp_bridge.needs_scratch_page == TRUE) {
+		agp_bridge.scratch_page &= ~(0x00000fff);
+		agp_bridge.agp_destroy_page(phys_to_virt(agp_bridge.scratch_page));
+	}
+}
+
+static int agp_power(struct pm_dev *dev, pm_request_t rq, void *data)
+{
+	switch(rq)
+	{
+		case PM_SUSPEND:
+			return agp_bridge.suspend();
+		case PM_RESUME:
+			agp_bridge.resume();
+			return 0;
+	}		
+	return 0;
+}
+
+extern int agp_frontend_initialize(void);
+extern void agp_frontend_cleanup(void);
+
+static const drm_agp_t drm_agp = {
+	&agp_free_memory,
+	&agp_allocate_memory,
+	&agp_bind_memory,
+	&agp_unbind_memory,
+	&agp_enable,
+	&agp_backend_acquire,
+	&agp_backend_release,
+	&agp_copy_info
+};
+
+static int agp_probe (struct pci_dev *dev, const struct pci_device_id *ent)
+{
+	int ret_val;
+
+	if (agp_bridge.type != NOT_SUPPORTED) {
+		printk (KERN_DEBUG "Oops, don't init a 2nd agpgart device.\n");
+		return -ENODEV;
+	}
+
+	ret_val = agp_backend_initialize(dev);
+	if (ret_val) {
+		agp_bridge.type = NOT_SUPPORTED;
+		return ret_val;
+	}
+	ret_val = agp_frontend_initialize();
+	if (ret_val) {
+		agp_bridge.type = NOT_SUPPORTED;
+		agp_backend_cleanup();
+		return ret_val;
+	}
+
+	inter_module_register("drm_agp", THIS_MODULE, &drm_agp);
+	
+	pm_register(PM_PCI_DEV, PM_PCI_ID(agp_bridge.dev), agp_power);
+	return 0;
+}
+
+static struct pci_device_id agp_pci_table[] __initdata = {
+	{
+	class:		(PCI_CLASS_BRIDGE_HOST << 8),
+	class_mask:	~0,
+	vendor:		PCI_ANY_ID,
+	device:		PCI_ANY_ID,
+	subvendor:	PCI_ANY_ID,
+	subdevice:	PCI_ANY_ID,
+	},
+	{ }
+};
+
+MODULE_DEVICE_TABLE(pci, agp_pci_table);
+
+static struct pci_driver agp_pci_driver = {
+	name:		"agpgart",
+	id_table:	agp_pci_table,
+	probe:		agp_probe,
+};
+
+static int __init agp_init(void)
+{
+	int ret_val;
+
+	printk(KERN_INFO "Linux agpgart interface v%d.%d (c) Jeff Hartmann\n",
+	       AGPGART_VERSION_MAJOR, AGPGART_VERSION_MINOR);
+
+	ret_val = pci_module_init(&agp_pci_driver);
+	if (ret_val) {
+		agp_bridge.type = NOT_SUPPORTED;
+		return ret_val;
+	}
+	return 0;
+}
+
+static void __exit agp_cleanup(void)
+{
+	pci_unregister_driver(&agp_pci_driver);
+	if (agp_bridge.type != NOT_SUPPORTED) {
+		pm_unregister_all(agp_power);
+		agp_frontend_cleanup();
+		agp_backend_cleanup();
+		inter_module_unregister("drm_agp");
+	}
+}
+
+module_init(agp_init);
+module_exit(agp_cleanup);
diff -Nru a/drivers/char/agp/agpgart_be-ali.c b/drivers/char/agp/agpgart_be-ali.c
--- a/drivers/char/agp/agpgart_be-ali.c	Wed Jul 17 11:29:31 2002
+++ /dev/null	Wed Dec 31 16:00:00 1969
@@ -1,265 +0,0 @@
-/*
- * AGPGART module version 0.99
- * Copyright (C) 1999 Jeff Hartmann
- * Copyright (C) 1999 Precision Insight, Inc.
- * Copyright (C) 1999 Xi Graphics, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, 
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE 
- * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * TODO: 
- * - Allocate more than order 0 pages to avoid too much linear map splitting.
- */
-#include <linux/config.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/agp_backend.h>
-#include "agp.h"
-
-static int ali_fetch_size(void)
-{
-	int i;
-	u32 temp;
-	struct aper_size_info_32 *values;
-
-	pci_read_config_dword(agp_bridge.dev, ALI_ATTBASE, &temp);
-	temp &= ~(0xfffffff0);
-	values = A_SIZE_32(agp_bridge.aperture_sizes);
-
-	for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
-		if (temp == values[i].size_value) {
-			agp_bridge.previous_size =
-			    agp_bridge.current_size = (void *) (values + i);
-			agp_bridge.aperture_size_idx = i;
-			return values[i].size;
-		}
-	}
-
-	return 0;
-}
-
-static void ali_tlbflush(agp_memory * mem)
-{
-	u32 temp;
-
-	pci_read_config_dword(agp_bridge.dev, ALI_TLBCTRL, &temp);
-// clear tag
-	pci_write_config_dword(agp_bridge.dev, ALI_TAGCTRL,
-			((temp & 0xfffffff0) | 0x00000001|0x00000002));
-}
-
-static void ali_cleanup(void)
-{
-	struct aper_size_info_32 *previous_size;
-	u32 temp;
-
-	previous_size = A_SIZE_32(agp_bridge.previous_size);
-
-	pci_read_config_dword(agp_bridge.dev, ALI_TLBCTRL, &temp);
-// clear tag
-	pci_write_config_dword(agp_bridge.dev, ALI_TAGCTRL,
-			((temp & 0xffffff00) | 0x00000001|0x00000002));
-
-	pci_read_config_dword(agp_bridge.dev,  ALI_ATTBASE, &temp);
-	pci_write_config_dword(agp_bridge.dev, ALI_ATTBASE,
-			((temp & 0x00000ff0) | previous_size->size_value));
-}
-
-static int ali_configure(void)
-{
-	u32 temp;
-	struct aper_size_info_32 *current_size;
-
-	current_size = A_SIZE_32(agp_bridge.current_size);
-
-	/* aperture size and gatt addr */
-	pci_read_config_dword(agp_bridge.dev, ALI_ATTBASE, &temp);
-	temp = (((temp & 0x00000ff0) | (agp_bridge.gatt_bus_addr & 0xfffff000))
-			| (current_size->size_value & 0xf));
-	pci_write_config_dword(agp_bridge.dev, ALI_ATTBASE, temp);
-
-	/* tlb control */
-
-	/*
-	 *	Question: Jeff, ALi's patch deletes this:
-	 *
-	 *	pci_read_config_dword(agp_bridge.dev, ALI_TLBCTRL, &temp);
-	 *	pci_write_config_dword(agp_bridge.dev, ALI_TLBCTRL,
-	 *			       ((temp & 0xffffff00) | 0x00000010));
-	 *
-	 *	and replaces it with the following, which seems to duplicate the
-	 *	next couple of lines below it. I suspect this was an oversight,
-	 *	but you might want to check up on this?
-	 */
-	
-	pci_read_config_dword(agp_bridge.dev, ALI_APBASE, &temp);
-	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
-
-	/* address to map to */
-	pci_read_config_dword(agp_bridge.dev, ALI_APBASE, &temp);
-	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
-
-#if 0
-	if (agp_bridge.type == ALI_M1541) {
-		u32 nlvm_addr = 0;
-
-		switch (current_size->size_value) {
-			case 0:  break;
-			case 1:  nlvm_addr = 0x100000;break;
-			case 2:  nlvm_addr = 0x200000;break;
-			case 3:  nlvm_addr = 0x400000;break;
-			case 4:  nlvm_addr = 0x800000;break;
-			case 6:  nlvm_addr = 0x1000000;break;
-			case 7:  nlvm_addr = 0x2000000;break;
-			case 8:  nlvm_addr = 0x4000000;break;
-			case 9:  nlvm_addr = 0x8000000;break;
-			case 10: nlvm_addr = 0x10000000;break;
-			default: break;
-		}
-		nlvm_addr--;
-		nlvm_addr&=0xfff00000;
-
-		nlvm_addr+= agp_bridge.gart_bus_addr;
-		nlvm_addr|=(agp_bridge.gart_bus_addr>>12);
-		printk(KERN_INFO PFX "nlvm top &base = %8x\n",nlvm_addr);
-	}
-#endif
-
-	pci_read_config_dword(agp_bridge.dev, ALI_TLBCTRL, &temp);
-	temp &= 0xffffff7f;		//enable TLB
-	pci_write_config_dword(agp_bridge.dev, ALI_TLBCTRL, temp);
-
-	return 0;
-}
-
-static unsigned long ali_mask_memory(unsigned long addr, int type)
-{
-	/* Memory type is ignored */
-
-	return addr | agp_bridge.masks[0].mask;
-}
-
-static void ali_cache_flush(void)
-{
-	global_cache_flush();
-
-	if (agp_bridge.type == ALI_M1541) {
-		int i, page_count;
-		u32 temp;
-
-		page_count = 1 << A_SIZE_32(agp_bridge.current_size)->page_order;
-		for (i = 0; i < PAGE_SIZE * page_count; i += PAGE_SIZE) {
-			pci_read_config_dword(agp_bridge.dev, ALI_CACHE_FLUSH_CTRL, &temp);
-			pci_write_config_dword(agp_bridge.dev, ALI_CACHE_FLUSH_CTRL,
-					(((temp & ALI_CACHE_FLUSH_ADDR_MASK) |
-					  (agp_bridge.gatt_bus_addr + i)) |
-					    ALI_CACHE_FLUSH_EN));
-		}
-	}
-}
-
-static void *ali_alloc_page(void)
-{
-	void *adr = agp_generic_alloc_page();
-	u32 temp;
-
-	if (adr == 0)
-		return 0;
-
-	if (agp_bridge.type == ALI_M1541) {
-		pci_read_config_dword(agp_bridge.dev, ALI_CACHE_FLUSH_CTRL, &temp);
-		pci_write_config_dword(agp_bridge.dev, ALI_CACHE_FLUSH_CTRL,
-				(((temp & ALI_CACHE_FLUSH_ADDR_MASK) |
-				  virt_to_phys(adr)) |
-				    ALI_CACHE_FLUSH_EN ));
-	}
-	return adr;
-}
-
-static void ali_destroy_page(void * addr)
-{
-	u32 temp;
-
-	if (addr == NULL)
-		return;
-
-	global_cache_flush();
-
-	if (agp_bridge.type == ALI_M1541) {
-		pci_read_config_dword(agp_bridge.dev, ALI_CACHE_FLUSH_CTRL, &temp);
-		pci_write_config_dword(agp_bridge.dev, ALI_CACHE_FLUSH_CTRL,
-				(((temp & ALI_CACHE_FLUSH_ADDR_MASK) |
-				  virt_to_phys(addr)) |
-				    ALI_CACHE_FLUSH_EN));
-	}
-
-	agp_generic_destroy_page(addr);
-}
-
-/* Setup function */
-static struct gatt_mask ali_generic_masks[] =
-{
-	{mask: 0x00000000, type: 0}
-};
-
-static struct aper_size_info_32 ali_generic_sizes[7] =
-{
-	{256, 65536, 6, 10},
-	{128, 32768, 5, 9},
-	{64, 16384, 4, 8},
-	{32, 8192, 3, 7},
-	{16, 4096, 2, 6},
-	{8, 2048, 1, 4},
-	{4, 1024, 0, 3}
-};
-
-int __init ali_generic_setup (struct pci_dev *pdev)
-{
-	agp_bridge.masks = ali_generic_masks;
-	agp_bridge.num_of_masks = 1;
-	agp_bridge.aperture_sizes = (void *) ali_generic_sizes;
-	agp_bridge.size_type = U32_APER_SIZE;
-	agp_bridge.num_aperture_sizes = 7;
-	agp_bridge.dev_private_data = NULL;
-	agp_bridge.needs_scratch_page = FALSE;
-	agp_bridge.configure = ali_configure;
-	agp_bridge.fetch_size = ali_fetch_size;
-	agp_bridge.cleanup = ali_cleanup;
-	agp_bridge.tlb_flush = ali_tlbflush;
-	agp_bridge.mask_memory = ali_mask_memory;
-	agp_bridge.agp_enable = agp_generic_agp_enable;
-	agp_bridge.cache_flush = ali_cache_flush;
-	agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
-	agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
-	agp_bridge.insert_memory = agp_generic_insert_memory;
-	agp_bridge.remove_memory = agp_generic_remove_memory;
-	agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
-	agp_bridge.free_by_type = agp_generic_free_by_type;
-	agp_bridge.agp_alloc_page = ali_alloc_page;
-	agp_bridge.agp_destroy_page = ali_destroy_page;
-	agp_bridge.suspend = agp_generic_suspend;
-	agp_bridge.resume = agp_generic_resume;
-	agp_bridge.cant_use_aperture = 0;
-
-	return 0;
-	
-	(void) pdev; /* unused */
-}
-
diff -Nru a/drivers/char/agp/agpgart_be-amd.c b/drivers/char/agp/agpgart_be-amd.c
--- a/drivers/char/agp/agpgart_be-amd.c	Wed Jul 17 11:29:31 2002
+++ /dev/null	Wed Dec 31 16:00:00 1969
@@ -1,408 +0,0 @@
-/*
- * AGPGART module version 0.99
- * Copyright (C) 1999 Jeff Hartmann
- * Copyright (C) 1999 Precision Insight, Inc.
- * Copyright (C) 1999 Xi Graphics, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, 
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE 
- * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * TODO: 
- * - Allocate more than order 0 pages to avoid too much linear map splitting.
- */
-
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/agp_backend.h>
-#include "agp.h"
-
-struct amd_page_map {
-	unsigned long *real;
-	unsigned long *remapped;
-};
-
-static struct _amd_irongate_private {
-	volatile u8 *registers;
-	struct amd_page_map **gatt_pages;
-	int num_tables;
-} amd_irongate_private;
-
-static int amd_create_page_map(struct amd_page_map *page_map)
-{
-	int i;
-
-	page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL);
-	if (page_map->real == NULL) {
-		return -ENOMEM;
-	}
-	SetPageReserved(virt_to_page(page_map->real));
-	CACHE_FLUSH();
-	page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real), 
-					    PAGE_SIZE);
-	if (page_map->remapped == NULL) {
-		ClearPageReserved(virt_to_page(page_map->real));
-		free_page((unsigned long) page_map->real);
-		page_map->real = NULL;
-		return -ENOMEM;
-	}
-	CACHE_FLUSH();
-
-	for(i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) {
-		page_map->remapped[i] = agp_bridge.scratch_page;
-	}
-
-	return 0;
-}
-
-static void amd_free_page_map(struct amd_page_map *page_map)
-{
-	iounmap(page_map->remapped);
-	ClearPageReserved(virt_to_page(page_map->real));
-	free_page((unsigned long) page_map->real);
-}
-
-static void amd_free_gatt_pages(void)
-{
-	int i;
-	struct amd_page_map **tables;
-	struct amd_page_map *entry;
-
-	tables = amd_irongate_private.gatt_pages;
-	for(i = 0; i < amd_irongate_private.num_tables; i++) {
-		entry = tables[i];
-		if (entry != NULL) {
-			if (entry->real != NULL) {
-				amd_free_page_map(entry);
-			}
-			kfree(entry);
-		}
-	}
-	kfree(tables);
-}
-
-static int amd_create_gatt_pages(int nr_tables)
-{
-	struct amd_page_map **tables;
-	struct amd_page_map *entry;
-	int retval = 0;
-	int i;
-
-	tables = kmalloc((nr_tables + 1) * sizeof(struct amd_page_map *), 
-			 GFP_KERNEL);
-	if (tables == NULL) {
-		return -ENOMEM;
-	}
-	memset(tables, 0, sizeof(struct amd_page_map *) * (nr_tables + 1));
-	for (i = 0; i < nr_tables; i++) {
-		entry = kmalloc(sizeof(struct amd_page_map), GFP_KERNEL);
-		if (entry == NULL) {
-			retval = -ENOMEM;
-			break;
-		}
-		memset(entry, 0, sizeof(struct amd_page_map));
-		tables[i] = entry;
-		retval = amd_create_page_map(entry);
-		if (retval != 0) break;
-	}
-	amd_irongate_private.num_tables = nr_tables;
-	amd_irongate_private.gatt_pages = tables;
-
-	if (retval != 0) amd_free_gatt_pages();
-
-	return retval;
-}
-
-/* Since we don't need contigious memory we just try
- * to get the gatt table once
- */
-
-#define GET_PAGE_DIR_OFF(addr) (addr >> 22)
-#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \
-	GET_PAGE_DIR_OFF(agp_bridge.gart_bus_addr))
-#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12) 
-#define GET_GATT(addr) (amd_irongate_private.gatt_pages[\
-	GET_PAGE_DIR_IDX(addr)]->remapped)
-
-static int amd_create_gatt_table(void)
-{
-	struct aper_size_info_lvl2 *value;
-	struct amd_page_map page_dir;
-	unsigned long addr;
-	int retval;
-	u32 temp;
-	int i;
-
-	value = A_SIZE_LVL2(agp_bridge.current_size);
-	retval = amd_create_page_map(&page_dir);
-	if (retval != 0) {
-		return retval;
-	}
-
-	retval = amd_create_gatt_pages(value->num_entries / 1024);
-	if (retval != 0) {
-		amd_free_page_map(&page_dir);
-		return retval;
-	}
-
-	agp_bridge.gatt_table_real = page_dir.real;
-	agp_bridge.gatt_table = page_dir.remapped;
-	agp_bridge.gatt_bus_addr = virt_to_phys(page_dir.real);
-
-	/* Get the address for the gart region.
-	 * This is a bus address even on the alpha, because it's
-	 * used to program the agp master, not the cpu.
-	 */
-
-	pci_read_config_dword(agp_bridge.dev, AMD_APBASE, &temp);
-	addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
-	agp_bridge.gart_bus_addr = addr;
-
-	/* Calculate the agp offset */
-	for(i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) {
-		page_dir.remapped[GET_PAGE_DIR_OFF(addr)] =
-			virt_to_phys(amd_irongate_private.gatt_pages[i]->real);
-		page_dir.remapped[GET_PAGE_DIR_OFF(addr)] |= 0x00000001;
-	}
-
-	return 0;
-}
-
-static int amd_free_gatt_table(void)
-{
-	struct amd_page_map page_dir;
-   
-	page_dir.real = agp_bridge.gatt_table_real;
-	page_dir.remapped = agp_bridge.gatt_table;
-
-	amd_free_gatt_pages();
-	amd_free_page_map(&page_dir);
-	return 0;
-}
-
-static int amd_irongate_fetch_size(void)
-{
-	int i;
-	u32 temp;
-	struct aper_size_info_lvl2 *values;
-
-	pci_read_config_dword(agp_bridge.dev, AMD_APSIZE, &temp);
-	temp = (temp & 0x0000000e);
-	values = A_SIZE_LVL2(agp_bridge.aperture_sizes);
-	for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
-		if (temp == values[i].size_value) {
-			agp_bridge.previous_size =
-			    agp_bridge.current_size = (void *) (values + i);
-
-			agp_bridge.aperture_size_idx = i;
-			return values[i].size;
-		}
-	}
-
-	return 0;
-}
-
-static int amd_irongate_configure(void)
-{
-	struct aper_size_info_lvl2 *current_size;
-	u32 temp;
-	u16 enable_reg;
-
-	current_size = A_SIZE_LVL2(agp_bridge.current_size);
-
-	/* Get the memory mapped registers */
-	pci_read_config_dword(agp_bridge.dev, AMD_MMBASE, &temp);
-	temp = (temp & PCI_BASE_ADDRESS_MEM_MASK);
-	amd_irongate_private.registers = (volatile u8 *) ioremap(temp, 4096);
-
-	/* Write out the address of the gatt table */
-	OUTREG32(amd_irongate_private.registers, AMD_ATTBASE,
-		 agp_bridge.gatt_bus_addr);
-
-	/* Write the Sync register */
-	pci_write_config_byte(agp_bridge.dev, AMD_MODECNTL, 0x80);
-   
-   	/* Set indexing mode */
-   	pci_write_config_byte(agp_bridge.dev, AMD_MODECNTL2, 0x00);
-
-	/* Write the enable register */
-	enable_reg = INREG16(amd_irongate_private.registers, AMD_GARTENABLE);
-	enable_reg = (enable_reg | 0x0004);
-	OUTREG16(amd_irongate_private.registers, AMD_GARTENABLE, enable_reg);
-
-	/* Write out the size register */
-	pci_read_config_dword(agp_bridge.dev, AMD_APSIZE, &temp);
-	temp = (((temp & ~(0x0000000e)) | current_size->size_value)
-		| 0x00000001);
-	pci_write_config_dword(agp_bridge.dev, AMD_APSIZE, temp);
-
-	/* Flush the tlb */
-	OUTREG32(amd_irongate_private.registers, AMD_TLBFLUSH, 0x00000001);
-
-	return 0;
-}
-
-static void amd_irongate_cleanup(void)
-{
-	struct aper_size_info_lvl2 *previous_size;
-	u32 temp;
-	u16 enable_reg;
-
-	previous_size = A_SIZE_LVL2(agp_bridge.previous_size);
-
-	enable_reg = INREG16(amd_irongate_private.registers, AMD_GARTENABLE);
-	enable_reg = (enable_reg & ~(0x0004));
-	OUTREG16(amd_irongate_private.registers, AMD_GARTENABLE, enable_reg);
-
-	/* Write back the previous size and disable gart translation */
-	pci_read_config_dword(agp_bridge.dev, AMD_APSIZE, &temp);
-	temp = ((temp & ~(0x0000000f)) | previous_size->size_value);
-	pci_write_config_dword(agp_bridge.dev, AMD_APSIZE, temp);
-	iounmap((void *) amd_irongate_private.registers);
-}
-
-/*
- * This routine could be implemented by taking the addresses
- * written to the GATT, and flushing them individually.  However
- * currently it just flushes the whole table, which is probably
- * more efficient, since agp_memory blocks can be a large number of
- * entries.
- */
-
-static void amd_irongate_tlbflush(agp_memory * temp)
-{
-	OUTREG32(amd_irongate_private.registers, AMD_TLBFLUSH, 0x00000001);
-}
-
-static unsigned long amd_irongate_mask_memory(unsigned long addr, int type)
-{
-	/* Only type 0 is supported by the irongate */
-
-	return addr | agp_bridge.masks[0].mask;
-}
-
-static int amd_insert_memory(agp_memory * mem,
-			     off_t pg_start, int type)
-{
-	int i, j, num_entries;
-	unsigned long *cur_gatt;
-	unsigned long addr;
-
-	num_entries = A_SIZE_LVL2(agp_bridge.current_size)->num_entries;
-
-	if (type != 0 || mem->type != 0) {
-		return -EINVAL;
-	}
-	if ((pg_start + mem->page_count) > num_entries) {
-		return -EINVAL;
-	}
-
-	j = pg_start;
-	while (j < (pg_start + mem->page_count)) {
-		addr = (j * PAGE_SIZE) + agp_bridge.gart_bus_addr;
-		cur_gatt = GET_GATT(addr);
-		if (!PGE_EMPTY(cur_gatt[GET_GATT_OFF(addr)])) {
-			return -EBUSY;
-		}
-		j++;
-	}
-
-	if (mem->is_flushed == FALSE) {
-		CACHE_FLUSH();
-		mem->is_flushed = TRUE;
-	}
-
-	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
-		addr = (j * PAGE_SIZE) + agp_bridge.gart_bus_addr;
-		cur_gatt = GET_GATT(addr);
-		cur_gatt[GET_GATT_OFF(addr)] = mem->memory[i];
-	}
-	agp_bridge.tlb_flush(mem);
-	return 0;
-}
-
-static int amd_remove_memory(agp_memory * mem, off_t pg_start,
-			     int type)
-{
-	int i;
-	unsigned long *cur_gatt;
-	unsigned long addr;
-
-	if (type != 0 || mem->type != 0) {
-		return -EINVAL;
-	}
-	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
-		addr = (i * PAGE_SIZE) + agp_bridge.gart_bus_addr;
-		cur_gatt = GET_GATT(addr);
-		cur_gatt[GET_GATT_OFF(addr)] = 
-			(unsigned long) agp_bridge.scratch_page;
-	}
-
-	agp_bridge.tlb_flush(mem);
-	return 0;
-}
-
-static struct aper_size_info_lvl2 amd_irongate_sizes[7] =
-{
-	{2048, 524288, 0x0000000c},
-	{1024, 262144, 0x0000000a},
-	{512, 131072, 0x00000008},
-	{256, 65536, 0x00000006},
-	{128, 32768, 0x00000004},
-	{64, 16384, 0x00000002},
-	{32, 8192, 0x00000000}
-};
-
-static struct gatt_mask amd_irongate_masks[] =
-{
-	{mask: 0x00000001, type: 0}
-};
-
-int __init amd_irongate_setup (struct pci_dev *pdev)
-{
-	agp_bridge.masks = amd_irongate_masks;
-	agp_bridge.num_of_masks = 1;
-	agp_bridge.aperture_sizes = (void *) amd_irongate_sizes;
-	agp_bridge.size_type = LVL2_APER_SIZE;
-	agp_bridge.num_aperture_sizes = 7;
-	agp_bridge.dev_private_data = (void *) &amd_irongate_private;
-	agp_bridge.needs_scratch_page = FALSE;
-	agp_bridge.configure = amd_irongate_configure;
-	agp_bridge.fetch_size = amd_irongate_fetch_size;
-	agp_bridge.cleanup = amd_irongate_cleanup;
-	agp_bridge.tlb_flush = amd_irongate_tlbflush;
-	agp_bridge.mask_memory = amd_irongate_mask_memory;
-	agp_bridge.agp_enable = agp_generic_agp_enable;
-	agp_bridge.cache_flush = global_cache_flush;
-	agp_bridge.create_gatt_table = amd_create_gatt_table;
-	agp_bridge.free_gatt_table = amd_free_gatt_table;
-	agp_bridge.insert_memory = amd_insert_memory;
-	agp_bridge.remove_memory = amd_remove_memory;
-	agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
-	agp_bridge.free_by_type = agp_generic_free_by_type;
-	agp_bridge.agp_alloc_page = agp_generic_alloc_page;
-	agp_bridge.agp_destroy_page = agp_generic_destroy_page;
-	agp_bridge.suspend = agp_generic_suspend;
-	agp_bridge.resume = agp_generic_resume;
-	agp_bridge.cant_use_aperture = 0;
-
-	return 0;
-	
-	(void) pdev; /* unused */
-}
-
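The AMD Irongate code above keeps a two-level GATT: a single page directory whose entries point at 4 KB GATT pages, each directory slot covering 4 MB of aperture (1024 entries of 4 KB). The GET_PAGE_DIR_OFF(), GET_PAGE_DIR_IDX() and GET_GATT_OFF() macros just split an aperture bus address into those two indices. A minimal sketch of that arithmetic (not part of the patch); the example_* names are illustrative only:

/* Each page directory slot maps 4 MB of aperture (1024 * 4 KB GATT entries). */
static inline unsigned long example_page_dir_index(unsigned long addr,
						   unsigned long aper_base)
{
	return (addr >> 22) - (aper_base >> 22);	/* which GATT page */
}

static inline unsigned long example_gatt_index(unsigned long addr)
{
	return (addr & 0x003ff000) >> 12;		/* entry inside that page */
}

amd_insert_memory() above combines the two: it picks the GATT page via GET_GATT() and then writes the masked physical address at the GET_GATT_OFF() slot.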
diff -Nru a/drivers/char/agp/agpgart_be-hp.c b/drivers/char/agp/agpgart_be-hp.c
--- a/drivers/char/agp/agpgart_be-hp.c	Wed Jul 17 11:29:31 2002
+++ /dev/null	Wed Dec 31 16:00:00 1969
@@ -1,394 +0,0 @@
-/*
- * AGPGART module version 0.99
- * Copyright (C) 1999 Jeff Hartmann
- * Copyright (C) 1999 Precision Insight, Inc.
- * Copyright (C) 1999 Xi Graphics, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, 
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE 
- * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * TODO: 
- * - Allocate more than order 0 pages to avoid too much linear map splitting.
- */
-
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/agp_backend.h>
-#include "agp.h"
-
-
-#ifndef log2
-#define log2(x)		ffz(~(x))
-#endif
-
-#define HP_ZX1_IOVA_BASE	GB(1UL)
-#define HP_ZX1_IOVA_SIZE	GB(1UL)
-#define HP_ZX1_GART_SIZE	(HP_ZX1_IOVA_SIZE / 2)
-#define HP_ZX1_SBA_IOMMU_COOKIE	0x0000badbadc0ffeeUL
-
-#define HP_ZX1_PDIR_VALID_BIT	0x8000000000000000UL
-#define HP_ZX1_IOVA_TO_PDIR(va)	((va - hp_private.iova_base) >> \
-					hp_private.io_tlb_shift)
-
-static struct aper_size_info_fixed hp_zx1_sizes[] =
-{
-	{0, 0, 0},		/* filled in by hp_zx1_fetch_size() */
-};
-
-static struct gatt_mask hp_zx1_masks[] =
-{
-	{mask: HP_ZX1_PDIR_VALID_BIT, type: 0}
-};
-
-static struct _hp_private {
-	struct pci_dev *ioc;
-	volatile u8 *registers;
-	u64 *io_pdir;		// PDIR for entire IOVA
-	u64 *gatt;		// PDIR just for GART (subset of above)
-	u64 gatt_entries;
-	u64 iova_base;
-	u64 gart_base;
-	u64 gart_size;
-	u64 io_pdir_size;
-	int io_pdir_owner;	// do we own it, or share it with sba_iommu?
-	int io_page_size;
-	int io_tlb_shift;
-	int io_tlb_ps;		// IOC ps config
-	int io_pages_per_kpage;
-} hp_private;
-
-static int __init hp_zx1_ioc_shared(void)
-{
-	struct _hp_private *hp = &hp_private;
-
-	printk(KERN_INFO PFX "HP ZX1 IOC: IOPDIR shared with sba_iommu\n");
-
-	/*
-	 * IOC already configured by sba_iommu module; just use
-	 * its setup.  We assume:
-	 * 	- IOVA space is 1GB in size
-	 * 	- first 512MB is IOMMU, second 512MB is GART
-	 */
-	hp->io_tlb_ps = INREG64(hp->registers, HP_ZX1_TCNFG);
-	switch (hp->io_tlb_ps) {
-		case 0: hp->io_tlb_shift = 12; break;
-		case 1: hp->io_tlb_shift = 13; break;
-		case 2: hp->io_tlb_shift = 14; break;
-		case 3: hp->io_tlb_shift = 16; break;
-		default:
-			printk(KERN_ERR PFX "Invalid IOTLB page size "
-			       "configuration 0x%x\n", hp->io_tlb_ps);
-			hp->gatt = 0;
-			hp->gatt_entries = 0;
-			return -ENODEV;
-	}
-	hp->io_page_size = 1 << hp->io_tlb_shift;
-	hp->io_pages_per_kpage = PAGE_SIZE / hp->io_page_size;
-
-	hp->iova_base = INREG64(hp->registers, HP_ZX1_IBASE) & ~0x1;
-	hp->gart_base = hp->iova_base + HP_ZX1_IOVA_SIZE - HP_ZX1_GART_SIZE;
-
-	hp->gart_size = HP_ZX1_GART_SIZE;
-	hp->gatt_entries = hp->gart_size / hp->io_page_size;
-
-	hp->io_pdir = phys_to_virt(INREG64(hp->registers, HP_ZX1_PDIR_BASE));
-	hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)];
-
-	if (hp->gatt[0] != HP_ZX1_SBA_IOMMU_COOKIE) {
-	    	hp->gatt = 0;
-		hp->gatt_entries = 0;
-		printk(KERN_ERR PFX "No reserved IO PDIR entry found; "
-		       "GART disabled\n");
-		return -ENODEV;
-	}
-
-	return 0;
-}
-
-static int __init hp_zx1_ioc_owner(u8 ioc_rev)
-{
-	struct _hp_private *hp = &hp_private;
-
-	printk(KERN_INFO PFX "HP ZX1 IOC: IOPDIR dedicated to GART\n");
-
-	/*
-	 * Select an IOV page size no larger than system page size.
-	 */
-	if (PAGE_SIZE >= KB(64)) {
-		hp->io_tlb_shift = 16;
-		hp->io_tlb_ps = 3;
-	} else if (PAGE_SIZE >= KB(16)) {
-		hp->io_tlb_shift = 14;
-		hp->io_tlb_ps = 2;
-	} else if (PAGE_SIZE >= KB(8)) {
-		hp->io_tlb_shift = 13;
-		hp->io_tlb_ps = 1;
-	} else {
-		hp->io_tlb_shift = 12;
-		hp->io_tlb_ps = 0;
-	}
-	hp->io_page_size = 1 << hp->io_tlb_shift;
-	hp->io_pages_per_kpage = PAGE_SIZE / hp->io_page_size;
-
-	hp->iova_base = HP_ZX1_IOVA_BASE;
-	hp->gart_size = HP_ZX1_GART_SIZE;
-	hp->gart_base = hp->iova_base + HP_ZX1_IOVA_SIZE - hp->gart_size;
-
-	hp->gatt_entries = hp->gart_size / hp->io_page_size;
-	hp->io_pdir_size = (HP_ZX1_IOVA_SIZE / hp->io_page_size) * sizeof(u64);
-
-	return 0;
-}
-
-static int __init hp_zx1_ioc_init(void)
-{
-	struct _hp_private *hp = &hp_private;
-	struct pci_dev *ioc;
-	int i;
-	u8 ioc_rev;
-
-	ioc = pci_find_device(PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_ZX1_IOC, NULL);
-	if (!ioc) {
-		printk(KERN_ERR PFX "Detected HP ZX1 AGP bridge but no IOC\n");
-		return -ENODEV;
-	}
-	hp->ioc = ioc;
-
-	pci_read_config_byte(ioc, PCI_REVISION_ID, &ioc_rev);
-
-	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
-		if (pci_resource_flags(ioc, i) == IORESOURCE_MEM) {
-			hp->registers = (u8 *) ioremap(pci_resource_start(ioc,
-									    i),
-						    pci_resource_len(ioc, i));
-			break;
-		}
-	}
-	if (!hp->registers) {
-		printk(KERN_ERR PFX "Detected HP ZX1 AGP bridge but no CSRs\n");
-
-		return -ENODEV;
-	}
-
-	/*
-	 * If the IOTLB is currently disabled, we can take it over.
-	 * Otherwise, we have to share with sba_iommu.
-	 */
-	hp->io_pdir_owner = (INREG64(hp->registers, HP_ZX1_IBASE) & 0x1) == 0;
-
-	if (hp->io_pdir_owner)
-		return hp_zx1_ioc_owner(ioc_rev);
-
-	return hp_zx1_ioc_shared();
-}
-
-static int hp_zx1_fetch_size(void)
-{
-	int size;
-
-	size = hp_private.gart_size / MB(1);
-	hp_zx1_sizes[0].size = size;
-	agp_bridge.current_size = (void *) &hp_zx1_sizes[0];
-	return size;
-}
-
-static int hp_zx1_configure(void)
-{
-	struct _hp_private *hp = &hp_private;
-
-	agp_bridge.gart_bus_addr = hp->gart_base;
-	agp_bridge.capndx = pci_find_capability(agp_bridge.dev, PCI_CAP_ID_AGP);
-	pci_read_config_dword(agp_bridge.dev,
-		agp_bridge.capndx + PCI_AGP_STATUS, &agp_bridge.mode);
-
-	if (hp->io_pdir_owner) {
-		OUTREG64(hp->registers, HP_ZX1_PDIR_BASE,
-			virt_to_phys(hp->io_pdir));
-		OUTREG64(hp->registers, HP_ZX1_TCNFG, hp->io_tlb_ps);
-		OUTREG64(hp->registers, HP_ZX1_IMASK, ~(HP_ZX1_IOVA_SIZE - 1));
-		OUTREG64(hp->registers, HP_ZX1_IBASE, hp->iova_base | 0x1);
-		OUTREG64(hp->registers, HP_ZX1_PCOM,
-			hp->iova_base | log2(HP_ZX1_IOVA_SIZE));
-		INREG64(hp->registers, HP_ZX1_PCOM);
-	}
-
-	return 0;
-}
-
-static void hp_zx1_cleanup(void)
-{
-	struct _hp_private *hp = &hp_private;
-
-	if (hp->io_pdir_owner)
-		OUTREG64(hp->registers, HP_ZX1_IBASE, 0);
-	iounmap((void *) hp->registers);
-}
-
-static void hp_zx1_tlbflush(agp_memory * mem)
-{
-	struct _hp_private *hp = &hp_private;
-
-	OUTREG64(hp->registers, HP_ZX1_PCOM, 
-		hp->gart_base | log2(hp->gart_size));
-	INREG64(hp->registers, HP_ZX1_PCOM);
-}
-
-static int hp_zx1_create_gatt_table(void)
-{
-	struct _hp_private *hp = &hp_private;
-	int i;
-
-	if (hp->io_pdir_owner) {
-		hp->io_pdir = (u64 *) __get_free_pages(GFP_KERNEL,
-						get_order(hp->io_pdir_size));
-		if (!hp->io_pdir) {
-			printk(KERN_ERR PFX "Couldn't allocate contiguous "
-				"memory for I/O PDIR\n");
-			hp->gatt = 0;
-			hp->gatt_entries = 0;
-			return -ENOMEM;
-		}
-		memset(hp->io_pdir, 0, hp->io_pdir_size);
-
-		hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)];
-	}
-
-	for (i = 0; i < hp->gatt_entries; i++) {
-		hp->gatt[i] = (unsigned long) agp_bridge.scratch_page;
-	}
-
-	return 0;
-}
-
-static int hp_zx1_free_gatt_table(void)
-{
-	struct _hp_private *hp = &hp_private;
-	
-	if (hp->io_pdir_owner)
-		free_pages((unsigned long) hp->io_pdir,
-			    get_order(hp->io_pdir_size));
-	else
-		hp->gatt[0] = HP_ZX1_SBA_IOMMU_COOKIE;
-	return 0;
-}
-
-static int hp_zx1_insert_memory(agp_memory * mem, off_t pg_start, int type)
-{
-	struct _hp_private *hp = &hp_private;
-	int i, k;
-	off_t j, io_pg_start;
-	int io_pg_count;
-
-	if (type != 0 || mem->type != 0) {
-		return -EINVAL;
-	}
-
-	io_pg_start = hp->io_pages_per_kpage * pg_start;
-	io_pg_count = hp->io_pages_per_kpage * mem->page_count;
-	if ((io_pg_start + io_pg_count) > hp->gatt_entries) {
-		return -EINVAL;
-	}
-
-	j = io_pg_start;
-	while (j < (io_pg_start + io_pg_count)) {
-		if (hp->gatt[j]) {
-			return -EBUSY;
-		}
-		j++;
-	}
-
-	if (mem->is_flushed == FALSE) {
-		CACHE_FLUSH();
-		mem->is_flushed = TRUE;
-	}
-
-	for (i = 0, j = io_pg_start; i < mem->page_count; i++) {
-		unsigned long paddr;
-
-		paddr = mem->memory[i];
-		for (k = 0;
-		     k < hp->io_pages_per_kpage;
-		     k++, j++, paddr += hp->io_page_size) {
-			hp->gatt[j] = agp_bridge.mask_memory(paddr, type);
-		}
-	}
-
-	agp_bridge.tlb_flush(mem);
-	return 0;
-}
-
-static int hp_zx1_remove_memory(agp_memory * mem, off_t pg_start, int type)
-{
-	struct _hp_private *hp = &hp_private;
-	int i, io_pg_start, io_pg_count;
-
-	if (type != 0 || mem->type != 0) {
-		return -EINVAL;
-	}
-
-	io_pg_start = hp->io_pages_per_kpage * pg_start;
-	io_pg_count = hp->io_pages_per_kpage * mem->page_count;
-	for (i = io_pg_start; i < io_pg_count + io_pg_start; i++) {
-		hp->gatt[i] = agp_bridge.scratch_page;
-	}
-
-	agp_bridge.tlb_flush(mem);
-	return 0;
-}
-
-static unsigned long hp_zx1_mask_memory(unsigned long addr, int type)
-{
-	return HP_ZX1_PDIR_VALID_BIT | addr;
-}
-
-static unsigned long hp_zx1_unmask_memory(unsigned long addr)
-{
-	return addr & ~(HP_ZX1_PDIR_VALID_BIT);
-}
-
-int __init hp_zx1_setup (struct pci_dev *pdev)
-{
-	agp_bridge.masks = hp_zx1_masks;
-	agp_bridge.num_of_masks = 1;
-	agp_bridge.dev_private_data = NULL;
-	agp_bridge.size_type = FIXED_APER_SIZE;
-	agp_bridge.needs_scratch_page = FALSE;
-	agp_bridge.configure = hp_zx1_configure;
-	agp_bridge.fetch_size = hp_zx1_fetch_size;
-	agp_bridge.cleanup = hp_zx1_cleanup;
-	agp_bridge.tlb_flush = hp_zx1_tlbflush;
-	agp_bridge.mask_memory = hp_zx1_mask_memory;
-	agp_bridge.unmask_memory = hp_zx1_unmask_memory;
-	agp_bridge.agp_enable = agp_generic_agp_enable;
-	agp_bridge.cache_flush = global_cache_flush;
-	agp_bridge.create_gatt_table = hp_zx1_create_gatt_table;
-	agp_bridge.free_gatt_table = hp_zx1_free_gatt_table;
-	agp_bridge.insert_memory = hp_zx1_insert_memory;
-	agp_bridge.remove_memory = hp_zx1_remove_memory;
-	agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
-	agp_bridge.free_by_type = agp_generic_free_by_type;
-	agp_bridge.agp_alloc_page = agp_generic_alloc_page;
-	agp_bridge.agp_destroy_page = agp_generic_destroy_page;
-	agp_bridge.cant_use_aperture = 1;
-
-	return hp_zx1_ioc_init();
-
-	(void) pdev; /* unused */
-}
-
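On the ZX1 the GART is simply the upper half of a 1 GB IOVA window whose translations live in the IO PDIR, either owned by this driver or shared with sba_iommu. HP_ZX1_IOVA_TO_PDIR() above turns an IOVA into a PDIR slot by subtracting the IOVA base and shifting by the IO TLB page shift. A minimal sketch of that lookup (not part of the patch), assuming 4 KB IO pages; the example_* name is illustrative:

/* IOVA -> IO PDIR index, as HP_ZX1_IOVA_TO_PDIR() computes it. */
static inline unsigned long example_iova_to_pdir(unsigned long iova,
						 unsigned long iova_base,
						 unsigned int io_tlb_shift)
{
	return (iova - iova_base) >> io_tlb_shift;
}

With a 1 GB window and 4 KB IO pages, the GART half that starts 512 MB into the window begins at PDIR index 131072, which is where hp->gatt points in the shared case.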
diff -Nru a/drivers/char/agp/agpgart_be-i460.c b/drivers/char/agp/agpgart_be-i460.c
--- a/drivers/char/agp/agpgart_be-i460.c	Wed Jul 17 11:29:31 2002
+++ /dev/null	Wed Dec 31 16:00:00 1969
@@ -1,595 +0,0 @@
-/*
- * AGPGART module version 0.99
- * Copyright (C) 1999 Jeff Hartmann
- * Copyright (C) 1999 Precision Insight, Inc.
- * Copyright (C) 1999 Xi Graphics, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, 
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE 
- * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * TODO: 
- * - Allocate more than order 0 pages to avoid too much linear map splitting.
- */
-
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/agp_backend.h>
-#include "agp.h"
-
-/* BIOS configures the chipset so that one of two apbase registers is used */
-static u8 intel_i460_dynamic_apbase = 0x10;
-
-/* 460 supports multiple GART page sizes, so GART pageshift is dynamic */
-static u8 intel_i460_pageshift = 12;
-static u32 intel_i460_pagesize;
-
-/* Keep track of which is larger, chipset or kernel page size. */
-static u32 intel_i460_cpk = 1;
-
-/* Structure for tracking partial use of 4MB GART pages */
-static u32 **i460_pg_detail = NULL;
-static u32 *i460_pg_count = NULL;
-
-#define I460_CPAGES_PER_KPAGE (PAGE_SIZE >> intel_i460_pageshift)
-#define I460_KPAGES_PER_CPAGE ((1 << intel_i460_pageshift) >> PAGE_SHIFT)
-
-#define I460_SRAM_IO_DISABLE		(1 << 4)
-#define I460_BAPBASE_ENABLE		(1 << 3)
-#define I460_AGPSIZ_MASK		0x7
-#define I460_4M_PS			(1 << 1)
-
-#define log2(x)				ffz(~(x))
-
-static inline void intel_i460_read_back (volatile u32 *entry)
-{
-	/*
-	 * The 460 spec says we have to read the last location written to
-	 * make sure that all writes have taken effect
-	 */
-	*entry;
-}
-
-static int intel_i460_fetch_size(void)
-{
-	int i;
-	u8 temp;
-	struct aper_size_info_8 *values;
-
-	/* Determine the GART page size */
-	pci_read_config_byte(agp_bridge.dev, INTEL_I460_GXBCTL, &temp);
-	intel_i460_pageshift = (temp & I460_4M_PS) ? 22 : 12;
-	intel_i460_pagesize = 1UL << intel_i460_pageshift;
-
-	values = A_SIZE_8(agp_bridge.aperture_sizes);
-
-	pci_read_config_byte(agp_bridge.dev, INTEL_I460_AGPSIZ, &temp);
-
-	/* Exit now if the IO drivers for the GART SRAMS are turned off */
-	if (temp & I460_SRAM_IO_DISABLE) {
-		printk(KERN_ERR PFX "GART SRAMS disabled on 460GX chipset\n");
-		printk(KERN_ERR PFX "AGPGART operation not possible\n");
-		return 0;
-	}
-
-	/* Make sure we don't try to create a 2^23 entry GATT */
-	if ((intel_i460_pageshift == 12) && ((temp & I460_AGPSIZ_MASK) == 4)) {
-		printk(KERN_ERR PFX "We can't have a 32GB aperture with 4KB GART pages\n");
-		return 0;
-	}
-
-	/* Determine the proper APBASE register */
-	if (temp & I460_BAPBASE_ENABLE)
-		intel_i460_dynamic_apbase = INTEL_I460_BAPBASE;
-	else
-		intel_i460_dynamic_apbase = INTEL_I460_APBASE;
-
-	for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
-		/*
-		 * Dynamically calculate the proper num_entries and page_order values for
-		 * the defined aperture sizes. Take care not to shift off the end of
-		 * values[i].size.
-		 */
-		values[i].num_entries = (values[i].size << 8) >> (intel_i460_pageshift - 12);
-		values[i].page_order = log2((sizeof(u32)*values[i].num_entries) >> PAGE_SHIFT);
-	}
-
-	for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
-		/* Neglect control bits when matching up size_value */
-		if ((temp & I460_AGPSIZ_MASK) == values[i].size_value) {
-			agp_bridge.previous_size = agp_bridge.current_size = (void *) (values + i);
-			agp_bridge.aperture_size_idx = i;
-			return values[i].size;
-		}
-	}
-
-	return 0;
-}
-
-/* There isn't anything to do here since 460 has no GART TLB. */
-static void intel_i460_tlb_flush(agp_memory * mem)
-{
-	return;
-}
-
-/*
- * This utility function is needed to prevent corruption of the control bits
- * which are stored along with the aperture size in 460's AGPSIZ register
- */
-static void intel_i460_write_agpsiz(u8 size_value)
-{
-	u8 temp;
-
-	pci_read_config_byte(agp_bridge.dev, INTEL_I460_AGPSIZ, &temp);
-	pci_write_config_byte(agp_bridge.dev, INTEL_I460_AGPSIZ,
-			      ((temp & ~I460_AGPSIZ_MASK) | size_value));
-}
-
-static void intel_i460_cleanup(void)
-{
-	struct aper_size_info_8 *previous_size;
-
-	previous_size = A_SIZE_8(agp_bridge.previous_size);
-	intel_i460_write_agpsiz(previous_size->size_value);
-
-	if (intel_i460_cpk == 0) {
-		vfree(i460_pg_detail);
-		vfree(i460_pg_count);
-	}
-}
-
-
-/* Control bits for Out-Of-GART coherency and Burst Write Combining */
-#define I460_GXBCTL_OOG		(1UL << 0)
-#define I460_GXBCTL_BWC		(1UL << 2)
-
-static int intel_i460_configure(void)
-{
-	union {
-		u32 small[2];
-		u64 large;
-	} temp;
-	u8 scratch;
-	int i;
-
-	struct aper_size_info_8 *current_size;
-
-	temp.large = 0;
-
-	current_size = A_SIZE_8(agp_bridge.current_size);
-	intel_i460_write_agpsiz(current_size->size_value);
-
-	/*
-	 * Do the necessary rigmarole to read all eight bytes of APBASE.
-	 * This has to be done since the AGP aperture can be above 4GB on
-	 * 460 based systems.
-	 */
-	pci_read_config_dword(agp_bridge.dev, intel_i460_dynamic_apbase, &(temp.small[0]));
-	pci_read_config_dword(agp_bridge.dev, intel_i460_dynamic_apbase + 4, &(temp.small[1]));
-
-	/* Clear BAR control bits */
-	agp_bridge.gart_bus_addr = temp.large & ~((1UL << 3) - 1);
-
-	pci_read_config_byte(agp_bridge.dev, INTEL_I460_GXBCTL, &scratch);
-	pci_write_config_byte(agp_bridge.dev, INTEL_I460_GXBCTL,
-			      (scratch & 0x02) | I460_GXBCTL_OOG | I460_GXBCTL_BWC);
-
-	/*
-	 * Initialize partial allocation trackers if a GART page is bigger than
-	 * a kernel page.
-	 */
-	if (I460_CPAGES_PER_KPAGE >= 1) {
-		intel_i460_cpk = 1;
-	} else {
-		intel_i460_cpk = 0;
-
-		i460_pg_detail = vmalloc(sizeof(*i460_pg_detail) * current_size->num_entries);
-		i460_pg_count = vmalloc(sizeof(*i460_pg_count) * current_size->num_entries);
-
-		for (i = 0; i < current_size->num_entries; i++) {
-			i460_pg_count[i] = 0;
-			i460_pg_detail[i] = NULL;
-		}
-	}
-	return 0;
-}
-
-static int intel_i460_create_gatt_table(void)
-{
-	char *table;
-	int i;
-	int page_order;
-	int num_entries;
-	void *temp;
-
-	/*
-	 * Load up the fixed address of the GART SRAMS which hold our
-	 * GATT table.
-	 */
-	table = (char *) __va(INTEL_I460_ATTBASE);
-
-	temp = agp_bridge.current_size;
-	page_order = A_SIZE_8(temp)->page_order;
-	num_entries = A_SIZE_8(temp)->num_entries;
-
-	agp_bridge.gatt_table_real = (u32 *) table;
-	agp_bridge.gatt_table = ioremap_nocache(virt_to_phys(table),
-						(PAGE_SIZE * (1 << page_order)));
-	agp_bridge.gatt_bus_addr = virt_to_phys(agp_bridge.gatt_table_real);
-
-	for (i = 0; i < num_entries; i++) {
-		agp_bridge.gatt_table[i] = 0;
-	}
-
-	intel_i460_read_back(agp_bridge.gatt_table + i - 1);
-	return 0;
-}
-
-static int intel_i460_free_gatt_table(void)
-{
-	int num_entries;
-	int i;
-	void *temp;
-
-	temp = agp_bridge.current_size;
-
-	num_entries = A_SIZE_8(temp)->num_entries;
-
-	for (i = 0; i < num_entries; i++) {
-		agp_bridge.gatt_table[i] = 0;
-	}
-
-	intel_i460_read_back(agp_bridge.gatt_table + i - 1);
-
-	iounmap(agp_bridge.gatt_table);
-	return 0;
-}
-
-/* These functions are called when PAGE_SIZE is at least as large as the GART page size */
-
-static int intel_i460_insert_memory_cpk(agp_memory * mem, off_t pg_start, int type)
-{
-	int i, j, k, num_entries;
-	void *temp;
-	unsigned long paddr;
-
-	/*
-	 * The rest of the kernel will compute page offsets in terms of
-	 * PAGE_SIZE.
-	 */
-	pg_start = I460_CPAGES_PER_KPAGE * pg_start;
-
-	temp = agp_bridge.current_size;
-	num_entries = A_SIZE_8(temp)->num_entries;
-
-	if ((pg_start + I460_CPAGES_PER_KPAGE * mem->page_count) > num_entries) {
-		printk(KERN_ERR PFX "Looks like we're out of AGP memory\n");
-		return -EINVAL;
-	}
-
-	j = pg_start;
-	while (j < (pg_start + I460_CPAGES_PER_KPAGE * mem->page_count)) {
-		if (!PGE_EMPTY(agp_bridge.gatt_table[j])) {
-			return -EBUSY;
-		}
-		j++;
-	}
-
-#if 0
-	/* not necessary since 460 GART is operated in coherent mode... */
-	if (mem->is_flushed == FALSE) {
-		CACHE_FLUSH();
-		mem->is_flushed = TRUE;
-	}
-#endif
-
-	for (i = 0, j = pg_start; i < mem->page_count; i++) {
-		paddr = mem->memory[i];
-		for (k = 0; k < I460_CPAGES_PER_KPAGE; k++, j++, paddr += intel_i460_pagesize)
-			agp_bridge.gatt_table[j] = (u32) agp_bridge.mask_memory(paddr, mem->type);
-	}
-
-	intel_i460_read_back(agp_bridge.gatt_table + j - 1);
-	return 0;
-}
-
-static int intel_i460_remove_memory_cpk(agp_memory * mem, off_t pg_start, int type)
-{
-	int i;
-
-	pg_start = I460_CPAGES_PER_KPAGE * pg_start;
-
-	for (i = pg_start; i < (pg_start + I460_CPAGES_PER_KPAGE * mem->page_count); i++)
-		agp_bridge.gatt_table[i] = 0;
-
-	intel_i460_read_back(agp_bridge.gatt_table + i - 1);
-	return 0;
-}
-
-/*
- * These functions are called when the GART page size exceeds PAGE_SIZE.
- *
- * This situation is interesting since AGP memory allocations that are
- * smaller than a single GART page are possible.  The structures i460_pg_count
- * and i460_pg_detail track partial allocation of the large GART pages to
- * work around this issue.
- *
- * i460_pg_count[pg_num] tracks the number of kernel pages in use within
- * GART page pg_num.  i460_pg_detail[pg_num] is an array containing a
- * pseudo-GART entry for each of the aforementioned kernel pages.  The whole
- * of i460_pg_detail is equivalent to a giant GATT with page size equal to
- * that of the kernel.
- */
-
-static void *intel_i460_alloc_large_page(int pg_num)
-{
-	int i;
-	void *bp, *bp_end;
-	struct page *page;
-
-	i460_pg_detail[pg_num] = (void *) vmalloc(sizeof(u32) * I460_KPAGES_PER_CPAGE);
-	if (i460_pg_detail[pg_num] == NULL) {
-		printk(KERN_ERR PFX "Out of memory, we're in trouble...\n");
-		return NULL;
-	}
-
-	for (i = 0; i < I460_KPAGES_PER_CPAGE; i++)
-		i460_pg_detail[pg_num][i] = 0;
-
-	bp = (void *) __get_free_pages(GFP_KERNEL, intel_i460_pageshift - PAGE_SHIFT);
-	if (bp == NULL) {
-		printk(KERN_ERR PFX "Couldn't alloc 4M GART page...\n");
-		return NULL;
-	}
-
-	bp_end = bp + ((PAGE_SIZE * (1 << (intel_i460_pageshift - PAGE_SHIFT))) - 1);
-
-	for (page = virt_to_page(bp); page <= virt_to_page(bp_end); page++) {
-		atomic_inc(&agp_bridge.current_memory_agp);
-	}
-	return bp;
-}
-
-static void intel_i460_free_large_page(int pg_num, unsigned long addr)
-{
-	struct page *page;
-	void *bp, *bp_end;
-
-	bp = (void *) __va(addr);
-	bp_end = bp + (PAGE_SIZE * (1 << (intel_i460_pageshift - PAGE_SHIFT)));
-
-	vfree(i460_pg_detail[pg_num]);
-	i460_pg_detail[pg_num] = NULL;
-
-	for (page = virt_to_page(bp); page < virt_to_page(bp_end); page++) {
-		atomic_dec(&agp_bridge.current_memory_agp);
-	}
-
-	free_pages((unsigned long) bp, intel_i460_pageshift - PAGE_SHIFT);
-}
-
-static int intel_i460_insert_memory_kpc(agp_memory * mem, off_t pg_start, int type)
-{
-	int i, pg, start_pg, end_pg, start_offset, end_offset, idx;
-	int num_entries;
-	void *temp;
-	unsigned long paddr;
-
-	temp = agp_bridge.current_size;
-	num_entries = A_SIZE_8(temp)->num_entries;
-
-	/* Figure out what pg_start means in terms of our large GART pages */
-	start_pg 	= pg_start / I460_KPAGES_PER_CPAGE;
-	start_offset 	= pg_start % I460_KPAGES_PER_CPAGE;
-	end_pg 		= (pg_start + mem->page_count - 1) / I460_KPAGES_PER_CPAGE;
-	end_offset 	= (pg_start + mem->page_count - 1) % I460_KPAGES_PER_CPAGE;
-
-	if (end_pg > num_entries) {
-		printk(KERN_ERR PFX "Looks like we're out of AGP memory\n");
-		return -EINVAL;
-	}
-
-	/* Check if the requested region of the aperture is free */
-	for (pg = start_pg; pg <= end_pg; pg++) {
-		/* Allocate new GART pages if necessary */
-		if (i460_pg_detail[pg] == NULL) {
-			temp = intel_i460_alloc_large_page(pg);
-			if (temp == NULL)
-				return -ENOMEM;
-			agp_bridge.gatt_table[pg] = agp_bridge.mask_memory((unsigned long) temp,
-									   0);
-			intel_i460_read_back(agp_bridge.gatt_table + pg);
-		}
-
-		for (idx = ((pg == start_pg) ? start_offset : 0);
-		     idx < ((pg == end_pg) ? (end_offset + 1) : I460_KPAGES_PER_CPAGE);
-		     idx++)
-		{
-			if (i460_pg_detail[pg][idx] != 0)
-				return -EBUSY;
-		}
-	}
-
-#if 0
-	/* not necessary since 460 GART is operated in coherent mode... */
-	if (mem->is_flushed == FALSE) {
-		CACHE_FLUSH();
-		mem->is_flushed = TRUE;
-	}
-#endif
-
-	for (pg = start_pg, i = 0; pg <= end_pg; pg++) {
-		paddr = agp_bridge.unmask_memory(agp_bridge.gatt_table[pg]);
-		for (idx = ((pg == start_pg) ? start_offset : 0);
-		     idx < ((pg == end_pg) ? (end_offset + 1) : I460_KPAGES_PER_CPAGE);
-		     idx++, i++)
-		{
-			mem->memory[i] = paddr + (idx * PAGE_SIZE);
-			i460_pg_detail[pg][idx] = agp_bridge.mask_memory(mem->memory[i],
-									 mem->type);
-			i460_pg_count[pg]++;
-		}
-	}
-
-	return 0;
-}
-
-static int intel_i460_remove_memory_kpc(agp_memory * mem, off_t pg_start, int type)
-{
-	int i, pg, start_pg, end_pg, start_offset, end_offset, idx;
-	int num_entries;
-	void *temp;
-	unsigned long paddr;
-
-	temp = agp_bridge.current_size;
-	num_entries = A_SIZE_8(temp)->num_entries;
-
-	/* Figure out what pg_start means in terms of our large GART pages */
-	start_pg 	= pg_start / I460_KPAGES_PER_CPAGE;
-	start_offset 	= pg_start % I460_KPAGES_PER_CPAGE;
-	end_pg 		= (pg_start + mem->page_count - 1) / I460_KPAGES_PER_CPAGE;
-	end_offset 	= (pg_start + mem->page_count - 1) % I460_KPAGES_PER_CPAGE;
-
-	for (i = 0, pg = start_pg; pg <= end_pg; pg++) {
-		for (idx = ((pg == start_pg) ? start_offset : 0);
-		    idx < ((pg == end_pg) ? (end_offset + 1) : I460_KPAGES_PER_CPAGE);
-		    idx++, i++)
-		{
-			mem->memory[i] = 0;
-			i460_pg_detail[pg][idx] = 0;
-			i460_pg_count[pg]--;
-		}
-
-		/* Free GART pages if they are unused */
-		if (i460_pg_count[pg] == 0) {
-			paddr = agp_bridge.unmask_memory(agp_bridge.gatt_table[pg]);
-			agp_bridge.gatt_table[pg] = agp_bridge.scratch_page;
-			intel_i460_read_back(agp_bridge.gatt_table + pg);
-			intel_i460_free_large_page(pg, paddr);
-		}
-	}
-	return 0;
-}
-
-/* Dummy routines to call the appropriate {cpk,kpc} function */
-
-static int intel_i460_insert_memory(agp_memory * mem, off_t pg_start, int type)
-{
-	if (intel_i460_cpk)
-		return intel_i460_insert_memory_cpk(mem, pg_start, type);
-	else
-		return intel_i460_insert_memory_kpc(mem, pg_start, type);
-}
-
-static int intel_i460_remove_memory(agp_memory * mem, off_t pg_start, int type)
-{
-	if (intel_i460_cpk)
-		return intel_i460_remove_memory_cpk(mem, pg_start, type);
-	else
-		return intel_i460_remove_memory_kpc(mem, pg_start, type);
-}
-
-/*
- * If the kernel page size is smaller than the chipset page size, we don't
- * want to allocate memory until we know where it is to be bound in the
- * aperture (a multi-kernel-page alloc might fit inside of an already
- * allocated GART page).  Consequently, don't allocate or free anything
- * if i460_cpk (meaning chipset pages per kernel page) isn't set.
- *
- * Let's just hope nobody counts on the allocated AGP memory being there
- * before bind time (I don't think current drivers do)...
- */
-static void * intel_i460_alloc_page(void)
-{
-	if (intel_i460_cpk)
-		return agp_generic_alloc_page();
-
-	/* Returning NULL would cause problems */
-	/* AK: really dubious code. */
-	return (void *)~0UL;
-}
-
-static void intel_i460_destroy_page(void *page)
-{
-	if (intel_i460_cpk)
-		agp_generic_destroy_page(page);
-}
-
-static struct gatt_mask intel_i460_masks[] =
-{
-	{
-	  mask: INTEL_I460_GATT_VALID | INTEL_I460_GATT_COHERENT,
-	  type: 0
-	}
-};
-
-static unsigned long intel_i460_mask_memory(unsigned long addr, int type)
-{
-	/* Make sure the returned address is a valid GATT entry */
-	return (agp_bridge.masks[0].mask
-		| (((addr & ~((1 << intel_i460_pageshift) - 1)) & 0xffffff000) >> 12));
-}
-
-static unsigned long intel_i460_unmask_memory(unsigned long addr)
-{
-	/* Turn a GATT entry into a physical address */
-	return ((addr & 0xffffff) << 12);
-}
-
-static struct aper_size_info_8 intel_i460_sizes[3] =
-{
-	/*
-	 * The 32GB aperture is only available with a 4M GART page size.
-	 * Due to the dynamic GART page size, we can't figure out page_order
-	 * or num_entries until runtime.
-	 */
-	{32768, 0, 0, 4},
-	{1024, 0, 0, 2},
-	{256, 0, 0, 1}
-};
-
-int __init intel_i460_setup (struct pci_dev *pdev __attribute__((unused)))
-{
-	agp_bridge.masks = intel_i460_masks;
-	agp_bridge.aperture_sizes = (void *) intel_i460_sizes;
-	agp_bridge.size_type = U8_APER_SIZE;
-	agp_bridge.num_aperture_sizes = 3;
-	agp_bridge.dev_private_data = NULL;
-	agp_bridge.needs_scratch_page = FALSE;
-	agp_bridge.configure = intel_i460_configure;
-	agp_bridge.fetch_size = intel_i460_fetch_size;
-	agp_bridge.cleanup = intel_i460_cleanup;
-	agp_bridge.tlb_flush = intel_i460_tlb_flush;
-	agp_bridge.mask_memory = intel_i460_mask_memory;
-	agp_bridge.unmask_memory = intel_i460_unmask_memory;
-	agp_bridge.agp_enable = agp_generic_agp_enable;
-	agp_bridge.cache_flush = global_cache_flush;
-	agp_bridge.create_gatt_table = intel_i460_create_gatt_table;
-	agp_bridge.free_gatt_table = intel_i460_free_gatt_table;
-	agp_bridge.insert_memory = intel_i460_insert_memory;
-	agp_bridge.remove_memory = intel_i460_remove_memory;
-	agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
-	agp_bridge.free_by_type = agp_generic_free_by_type;
-	agp_bridge.agp_alloc_page = intel_i460_alloc_page;
-	agp_bridge.agp_destroy_page = intel_i460_destroy_page;
-	agp_bridge.suspend = agp_generic_suspend;
-	agp_bridge.resume = agp_generic_resume;
-	agp_bridge.cant_use_aperture = 1;
-	return 0;
-}
-
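With the 460 configured for 4 MB GART pages and a smaller kernel page size, one GATT entry spans many kernel pages, so the *_kpc paths above convert a kernel-page index into a (large page, offset) pair and track partial use in i460_pg_count[] and i460_pg_detail[][]. A minimal sketch of that index arithmetic (not part of the patch), assuming 4 KB kernel pages, i.e. 1024 kernel pages per 4 MB GART page; the example_* names are illustrative:

#define EXAMPLE_KPAGES_PER_CPAGE 1024	/* 4 MB GART page / 4 KB kernel page, assumed */

/* Which large GART page, and which slot inside it, a kernel page lands in. */
static inline void example_locate(unsigned long pg_start,
				  unsigned long *large_pg, unsigned long *slot)
{
	*large_pg = pg_start / EXAMPLE_KPAGES_PER_CPAGE;
	*slot     = pg_start % EXAMPLE_KPAGES_PER_CPAGE;
}

intel_i460_insert_memory_kpc() above does exactly this for the first and last kernel page of a request to get start_pg/start_offset and end_pg/end_offset, allocating a fresh 4 MB page only when i460_pg_detail[pg] is still NULL.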
diff -Nru a/drivers/char/agp/agpgart_be-i810.c b/drivers/char/agp/agpgart_be-i810.c
--- a/drivers/char/agp/agpgart_be-i810.c	Wed Jul 17 11:29:31 2002
+++ /dev/null	Wed Dec 31 16:00:00 1969
@@ -1,594 +0,0 @@
-/*
- * AGPGART module version 0.99
- * Copyright (C) 1999 Jeff Hartmann
- * Copyright (C) 1999 Precision Insight, Inc.
- * Copyright (C) 1999 Xi Graphics, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, 
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE 
- * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * TODO: 
- * - Allocate more than order 0 pages to avoid too much linear map splitting.
- */
-
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/agp_backend.h>
-#include "agp.h"
-
-static struct aper_size_info_fixed intel_i810_sizes[] =
-{
-	{64, 16384, 4},
-     /* The 32M mode still requires a 64k gatt */
-	{32, 8192, 4}
-};
-
-#define AGP_DCACHE_MEMORY 1
-#define AGP_PHYS_MEMORY   2
-
-static struct gatt_mask intel_i810_masks[] =
-{
-	{mask: I810_PTE_VALID, type: 0},
-	{mask: (I810_PTE_VALID | I810_PTE_LOCAL), type: AGP_DCACHE_MEMORY},
-	{mask: I810_PTE_VALID, type: 0}
-};
-
-static struct _intel_i810_private {
-	struct pci_dev *i810_dev;	/* device one */
-	volatile u8 *registers;
-	int num_dcache_entries;
-} intel_i810_private;
-
-static int intel_i810_fetch_size(void)
-{
-	u32 smram_miscc;
-	struct aper_size_info_fixed *values;
-
-	pci_read_config_dword(agp_bridge.dev, I810_SMRAM_MISCC, &smram_miscc);
-	values = A_SIZE_FIX(agp_bridge.aperture_sizes);
-
-	if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
-		printk(KERN_WARNING PFX "i810 is disabled\n");
-		return 0;
-	}
-	if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
-		agp_bridge.previous_size =
-		    agp_bridge.current_size = (void *) (values + 1);
-		agp_bridge.aperture_size_idx = 1;
-		return values[1].size;
-	} else {
-		agp_bridge.previous_size =
-		    agp_bridge.current_size = (void *) (values);
-		agp_bridge.aperture_size_idx = 0;
-		return values[0].size;
-	}
-
-	return 0;
-}
-
-static int intel_i810_configure(void)
-{
-	struct aper_size_info_fixed *current_size;
-	u32 temp;
-	int i;
-
-	current_size = A_SIZE_FIX(agp_bridge.current_size);
-
-	pci_read_config_dword(intel_i810_private.i810_dev, I810_MMADDR, &temp);
-	temp &= 0xfff80000;
-
-	intel_i810_private.registers =
-	    (volatile u8 *) ioremap(temp, 128 * 4096);
-
-	if ((INREG32(intel_i810_private.registers, I810_DRAM_CTL)
-	     & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
-		/* This will need to be dynamically assigned */
-		printk(KERN_INFO PFX "detected 4MB dedicated video ram.\n");
-		intel_i810_private.num_dcache_entries = 1024;
-	}
-	pci_read_config_dword(intel_i810_private.i810_dev, I810_GMADDR, &temp);
-	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
-	OUTREG32(intel_i810_private.registers, I810_PGETBL_CTL,
-		 agp_bridge.gatt_bus_addr | I810_PGETBL_ENABLED);
-	CACHE_FLUSH();
-
-	if (agp_bridge.needs_scratch_page == TRUE) {
-		for (i = 0; i < current_size->num_entries; i++) {
-			OUTREG32(intel_i810_private.registers,
-				 I810_PTE_BASE + (i * 4),
-				 agp_bridge.scratch_page);
-		}
-	}
-	return 0;
-}
-
-static void intel_i810_cleanup(void)
-{
-	OUTREG32(intel_i810_private.registers, I810_PGETBL_CTL, 0);
-	iounmap((void *) intel_i810_private.registers);
-}
-
-static void intel_i810_tlbflush(agp_memory * mem)
-{
-	return;
-}
-
-static void intel_i810_agp_enable(u32 mode)
-{
-	return;
-}
-
-static int intel_i810_insert_entries(agp_memory * mem, off_t pg_start,
-				     int type)
-{
-	int i, j, num_entries;
-	void *temp;
-
-	temp = agp_bridge.current_size;
-	num_entries = A_SIZE_FIX(temp)->num_entries;
-
-	if ((pg_start + mem->page_count) > num_entries) {
-		return -EINVAL;
-	}
-	for (j = pg_start; j < (pg_start + mem->page_count); j++) {
-		if (!PGE_EMPTY(agp_bridge.gatt_table[j])) {
-			return -EBUSY;
-		}
-	}
-
-	if (type != 0 || mem->type != 0) {
-		if ((type == AGP_DCACHE_MEMORY) &&
-		    (mem->type == AGP_DCACHE_MEMORY)) {
-			/* special insert */
-			CACHE_FLUSH();
-			for (i = pg_start;
-			     i < (pg_start + mem->page_count); i++) {
-				OUTREG32(intel_i810_private.registers,
-					 I810_PTE_BASE + (i * 4),
-					 (i * 4096) | I810_PTE_LOCAL |
-					 I810_PTE_VALID);
-			}
-			CACHE_FLUSH();
-			agp_bridge.tlb_flush(mem);
-			return 0;
-		}
-	        if((type == AGP_PHYS_MEMORY) &&
-		   (mem->type == AGP_PHYS_MEMORY)) {
-		   goto insert;
-		}
-		return -EINVAL;
-	}
-
-insert:
-   	CACHE_FLUSH();
-	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
-		OUTREG32(intel_i810_private.registers,
-			 I810_PTE_BASE + (j * 4), mem->memory[i]);
-	}
-	CACHE_FLUSH();
-
-	agp_bridge.tlb_flush(mem);
-	return 0;
-}
-
-static int intel_i810_remove_entries(agp_memory * mem, off_t pg_start,
-				     int type)
-{
-	int i;
-
-	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
-		OUTREG32(intel_i810_private.registers,
-			 I810_PTE_BASE + (i * 4),
-			 agp_bridge.scratch_page);
-	}
-
-	CACHE_FLUSH();
-	agp_bridge.tlb_flush(mem);
-	return 0;
-}
-
-static agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type)
-{
-	agp_memory *new;
-
-	if (type == AGP_DCACHE_MEMORY) {
-		if (pg_count != intel_i810_private.num_dcache_entries) {
-			return NULL;
-		}
-		new = agp_create_memory(1);
-
-		if (new == NULL) {
-			return NULL;
-		}
-		new->type = AGP_DCACHE_MEMORY;
-		new->page_count = pg_count;
-		new->num_scratch_pages = 0;
-		vfree(new->memory);
-	   	MOD_INC_USE_COUNT;
-		return new;
-	}
-	if(type == AGP_PHYS_MEMORY) {
-		void *addr;
-		/* The I810 requires a physical address to program
-		 * its mouse pointer into hardware.  However, the
-		 * Xserver still writes to it through the agp
-		 * aperture
-		 */
-	   	if (pg_count != 1) {
-		   	return NULL;
-		}
-	   	new = agp_create_memory(1);
-
-		if (new == NULL) {
-			return NULL;
-		}
-	   	MOD_INC_USE_COUNT;
-		addr = agp_bridge.agp_alloc_page();
-
-		if (addr == NULL) {
-			/* Free this structure */
-			agp_free_memory(new);
-			return NULL;
-		}
-		new->memory[0] = agp_bridge.mask_memory(virt_to_phys(addr), type);
-		new->page_count = 1;
-	   	new->num_scratch_pages = 1;
-	   	new->type = AGP_PHYS_MEMORY;
-	        new->physical = virt_to_phys(addr);
-	   	return new;
-	}
-   
-	return NULL;
-}
-
-static void intel_i810_free_by_type(agp_memory * curr)
-{
-	agp_free_key(curr->key);
-   	if(curr->type == AGP_PHYS_MEMORY) {
-	   	agp_bridge.agp_destroy_page(
-				 phys_to_virt(curr->memory[0]));
-		vfree(curr->memory);
-	}
-	kfree(curr);
-   	MOD_DEC_USE_COUNT;
-}
-
-static unsigned long intel_i810_mask_memory(unsigned long addr, int type)
-{
-	/* Type checking must be done elsewhere */
-	return addr | agp_bridge.masks[type].mask;
-}
-
-int __init intel_i810_setup(struct pci_dev *i810_dev)
-{
-	intel_i810_private.i810_dev = i810_dev;
-
-	agp_bridge.masks = intel_i810_masks;
-	agp_bridge.num_of_masks = 2;
-	agp_bridge.aperture_sizes = (void *) intel_i810_sizes;
-	agp_bridge.size_type = FIXED_APER_SIZE;
-	agp_bridge.num_aperture_sizes = 2;
-	agp_bridge.dev_private_data = (void *) &intel_i810_private;
-	agp_bridge.needs_scratch_page = TRUE;
-	agp_bridge.configure = intel_i810_configure;
-	agp_bridge.fetch_size = intel_i810_fetch_size;
-	agp_bridge.cleanup = intel_i810_cleanup;
-	agp_bridge.tlb_flush = intel_i810_tlbflush;
-	agp_bridge.mask_memory = intel_i810_mask_memory;
-	agp_bridge.agp_enable = intel_i810_agp_enable;
-	agp_bridge.cache_flush = global_cache_flush;
-	agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
-	agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
-	agp_bridge.insert_memory = intel_i810_insert_entries;
-	agp_bridge.remove_memory = intel_i810_remove_entries;
-	agp_bridge.alloc_by_type = intel_i810_alloc_by_type;
-	agp_bridge.free_by_type = intel_i810_free_by_type;
-	agp_bridge.agp_alloc_page = agp_generic_alloc_page;
-	agp_bridge.agp_destroy_page = agp_generic_destroy_page;
-	agp_bridge.suspend = agp_generic_suspend;
-	agp_bridge.resume = agp_generic_resume;
-	agp_bridge.cant_use_aperture = 0;
-
-	return 0;
-}
-
-static struct aper_size_info_fixed intel_i830_sizes[] =
-{
-	{128, 32768, 5},
-	/* The 64M mode still requires a 128k gatt */
-	{64, 16384, 5}
-};
-
-static struct _intel_i830_private {
-	struct pci_dev *i830_dev;   /* device one */
-	volatile u8 *registers;
-	int gtt_entries;
-} intel_i830_private;
-
-static void intel_i830_init_gtt_entries(void)
-{
-	u16 gmch_ctrl;
-	int gtt_entries;
-	u8 rdct;
-	static const int ddt[4] = { 0, 16, 32, 64 };
-
-	pci_read_config_word(agp_bridge.dev,I830_GMCH_CTRL,&gmch_ctrl);
-
-	switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
-	case I830_GMCH_GMS_STOLEN_512:
-		gtt_entries = KB(512) - KB(132);
-		printk(KERN_INFO PFX "detected %dK stolen memory.\n",gtt_entries / KB(1));
-		break;
-	case I830_GMCH_GMS_STOLEN_1024:
-		gtt_entries = MB(1) - KB(132);
-		printk(KERN_INFO PFX "detected %dK stolen memory.\n",gtt_entries / KB(1));
-		break;
-	case I830_GMCH_GMS_STOLEN_8192:
-		gtt_entries = MB(8) - KB(132);
-		printk(KERN_INFO PFX "detected %dK stolen memory.\n",gtt_entries / KB(1));
-		break;
-	case I830_GMCH_GMS_LOCAL:
-		rdct = INREG8(intel_i830_private.registers,I830_RDRAM_CHANNEL_TYPE);
-		gtt_entries = (I830_RDRAM_ND(rdct) + 1) * MB(ddt[I830_RDRAM_DDT(rdct)]);
-		printk(KERN_INFO PFX "detected %dK local memory.\n",gtt_entries / KB(1));
-		break;
-	default:
-		printk(KERN_INFO PFX "no video memory detected.\n");
-		gtt_entries = 0;
-		break;
-	}
-
-	gtt_entries /= KB(4);
-
-	intel_i830_private.gtt_entries = gtt_entries;
-}
-
-/* The intel i830 automatically initializes the agp aperture during POST.
- * Use the memory already set aside for the GTT.
- */
-static int intel_i830_create_gatt_table(void)
-{
-	int page_order;
-	struct aper_size_info_fixed *size;
-	int num_entries;
-	u32 temp;
-
-	size = agp_bridge.current_size;
-	page_order = size->page_order;
-	num_entries = size->num_entries;
-	agp_bridge.gatt_table_real = 0;
-
-	pci_read_config_dword(intel_i830_private.i830_dev,I810_MMADDR,&temp);
-	temp &= 0xfff80000;
-
-	intel_i830_private.registers = (volatile u8 *) ioremap(temp,128 * 4096);
-	if (!intel_i830_private.registers) return (-ENOMEM);
-
-	temp = INREG32(intel_i830_private.registers,I810_PGETBL_CTL) & 0xfffff000;
-	CACHE_FLUSH();
-
-	/* we have to call this as early as possible after the MMIO base address is known */
-	intel_i830_init_gtt_entries();
-
-	agp_bridge.gatt_table = NULL;
-
-	agp_bridge.gatt_bus_addr = temp;
-
-	return(0);
-}
-
-/* Return the gatt table to a sane state. Use the top of stolen
- * memory for the GTT.
- */
-static int intel_i830_free_gatt_table(void)
-{
-	return(0);
-}
-
-static int intel_i830_fetch_size(void)
-{
-	u16 gmch_ctrl;
-	struct aper_size_info_fixed *values;
-
-	pci_read_config_word(agp_bridge.dev,I830_GMCH_CTRL,&gmch_ctrl);
-	values = A_SIZE_FIX(agp_bridge.aperture_sizes);
-
-	if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_128M) {
-		agp_bridge.previous_size = agp_bridge.current_size = (void *) values;
-		agp_bridge.aperture_size_idx = 0;
-		return(values[0].size);
-	} else {
-		agp_bridge.previous_size = agp_bridge.current_size = (void *) values;
-		agp_bridge.aperture_size_idx = 1;
-		return(values[1].size);
-	}
-
-	return(0);
-}
-
-static int intel_i830_configure(void)
-{
-	struct aper_size_info_fixed *current_size;
-	u32 temp;
-	u16 gmch_ctrl;
-	int i;
-
-	current_size = A_SIZE_FIX(agp_bridge.current_size);
-
-	pci_read_config_dword(intel_i830_private.i830_dev,I810_GMADDR,&temp);
-	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
-
-	pci_read_config_word(agp_bridge.dev,I830_GMCH_CTRL,&gmch_ctrl);
-	gmch_ctrl |= I830_GMCH_ENABLED;
-	pci_write_config_word(agp_bridge.dev,I830_GMCH_CTRL,gmch_ctrl);
-
-	OUTREG32(intel_i830_private.registers,I810_PGETBL_CTL,agp_bridge.gatt_bus_addr | I810_PGETBL_ENABLED);
-	CACHE_FLUSH();
-
-	if (agp_bridge.needs_scratch_page == TRUE)
-		for (i = intel_i830_private.gtt_entries; i < current_size->num_entries; i++)
-			OUTREG32(intel_i830_private.registers,I810_PTE_BASE + (i * 4),agp_bridge.scratch_page);
-
-	return (0);
-}
-
-static void intel_i830_cleanup(void)
-{
-	iounmap((void *) intel_i830_private.registers);
-}
-
-static int intel_i830_insert_entries(agp_memory *mem,off_t pg_start,int type)
-{
-	int i,j,num_entries;
-	void *temp;
-
-	temp = agp_bridge.current_size;
-	num_entries = A_SIZE_FIX(temp)->num_entries;
-
-	if (pg_start < intel_i830_private.gtt_entries) {
-		printk (KERN_DEBUG "pg_start == 0x%.8lx,intel_i830_private.gtt_entries == 0x%.8x\n",
-				pg_start,intel_i830_private.gtt_entries);
-
-		printk ("Trying to insert into local/stolen memory\n");
-		return (-EINVAL);
-	}
-
-	if ((pg_start + mem->page_count) > num_entries)
-		return (-EINVAL);
-
-	/* The i830 can't check the GTT for entries since it's read only,
-	 * so depend on the caller to make the correct offset decisions.
-	 */
-
-	if ((type != 0 && type != AGP_PHYS_MEMORY) ||
-		(mem->type != 0 && mem->type != AGP_PHYS_MEMORY))
-		return (-EINVAL);
-
-	CACHE_FLUSH();
-
-	for (i = 0, j = pg_start; i < mem->page_count; i++, j++)
-		OUTREG32(intel_i830_private.registers,I810_PTE_BASE + (j * 4),mem->memory[i]);
-
-	CACHE_FLUSH();
-
-	agp_bridge.tlb_flush(mem);
-
-	return(0);
-}
-
-static int intel_i830_remove_entries(agp_memory *mem,off_t pg_start,int type)
-{
-	int i;
-
-	CACHE_FLUSH ();
-
-	if (pg_start < intel_i830_private.gtt_entries) {
-		printk ("Trying to disable local/stolen memory\n");
-		return (-EINVAL);
-	}
-
-	for (i = pg_start; i < (mem->page_count + pg_start); i++)
-		OUTREG32(intel_i830_private.registers,I810_PTE_BASE + (i * 4),agp_bridge.scratch_page);
-
-	CACHE_FLUSH();
-
-	agp_bridge.tlb_flush(mem);
-
-	return (0);
-}
-
-static agp_memory *intel_i830_alloc_by_type(size_t pg_count,int type)
-{
-	agp_memory *nw;
-
-	/* always return NULL for now */
-	if (type == AGP_DCACHE_MEMORY) return(NULL);
-
-	if (type == AGP_PHYS_MEMORY) {
-		void *addr;
-
-		/* The i830 requires a physical address to program
-		 * its mouse pointer into hardware. However, the
-		 * Xserver still writes to it through the agp
-		 * aperture
-		 */
-
-		if (pg_count != 1) return(NULL);
-
-		nw = agp_create_memory(1);
-
-		if (nw == NULL) return(NULL);
-
-		MOD_INC_USE_COUNT;
-		addr = agp_bridge.agp_alloc_page();
-		if (addr == NULL) {
-			/* free this structure */
-			agp_free_memory(nw);
-			return(NULL);
-		}
-
-		nw->memory[0] = agp_bridge.mask_memory(virt_to_phys(addr),type);
-		nw->page_count = 1;
-		nw->num_scratch_pages = 1;
-		nw->type = AGP_PHYS_MEMORY;
-		nw->physical = virt_to_phys(addr);
-		return(nw);
-	}
-
-	return(NULL);
-}
-
-int __init intel_i830_setup(struct pci_dev *i830_dev)
-{
-	intel_i830_private.i830_dev = i830_dev;
-
-	agp_bridge.masks = intel_i810_masks;
-	agp_bridge.num_of_masks = 3;
-	agp_bridge.aperture_sizes = (void *) intel_i830_sizes;
-	agp_bridge.size_type = FIXED_APER_SIZE;
-	agp_bridge.num_aperture_sizes = 2;
-
-	agp_bridge.dev_private_data = (void *) &intel_i830_private;
-	agp_bridge.needs_scratch_page = TRUE;
-
-	agp_bridge.configure = intel_i830_configure;
-	agp_bridge.fetch_size = intel_i830_fetch_size;
-	agp_bridge.cleanup = intel_i830_cleanup;
-	agp_bridge.tlb_flush = intel_i810_tlbflush;
-	agp_bridge.mask_memory = intel_i810_mask_memory;
-	agp_bridge.agp_enable = intel_i810_agp_enable;
-	agp_bridge.cache_flush = global_cache_flush;
-
-	agp_bridge.create_gatt_table = intel_i830_create_gatt_table;
-	agp_bridge.free_gatt_table = intel_i830_free_gatt_table;
-
-	agp_bridge.insert_memory = intel_i830_insert_entries;
-	agp_bridge.remove_memory = intel_i830_remove_entries;
-	agp_bridge.alloc_by_type = intel_i830_alloc_by_type;
-	agp_bridge.free_by_type = intel_i810_free_by_type;
-	agp_bridge.agp_alloc_page = agp_generic_alloc_page;
-	agp_bridge.agp_destroy_page = agp_generic_destroy_page;
-
-	agp_bridge.suspend = agp_generic_suspend;
-	agp_bridge.resume = agp_generic_resume;
-	agp_bridge.cant_use_aperture = 0;
-
-	return(0);
-}
-
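intel_i830_init_gtt_entries() above sizes the stolen-memory region that the first GTT entries must keep mapped: the stolen size minus 132 KB of overhead, divided by the 4 KB covered by each entry. A small worked sketch (not part of the patch) of that arithmetic; the helper name is illustrative:

/* GTT entries reserved for BIOS-stolen memory: (stolen - 132 KB) / 4 KB. */
static inline int example_stolen_gtt_entries(int stolen_kb)
{
	return (stolen_kb - 132) / 4;
}

For the 8 MB case handled above this gives (8192 - 132) / 4 = 2015 entries, and intel_i830_insert_entries() rejects any pg_start below that count so the stolen range is never remapped.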
diff -Nru a/drivers/char/agp/agpgart_be-i8x0.c b/drivers/char/agp/agpgart_be-i8x0.c
--- a/drivers/char/agp/agpgart_be-i8x0.c	Wed Jul 17 11:29:31 2002
+++ /dev/null	Wed Dec 31 16:00:00 1969
@@ -1,726 +0,0 @@
-/*
- * AGPGART module version 0.99
- * Copyright (C) 1999 Jeff Hartmann
- * Copyright (C) 1999 Precision Insight, Inc.
- * Copyright (C) 1999 Xi Graphics, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, 
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE 
- * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * TODO: 
- * - Allocate more than order 0 pages to avoid too much linear map splitting.
- */
-
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/agp_backend.h>
-#include "agp.h"
-
-
-static int intel_fetch_size(void)
-{
-	int i;
-	u16 temp;
-	struct aper_size_info_16 *values;
-
-	pci_read_config_word(agp_bridge.dev, INTEL_APSIZE, &temp);
-	values = A_SIZE_16(agp_bridge.aperture_sizes);
-
-	for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
-		if (temp == values[i].size_value) {
-			agp_bridge.previous_size =
-			    agp_bridge.current_size = (void *) (values + i);
-			agp_bridge.aperture_size_idx = i;
-			return values[i].size;
-		}
-	}
-
-	return 0;
-}
-
-static int intel_8xx_fetch_size(void)
-{
-	int i;
-	u8 temp;
-	struct aper_size_info_8 *values;
-
-	pci_read_config_byte(agp_bridge.dev, INTEL_APSIZE, &temp);
-
-	/* Intel 815 chipsets have a _weird_ APSIZE register with only
-	 * one non-reserved bit, so mask the others out ... */
-	if (agp_bridge.type == INTEL_I815)
-		temp &= (1 << 3);
-
-	values = A_SIZE_8(agp_bridge.aperture_sizes);
-
-	for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
-		if (temp == values[i].size_value) {
-			agp_bridge.previous_size =
-				agp_bridge.current_size = (void *) (values + i);
-			agp_bridge.aperture_size_idx = i;
-			return values[i].size;
-		}
-	}
-	return 0;
-}
-
-
-static void intel_tlbflush(agp_memory * mem)
-{
-	pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x2200);
-	pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x2280);
-}
-
-
-static void intel_8xx_tlbflush(agp_memory * mem)
-{
-  u32 temp;
-  pci_read_config_dword(agp_bridge.dev, INTEL_AGPCTRL, &temp);
-  pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, temp & ~(1 << 7));
-  pci_read_config_dword(agp_bridge.dev, INTEL_AGPCTRL, &temp);
-  pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, temp | (1 << 7));
-}
-
-
-static void intel_cleanup(void)
-{
-	u16 temp;
-	struct aper_size_info_16 *previous_size;
-
-	previous_size = A_SIZE_16(agp_bridge.previous_size);
-	pci_read_config_word(agp_bridge.dev, INTEL_NBXCFG, &temp);
-	pci_write_config_word(agp_bridge.dev, INTEL_NBXCFG, temp & ~(1 << 9));
-	pci_write_config_word(agp_bridge.dev, INTEL_APSIZE,
-			      previous_size->size_value);
-}
-
-
-static void intel_8xx_cleanup(void)
-{
-	u16 temp;
-	struct aper_size_info_8 *previous_size;
-
-	previous_size = A_SIZE_8(agp_bridge.previous_size);
-	pci_read_config_word(agp_bridge.dev, INTEL_NBXCFG, &temp);
-	pci_write_config_word(agp_bridge.dev, INTEL_NBXCFG, temp & ~(1 << 9));
-	pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE,
-			      previous_size->size_value);
-}
-
-
-static int intel_configure(void)
-{
-	u32 temp;
-	u16 temp2;
-	struct aper_size_info_16 *current_size;
-
-	current_size = A_SIZE_16(agp_bridge.current_size);
-
-	/* aperture size */
-	pci_write_config_word(agp_bridge.dev, INTEL_APSIZE,
-			      current_size->size_value);
-
-	/* address to map to */
-	pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp);
-	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
-
-	/* attbase - aperture base */
-	pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE,
-			       agp_bridge.gatt_bus_addr);
-
-	/* agpctrl */
-	pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x2280);
-
-	/* paccfg/nbxcfg */
-	pci_read_config_word(agp_bridge.dev, INTEL_NBXCFG, &temp2);
-	pci_write_config_word(agp_bridge.dev, INTEL_NBXCFG,
-			      (temp2 & ~(1 << 10)) | (1 << 9));
-	/* clear any possible error conditions */
-	pci_write_config_byte(agp_bridge.dev, INTEL_ERRSTS + 1, 7);
-	return 0;
-}
-
-static int intel_815_configure(void)
-{
-	u32 temp, addr;
-	u8 temp2;
-	struct aper_size_info_8 *current_size;
-
-	current_size = A_SIZE_8(agp_bridge.current_size);
-
-	/* aperture size */
-	pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE,
-			current_size->size_value); 
-
-	/* address to map to */
-	pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp);
-	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
-
-	/* attbase - aperture base */
-	/* the Intel 815 chipset spec. says that bits 29-31 in the
-	* ATTBASE register are reserved -> try not to write them */
-	if (agp_bridge.gatt_bus_addr &  INTEL_815_ATTBASE_MASK)
-		panic("gatt bus addr too high");
-	pci_read_config_dword(agp_bridge.dev, INTEL_ATTBASE, &addr);
-	addr &= INTEL_815_ATTBASE_MASK;
-	addr |= agp_bridge.gatt_bus_addr;
-	pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE, addr);
-
-	/* agpctrl */
-	pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x0000); 
-
-	/* apcont */
-	pci_read_config_byte(agp_bridge.dev, INTEL_815_APCONT, &temp2);
-	pci_write_config_byte(agp_bridge.dev, INTEL_815_APCONT, temp2 | (1 << 1));
-
-	/* clear any possible error conditions */
-	/* Oddness: this chipset seems to have no ERRSTS register! */
-	return 0;
-}
-
-static void intel_820_tlbflush(agp_memory * mem)
-{
-  return;
-}
-
-static void intel_820_cleanup(void)
-{
-	u8 temp;
-	struct aper_size_info_8 *previous_size;
-
-	previous_size = A_SIZE_8(agp_bridge.previous_size);
-	pci_read_config_byte(agp_bridge.dev, INTEL_I820_RDCR, &temp);
-	pci_write_config_byte(agp_bridge.dev, INTEL_I820_RDCR, 
-			      temp & ~(1 << 1));
-	pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE,
-			      previous_size->size_value);
-}
-
-
-static int intel_820_configure(void)
-{
-	u32 temp;
- 	u8 temp2; 
-	struct aper_size_info_8 *current_size;
-
-	current_size = A_SIZE_8(agp_bridge.current_size);
-
-	/* aperture size */
-	pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE,
-			      current_size->size_value); 
-
-	/* address to map to */
-	pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp);
-	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
-
-	/* attbase - aperture base */
-	pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE,
-			       agp_bridge.gatt_bus_addr); 
-
-	/* agpctrl */
-	pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x0000); 
-
-	/* global enable aperture access */
-	/* This flag is not accessed through MCHCFG register as in */
-	/* i850 chipset. */
-	pci_read_config_byte(agp_bridge.dev, INTEL_I820_RDCR, &temp2);
-	pci_write_config_byte(agp_bridge.dev, INTEL_I820_RDCR, 
-			      temp2 | (1 << 1));
-	/* clear any possible AGP-related error conditions */
-	pci_write_config_word(agp_bridge.dev, INTEL_I820_ERRSTS, 0x001c); 
-	return 0;
-}
-
-static int intel_840_configure(void)
-{
-	u32 temp;
-	u16 temp2;
-	struct aper_size_info_8 *current_size;
-
-	current_size = A_SIZE_8(agp_bridge.current_size);
-
-	/* aperture size */
-	pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE,
-			      current_size->size_value); 
-
-	/* address to map to */
-	pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp);
-	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
-
-	/* attbase - aperture base */
-	pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE,
-			       agp_bridge.gatt_bus_addr); 
-
-	/* agpctrl */
-	pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x0000); 
-
-	/* mcgcfg */
-	pci_read_config_word(agp_bridge.dev, INTEL_I840_MCHCFG, &temp2);
-	pci_write_config_word(agp_bridge.dev, INTEL_I840_MCHCFG,
-			      temp2 | (1 << 9));
-	/* clear any possible error conditions */
-	pci_write_config_word(agp_bridge.dev, INTEL_I840_ERRSTS, 0xc000); 
-	return 0;
-}
-
-static int intel_845_configure(void)
-{
-	u32 temp;
-	u8 temp2;
-	struct aper_size_info_8 *current_size;
-
-	current_size = A_SIZE_8(agp_bridge.current_size);
-
-	/* aperture size */
-	pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE,
-			      current_size->size_value); 
-
-	/* address to map to */
-	pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp);
-	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
-
-	/* attbase - aperture base */
-	pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE,
-			       agp_bridge.gatt_bus_addr); 
-
-	/* agpctrl */
-	pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x0000); 
-
-	/* agpm */
-	pci_read_config_byte(agp_bridge.dev, INTEL_I845_AGPM, &temp2);
-	pci_write_config_byte(agp_bridge.dev, INTEL_I845_AGPM,
-			      temp2 | (1 << 1));
-	/* clear any possible error conditions */
-	pci_write_config_word(agp_bridge.dev, INTEL_I845_ERRSTS, 0x001c); 
-	return 0;
-}
-
-static int intel_850_configure(void)
-{
-	u32 temp;
-	u16 temp2;
-	struct aper_size_info_8 *current_size;
-
-	current_size = A_SIZE_8(agp_bridge.current_size);
-
-	/* aperture size */
-	pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE,
-			      current_size->size_value); 
-
-	/* address to map to */
-	pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp);
-	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
-
-	/* attbase - aperture base */
-	pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE,
-			       agp_bridge.gatt_bus_addr); 
-
-	/* agpctrl */
-	pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x0000); 
-
-	/* mcgcfg */
-	pci_read_config_word(agp_bridge.dev, INTEL_I850_MCHCFG, &temp2);
-	pci_write_config_word(agp_bridge.dev, INTEL_I850_MCHCFG,
-			      temp2 | (1 << 9));
-	/* clear any possible AGP-related error conditions */
-	pci_write_config_word(agp_bridge.dev, INTEL_I850_ERRSTS, 0x001c); 
-	return 0;
-}
-
-static int intel_860_configure(void)
-{
-	u32 temp;
-	u16 temp2;
-	struct aper_size_info_8 *current_size;
-
-	current_size = A_SIZE_8(agp_bridge.current_size);
-
-	/* aperture size */
-	pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE,
-			      current_size->size_value);
-
-	/* address to map to */
-	pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp);
-	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
-
-	/* attbase - aperture base */
-	pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE,
-			       agp_bridge.gatt_bus_addr);
-
-	/* agpctrl */
-	pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x0000);
-
-	/* mcgcfg */
-	pci_read_config_word(agp_bridge.dev, INTEL_I860_MCHCFG, &temp2);
-	pci_write_config_word(agp_bridge.dev, INTEL_I860_MCHCFG,
-			      temp2 | (1 << 9));
-	/* clear any possible AGP-related error conditions */
-	pci_write_config_word(agp_bridge.dev, INTEL_I860_ERRSTS, 0xf700);
-	return 0;
-}
-
-static int intel_830mp_configure(void)
-{
-	u32 temp;
-	u16 temp2;
-	struct aper_size_info_8 *current_size;
-
-	current_size = A_SIZE_8(agp_bridge.current_size);
-
-	/* aperture size */
-	pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE,
-			      current_size->size_value);
-
-	/* address to map to */
-	pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp);
-	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
-
-	/* attbase - aperture base */
-	pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE,
-			       agp_bridge.gatt_bus_addr);
-
-	/* agpctrl */
-	pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x0000);
-
-	/* gmch */
-	pci_read_config_word(agp_bridge.dev, INTEL_NBXCFG, &temp2);
-	pci_write_config_word(agp_bridge.dev, INTEL_NBXCFG,
-			      temp2 | (1 << 9));
-	/* clear any possible AGP-related error conditions */
-	pci_write_config_word(agp_bridge.dev, INTEL_I830_ERRSTS, 0x1c);
-	return 0;
-}
-
-static unsigned long intel_mask_memory(unsigned long addr, int type)
-{
-	/* Memory type is ignored */
-
-	return addr | agp_bridge.masks[0].mask;
-}
-
-static void intel_resume(void)
-{
-	intel_configure();
-}
-
-/* Setup function */
-static struct gatt_mask intel_generic_masks[] =
-{
-	{mask: 0x00000017, type: 0}
-};
-
-static struct aper_size_info_8 intel_815_sizes[2] =
-{
-	{64, 16384, 4, 0},
-	{32, 8192, 3, 8},
-};
-	
-static struct aper_size_info_8 intel_8xx_sizes[7] =
-{
-	{256, 65536, 6, 0},
-	{128, 32768, 5, 32},
-	{64, 16384, 4, 48},
-	{32, 8192, 3, 56},
-	{16, 4096, 2, 60},
-	{8, 2048, 1, 62},
-	{4, 1024, 0, 63}
-};
-
-static struct aper_size_info_16 intel_generic_sizes[7] =
-{
-	{256, 65536, 6, 0},
-	{128, 32768, 5, 32},
-	{64, 16384, 4, 48},
-	{32, 8192, 3, 56},
-	{16, 4096, 2, 60},
-	{8, 2048, 1, 62},
-	{4, 1024, 0, 63}
-};
-
-static struct aper_size_info_8 intel_830mp_sizes[4] = 
-{
-  {256, 65536, 6, 0},
-  {128, 32768, 5, 32},
-  {64, 16384, 4, 48},
-  {32, 8192, 3, 56}
-};
-
-int __init intel_generic_setup (struct pci_dev *pdev)
-{
-	agp_bridge.masks = intel_generic_masks;
-	agp_bridge.num_of_masks = 1;
-	agp_bridge.aperture_sizes = (void *) intel_generic_sizes;
-	agp_bridge.size_type = U16_APER_SIZE;
-	agp_bridge.num_aperture_sizes = 7;
-	agp_bridge.dev_private_data = NULL;
-	agp_bridge.needs_scratch_page = FALSE;
-	agp_bridge.configure = intel_configure;
-	agp_bridge.fetch_size = intel_fetch_size;
-	agp_bridge.cleanup = intel_cleanup;
-	agp_bridge.tlb_flush = intel_tlbflush;
-	agp_bridge.mask_memory = intel_mask_memory;
-	agp_bridge.agp_enable = agp_generic_agp_enable;
-	agp_bridge.cache_flush = global_cache_flush;
-	agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
-	agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
-	agp_bridge.insert_memory = agp_generic_insert_memory;
-	agp_bridge.remove_memory = agp_generic_remove_memory;
-	agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
-	agp_bridge.free_by_type = agp_generic_free_by_type;
-	agp_bridge.agp_alloc_page = agp_generic_alloc_page;
-	agp_bridge.agp_destroy_page = agp_generic_destroy_page;
-	agp_bridge.suspend = agp_generic_suspend;
-	agp_bridge.resume = intel_resume;
-	agp_bridge.cant_use_aperture = 0;
-
-	return 0;
-	
-	(void) pdev; /* unused */
-}
-
-int __init intel_815_setup (struct pci_dev *pdev)
-{
-	agp_bridge.masks = intel_generic_masks;
-	agp_bridge.num_of_masks = 1;
-	agp_bridge.aperture_sizes = (void *) intel_815_sizes;
-	agp_bridge.size_type = U8_APER_SIZE;
-	agp_bridge.num_aperture_sizes = 2;
-	agp_bridge.dev_private_data = NULL;
-	agp_bridge.needs_scratch_page = FALSE;
-	agp_bridge.configure = intel_815_configure;
-	agp_bridge.fetch_size = intel_8xx_fetch_size;
-	agp_bridge.cleanup = intel_8xx_cleanup;
-	agp_bridge.tlb_flush = intel_8xx_tlbflush;
-	agp_bridge.mask_memory = intel_mask_memory;
-	agp_bridge.agp_enable = agp_generic_agp_enable;
-	agp_bridge.cache_flush = global_cache_flush;
-	agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
-	agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
-	agp_bridge.insert_memory = agp_generic_insert_memory;
-	agp_bridge.remove_memory = agp_generic_remove_memory;
-	agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
-	agp_bridge.free_by_type = agp_generic_free_by_type;
-	agp_bridge.agp_alloc_page = agp_generic_alloc_page;
-	agp_bridge.agp_destroy_page = agp_generic_destroy_page;
-	agp_bridge.suspend = agp_generic_suspend;
-	agp_bridge.resume = agp_generic_resume;
-	agp_bridge.cant_use_aperture = 0;
-
-	return 0;
-}
-
-
-int __init intel_820_setup (struct pci_dev *pdev)
-{
-       agp_bridge.masks = intel_generic_masks;
-       agp_bridge.num_of_masks = 1;
-       agp_bridge.aperture_sizes = (void *) intel_8xx_sizes;
-       agp_bridge.size_type = U8_APER_SIZE;
-       agp_bridge.num_aperture_sizes = 7;
-       agp_bridge.dev_private_data = NULL;
-       agp_bridge.needs_scratch_page = FALSE;
-       agp_bridge.configure = intel_820_configure;
-       agp_bridge.fetch_size = intel_8xx_fetch_size;
-       agp_bridge.cleanup = intel_820_cleanup;
-       agp_bridge.tlb_flush = intel_820_tlbflush;
-       agp_bridge.mask_memory = intel_mask_memory;
-       agp_bridge.agp_enable = agp_generic_agp_enable;
-       agp_bridge.cache_flush = global_cache_flush;
-       agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
-       agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
-       agp_bridge.insert_memory = agp_generic_insert_memory;
-       agp_bridge.remove_memory = agp_generic_remove_memory;
-       agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
-       agp_bridge.free_by_type = agp_generic_free_by_type;
-       agp_bridge.agp_alloc_page = agp_generic_alloc_page;
-       agp_bridge.agp_destroy_page = agp_generic_destroy_page;
-       agp_bridge.suspend = agp_generic_suspend;
-       agp_bridge.resume = agp_generic_resume;
-       agp_bridge.cant_use_aperture = 0;
-
-       return 0;
-
-       (void) pdev; /* unused */
-}
-
-int __init intel_830mp_setup (struct pci_dev *pdev)
-{
-       agp_bridge.masks = intel_generic_masks;
-       agp_bridge.num_of_masks = 1;
-       agp_bridge.aperture_sizes = (void *) intel_830mp_sizes;
-       agp_bridge.size_type = U8_APER_SIZE;
-       agp_bridge.num_aperture_sizes = 4;
-       agp_bridge.dev_private_data = NULL;
-       agp_bridge.needs_scratch_page = FALSE;
-       agp_bridge.configure = intel_830mp_configure;
-       agp_bridge.fetch_size = intel_8xx_fetch_size;
-       agp_bridge.cleanup = intel_8xx_cleanup;
-       agp_bridge.tlb_flush = intel_8xx_tlbflush;
-       agp_bridge.mask_memory = intel_mask_memory;
-       agp_bridge.agp_enable = agp_generic_agp_enable;
-       agp_bridge.cache_flush = global_cache_flush;
-       agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
-       agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
-       agp_bridge.insert_memory = agp_generic_insert_memory;
-       agp_bridge.remove_memory = agp_generic_remove_memory;
-       agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
-       agp_bridge.free_by_type = agp_generic_free_by_type;
-       agp_bridge.agp_alloc_page = agp_generic_alloc_page;
-       agp_bridge.agp_destroy_page = agp_generic_destroy_page;
-       agp_bridge.suspend = agp_generic_suspend;
-       agp_bridge.resume = agp_generic_resume;
-       agp_bridge.cant_use_aperture = 0;
-
-       return 0;
-
-       (void) pdev; /* unused */
-}
-
-int __init intel_840_setup (struct pci_dev *pdev)
-{
-	agp_bridge.masks = intel_generic_masks;
-	agp_bridge.num_of_masks = 1;
-	agp_bridge.aperture_sizes = (void *) intel_8xx_sizes;
-	agp_bridge.size_type = U8_APER_SIZE;
-	agp_bridge.num_aperture_sizes = 7;
-	agp_bridge.dev_private_data = NULL;
-	agp_bridge.needs_scratch_page = FALSE;
-	agp_bridge.configure = intel_840_configure;
-	agp_bridge.fetch_size = intel_8xx_fetch_size;
-	agp_bridge.cleanup = intel_8xx_cleanup;
-	agp_bridge.tlb_flush = intel_8xx_tlbflush;
-	agp_bridge.mask_memory = intel_mask_memory;
-	agp_bridge.agp_enable = agp_generic_agp_enable;
-	agp_bridge.cache_flush = global_cache_flush;
-	agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
-	agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
-	agp_bridge.insert_memory = agp_generic_insert_memory;
-	agp_bridge.remove_memory = agp_generic_remove_memory;
-	agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
-	agp_bridge.free_by_type = agp_generic_free_by_type;
-	agp_bridge.agp_alloc_page = agp_generic_alloc_page;
-	agp_bridge.agp_destroy_page = agp_generic_destroy_page;
-	agp_bridge.suspend = agp_generic_suspend;
-	agp_bridge.resume = agp_generic_resume;
-	agp_bridge.cant_use_aperture = 0;
-
-	return 0;
-	
-	(void) pdev; /* unused */
-}
-
-int __init intel_845_setup (struct pci_dev *pdev)
-{
-	agp_bridge.masks = intel_generic_masks;
-	agp_bridge.num_of_masks = 1;
-	agp_bridge.aperture_sizes = (void *) intel_8xx_sizes;
-	agp_bridge.size_type = U8_APER_SIZE;
-	agp_bridge.num_aperture_sizes = 7;
-	agp_bridge.dev_private_data = NULL;
-	agp_bridge.needs_scratch_page = FALSE;
-	agp_bridge.configure = intel_845_configure;
-	agp_bridge.fetch_size = intel_8xx_fetch_size;
-	agp_bridge.cleanup = intel_8xx_cleanup;
-	agp_bridge.tlb_flush = intel_8xx_tlbflush;
-	agp_bridge.mask_memory = intel_mask_memory;
-	agp_bridge.agp_enable = agp_generic_agp_enable;
-	agp_bridge.cache_flush = global_cache_flush;
-	agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
-	agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
-	agp_bridge.insert_memory = agp_generic_insert_memory;
-	agp_bridge.remove_memory = agp_generic_remove_memory;
-	agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
-	agp_bridge.free_by_type = agp_generic_free_by_type;
-	agp_bridge.agp_alloc_page = agp_generic_alloc_page;
-	agp_bridge.agp_destroy_page = agp_generic_destroy_page;
-	agp_bridge.suspend = agp_generic_suspend;
-	agp_bridge.resume = agp_generic_resume;
-	agp_bridge.cant_use_aperture = 0;
-
-	return 0;
-	
-	(void) pdev; /* unused */
-}
-
-int __init intel_850_setup (struct pci_dev *pdev)
-{
-	agp_bridge.masks = intel_generic_masks;
-	agp_bridge.num_of_masks = 1;
-	agp_bridge.aperture_sizes = (void *) intel_8xx_sizes;
-	agp_bridge.size_type = U8_APER_SIZE;
-	agp_bridge.num_aperture_sizes = 7;
-	agp_bridge.dev_private_data = NULL;
-	agp_bridge.needs_scratch_page = FALSE;
-	agp_bridge.configure = intel_850_configure;
-	agp_bridge.fetch_size = intel_8xx_fetch_size;
-	agp_bridge.cleanup = intel_8xx_cleanup;
-	agp_bridge.tlb_flush = intel_8xx_tlbflush;
-	agp_bridge.mask_memory = intel_mask_memory;
-	agp_bridge.agp_enable = agp_generic_agp_enable;
-	agp_bridge.cache_flush = global_cache_flush;
-	agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
-	agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
-	agp_bridge.insert_memory = agp_generic_insert_memory;
-	agp_bridge.remove_memory = agp_generic_remove_memory;
-	agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
-	agp_bridge.free_by_type = agp_generic_free_by_type;
-	agp_bridge.agp_alloc_page = agp_generic_alloc_page;
-	agp_bridge.agp_destroy_page = agp_generic_destroy_page;
-	agp_bridge.suspend = agp_generic_suspend;
-	agp_bridge.resume = agp_generic_resume;
-	agp_bridge.cant_use_aperture = 0;
-
-	return 0;
-	
-	(void) pdev; /* unused */
-}
-
-int __init intel_860_setup (struct pci_dev *pdev)
-{
-	agp_bridge.masks = intel_generic_masks;
-	agp_bridge.num_of_masks = 1;
-	agp_bridge.aperture_sizes = (void *) intel_8xx_sizes;
-	agp_bridge.size_type = U8_APER_SIZE;
-	agp_bridge.num_aperture_sizes = 7;
-	agp_bridge.dev_private_data = NULL;
-	agp_bridge.needs_scratch_page = FALSE;
-	agp_bridge.configure = intel_860_configure;
-	agp_bridge.fetch_size = intel_8xx_fetch_size;
-	agp_bridge.cleanup = intel_8xx_cleanup;
-	agp_bridge.tlb_flush = intel_8xx_tlbflush;
-	agp_bridge.mask_memory = intel_mask_memory;
-	agp_bridge.agp_enable = agp_generic_agp_enable;
-	agp_bridge.cache_flush = global_cache_flush;
-	agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
-	agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
-	agp_bridge.insert_memory = agp_generic_insert_memory;
-	agp_bridge.remove_memory = agp_generic_remove_memory;
-	agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
-	agp_bridge.free_by_type = agp_generic_free_by_type;
-	agp_bridge.agp_alloc_page = agp_generic_alloc_page;
-	agp_bridge.agp_destroy_page = agp_generic_destroy_page;
-	agp_bridge.suspend = agp_generic_suspend;
-	agp_bridge.resume = agp_generic_resume;
-	agp_bridge.cant_use_aperture = 0;
-
-	return 0;
-
-	(void) pdev; /* unused */
-}
-
diff -Nru a/drivers/char/agp/agpgart_be-sis.c b/drivers/char/agp/agpgart_be-sis.c
--- a/drivers/char/agp/agpgart_be-sis.c	Wed Jul 17 11:29:31 2002
+++ /dev/null	Wed Dec 31 16:00:00 1969
@@ -1,142 +0,0 @@
-/*
- * AGPGART module version 0.99
- * Copyright (C) 1999 Jeff Hartmann
- * Copyright (C) 1999 Precision Insight, Inc.
- * Copyright (C) 1999 Xi Graphics, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, 
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE 
- * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * TODO: 
- * - Allocate more than order 0 pages to avoid too much linear map splitting.
- */
-
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/agp_backend.h>
-#include "agp.h"
-
-static int sis_fetch_size(void)
-{
-	u8 temp_size;
-	int i;
-	struct aper_size_info_8 *values;
-
-	pci_read_config_byte(agp_bridge.dev, SIS_APSIZE, &temp_size);
-	values = A_SIZE_8(agp_bridge.aperture_sizes);
-	for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
-		if ((temp_size == values[i].size_value) ||
-		    ((temp_size & ~(0x03)) ==
-		     (values[i].size_value & ~(0x03)))) {
-			agp_bridge.previous_size =
-			    agp_bridge.current_size = (void *) (values + i);
-
-			agp_bridge.aperture_size_idx = i;
-			return values[i].size;
-		}
-	}
-
-	return 0;
-}
-
-
-static void sis_tlbflush(agp_memory * mem)
-{
-	pci_write_config_byte(agp_bridge.dev, SIS_TLBFLUSH, 0x02);
-}
-
-static int sis_configure(void)
-{
-	u32 temp;
-	struct aper_size_info_8 *current_size;
-
-	current_size = A_SIZE_8(agp_bridge.current_size);
-	pci_write_config_byte(agp_bridge.dev, SIS_TLBCNTRL, 0x05);
-	pci_read_config_dword(agp_bridge.dev, SIS_APBASE, &temp);
-	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
-	pci_write_config_dword(agp_bridge.dev, SIS_ATTBASE,
-			       agp_bridge.gatt_bus_addr);
-	pci_write_config_byte(agp_bridge.dev, SIS_APSIZE,
-			      current_size->size_value);
-	return 0;
-}
-
-static void sis_cleanup(void)
-{
-	struct aper_size_info_8 *previous_size;
-
-	previous_size = A_SIZE_8(agp_bridge.previous_size);
-	pci_write_config_byte(agp_bridge.dev, SIS_APSIZE,
-			      (previous_size->size_value & ~(0x03)));
-}
-
-static unsigned long sis_mask_memory(unsigned long addr, int type)
-{
-	/* Memory type is ignored */
-
-	return addr | agp_bridge.masks[0].mask;
-}
-
-static struct aper_size_info_8 sis_generic_sizes[7] =
-{
-	{256, 65536, 6, 99},
-	{128, 32768, 5, 83},
-	{64, 16384, 4, 67},
-	{32, 8192, 3, 51},
-	{16, 4096, 2, 35},
-	{8, 2048, 1, 19},
-	{4, 1024, 0, 3}
-};
-
-static struct gatt_mask sis_generic_masks[] =
-{
-	{mask: 0x00000000, type: 0}
-};
-
-int __init sis_generic_setup (struct pci_dev *pdev)
-{
-	agp_bridge.masks = sis_generic_masks;
-	agp_bridge.num_of_masks = 1;
-	agp_bridge.aperture_sizes = (void *) sis_generic_sizes;
-	agp_bridge.size_type = U8_APER_SIZE;
-	agp_bridge.num_aperture_sizes = 7;
-	agp_bridge.dev_private_data = NULL;
-	agp_bridge.needs_scratch_page = FALSE;
-	agp_bridge.configure = sis_configure;
-	agp_bridge.fetch_size = sis_fetch_size;
-	agp_bridge.cleanup = sis_cleanup;
-	agp_bridge.tlb_flush = sis_tlbflush;
-	agp_bridge.mask_memory = sis_mask_memory;
-	agp_bridge.agp_enable = agp_generic_agp_enable;
-	agp_bridge.cache_flush = global_cache_flush;
-	agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
-	agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
-	agp_bridge.insert_memory = agp_generic_insert_memory;
-	agp_bridge.remove_memory = agp_generic_remove_memory;
-	agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
-	agp_bridge.free_by_type = agp_generic_free_by_type;
-	agp_bridge.agp_alloc_page = agp_generic_alloc_page;
-	agp_bridge.agp_destroy_page = agp_generic_destroy_page;
-	agp_bridge.suspend = agp_generic_suspend;
-	agp_bridge.resume = agp_generic_resume;
-	agp_bridge.cant_use_aperture = 0;
-
-	return 0;
-}
-
diff -Nru a/drivers/char/agp/agpgart_be-sworks.c b/drivers/char/agp/agpgart_be-sworks.c
--- a/drivers/char/agp/agpgart_be-sworks.c	Wed Jul 17 11:29:31 2002
+++ /dev/null	Wed Dec 31 16:00:00 1969
@@ -1,626 +0,0 @@
-/*
- * AGPGART module version 0.99
- * Copyright (C) 1999 Jeff Hartmann
- * Copyright (C) 1999 Precision Insight, Inc.
- * Copyright (C) 1999 Xi Graphics, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, 
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE 
- * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * TODO: 
- * - Allocate more than order 0 pages to avoid too much linear map splitting.
- */
-
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/agp_backend.h>
-#include "agp.h"
-
-struct serverworks_page_map {
-	unsigned long *real;
-	unsigned long *remapped;
-};
-
-static struct _serverworks_private {
-	struct pci_dev *svrwrks_dev;	/* device one */
-	volatile u8 *registers;
-	struct serverworks_page_map **gatt_pages;
-	int num_tables;
-	struct serverworks_page_map scratch_dir;
-
-	int gart_addr_ofs;
-	int mm_addr_ofs;
-} serverworks_private;
-
-static int serverworks_create_page_map(struct serverworks_page_map *page_map)
-{
-	int i;
-
-	page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL);
-	if (page_map->real == NULL) {
-		return -ENOMEM;
-	}
-	SetPageReserved(virt_to_page(page_map->real));
-	CACHE_FLUSH();
-	page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real), 
-					    PAGE_SIZE);
-	if (page_map->remapped == NULL) {
-		ClearPageReserved(virt_to_page(page_map->real));
-		free_page((unsigned long) page_map->real);
-		page_map->real = NULL;
-		return -ENOMEM;
-	}
-	CACHE_FLUSH();
-
-	for(i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) {
-		page_map->remapped[i] = agp_bridge.scratch_page;
-	}
-
-	return 0;
-}
-
-static void serverworks_free_page_map(struct serverworks_page_map *page_map)
-{
-	iounmap(page_map->remapped);
-	ClearPageReserved(virt_to_page(page_map->real));
-	free_page((unsigned long) page_map->real);
-}
-
-static void serverworks_free_gatt_pages(void)
-{
-	int i;
-	struct serverworks_page_map **tables;
-	struct serverworks_page_map *entry;
-
-	tables = serverworks_private.gatt_pages;
-	for(i = 0; i < serverworks_private.num_tables; i++) {
-		entry = tables[i];
-		if (entry != NULL) {
-			if (entry->real != NULL) {
-				serverworks_free_page_map(entry);
-			}
-			kfree(entry);
-		}
-	}
-	kfree(tables);
-}
-
-static int serverworks_create_gatt_pages(int nr_tables)
-{
-	struct serverworks_page_map **tables;
-	struct serverworks_page_map *entry;
-	int retval = 0;
-	int i;
-
-	tables = kmalloc((nr_tables + 1) * sizeof(struct serverworks_page_map *), 
-			 GFP_KERNEL);
-	if (tables == NULL) {
-		return -ENOMEM;
-	}
-	memset(tables, 0, sizeof(struct serverworks_page_map *) * (nr_tables + 1));
-	for (i = 0; i < nr_tables; i++) {
-		entry = kmalloc(sizeof(struct serverworks_page_map), GFP_KERNEL);
-		if (entry == NULL) {
-			retval = -ENOMEM;
-			break;
-		}
-		memset(entry, 0, sizeof(struct serverworks_page_map));
-		tables[i] = entry;
-		retval = serverworks_create_page_map(entry);
-		if (retval != 0) break;
-	}
-	serverworks_private.num_tables = nr_tables;
-	serverworks_private.gatt_pages = tables;
-
-	if (retval != 0) serverworks_free_gatt_pages();
-
-	return retval;
-}
-
-#define SVRWRKS_GET_GATT(addr) (serverworks_private.gatt_pages[\
-	GET_PAGE_DIR_IDX(addr)]->remapped)
-
-#ifndef GET_PAGE_DIR_OFF
-#define GET_PAGE_DIR_OFF(addr) (addr >> 22)
-#endif
-
-#ifndef GET_PAGE_DIR_IDX
-#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \
-	GET_PAGE_DIR_OFF(agp_bridge.gart_bus_addr))
-#endif
-
-#ifndef GET_GATT_OFF
-#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
-#endif
-
-static int serverworks_create_gatt_table(void)
-{
-	struct aper_size_info_lvl2 *value;
-	struct serverworks_page_map page_dir;
-	int retval;
-	u32 temp;
-	int i;
-
-	value = A_SIZE_LVL2(agp_bridge.current_size);
-	retval = serverworks_create_page_map(&page_dir);
-	if (retval != 0) {
-		return retval;
-	}
-	retval = serverworks_create_page_map(&serverworks_private.scratch_dir);
-	if (retval != 0) {
-		serverworks_free_page_map(&page_dir);
-		return retval;
-	}
-	/* Create a fake scratch directory */
-	for(i = 0; i < 1024; i++) {
-		serverworks_private.scratch_dir.remapped[i] = (unsigned long) agp_bridge.scratch_page;
-		page_dir.remapped[i] =
-			virt_to_phys(serverworks_private.scratch_dir.real);
-		page_dir.remapped[i] |= 0x00000001;
-	}
-
-	retval = serverworks_create_gatt_pages(value->num_entries / 1024);
-	if (retval != 0) {
-		serverworks_free_page_map(&page_dir);
-		serverworks_free_page_map(&serverworks_private.scratch_dir);
-		return retval;
-	}
-
-	agp_bridge.gatt_table_real = page_dir.real;
-	agp_bridge.gatt_table = page_dir.remapped;
-	agp_bridge.gatt_bus_addr = virt_to_phys(page_dir.real);
-
-	/* Get the address for the gart region.
-	 * This is a bus address even on the alpha, b/c it's
-	 * used to program the agp master, not the cpu
-	 */
-
-	pci_read_config_dword(agp_bridge.dev,
-			      serverworks_private.gart_addr_ofs,
-			      &temp);
-	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
-
-	/* Calculate the agp offset */	
-
-	for(i = 0; i < value->num_entries / 1024; i++) {
-		page_dir.remapped[i] =
-			virt_to_phys(serverworks_private.gatt_pages[i]->real);
-		page_dir.remapped[i] |= 0x00000001;
-	}
-
-	return 0;
-}
-
-static int serverworks_free_gatt_table(void)
-{
-	struct serverworks_page_map page_dir;
-   
-	page_dir.real = agp_bridge.gatt_table_real;
-	page_dir.remapped = agp_bridge.gatt_table;
-
-	serverworks_free_gatt_pages();
-	serverworks_free_page_map(&page_dir);
-	serverworks_free_page_map(&serverworks_private.scratch_dir);
-	return 0;
-}
-
-static int serverworks_fetch_size(void)
-{
-	int i;
-	u32 temp;
-	u32 temp2;
-	struct aper_size_info_lvl2 *values;
-
-	values = A_SIZE_LVL2(agp_bridge.aperture_sizes);
-	pci_read_config_dword(agp_bridge.dev,
-			      serverworks_private.gart_addr_ofs,
-			      &temp);
-	pci_write_config_dword(agp_bridge.dev,
-			       serverworks_private.gart_addr_ofs,
-			       SVWRKS_SIZE_MASK);
-	pci_read_config_dword(agp_bridge.dev,
-			      serverworks_private.gart_addr_ofs,
-			      &temp2);
-	pci_write_config_dword(agp_bridge.dev,
-			       serverworks_private.gart_addr_ofs,
-			       temp);
-	temp2 &= SVWRKS_SIZE_MASK;
-
-	for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
-		if (temp2 == values[i].size_value) {
-			agp_bridge.previous_size =
-			    agp_bridge.current_size = (void *) (values + i);
-
-			agp_bridge.aperture_size_idx = i;
-			return values[i].size;
-		}
-	}
-
-	return 0;
-}
-
-static int serverworks_configure(void)
-{
-	struct aper_size_info_lvl2 *current_size;
-	u32 temp;
-	u8 enable_reg;
-	u8 cap_ptr;
-	u32 cap_id;
-	u16 cap_reg;
-
-	current_size = A_SIZE_LVL2(agp_bridge.current_size);
-
-	/* Get the memory mapped registers */
-	pci_read_config_dword(agp_bridge.dev,
-			      serverworks_private.mm_addr_ofs,
-			      &temp);
-	temp = (temp & PCI_BASE_ADDRESS_MEM_MASK);
-	serverworks_private.registers = (volatile u8 *) ioremap(temp, 4096);
-
-	OUTREG8(serverworks_private.registers, SVWRKS_GART_CACHE, 0x0a);
-
-	OUTREG32(serverworks_private.registers, SVWRKS_GATTBASE, 
-		 agp_bridge.gatt_bus_addr);
-
-	cap_reg = INREG16(serverworks_private.registers, SVWRKS_COMMAND);
-	cap_reg &= ~0x0007;
-	cap_reg |= 0x4;
-	OUTREG16(serverworks_private.registers, SVWRKS_COMMAND, cap_reg);
-
-	pci_read_config_byte(serverworks_private.svrwrks_dev,
-			     SVWRKS_AGP_ENABLE, &enable_reg);
-	enable_reg |= 0x1; /* Agp Enable bit */
-	pci_write_config_byte(serverworks_private.svrwrks_dev,
-			      SVWRKS_AGP_ENABLE, enable_reg);
-	agp_bridge.tlb_flush(NULL);
-
-	pci_read_config_byte(serverworks_private.svrwrks_dev, 0x34, &cap_ptr);
-	if (cap_ptr != 0x00) {
-		do {
-			pci_read_config_dword(serverworks_private.svrwrks_dev,
-					      cap_ptr, &cap_id);
-
-			if ((cap_id & 0xff) != 0x02)
-				cap_ptr = (cap_id >> 8) & 0xff;
-		}
-		while (((cap_id & 0xff) != 0x02) && (cap_ptr != 0x00));
-	}
-	agp_bridge.capndx = cap_ptr;
-
-	/* Fill in the mode register */
-	pci_read_config_dword(serverworks_private.svrwrks_dev,
-			      agp_bridge.capndx + 4,
-			      &agp_bridge.mode);
-
-	pci_read_config_byte(agp_bridge.dev,
-			     SVWRKS_CACHING,
-			     &enable_reg);
-	enable_reg &= ~0x3;
-	pci_write_config_byte(agp_bridge.dev,
-			      SVWRKS_CACHING,
-			      enable_reg);
-
-	pci_read_config_byte(agp_bridge.dev,
-			     SVWRKS_FEATURE,
-			     &enable_reg);
-	enable_reg |= (1<<6);
-	pci_write_config_byte(agp_bridge.dev,
-			      SVWRKS_FEATURE,
-			      enable_reg);
-
-	return 0;
-}
-
-static void serverworks_cleanup(void)
-{
-	iounmap((void *) serverworks_private.registers);
-}
-
-/*
- * This routine could be implemented by taking the addresses
- * written to the GATT, and flushing them individually.  However
 * currently it just flushes the whole table, which is probably
 * more efficient, since agp_memory blocks can be a large number of
- * entries.
- */
-
-static void serverworks_tlbflush(agp_memory * temp)
-{
-	unsigned long end;
-
-	OUTREG8(serverworks_private.registers, SVWRKS_POSTFLUSH, 0x01);
-	end = jiffies + 3*HZ;
-	while(INREG8(serverworks_private.registers, 
-		     SVWRKS_POSTFLUSH) == 0x01) {
-		if((signed)(end - jiffies) <= 0) {
-			printk(KERN_ERR "Posted write buffer flush took more"
-			       " than 3 seconds\n");
-		}
-	}
-	OUTREG32(serverworks_private.registers, SVWRKS_DIRFLUSH, 0x00000001);
-	end = jiffies + 3*HZ;
-	while(INREG32(serverworks_private.registers, 
-		     SVWRKS_DIRFLUSH) == 0x00000001) {
-		if((signed)(end - jiffies) <= 0) {
-			printk(KERN_ERR "TLB flush took more"
-			       " than 3 seconds\n");
-		}
-	}
-}
-
-static unsigned long serverworks_mask_memory(unsigned long addr, int type)
-{
-	/* Only type 0 is supported by the serverworks chipsets */
-
-	return addr | agp_bridge.masks[0].mask;
-}
-
-static int serverworks_insert_memory(agp_memory * mem,
-			     off_t pg_start, int type)
-{
-	int i, j, num_entries;
-	unsigned long *cur_gatt;
-	unsigned long addr;
-
-	num_entries = A_SIZE_LVL2(agp_bridge.current_size)->num_entries;
-
-	if (type != 0 || mem->type != 0) {
-		return -EINVAL;
-	}
-	if ((pg_start + mem->page_count) > num_entries) {
-		return -EINVAL;
-	}
-
-	j = pg_start;
-	while (j < (pg_start + mem->page_count)) {
-		addr = (j * PAGE_SIZE) + agp_bridge.gart_bus_addr;
-		cur_gatt = SVRWRKS_GET_GATT(addr);
-		if (!PGE_EMPTY(cur_gatt[GET_GATT_OFF(addr)])) {
-			return -EBUSY;
-		}
-		j++;
-	}
-
-	if (mem->is_flushed == FALSE) {
-		CACHE_FLUSH();
-		mem->is_flushed = TRUE;
-	}
-
-	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
-		addr = (j * PAGE_SIZE) + agp_bridge.gart_bus_addr;
-		cur_gatt = SVRWRKS_GET_GATT(addr);
-		cur_gatt[GET_GATT_OFF(addr)] = mem->memory[i];
-	}
-	agp_bridge.tlb_flush(mem);
-	return 0;
-}
-
-static int serverworks_remove_memory(agp_memory * mem, off_t pg_start,
-			     int type)
-{
-	int i;
-	unsigned long *cur_gatt;
-	unsigned long addr;
-
-	if (type != 0 || mem->type != 0) {
-		return -EINVAL;
-	}
-
-	CACHE_FLUSH();
-	agp_bridge.tlb_flush(mem);
-
-	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
-		addr = (i * PAGE_SIZE) + agp_bridge.gart_bus_addr;
-		cur_gatt = SVRWRKS_GET_GATT(addr);
-		cur_gatt[GET_GATT_OFF(addr)] = 
-			(unsigned long) agp_bridge.scratch_page;
-	}
-
-	agp_bridge.tlb_flush(mem);
-	return 0;
-}
-
-static struct gatt_mask serverworks_masks[] =
-{
-	{mask: 0x00000001, type: 0}
-};
-
-static struct aper_size_info_lvl2 serverworks_sizes[7] =
-{
-	{2048, 524288, 0x80000000},
-	{1024, 262144, 0xc0000000},
-	{512, 131072, 0xe0000000},
-	{256, 65536, 0xf0000000},
-	{128, 32768, 0xf8000000},
-	{64, 16384, 0xfc000000},
-	{32, 8192, 0xfe000000}
-};
-
-static void serverworks_agp_enable(u32 mode)
-{
-	struct pci_dev *device = NULL;
-	u32 command, scratch, cap_id;
-	u8 cap_ptr;
-
-	pci_read_config_dword(serverworks_private.svrwrks_dev,
-			      agp_bridge.capndx + 4,
-			      &command);
-
-	/*
-	 * PASS1: go through all devices that claim to be
-	 *        AGP devices and collect their data.
-	 */
-
-
-	pci_for_each_dev(device) {
-		cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP);
-		if (cap_ptr != 0x00) {
-			do {
-				pci_read_config_dword(device,
-						      cap_ptr, &cap_id);
-
-				if ((cap_id & 0xff) != 0x02)
-					cap_ptr = (cap_id >> 8) & 0xff;
-			}
-			while (((cap_id & 0xff) != 0x02) && (cap_ptr != 0x00));
-		}
-		if (cap_ptr != 0x00) {
-			/*
-			 * Ok, here we have an AGP device. Disable impossible
-			 * settings, and adjust the readqueue to the minimum.
-			 */
-
-			pci_read_config_dword(device, cap_ptr + 4, &scratch);
-
-			/* adjust RQ depth */
-			command =
-			    ((command & ~0xff000000) |
-			     min_t(u32, (mode & 0xff000000),
-				 min_t(u32, (command & 0xff000000),
-				     (scratch & 0xff000000))));
-
-			/* disable SBA if it's not supported */
-			if (!((command & 0x00000200) &&
-			      (scratch & 0x00000200) &&
-			      (mode & 0x00000200)))
-				command &= ~0x00000200;
-
-			/* disable FW */
-			command &= ~0x00000010;
-
-			command &= ~0x00000008;
-
-			if (!((command & 4) &&
-			      (scratch & 4) &&
-			      (mode & 4)))
-				command &= ~0x00000004;
-
-			if (!((command & 2) &&
-			      (scratch & 2) &&
-			      (mode & 2)))
-				command &= ~0x00000002;
-
-			if (!((command & 1) &&
-			      (scratch & 1) &&
-			      (mode & 1)))
-				command &= ~0x00000001;
-		}
-	}
-	/*
-	 * PASS2: Figure out the 4X/2X/1X setting and enable the
-	 *        target (our motherboard chipset).
-	 */
-
-	if (command & 4) {
-		command &= ~3;	/* 4X */
-	}
-	if (command & 2) {
-		command &= ~5;	/* 2X */
-	}
-	if (command & 1) {
-		command &= ~6;	/* 1X */
-	}
-	command |= 0x00000100;
-
-	pci_write_config_dword(serverworks_private.svrwrks_dev,
-			       agp_bridge.capndx + 8,
-			       command);
-
-	/*
-	 * PASS3: Go through all AGP devices and update the
-	 *        command registers.
-	 */
-
-	pci_for_each_dev(device) {
-		cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP);
-		if (cap_ptr != 0x00)
-			pci_write_config_dword(device, cap_ptr + 8, command);
-	}
-}
-
-int __init serverworks_setup (struct pci_dev *pdev)
-{
-	u32 temp;
-	u32 temp2;
-
-	serverworks_private.svrwrks_dev = pdev;
-
-	agp_bridge.masks = serverworks_masks;
-	agp_bridge.num_of_masks = 1;
-	agp_bridge.aperture_sizes = (void *) serverworks_sizes;
-	agp_bridge.size_type = LVL2_APER_SIZE;
-	agp_bridge.num_aperture_sizes = 7;
-	agp_bridge.dev_private_data = (void *) &serverworks_private;
-	agp_bridge.needs_scratch_page = TRUE;
-	agp_bridge.configure = serverworks_configure;
-	agp_bridge.fetch_size = serverworks_fetch_size;
-	agp_bridge.cleanup = serverworks_cleanup;
-	agp_bridge.tlb_flush = serverworks_tlbflush;
-	agp_bridge.mask_memory = serverworks_mask_memory;
-	agp_bridge.agp_enable = serverworks_agp_enable;
-	agp_bridge.cache_flush = global_cache_flush;
-	agp_bridge.create_gatt_table = serverworks_create_gatt_table;
-	agp_bridge.free_gatt_table = serverworks_free_gatt_table;
-	agp_bridge.insert_memory = serverworks_insert_memory;
-	agp_bridge.remove_memory = serverworks_remove_memory;
-	agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
-	agp_bridge.free_by_type = agp_generic_free_by_type;
-	agp_bridge.agp_alloc_page = agp_generic_alloc_page;
-	agp_bridge.agp_destroy_page = agp_generic_destroy_page;
-	agp_bridge.suspend = agp_generic_suspend;
-	agp_bridge.resume = agp_generic_resume;
-	agp_bridge.cant_use_aperture = 0;
-
-	pci_read_config_dword(agp_bridge.dev,
-			      SVWRKS_APSIZE,
-			      &temp);
-
-	serverworks_private.gart_addr_ofs = 0x10;
-
-	if(temp & PCI_BASE_ADDRESS_MEM_TYPE_64) {
-		pci_read_config_dword(agp_bridge.dev,
-				      SVWRKS_APSIZE + 4,
-				      &temp2);
-		if(temp2 != 0) {
-			printk("Detected 64 bit aperture address, but top "
-			       "bits are not zero.  Disabling agp\n");
-			return -ENODEV;
-		}
-		serverworks_private.mm_addr_ofs = 0x18;
-	} else {
-		serverworks_private.mm_addr_ofs = 0x14;
-	}
-
-	pci_read_config_dword(agp_bridge.dev,
-			      serverworks_private.mm_addr_ofs,
-			      &temp);
-	if(temp & PCI_BASE_ADDRESS_MEM_TYPE_64) {
-		pci_read_config_dword(agp_bridge.dev,
-				      serverworks_private.mm_addr_ofs + 4,
-				      &temp2);
-		if(temp2 != 0) {
-			printk("Detected 64 bit MMIO address, but top "
-			       "bits are not zero.  Disabling agp\n");
-			return -ENODEV;
-		}
-	}
-
-	return 0;
-}
-
diff -Nru a/drivers/char/agp/agpgart_be-via.c b/drivers/char/agp/agpgart_be-via.c
--- a/drivers/char/agp/agpgart_be-via.c	Wed Jul 17 11:29:31 2002
+++ /dev/null	Wed Dec 31 16:00:00 1969
@@ -1,151 +0,0 @@
-/*
- * AGPGART module version 0.99
- * Copyright (C) 1999 Jeff Hartmann
- * Copyright (C) 1999 Precision Insight, Inc.
- * Copyright (C) 1999 Xi Graphics, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, 
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE 
- * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * TODO: 
- * - Allocate more than order 0 pages to avoid too much linear map splitting.
- */
-#include <linux/config.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/agp_backend.h>
-#include "agp.h"
-
-
-static int via_fetch_size(void)
-{
-	int i;
-	u8 temp;
-	struct aper_size_info_8 *values;
-
-	values = A_SIZE_8(agp_bridge.aperture_sizes);
-	pci_read_config_byte(agp_bridge.dev, VIA_APSIZE, &temp);
-	for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
-		if (temp == values[i].size_value) {
-			agp_bridge.previous_size =
-			    agp_bridge.current_size = (void *) (values + i);
-			agp_bridge.aperture_size_idx = i;
-			return values[i].size;
-		}
-	}
-
-	return 0;
-}
-
-static int via_configure(void)
-{
-	u32 temp;
-	struct aper_size_info_8 *current_size;
-
-	current_size = A_SIZE_8(agp_bridge.current_size);
-	/* aperture size */
-	pci_write_config_byte(agp_bridge.dev, VIA_APSIZE,
-			      current_size->size_value);
-	/* address to map to */
-	pci_read_config_dword(agp_bridge.dev, VIA_APBASE, &temp);
-	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
-
-	/* GART control register */
-	pci_write_config_dword(agp_bridge.dev, VIA_GARTCTRL, 0x0000000f);
-
-	/* attbase - aperture GATT base */
-	pci_write_config_dword(agp_bridge.dev, VIA_ATTBASE,
-			    (agp_bridge.gatt_bus_addr & 0xfffff000) | 3);
-	return 0;
-}
-
-static void via_cleanup(void)
-{
-	struct aper_size_info_8 *previous_size;
-
-	previous_size = A_SIZE_8(agp_bridge.previous_size);
-	pci_write_config_byte(agp_bridge.dev, VIA_APSIZE,
-			      previous_size->size_value);
-	/* Do not disable by writing 0 to VIA_ATTBASE, it screws things up
-	 * during reinitialization.
-	 */
-}
-
-static void via_tlbflush(agp_memory * mem)
-{
-	pci_write_config_dword(agp_bridge.dev, VIA_GARTCTRL, 0x0000008f);
-	pci_write_config_dword(agp_bridge.dev, VIA_GARTCTRL, 0x0000000f);
-}
-
-static unsigned long via_mask_memory(unsigned long addr, int type)
-{
-	/* Memory type is ignored */
-
-	return addr | agp_bridge.masks[0].mask;
-}
-
-static struct aper_size_info_8 via_generic_sizes[7] =
-{
-	{256, 65536, 6, 0},
-	{128, 32768, 5, 128},
-	{64, 16384, 4, 192},
-	{32, 8192, 3, 224},
-	{16, 4096, 2, 240},
-	{8, 2048, 1, 248},
-	{4, 1024, 0, 252}
-};
-
-static struct gatt_mask via_generic_masks[] =
-{
-	{mask: 0x00000000, type: 0}
-};
-
-int __init via_generic_setup (struct pci_dev *pdev)
-{
-	agp_bridge.masks = via_generic_masks;
-	agp_bridge.num_of_masks = 1;
-	agp_bridge.aperture_sizes = (void *) via_generic_sizes;
-	agp_bridge.size_type = U8_APER_SIZE;
-	agp_bridge.num_aperture_sizes = 7;
-	agp_bridge.dev_private_data = NULL;
-	agp_bridge.needs_scratch_page = FALSE;
-	agp_bridge.configure = via_configure;
-	agp_bridge.fetch_size = via_fetch_size;
-	agp_bridge.cleanup = via_cleanup;
-	agp_bridge.tlb_flush = via_tlbflush;
-	agp_bridge.mask_memory = via_mask_memory;
-	agp_bridge.agp_enable = agp_generic_agp_enable;
-	agp_bridge.cache_flush = global_cache_flush;
-	agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
-	agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
-	agp_bridge.insert_memory = agp_generic_insert_memory;
-	agp_bridge.remove_memory = agp_generic_remove_memory;
-	agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
-	agp_bridge.free_by_type = agp_generic_free_by_type;
-	agp_bridge.agp_alloc_page = agp_generic_alloc_page;
-	agp_bridge.agp_destroy_page = agp_generic_destroy_page;
-	agp_bridge.suspend = agp_generic_suspend;
-	agp_bridge.resume = agp_generic_resume;
-	agp_bridge.cant_use_aperture = 0;
-
-	return 0;
-	
-	(void) pdev; /* unused */
-}
diff -Nru a/drivers/char/agp/agpgart_be.c b/drivers/char/agp/agpgart_be.c
--- a/drivers/char/agp/agpgart_be.c	Wed Jul 17 11:29:31 2002
+++ /dev/null	Wed Dec 31 16:00:00 1969
@@ -1,1662 +0,0 @@
-/*
- * AGPGART module version 0.99
- * Copyright (C) 1999 Jeff Hartmann
- * Copyright (C) 1999 Precision Insight, Inc.
- * Copyright (C) 1999 Xi Graphics, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, 
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE 
- * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * TODO: 
- * - Allocate more than order 0 pages to avoid too much linear map splitting.
- */
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/pagemap.h>
-#include <linux/miscdevice.h>
-#include <linux/pm.h>
-#include <linux/agp_backend.h>
-#include "agp.h"
-
-MODULE_AUTHOR("Jeff Hartmann <jhartmann@precisioninsight.com>");
-MODULE_PARM(agp_try_unsupported, "1i");
-MODULE_LICENSE("GPL and additional rights");
-EXPORT_SYMBOL(agp_free_memory);
-EXPORT_SYMBOL(agp_allocate_memory);
-EXPORT_SYMBOL(agp_copy_info);
-EXPORT_SYMBOL(agp_bind_memory);
-EXPORT_SYMBOL(agp_unbind_memory);
-EXPORT_SYMBOL(agp_enable);
-EXPORT_SYMBOL(agp_backend_acquire);
-EXPORT_SYMBOL(agp_backend_release);
-
-struct agp_bridge_data agp_bridge = { type: NOT_SUPPORTED };
-static int agp_try_unsupported __initdata = 0;
-
-int agp_backend_acquire(void)
-{
-	if (agp_bridge.type == NOT_SUPPORTED)
-		return -EINVAL;
-
-	atomic_inc(&agp_bridge.agp_in_use);
-
-	if (atomic_read(&agp_bridge.agp_in_use) != 1) {
-		atomic_dec(&agp_bridge.agp_in_use);
-		return -EBUSY;
-	}
-	MOD_INC_USE_COUNT;
-	return 0;
-}
-
-void agp_backend_release(void)
-{
-	if (agp_bridge.type == NOT_SUPPORTED)
-		return;
-
-	atomic_dec(&agp_bridge.agp_in_use);
-	MOD_DEC_USE_COUNT;
-}
-
-/* 
- * Generic routines for handling agp_memory structures -
- * They use the basic page allocation routines to do the
- * brunt of the work.
- */
-
-
-void agp_free_key(int key)
-{
-
-	if (key < 0)
-		return;
-
-	if (key < MAXKEY)
-		clear_bit(key, agp_bridge.key_list);
-}
-
-static int agp_get_key(void)
-{
-	int bit;
-
-	bit = find_first_zero_bit(agp_bridge.key_list, MAXKEY);
-	if (bit < MAXKEY) {
-		set_bit(bit, agp_bridge.key_list);
-		return bit;
-	}
-	return -1;
-}
-
-agp_memory *agp_create_memory(int scratch_pages)
-{
-	agp_memory *new;
-
-	new = kmalloc(sizeof(agp_memory), GFP_KERNEL);
-
-	if (new == NULL)
-		return NULL;
-
-	memset(new, 0, sizeof(agp_memory));
-	new->key = agp_get_key();
-
-	if (new->key < 0) {
-		kfree(new);
-		return NULL;
-	}
-	new->memory = vmalloc(PAGE_SIZE * scratch_pages);
-
-	if (new->memory == NULL) {
-		agp_free_key(new->key);
-		kfree(new);
-		return NULL;
-	}
-	new->num_scratch_pages = scratch_pages;
-	return new;
-}
-
-void agp_free_memory(agp_memory * curr)
-{
-	int i;
-
-	if ((agp_bridge.type == NOT_SUPPORTED) || (curr == NULL))
-		return;
-
-	if (curr->is_bound == TRUE)
-		agp_unbind_memory(curr);
-
-	if (curr->type != 0) {
-		agp_bridge.free_by_type(curr);
-		return;
-	}
-	if (curr->page_count != 0) {
-		for (i = 0; i < curr->page_count; i++) {
-			curr->memory[i] &= ~(0x00000fff);
-			agp_bridge.agp_destroy_page(phys_to_virt(curr->memory[i]));
-		}
-	}
-	agp_free_key(curr->key);
-	vfree(curr->memory);
-	kfree(curr);
-	MOD_DEC_USE_COUNT;
-}
-
-#define ENTRIES_PER_PAGE		(PAGE_SIZE / sizeof(unsigned long))
-
-agp_memory *agp_allocate_memory(size_t page_count, u32 type)
-{
-	int scratch_pages;
-	agp_memory *new;
-	int i;
-
-	if (agp_bridge.type == NOT_SUPPORTED)
-		return NULL;
-
-	if ((atomic_read(&agp_bridge.current_memory_agp) + page_count) >
-	    agp_bridge.max_memory_agp) {
-		return NULL;
-	}
-
-	if (type != 0) {
-		new = agp_bridge.alloc_by_type(page_count, type);
-		return new;
-	}
-      	/* We always increase the module count, since free auto-decrements
-	 * it
-	 */
-
-	MOD_INC_USE_COUNT;
-
-	scratch_pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;
-
-	new = agp_create_memory(scratch_pages);
-
-	if (new == NULL) {
-		MOD_DEC_USE_COUNT;
-		return NULL;
-	}
-
-	for (i = 0; i < page_count; i++) {
-		void *addr = agp_bridge.agp_alloc_page();
-
-		if (addr == NULL) {
-			/* Free this structure */
-			agp_free_memory(new);
-			return NULL;
-		}
-		new->memory[i] = agp_bridge.mask_memory(virt_to_phys(addr), type);
-		new->page_count++;
-	}
-
-	flush_agp_mappings();
-
-	return new;
-}
-
-/* End - Generic routines for handling agp_memory structures */
-
-static int agp_return_size(void)
-{
-	int current_size;
-	void *temp;
-
-	temp = agp_bridge.current_size;
-
-	switch (agp_bridge.size_type) {
-	case U8_APER_SIZE:
-		current_size = A_SIZE_8(temp)->size;
-		break;
-	case U16_APER_SIZE:
-		current_size = A_SIZE_16(temp)->size;
-		break;
-	case U32_APER_SIZE:
-		current_size = A_SIZE_32(temp)->size;
-		break;
-	case LVL2_APER_SIZE:
-		current_size = A_SIZE_LVL2(temp)->size;
-		break;
-	case FIXED_APER_SIZE:
-		current_size = A_SIZE_FIX(temp)->size;
-		break;
-	default:
-		current_size = 0;
-		break;
-	}
-
-	return current_size;
-}
-
-/* Routine to copy over information structure */
-
-void agp_copy_info(agp_kern_info * info)
-{
-	unsigned long page_mask = 0;
-	int i;
-
-	memset(info, 0, sizeof(agp_kern_info));
-	if (agp_bridge.type == NOT_SUPPORTED) {
-		info->chipset = agp_bridge.type;
-		return;
-	}
-	info->version.major = agp_bridge.version->major;
-	info->version.minor = agp_bridge.version->minor;
-	info->device = agp_bridge.dev;
-	info->chipset = agp_bridge.type;
-	info->mode = agp_bridge.mode;
-	info->aper_base = agp_bridge.gart_bus_addr;
-	info->aper_size = agp_return_size();
-	info->max_memory = agp_bridge.max_memory_agp;
-	info->current_memory = atomic_read(&agp_bridge.current_memory_agp);
-	info->cant_use_aperture = agp_bridge.cant_use_aperture;
-
-	for(i = 0; i < agp_bridge.num_of_masks; i++)
-		page_mask |= agp_bridge.mask_memory(page_mask, i);
-
-	info->page_mask = ~page_mask;
-}
-
-/* End - Routine to copy over information structure */
-
-/*
- * Routines for handling swapping of agp_memory into the GATT -
- * These routines take agp_memory and insert them into the GATT.
- * They call device specific routines to actually write to the GATT.
- */
-
-int agp_bind_memory(agp_memory * curr, off_t pg_start)
-{
-	int ret_val;
-
-	if ((agp_bridge.type == NOT_SUPPORTED) ||
-	    (curr == NULL) || (curr->is_bound == TRUE)) {
-		return -EINVAL;
-	}
-	if (curr->is_flushed == FALSE) {
-		CACHE_FLUSH();
-		curr->is_flushed = TRUE;
-	}
-	ret_val = agp_bridge.insert_memory(curr, pg_start, curr->type);
-
-	if (ret_val != 0)
-		return ret_val;
-
-	curr->is_bound = TRUE;
-	curr->pg_start = pg_start;
-	return 0;
-}
-
-int agp_unbind_memory(agp_memory * curr)
-{
-	int ret_val;
-
-	if ((agp_bridge.type == NOT_SUPPORTED) || (curr == NULL))
-		return -EINVAL;
-
-	if (curr->is_bound != TRUE)
-		return -EINVAL;
-
-	ret_val = agp_bridge.remove_memory(curr, curr->pg_start, curr->type);
-
-	if (ret_val != 0)
-		return ret_val;
-
-	curr->is_bound = FALSE;
-	curr->pg_start = 0;
-	return 0;
-}
-
-/* End - Routines for handling swapping of agp_memory into the GATT */
-
-/* 
- * Driver routines - start
- * Currently this module supports the following chipsets:
- * i810, i815, 440lx, 440bx, 440gx, i830, i840, i845, i850, i860, via vp3,
- * via mvp3, via kx133, via kt133, amd irongate, amd 761, amd 762, ALi M1541,
- * and generic support for the SiS chipsets.
- */
-
-/* Generic Agp routines - Start */
-
-void agp_generic_agp_enable(u32 mode)
-{
-	struct pci_dev *device = NULL;
-	u32 command, scratch; 
-	u8 cap_ptr;
-
-	pci_read_config_dword(agp_bridge.dev, agp_bridge.capndx + 4, &command);
-
-	/*
-	 * PASS1: go through all devices that claim to be
-	 *        AGP devices and collect their data.
-	 */
-
-
-	pci_for_each_dev(device) {
-		cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP);
-		if (cap_ptr != 0x00) {
-			/*
-			 * Ok, here we have an AGP device. Disable impossible
-			 * settings, and adjust the read queue to the minimum.
-			 */
-
-			pci_read_config_dword(device, cap_ptr + 4, &scratch);
-
-			/* adjust RQ depth */
-			command = ((command & ~0xff000000) |
-			     min_t(u32, (mode & 0xff000000),
-				 min_t(u32, (command & 0xff000000),
-				     (scratch & 0xff000000))));
-
-			/* disable SBA if it's not supported */
-			if (!((command & 0x00000200) &&
-			      (scratch & 0x00000200) &&
-			      (mode & 0x00000200)))
-				command &= ~0x00000200;
-
-			/* disable FW if it's not supported */
-			if (!((command & 0x00000010) &&
-			      (scratch & 0x00000010) &&
-			      (mode & 0x00000010)))
-				command &= ~0x00000010;
-
-			if (!((command & 4) &&
-			      (scratch & 4) &&
-			      (mode & 4)))
-				command &= ~0x00000004;
-
-			if (!((command & 2) &&
-			      (scratch & 2) &&
-			      (mode & 2)))
-				command &= ~0x00000002;
-
-			if (!((command & 1) &&
-			      (scratch & 1) &&
-			      (mode & 1)))
-				command &= ~0x00000001;
-		}
-	}
-	/*
-	 * PASS2: Figure out the 4X/2X/1X setting and enable the
-	 *        target (our motherboard chipset).
-	 */
-
-	if (command & 4)
-		command &= ~3;	/* 4X */
-
-	if (command & 2)
-		command &= ~5;	/* 2X */
-
-	if (command & 1)
-		command &= ~6;	/* 1X */
-
-	command |= 0x00000100;
-
-	pci_write_config_dword(agp_bridge.dev,
-			       agp_bridge.capndx + 8,
-			       command);
-
-	/*
-	 * PASS3: Go through all AGP devices and update the
-	 *        command registers.
-	 */
-
-	pci_for_each_dev(device) {
-		cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP);
-		if (cap_ptr != 0x00)
-			pci_write_config_dword(device, cap_ptr + 8, command);
-	}
-}
-
-int agp_generic_create_gatt_table(void)
-{
-	char *table;
-	char *table_end;
-	int size;
-	int page_order;
-	int num_entries;
-	int i;
-	void *temp;
-	struct page *page;
-
-	/* The generic routines can't handle two-level GATTs */
-	if (agp_bridge.size_type == LVL2_APER_SIZE) {
-		return -EINVAL;
-	}
-
-	table = NULL;
-	i = agp_bridge.aperture_size_idx;
-	temp = agp_bridge.current_size;
-	size = page_order = num_entries = 0;
-
-	if (agp_bridge.size_type != FIXED_APER_SIZE) {
-		do {
-			switch (agp_bridge.size_type) {
-			case U8_APER_SIZE:
-				size = A_SIZE_8(temp)->size;
-				page_order =
-				    A_SIZE_8(temp)->page_order;
-				num_entries =
-				    A_SIZE_8(temp)->num_entries;
-				break;
-			case U16_APER_SIZE:
-				size = A_SIZE_16(temp)->size;
-				page_order = A_SIZE_16(temp)->page_order;
-				num_entries = A_SIZE_16(temp)->num_entries;
-				break;
-			case U32_APER_SIZE:
-				size = A_SIZE_32(temp)->size;
-				page_order = A_SIZE_32(temp)->page_order;
-				num_entries = A_SIZE_32(temp)->num_entries;
-				break;
-				/* This case will never really happen. */
-			case FIXED_APER_SIZE:
-			case LVL2_APER_SIZE:
-			default:
-				size = page_order = num_entries = 0;
-				break;
-			}
-
-			table = (char *) __get_free_pages(GFP_KERNEL,
-							  page_order);
-
-			if (table == NULL) {
-				i++;
-				switch (agp_bridge.size_type) {
-				case U8_APER_SIZE:
-					agp_bridge.current_size = A_IDX8();
-					break;
-				case U16_APER_SIZE:
-					agp_bridge.current_size = A_IDX16();
-					break;
-				case U32_APER_SIZE:
-					agp_bridge.current_size = A_IDX32();
-					break;
-					/* This case will never really 
-					 * happen. 
-					 */
-				case FIXED_APER_SIZE:
-				case LVL2_APER_SIZE:
-				default:
-					agp_bridge.current_size =
-					    agp_bridge.current_size;
-					break;
-				}
-				temp = agp_bridge.current_size;	
-			} else {
-				agp_bridge.aperture_size_idx = i;
-			}
-		} while ((table == NULL) &&
-			 (i < agp_bridge.num_aperture_sizes));
-	} else {
-		size = ((struct aper_size_info_fixed *) temp)->size;
-		page_order = ((struct aper_size_info_fixed *) temp)->page_order;
-		num_entries = ((struct aper_size_info_fixed *) temp)->num_entries;
-		table = (char *) __get_free_pages(GFP_KERNEL, page_order);
-	}
-
-	if (table == NULL)
-		return -ENOMEM;
-
-	table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
-
-	for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
-		SetPageReserved(page);
-
-	agp_bridge.gatt_table_real = (unsigned long *) table;
-	CACHE_FLUSH();
-	agp_bridge.gatt_table = ioremap_nocache(virt_to_phys(table),
-					(PAGE_SIZE * (1 << page_order)));
-	CACHE_FLUSH();
-
-	if (agp_bridge.gatt_table == NULL) {
-		for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
-			ClearPageReserved(page);
-
-		free_pages((unsigned long) table, page_order);
-
-		return -ENOMEM;
-	}
-	agp_bridge.gatt_bus_addr = virt_to_phys(agp_bridge.gatt_table_real);
-
-	for (i = 0; i < num_entries; i++)
-		agp_bridge.gatt_table[i] = (unsigned long) agp_bridge.scratch_page;
-
-	return 0;
-}
-
-int agp_generic_suspend(void)
-{
-	return 0;
-}
-
-void agp_generic_resume(void)
-{
-	return;
-}
-
-int agp_generic_free_gatt_table(void)
-{
-	int page_order;
-	char *table, *table_end;
-	void *temp;
-	struct page *page;
-
-	temp = agp_bridge.current_size;
-
-	switch (agp_bridge.size_type) {
-	case U8_APER_SIZE:
-		page_order = A_SIZE_8(temp)->page_order;
-		break;
-	case U16_APER_SIZE:
-		page_order = A_SIZE_16(temp)->page_order;
-		break;
-	case U32_APER_SIZE:
-		page_order = A_SIZE_32(temp)->page_order;
-		break;
-	case FIXED_APER_SIZE:
-		page_order = A_SIZE_FIX(temp)->page_order;
-		break;
-	case LVL2_APER_SIZE:
-		/* The generic routines can't deal with two-level GATTs */
-		return -EINVAL;
-		break;
-	default:
-		page_order = 0;
-		break;
-	}
-
-	/* Do not worry about freeing memory, because if this is
-	 * called, then all agp memory is deallocated and removed
-	 * from the table.
-	 */
-
-	iounmap(agp_bridge.gatt_table);
-	table = (char *) agp_bridge.gatt_table_real;
-	table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
-
-	for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
-		ClearPageReserved(page);
-
-	free_pages((unsigned long) agp_bridge.gatt_table_real, page_order);
-	return 0;
-}
-
-int agp_generic_insert_memory(agp_memory * mem, off_t pg_start, int type)
-{
-	int i, j, num_entries;
-	void *temp;
-
-	temp = agp_bridge.current_size;
-
-	switch (agp_bridge.size_type) {
-	case U8_APER_SIZE:
-		num_entries = A_SIZE_8(temp)->num_entries;
-		break;
-	case U16_APER_SIZE:
-		num_entries = A_SIZE_16(temp)->num_entries;
-		break;
-	case U32_APER_SIZE:
-		num_entries = A_SIZE_32(temp)->num_entries;
-		break;
-	case FIXED_APER_SIZE:
-		num_entries = A_SIZE_FIX(temp)->num_entries;
-		break;
-	case LVL2_APER_SIZE:
-		/* The generic routines can't deal with two-level GATTs */
-		return -EINVAL;
-		break;
-	default:
-		num_entries = 0;
-		break;
-	}
-
-	if (type != 0 || mem->type != 0) {
-		/* The generic routines know nothing of memory types */
-		return -EINVAL;
-	}
-
-	if ((pg_start + mem->page_count) > num_entries)
-		return -EINVAL;
-
-	j = pg_start;
-
-	while (j < (pg_start + mem->page_count)) {
-		if (!PGE_EMPTY(agp_bridge.gatt_table[j])) {
-			return -EBUSY;
-		}
-		j++;
-	}
-
-	if (mem->is_flushed == FALSE) {
-		CACHE_FLUSH();
-		mem->is_flushed = TRUE;
-	}
-
-	for (i = 0, j = pg_start; i < mem->page_count; i++, j++)
-		agp_bridge.gatt_table[j] = mem->memory[i];
-
-	agp_bridge.tlb_flush(mem);
-	return 0;
-}
-
-int agp_generic_remove_memory(agp_memory * mem, off_t pg_start, int type)
-{
-	int i;
-
-	if (type != 0 || mem->type != 0) {
-		/* The generic routines know nothing of memory types */
-		return -EINVAL;
-	}
-	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
-		agp_bridge.gatt_table[i] =
-		    (unsigned long) agp_bridge.scratch_page;
-	}
-
-	agp_bridge.tlb_flush(mem);
-	return 0;
-}
-
-agp_memory *agp_generic_alloc_by_type(size_t page_count, int type)
-{
-	return NULL;
-}
-
-void agp_generic_free_by_type(agp_memory * curr)
-{
-	if (curr->memory != NULL)
-		vfree(curr->memory);
-
-	agp_free_key(curr->key);
-	kfree(curr);
-}
-
-/* 
- * Basic Page Allocation Routines -
- * These routines handle page allocation
- * and by default they reserve the allocated 
- * memory.  They also handle incrementing the
- * current_memory_agp value, which is checked
- * against a maximum value.
- */
-
-void *agp_generic_alloc_page(void)
-{
-	struct page * page;
-	
-	page = alloc_page(GFP_KERNEL);
-	if (page == NULL)
-		return 0;
-
-	map_page_into_agp(page);
-
-	get_page(page);
-	SetPageLocked(page);
-	atomic_inc(&agp_bridge.current_memory_agp);
-	return page_address(page);
-}
-
-void agp_generic_destroy_page(void *addr)
-{
-	struct page *page;
-
-	if (addr == NULL)
-		return;
-
-	page = virt_to_page(addr);
-	unmap_page_from_agp(page);
-	put_page(page);
-	unlock_page(page);
-	free_page((unsigned long)addr);
-	atomic_dec(&agp_bridge.current_memory_agp);
-}
-
-/* End Basic Page Allocation Routines */
-
-void agp_enable(u32 mode)
-{
-	if (agp_bridge.type == NOT_SUPPORTED)
-		return;
-	agp_bridge.agp_enable(mode);
-}
-
-/* End - Generic Agp routines */
-
-
-/* per-chipset initialization data.
- * note -- all chipsets for a single vendor MUST be grouped together
- */
-static struct {
-	unsigned short device_id; /* first, to make table easier to read */
-	unsigned short vendor_id;
-	enum chipset_type chipset;
-	const char *vendor_name;
-	const char *chipset_name;
-	int (*chipset_setup) (struct pci_dev *pdev);
-} agp_bridge_info[] __initdata = {
-
-#ifdef CONFIG_AGP_ALI
-	{
-		device_id:	PCI_DEVICE_ID_AL_M1541_0,
-		vendor_id:	PCI_VENDOR_ID_AL,
-		chipset:	ALI_M1541,
-		vendor_name:	"Ali",
-		chipset_name:	"M1541",
-		chipset_setup:	ali_generic_setup,
-	},
-	{
-		device_id:	PCI_DEVICE_ID_AL_M1621_0,
-		vendor_id:	PCI_VENDOR_ID_AL,
-		chipset:	ALI_M1621,
-		vendor_name:	"Ali",
-		chipset_name:	"M1621",
-		chipset_setup:	ali_generic_setup,
-	},
-	{
-		device_id:	PCI_DEVICE_ID_AL_M1631_0,
-		vendor_id:	PCI_VENDOR_ID_AL,
-		chipset:	ALI_M1631,
-		vendor_name:	"Ali",
-		chipset_name:	"M1631",
-		chipset_setup:	ali_generic_setup,
-	},
-	{
-		device_id:	PCI_DEVICE_ID_AL_M1632_0,
-		vendor_id:	PCI_VENDOR_ID_AL,
-		chipset:	ALI_M1632,
-		vendor_name:	"Ali",
-		chipset_name:	"M1632",
-		chipset_setup:	ali_generic_setup,
-	},
-	{
-		device_id:	PCI_DEVICE_ID_AL_M1641_0,
-		vendor_id:	PCI_VENDOR_ID_AL,
-		chipset:	ALI_M1641,
-		vendor_name:	"Ali",
-		chipset_name:	"M1641",
-		chipset_setup:	ali_generic_setup,
-	},
-	{
-		device_id:	PCI_DEVICE_ID_AL_M1644_0,
-		vendor_id:	PCI_VENDOR_ID_AL,
-		chipset:	ALI_M1644,
-		vendor_name:	"Ali",
-		chipset_name:	"M1644",
-		chipset_setup:	ali_generic_setup,
-	},
-	{
-		device_id:	PCI_DEVICE_ID_AL_M1647_0,
-		vendor_id:	PCI_VENDOR_ID_AL,
-		chipset:	ALI_M1647,
-		vendor_name:	"Ali",
-		chipset_name:	"M1647",
-		chipset_setup:	ali_generic_setup,
-	},
-	{
-		device_id:	PCI_DEVICE_ID_AL_M1651_0,
-		vendor_id:	PCI_VENDOR_ID_AL,
-		chipset:	ALI_M1651,
-		vendor_name:	"Ali",
-		chipset_name:	"M1651",
-		chipset_setup:	ali_generic_setup,
-	},
-	{
-		device_id:	0,
-		vendor_id:	PCI_VENDOR_ID_AL,
-		chipset:	ALI_GENERIC,
-		vendor_name:	"Ali",
-		chipset_name:	"Generic",
-		chipset_setup:	ali_generic_setup,
-	},
-#endif /* CONFIG_AGP_ALI */
-
-#ifdef CONFIG_AGP_AMD
-	{
-		device_id:	PCI_DEVICE_ID_AMD_IRONGATE_0,
-		vendor_id:	PCI_VENDOR_ID_AMD,
-		chipset:	AMD_IRONGATE,
-		vendor_name:	"AMD",
-		chipset_name:	"Irongate",
-		chipset_setup:	amd_irongate_setup,
-	},
-	{
-		device_id:	PCI_DEVICE_ID_AMD_761_0,
-		vendor_id:	PCI_VENDOR_ID_AMD,
-		chipset:	AMD_761,
-		vendor_name:	"AMD",
-		chipset_name:	"761",
-		chipset_setup:	amd_irongate_setup,
-	},
-	{
-		device_id:	PCI_DEVICE_ID_AMD_762_0,
-		vendor_id:	PCI_VENDOR_ID_AMD,
-		chipset:	AMD_762,
-		vendor_name:	"AMD",
-		chipset_name:	"760MP",
-		chipset_setup:	amd_irongate_setup,
-	},
-	{
-		device_id:	0,
-		vendor_id:	PCI_VENDOR_ID_AMD,
-		chipset:	AMD_GENERIC,
-		vendor_name:	"AMD",
-		chipset_name:	"Generic",
-		chipset_setup:	amd_irongate_setup,
-	},
-#endif /* CONFIG_AGP_AMD */
-
-#ifdef CONFIG_AGP_INTEL
-	{
-		device_id:	PCI_DEVICE_ID_INTEL_82443LX_0,
-		vendor_id:	PCI_VENDOR_ID_INTEL,
-		chipset:	INTEL_LX,
-		vendor_name:	"Intel",
-		chipset_name:	"440LX",
-		chipset_setup:	intel_generic_setup
-	},
-	{
-		device_id:	PCI_DEVICE_ID_INTEL_82443BX_0,
-		vendor_id:	PCI_VENDOR_ID_INTEL,
-		chipset:	INTEL_BX,
-		vendor_name:	"Intel",
-		chipset_name:	"440BX",
-		chipset_setup:	intel_generic_setup
-	},
-	{
-		device_id:	PCI_DEVICE_ID_INTEL_82443GX_0,
-		vendor_id:	PCI_VENDOR_ID_INTEL,
-		chipset:	INTEL_GX,
-		vendor_name:	"Intel",
-		chipset_name:	"440GX",
-		chipset_setup:	intel_generic_setup
-	},
-	{
-		device_id:	PCI_DEVICE_ID_INTEL_815_0,
-		vendor_id:	PCI_VENDOR_ID_INTEL,
-		chipset:	INTEL_I815,
-		vendor_name:	"Intel",
-		chipset_name:	"i815",
-		chipset_setup:	intel_815_setup
-	},
-	{
-		device_id:	PCI_DEVICE_ID_INTEL_820_0,
-		vendor_id:	PCI_VENDOR_ID_INTEL,
-		chipset:	INTEL_I820,
-		vendor_name:	"Intel",
-		chipset_name:	"i820",
-		chipset_setup:	intel_820_setup
-	},
-	{
-		device_id:	PCI_DEVICE_ID_INTEL_820_UP_0,
-		vendor_id:	PCI_VENDOR_ID_INTEL,
-		chipset:	INTEL_I820,
-		vendor_name:	"Intel",
-		chipset_name:	"i820",
-		chipset_setup:	intel_820_setup
-	},
-	{
-		device_id:	PCI_DEVICE_ID_INTEL_830_M_0,
-		vendor_id:	PCI_VENDOR_ID_INTEL,
-		chipset:	INTEL_I830_M,
-		vendor_name:	"Intel",
-		chipset_name:	"i830M",
-		chipset_setup:	intel_830mp_setup
-	},
-	{
-		device_id:	PCI_DEVICE_ID_INTEL_845_G_0,
-		vendor_id:	PCI_VENDOR_ID_INTEL,
-		chipset:	INTEL_I845_G,
-		vendor_name:	"Intel",
-		chipset_name:	"i845G",
-		chipset_setup:	intel_830mp_setup
-	},
-	{
-		device_id:	PCI_DEVICE_ID_INTEL_840_0,
-		vendor_id:	PCI_VENDOR_ID_INTEL,
-		chipset:	INTEL_I840,
-		vendor_name:	"Intel",
-		chipset_name:	"i840",
-		chipset_setup:	intel_840_setup
-	},
-	{
-		device_id:	PCI_DEVICE_ID_INTEL_845_0,
-		vendor_id:	PCI_VENDOR_ID_INTEL,
-		chipset:	INTEL_I845,
-		vendor_name:	"Intel",
-		chipset_name:	"i845",
-		chipset_setup:	intel_845_setup
-	},
-	{
-		device_id:	PCI_DEVICE_ID_INTEL_850_0,
-		vendor_id:	PCI_VENDOR_ID_INTEL,
-		chipset:	INTEL_I850,
-		vendor_name:	"Intel",
-		chipset_name:	"i850",
-		chipset_setup:	intel_850_setup
-	},
-	{
-		device_id:	PCI_DEVICE_ID_INTEL_860_0,
-		vendor_id:	PCI_VENDOR_ID_INTEL,
-		chipset:	INTEL_I860,
-		vendor_name:	"Intel",
-		chipset_name:	"i860",
-		chipset_setup:	intel_860_setup
-	},
-	{
-		device_id:	0,
-		vendor_id:	PCI_VENDOR_ID_INTEL,
-		chipset:	INTEL_GENERIC,
-		vendor_name:	"Intel",
-		chipset_name:	"Generic",
-		chipset_setup:	intel_generic_setup
-	},
-
-#endif /* CONFIG_AGP_INTEL */
-
-#ifdef CONFIG_AGP_SIS
-	{
-		device_id:	PCI_DEVICE_ID_SI_740,
-		vendor_id:	PCI_VENDOR_ID_SI,
-		chipset:	SIS_GENERIC,
-		vendor_name:	"SiS",
-		chipset_name:	"740",
-		chipset_setup:	sis_generic_setup
-	},
-	{
-		device_id:	PCI_DEVICE_ID_SI_650,
-		vendor_id:	PCI_VENDOR_ID_SI,
-		chipset:	SIS_GENERIC,
-		vendor_name:	"SiS",
-		chipset_name:	"650",
-		chipset_setup:	sis_generic_setup
-	},
-	{
-		device_id:	PCI_DEVICE_ID_SI_645,
-		vendor_id:	PCI_VENDOR_ID_SI,
-		chipset:	SIS_GENERIC,
-		vendor_name:	"SiS",
-		chipset_name:	"645",
-		chipset_setup:	sis_generic_setup
-	},
-	{
-		device_id:	PCI_DEVICE_ID_SI_735,
-		vendor_id:	PCI_VENDOR_ID_SI,
-		chipset:	SIS_GENERIC,
-		vendor_name:	"SiS",
-		chipset_name:	"735",
-		chipset_setup:	sis_generic_setup
-	},
-	{
-		device_id:	PCI_DEVICE_ID_SI_745,
-		vendor_id:	PCI_VENDOR_ID_SI,
-		chipset:	SIS_GENERIC,
-		vendor_name:	"SiS",
-		chipset_name:	"745",
-		chipset_setup:	sis_generic_setup
-	},
-	{
-		device_id:	PCI_DEVICE_ID_SI_730,
-		vendor_id:	PCI_VENDOR_ID_SI,
-		chipset:	SIS_GENERIC,
-		vendor_name:	"SiS",
-		chipset_name:	"730",
-		chipset_setup:	sis_generic_setup
-	},
-	{
-		device_id:	PCI_DEVICE_ID_SI_630,
-		vendor_id:	PCI_VENDOR_ID_SI,
-		chipset:	SIS_GENERIC,
-		vendor_name:	"SiS",
-		chipset_name:	"630",
-		chipset_setup:	sis_generic_setup
-	},
-	{
-		device_id:	PCI_DEVICE_ID_SI_540,
-		vendor_id:	PCI_VENDOR_ID_SI,
-		chipset:	SIS_GENERIC,
-		vendor_name:	"SiS",
-		chipset_name:	"540",
-		chipset_setup:	sis_generic_setup
-	},
-	{
-		device_id:	PCI_DEVICE_ID_SI_620,
-		vendor_id:	PCI_VENDOR_ID_SI,
-		chipset:	SIS_GENERIC,
-		vendor_name:	"SiS",
-		chipset_name:	"620",
-		chipset_setup:	sis_generic_setup
-	},
-	{
-		device_id:	PCI_DEVICE_ID_SI_530,
-		vendor_id:	PCI_VENDOR_ID_SI,
-		chipset:	SIS_GENERIC,
-		vendor_name:	"SiS",
-		chipset_name:	"530",
-		chipset_setup:	sis_generic_setup
-	},
-        {
-		device_id:	PCI_DEVICE_ID_SI_550,
-		vendor_id:	PCI_VENDOR_ID_SI,
-		chipset:	SIS_GENERIC,
-		vendor_name:	"SiS",
-		chipset_name:	"550",
-		chipset_setup:	sis_generic_setup
-	},
-	{
-		device_id:	0,
-		vendor_id:	PCI_VENDOR_ID_SI,
-		chipset:	SIS_GENERIC,
-		vendor_name:	"SiS",
-		chipset_name:	"Generic",
-		chipset_setup:	sis_generic_setup
-	},
-#endif /* CONFIG_AGP_SIS */
-
-#ifdef CONFIG_AGP_VIA
-	{
-		device_id:	PCI_DEVICE_ID_VIA_8501_0,
-		vendor_id:	PCI_VENDOR_ID_VIA,
-		chipset:	VIA_MVP4,
-		vendor_name:	"Via",
-		chipset_name:	"MVP4",
-		chipset_setup:	via_generic_setup
-	},
-	{
-		device_id:	PCI_DEVICE_ID_VIA_82C597_0,
-		vendor_id:	PCI_VENDOR_ID_VIA,
-		chipset:	VIA_VP3,
-		vendor_name:	"Via",
-		chipset_name:	"VP3",
-		chipset_setup:	via_generic_setup
-	},
-	{
-		device_id:	PCI_DEVICE_ID_VIA_82C598_0,
-		vendor_id:	PCI_VENDOR_ID_VIA,
-		chipset:	VIA_MVP3,
-		vendor_name:	"Via",
-		chipset_name:	"MVP3",
-		chipset_setup:	via_generic_setup
-	},
-	{
-		device_id:	PCI_DEVICE_ID_VIA_82C691_0,
-		vendor_id:	PCI_VENDOR_ID_VIA,
-		chipset:	VIA_APOLLO_PRO,
-		vendor_name:	"Via",
-		chipset_name:	"Apollo Pro",
-		chipset_setup:	via_generic_setup
-	},
-	{
-		device_id:	PCI_DEVICE_ID_VIA_8371_0,
-		vendor_id:	PCI_VENDOR_ID_VIA,
-		chipset:	VIA_APOLLO_KX133,
-		vendor_name:	"Via",
-		chipset_name:	"Apollo Pro KX133",
-		chipset_setup:	via_generic_setup
-	},
-	{
-		device_id:	PCI_DEVICE_ID_VIA_8363_0,
-		vendor_id:	PCI_VENDOR_ID_VIA,
-		chipset:	VIA_APOLLO_KT133,
-		vendor_name:	"Via",
-		chipset_name:	"Apollo Pro KT133",
-		chipset_setup:	via_generic_setup
-	},
-	{
-		device_id:	PCI_DEVICE_ID_VIA_8367_0,
-		vendor_id:	PCI_VENDOR_ID_VIA,
-		chipset:	VIA_APOLLO_KT133,
-		vendor_name:	"Via",
-		chipset_name:	"Apollo Pro KT266",
-		chipset_setup:	via_generic_setup
-	},
-	{
-		device_id:	0,
-		vendor_id:	PCI_VENDOR_ID_VIA,
-		chipset:	VIA_GENERIC,
-		vendor_name:	"Via",
-		chipset_name:	"Generic",
-		chipset_setup:	via_generic_setup
-	},
-#endif /* CONFIG_AGP_VIA */
-
-#ifdef CONFIG_AGP_HP_ZX1
-	{
-		device_id:	PCI_DEVICE_ID_HP_ZX1_LBA,
-		vendor_id:	PCI_VENDOR_ID_HP,
-		chipset:	HP_ZX1,
-		vendor_name:	"HP",
-		chipset_name:	"ZX1",
-		chipset_setup:	hp_zx1_setup
-	},
-#endif
-
-	{ }, /* dummy final entry, always present */
-};
-
-
-/* scan table above for supported devices */
-static int __init agp_lookup_host_bridge (struct pci_dev *pdev)
-{
-	int i;
-	
-	for (i = 0; i < ARRAY_SIZE (agp_bridge_info); i++)
-		if (pdev->vendor == agp_bridge_info[i].vendor_id)
-			break;
-
-	if (i >= ARRAY_SIZE (agp_bridge_info)) {
-		printk (KERN_DEBUG PFX "unsupported bridge\n");
-		return -ENODEV;
-	}
-
-	while ((i < ARRAY_SIZE (agp_bridge_info)) &&
-	       (agp_bridge_info[i].vendor_id == pdev->vendor)) {
-		if (pdev->device == agp_bridge_info[i].device_id) {
-#ifdef CONFIG_AGP_ALI
-			if (pdev->device == PCI_DEVICE_ID_AL_M1621_0) {
-				u8 hidden_1621_id;
-
-				pci_read_config_byte(pdev, 0xFB, &hidden_1621_id);
-				switch (hidden_1621_id) {
-				case 0x31:
-					agp_bridge_info[i].chipset_name="M1631";
-					break;
-				case 0x32:
-					agp_bridge_info[i].chipset_name="M1632";
-					break;
-				case 0x41:
-					agp_bridge_info[i].chipset_name="M1641";
-					break;
-				case 0x43:
-					break;
-				case 0x47:
-					agp_bridge_info[i].chipset_name="M1647";
-					break;
-				case 0x51:
-					agp_bridge_info[i].chipset_name="M1651";
-					break;
-				default:
-					break;
-				}
-			}
-#endif
-
-			printk (KERN_INFO PFX "Detected %s %s chipset\n",
-				agp_bridge_info[i].vendor_name,
-				agp_bridge_info[i].chipset_name);
-			agp_bridge.type = agp_bridge_info[i].chipset;
-			return agp_bridge_info[i].chipset_setup (pdev);
-		}
-		
-		i++;
-	}
-
-	i--; /* point to vendor generic entry (device_id == 0) */
-
-	/* try init anyway, if user requests it AND
-	 * there is a 'generic' bridge entry for this vendor */
-	if (agp_try_unsupported && agp_bridge_info[i].device_id == 0) {
-		printk(KERN_WARNING PFX "Trying generic %s routines"
-		       " for device id: %04x\n",
-		       agp_bridge_info[i].vendor_name, pdev->device);
-		agp_bridge.type = agp_bridge_info[i].chipset;
-		return agp_bridge_info[i].chipset_setup (pdev);
-	}
-
-	printk(KERN_ERR PFX "Unsupported %s chipset (device id: %04x),"
-	       " you might want to try agp_try_unsupported=1.\n",
-	       agp_bridge_info[i].vendor_name, pdev->device);
-	return -ENODEV;
-}
-
-
-/* Supported Device Scanning routine */
-
-static int __init agp_find_supported_device(struct pci_dev *dev)
-{
-	u8 cap_ptr = 0x00;
-
-	agp_bridge.dev = dev;
-
-	/* Need to test for I810 here */
-#ifdef CONFIG_AGP_I810
-	if (dev->vendor == PCI_VENDOR_ID_INTEL) {
-		struct pci_dev *i810_dev;
-
-		switch (dev->device) {
-		case PCI_DEVICE_ID_INTEL_810_0:
-			i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL,
-					       PCI_DEVICE_ID_INTEL_810_1,
-						   NULL);
-			if (i810_dev == NULL) {
-				printk(KERN_ERR PFX "Detected an Intel i810,"
-				       " but could not find the secondary"
-				       " device.\n");
-				return -ENODEV;
-			}
-			printk(KERN_INFO PFX "Detected an Intel "
-			       "i810 Chipset.\n");
-			agp_bridge.type = INTEL_I810;
-			return intel_i810_setup (i810_dev);
-
-		case PCI_DEVICE_ID_INTEL_810_DC100_0:
-			i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL,
-					 PCI_DEVICE_ID_INTEL_810_DC100_1,
-						   NULL);
-			if (i810_dev == NULL) {
-				printk(KERN_ERR PFX "Detected an Intel i810 "
-				       "DC100, but could not find the "
-				       "secondary device.\n");
-				return -ENODEV;
-			}
-			printk(KERN_INFO PFX "Detected an Intel i810 "
-			       "DC100 Chipset.\n");
-			agp_bridge.type = INTEL_I810;
-			return intel_i810_setup(i810_dev);
-
-		case PCI_DEVICE_ID_INTEL_810_E_0:
-			i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL,
-					     PCI_DEVICE_ID_INTEL_810_E_1,
-						   NULL);
-			if (i810_dev == NULL) {
-				printk(KERN_ERR PFX "Detected an Intel i810 E"
-				    ", but could not find the secondary "
-				       "device.\n");
-				return -ENODEV;
-			}
-			printk(KERN_INFO PFX "Detected an Intel i810 E "
-			       "Chipset.\n");
-			agp_bridge.type = INTEL_I810;
-			return intel_i810_setup(i810_dev);
-
-		 case PCI_DEVICE_ID_INTEL_815_0:
-		   /* The i815 can operate either as an i810 style
-		    * integrated device, or as an AGP4X motherboard.
-		    *
-		    * This only addresses the first mode:
-		    */
-			i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL,
-						   PCI_DEVICE_ID_INTEL_815_1,
-						   NULL);
-			if (i810_dev == NULL) {
-				printk(KERN_ERR PFX "agpgart: Detected an "
-					"Intel i815, but could not find the"
-					" secondary device. Assuming a "
-					"non-integrated video card.\n");
-				break;
-			}
-			printk(KERN_INFO PFX "agpgart: Detected an Intel i815 "
-				"Chipset.\n");
-			agp_bridge.type = INTEL_I810;
-			return intel_i810_setup(i810_dev);
-
-		case PCI_DEVICE_ID_INTEL_845_G_0:
-			i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL,
-					PCI_DEVICE_ID_INTEL_845_G_1, NULL);
-			if(i810_dev && PCI_FUNC(i810_dev->devfn) != 0) {
-				i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL,
-					PCI_DEVICE_ID_INTEL_845_G_1, i810_dev);
-			}
-
-			if (i810_dev == NULL) {
-				/* 
-				 * We probably have a I845MP chipset
-				 * with an external graphics
-				 * card. It will be initialized later 
-				 */
-				agp_bridge.type = INTEL_I845_G;
-				break;
-			}
-			printk(KERN_INFO PFX "Detected an Intel "
-				   "845G Chipset.\n");
-			agp_bridge.type = INTEL_I810;
-			return intel_i830_setup(i810_dev);
-		   
-		case PCI_DEVICE_ID_INTEL_830_M_0:
-			i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL,
-						   PCI_DEVICE_ID_INTEL_830_M_1,
-						   NULL);
-			if(i810_dev && PCI_FUNC(i810_dev->devfn) != 0) {
-				i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL,
-							   PCI_DEVICE_ID_INTEL_830_M_1,
-							   i810_dev);
-			}
-
-			if (i810_dev == NULL) {
-				/* Intel 830MP with external graphic card */
-				/* It will be initialized later */
-				agp_bridge.type = INTEL_I830_M;
-				break;
-			}
-			printk(KERN_INFO PFX "Detected an Intel "
-				   "830M Chipset.\n");
-			agp_bridge.type = INTEL_I810;
-			return intel_i830_setup(i810_dev);
-		default:
-			break;
-		}
-	}
-#endif /* CONFIG_AGP_I810 */
-
-#ifdef CONFIG_AGP_SWORKS
-	/* Everything is on func 1 here so we are hardcoding function one */
-	if (dev->vendor == PCI_VENDOR_ID_SERVERWORKS) {
-		struct pci_dev *bridge_dev;
-
-		bridge_dev = pci_find_slot ((unsigned int)dev->bus->number, 
-					    PCI_DEVFN(0, 1));
-		if(bridge_dev == NULL) {
-			printk(KERN_INFO PFX "agpgart: Detected a Serverworks "
-			       "Chipset, but could not find the secondary "
-			       "device.\n");
-			return -ENODEV;
-		}
-
-		switch (dev->device) {
-		case PCI_DEVICE_ID_SERVERWORKS_HE:
-			agp_bridge.type = SVWRKS_HE;
-			return serverworks_setup(bridge_dev);
-
-		case PCI_DEVICE_ID_SERVERWORKS_LE:
-		case 0x0007:
-			agp_bridge.type = SVWRKS_LE;
-			return serverworks_setup(bridge_dev);
-
-		default:
-			if(agp_try_unsupported) {
-				agp_bridge.type = SVWRKS_GENERIC;
-				return serverworks_setup(bridge_dev);
-			}
-			break;
-		}
-	}
-
-#endif /* CONFIG_AGP_SWORKS */
-
-#ifdef CONFIG_AGP_HP_ZX1
-	if (dev->vendor == PCI_VENDOR_ID_HP) {
-		/* ZX1 LBAs can be either PCI or AGP bridges */
-		if (pci_find_capability(dev, PCI_CAP_ID_AGP)) {
-			printk(KERN_INFO PFX "Detected HP ZX1 AGP "
-			       "chipset at %s\n", dev->slot_name);
-			agp_bridge.type = HP_ZX1;
-			agp_bridge.dev = dev;
-			return hp_zx1_setup(dev);
-		}
-		return -ENODEV;
-	}
-#endif /* CONFIG_AGP_HP_ZX1 */
-
-	/* find capndx */
-	cap_ptr = pci_find_capability(dev, PCI_CAP_ID_AGP);
-	if (cap_ptr == 0x00)
-		return -ENODEV;
-	agp_bridge.capndx = cap_ptr;
-
-	/* Fill in the mode register */
-	pci_read_config_dword(agp_bridge.dev,
-			      agp_bridge.capndx + 4,
-			      &agp_bridge.mode);
-
-	/* probe for known chipsets */
-	return agp_lookup_host_bridge (dev);
-}
-
-struct agp_max_table {
-	int mem;
-	int agp;
-};
-
-static struct agp_max_table maxes_table[9] __initdata =
-{
-	{0, 0},
-	{32, 4},
-	{64, 28},
-	{128, 96},
-	{256, 204},
-	{512, 440},
-	{1024, 942},
-	{2048, 1920},
-	{4096, 3932}
-};
-
-static int __init agp_find_max (void)
-{
-	long memory, index, result;
-
-	memory = virt_to_phys(high_memory) >> 20;
-	index = 1;
-
-	while ((memory > maxes_table[index].mem) &&
-	       (index < 8)) {
-		index++;
-	}
-
-	result = maxes_table[index - 1].agp +
-	   ( (memory - maxes_table[index - 1].mem)  *
-	     (maxes_table[index].agp - maxes_table[index - 1].agp)) /
-	   (maxes_table[index].mem - maxes_table[index - 1].mem);
-
-	printk(KERN_INFO PFX "Maximum main memory to use "
-	       "for agp memory: %ldM\n", result);
-	result = result << (20 - PAGE_SHIFT);
-        return result;
-}
-
-#define AGPGART_VERSION_MAJOR 0
-#define AGPGART_VERSION_MINOR 99
-
-static struct agp_version agp_current_version =
-{
-	major:	AGPGART_VERSION_MAJOR,
-	minor:	AGPGART_VERSION_MINOR,
-};
-
-static int __init agp_backend_initialize(struct pci_dev *dev)
-{
-	int size_value, rc, got_gatt=0, got_keylist=0;
-
-	memset(&agp_bridge, 0, sizeof(struct agp_bridge_data));
-	agp_bridge.type = NOT_SUPPORTED;
-	agp_bridge.max_memory_agp = agp_find_max();
-	agp_bridge.version = &agp_current_version;
-
-	rc = agp_find_supported_device(dev);
-	if (rc) {
-		/* not KERN_ERR because error msg should have already printed */
-		printk(KERN_DEBUG PFX "no supported devices found.\n");
-		return rc;
-	}
-
-	if (agp_bridge.needs_scratch_page == TRUE) {
-		void *addr;
-		addr = agp_bridge.agp_alloc_page();
-
-		if (addr == NULL) {
-			printk(KERN_ERR PFX "unable to get memory for "
-			       "scratch page.\n");
-			return -ENOMEM;
-		}
-		agp_bridge.scratch_page = virt_to_phys(addr);
-		agp_bridge.scratch_page =
-		    agp_bridge.mask_memory(agp_bridge.scratch_page, 0);
-	}
-
-	size_value = agp_bridge.fetch_size();
-
-	if (size_value == 0) {
-		printk(KERN_ERR PFX "unable to determine aperture size.\n");
-		rc = -EINVAL;
-		goto err_out;
-	}
-	if (agp_bridge.create_gatt_table()) {
-		printk(KERN_ERR PFX "unable to get memory for graphics "
-		       "translation table.\n");
-		rc = -ENOMEM;
-		goto err_out;
-	}
-	got_gatt = 1;
-	
-	agp_bridge.key_list = vmalloc(PAGE_SIZE * 4);
-	if (agp_bridge.key_list == NULL) {
-		printk(KERN_ERR PFX "error allocating memory for key lists.\n");
-		rc = -ENOMEM;
-		goto err_out;
-	}
-	got_keylist = 1;
-	
-	/* FIXME vmalloc'd memory not guaranteed contiguous */
-	memset(agp_bridge.key_list, 0, PAGE_SIZE * 4);
-
-	if (agp_bridge.configure()) {
-		printk(KERN_ERR PFX "error configuring host chipset.\n");
-		rc = -EINVAL;
-		goto err_out;
-	}
-
-	printk(KERN_INFO PFX "AGP aperture is %dM @ 0x%lx\n",
-	       size_value, agp_bridge.gart_bus_addr);
-
-	return 0;
-
-err_out:
-	if (agp_bridge.needs_scratch_page == TRUE) {
-		agp_bridge.scratch_page &= ~(0x00000fff);
-		agp_bridge.agp_destroy_page(phys_to_virt(agp_bridge.scratch_page));
-	}
-	if (got_gatt)
-		agp_bridge.free_gatt_table();
-	if (got_keylist)
-		vfree(agp_bridge.key_list);
-	return rc;
-}
-
-
-/* cannot be __exit because it could be called from __init code */
-static void agp_backend_cleanup(void)
-{
-	agp_bridge.cleanup();
-	agp_bridge.free_gatt_table();
-	vfree(agp_bridge.key_list);
-
-	if (agp_bridge.needs_scratch_page == TRUE) {
-		agp_bridge.scratch_page &= ~(0x00000fff);
-		agp_bridge.agp_destroy_page(phys_to_virt(agp_bridge.scratch_page));
-	}
-}
-
-static int agp_power(struct pm_dev *dev, pm_request_t rq, void *data)
-{
-	switch(rq)
-	{
-		case PM_SUSPEND:
-			return agp_bridge.suspend();
-		case PM_RESUME:
-			agp_bridge.resume();
-			return 0;
-	}		
-	return 0;
-}
-
-extern int agp_frontend_initialize(void);
-extern void agp_frontend_cleanup(void);
-
-static const drm_agp_t drm_agp = {
-	&agp_free_memory,
-	&agp_allocate_memory,
-	&agp_bind_memory,
-	&agp_unbind_memory,
-	&agp_enable,
-	&agp_backend_acquire,
-	&agp_backend_release,
-	&agp_copy_info
-};
-
-static int agp_probe (struct pci_dev *dev, const struct pci_device_id *ent)
-{
-	int ret_val;
-
-	if (agp_bridge.type != NOT_SUPPORTED) {
-		printk (KERN_DEBUG "Oops, don't init a 2nd agpgart device.\n");
-		return -ENODEV;
-	}
-
-	ret_val = agp_backend_initialize(dev);
-	if (ret_val) {
-		agp_bridge.type = NOT_SUPPORTED;
-		return ret_val;
-	}
-	ret_val = agp_frontend_initialize();
-	if (ret_val) {
-		agp_bridge.type = NOT_SUPPORTED;
-		agp_backend_cleanup();
-		return ret_val;
-	}
-
-	inter_module_register("drm_agp", THIS_MODULE, &drm_agp);
-	
-	pm_register(PM_PCI_DEV, PM_PCI_ID(agp_bridge.dev), agp_power);
-	return 0;
-}
-
-static struct pci_device_id agp_pci_table[] __initdata = {
-	{
-	class:		(PCI_CLASS_BRIDGE_HOST << 8),
-	class_mask:	~0,
-	vendor:		PCI_ANY_ID,
-	device:		PCI_ANY_ID,
-	subvendor:	PCI_ANY_ID,
-	subdevice:	PCI_ANY_ID,
-	},
-	{ }
-};
-
-MODULE_DEVICE_TABLE(pci, agp_pci_table);
-
-static struct pci_driver agp_pci_driver = {
-	name:		"agpgart",
-	id_table:	agp_pci_table,
-	probe:		agp_probe,
-};
-
-static int __init agp_init(void)
-{
-	int ret_val;
-
-	printk(KERN_INFO "Linux agpgart interface v%d.%d (c) Jeff Hartmann\n",
-	       AGPGART_VERSION_MAJOR, AGPGART_VERSION_MINOR);
-
-	ret_val = pci_module_init(&agp_pci_driver);
-	if (ret_val) {
-		agp_bridge.type = NOT_SUPPORTED;
-		return ret_val;
-	}
-	return 0;
-}
-
-static void __exit agp_cleanup(void)
-{
-	pci_unregister_driver(&agp_pci_driver);
-	if (agp_bridge.type != NOT_SUPPORTED) {
-		pm_unregister_all(agp_power);
-		agp_frontend_cleanup();
-		agp_backend_cleanup();
-		inter_module_unregister("drm_agp");
-	}
-}
-
-module_init(agp_init);
-module_exit(agp_cleanup);
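
The backend entry points gathered in the drm_agp table above (acquire/release,
allocate/free, bind/unbind, enable, copy_info) are the in-kernel interface this
file exports.  The fragment below is a minimal sketch, not part of this patch,
of how a client driver might drive that interface; the 16-page allocation, the
reuse of the reported mode word for agp_enable(), and the example_* name are
illustrative assumptions only.

#include <linux/agp_backend.h>

static agp_memory *example_bind_pages(void)
{
	agp_kern_info info;
	agp_memory *mem;

	if (agp_backend_acquire() != 0)		/* claim exclusive ownership */
		return NULL;

	agp_copy_info(&info);			/* aperture base/size, chipset, mode */
	agp_enable(info.mode);			/* program the AGP command registers */

	mem = agp_allocate_memory(16, 0);	/* 16 pages of default (type 0) memory */
	if (mem == NULL || agp_bind_memory(mem, 0) != 0) {
		if (mem != NULL)
			agp_free_memory(mem);
		agp_backend_release();
		return NULL;
	}
	return mem;				/* pages are now mapped in the GATT */
}
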
diff -Nru a/drivers/char/agp/agpgart_fe.c b/drivers/char/agp/agpgart_fe.c
--- a/drivers/char/agp/agpgart_fe.c	Wed Jul 17 11:29:31 2002
+++ /dev/null	Wed Dec 31 16:00:00 1969
@@ -1,1086 +0,0 @@
-/*
- * AGPGART module frontend version 0.99
- * Copyright (C) 1999 Jeff Hartmann
- * Copyright (C) 1999 Precision Insight, Inc.
- * Copyright (C) 1999 Xi Graphics, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, 
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE 
- * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/mman.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/miscdevice.h>
-#include <linux/agp_backend.h>
-#include <linux/agpgart.h>
-#include <asm/uaccess.h>
-
-#include "agp.h"
-
-static struct agp_front_data agp_fe;
-
-static agp_memory *agp_find_mem_by_key(int key)
-{
-	agp_memory *curr;
-
-	if (agp_fe.current_controller == NULL) {
-		return NULL;
-	}
-	curr = agp_fe.current_controller->pool;
-
-	while (curr != NULL) {
-		if (curr->key == key) {
-			return curr;
-		}
-		curr = curr->next;
-	}
-
-	return NULL;
-}
-
-static void agp_remove_from_pool(agp_memory * temp)
-{
-	agp_memory *prev;
-	agp_memory *next;
-
-	/* Check to see if this is even in the memory pool */
-
-	if (agp_find_mem_by_key(temp->key) != NULL) {
-		next = temp->next;
-		prev = temp->prev;
-
-		if (prev != NULL) {
-			prev->next = next;
-			if (next != NULL) {
-				next->prev = prev;
-			}
-		} else {
-			/* This is the first item on the list */
-			if (next != NULL) {
-				next->prev = NULL;
-			}
-			agp_fe.current_controller->pool = next;
-		}
-	}
-}
-
-/*
- * Routines for managing each client's segment list -
- * These routines handle adding and removing segments
- * to each auth'ed client.
- */
-
-static agp_segment_priv *agp_find_seg_in_client(const agp_client * client,
-						unsigned long offset,
-					    int size, pgprot_t page_prot)
-{
-	agp_segment_priv *seg;
-	int num_segments, pg_start, pg_count, i;
-
-	pg_start = offset / 4096;
-	pg_count = size / 4096;
-	seg = *(client->segments);
-	num_segments = client->num_segments;
-
-	for (i = 0; i < client->num_segments; i++) {
-		if ((seg[i].pg_start == pg_start) &&
-		    (seg[i].pg_count == pg_count) &&
-		    (pgprot_val(seg[i].prot) == pgprot_val(page_prot))) {
-			return seg + i;
-		}
-	}
-
-	return NULL;
-}
-
-static void agp_remove_seg_from_client(agp_client * client)
-{
-	if (client->segments != NULL) {
-		if (*(client->segments) != NULL) {
-			kfree(*(client->segments));
-		}
-		kfree(client->segments);
-	}
-}
-
-static void agp_add_seg_to_client(agp_client * client,
-			       agp_segment_priv ** seg, int num_segments)
-{
-	agp_segment_priv **prev_seg;
-
-	prev_seg = client->segments;
-
-	if (prev_seg != NULL) {
-		agp_remove_seg_from_client(client);
-	}
-	client->num_segments = num_segments;
-	client->segments = seg;
-}
-
-/* Originally taken from linux/mm/mmap.c from the array
- * protection_map.
- * The original really should be exported to modules, or 
- * some routine which does the conversion for you 
- */
-
-static const pgprot_t my_protect_map[16] =
-{
-	__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
-	__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
-};
-
-static pgprot_t agp_convert_mmap_flags(int prot)
-{
-#define _trans(x,bit1,bit2) \
-((bit1==bit2)?(x&bit1):(x&bit1)?bit2:0)
-
-	unsigned long prot_bits;
-	pgprot_t temp;
-
-	prot_bits = _trans(prot, PROT_READ, VM_READ) |
-	    _trans(prot, PROT_WRITE, VM_WRITE) |
-	    _trans(prot, PROT_EXEC, VM_EXEC);
-
-	prot_bits |= VM_SHARED;
-
-	temp = my_protect_map[prot_bits & 0x0000000f];
-
-	return temp;
-}
-
-static int agp_create_segment(agp_client * client, agp_region * region)
-{
-	agp_segment_priv **ret_seg;
-	agp_segment_priv *seg;
-	agp_segment *user_seg;
-	int i;
-
-	seg = kmalloc((sizeof(agp_segment_priv) * region->seg_count),
-		      GFP_KERNEL);
-	if (seg == NULL) {
-		kfree(region->seg_list);
-		return -ENOMEM;
-	}
-	memset(seg, 0, (sizeof(agp_segment_priv) * region->seg_count));
-	user_seg = region->seg_list;
-
-	for (i = 0; i < region->seg_count; i++) {
-		seg[i].pg_start = user_seg[i].pg_start;
-		seg[i].pg_count = user_seg[i].pg_count;
-		seg[i].prot = agp_convert_mmap_flags(user_seg[i].prot);
-	}
-	ret_seg = kmalloc(sizeof(void *), GFP_KERNEL);
-	if (ret_seg == NULL) {
-		kfree(region->seg_list);
-		kfree(seg);
-		return -ENOMEM;
-	}
-	*ret_seg = seg;
-	kfree(region->seg_list);
-	agp_add_seg_to_client(client, ret_seg, region->seg_count);
-	return 0;
-}
-
-/* End - Routines for managing each client's segment list */
-
-/* This function must only be called when current_controller != NULL */
-static void agp_insert_into_pool(agp_memory * temp)
-{
-	agp_memory *prev;
-
-	prev = agp_fe.current_controller->pool;
-
-	if (prev != NULL) {
-		prev->prev = temp;
-		temp->next = prev;
-	}
-	agp_fe.current_controller->pool = temp;
-}
-
-
-/* File private list routines */
-
-agp_file_private *agp_find_private(pid_t pid)
-{
-	agp_file_private *curr;
-
-	curr = agp_fe.file_priv_list;
-
-	while (curr != NULL) {
-		if (curr->my_pid == pid) {
-			return curr;
-		}
-		curr = curr->next;
-	}
-
-	return NULL;
-}
-
-void agp_insert_file_private(agp_file_private * priv)
-{
-	agp_file_private *prev;
-
-	prev = agp_fe.file_priv_list;
-
-	if (prev != NULL) {
-		prev->prev = priv;
-	}
-	priv->next = prev;
-	agp_fe.file_priv_list = priv;
-}
-
-void agp_remove_file_private(agp_file_private * priv)
-{
-	agp_file_private *next;
-	agp_file_private *prev;
-
-	next = priv->next;
-	prev = priv->prev;
-
-	if (prev != NULL) {
-		prev->next = next;
-
-		if (next != NULL) {
-			next->prev = prev;
-		}
-	} else {
-		if (next != NULL) {
-			next->prev = NULL;
-		}
-		agp_fe.file_priv_list = next;
-	}
-}
-
-/* End - File private list routines */
-
-/* 
- * Wrappers for agp_free_memory & agp_allocate_memory 
- * These make sure that internal lists are kept updated.
- */
-static void agp_free_memory_wrap(agp_memory * memory)
-{
-	agp_remove_from_pool(memory);
-	agp_free_memory(memory);
-}
-
-static agp_memory *agp_allocate_memory_wrap(size_t pg_count, u32 type)
-{
-	agp_memory *memory;
-
-	memory = agp_allocate_memory(pg_count, type);
-   	printk(KERN_DEBUG "agp_allocate_memory: %p\n", memory);
-	if (memory == NULL) {
-		return NULL;
-	}
-	agp_insert_into_pool(memory);
-	return memory;
-}
-
-/* Routines for managing the list of controllers -
- * These routines manage the current controller, and the list of
- * controllers
- */
-
-static agp_controller *agp_find_controller_by_pid(pid_t id)
-{
-	agp_controller *controller;
-
-	controller = agp_fe.controllers;
-
-	while (controller != NULL) {
-		if (controller->pid == id) {
-			return controller;
-		}
-		controller = controller->next;
-	}
-
-	return NULL;
-}
-
-static agp_controller *agp_create_controller(pid_t id)
-{
-	agp_controller *controller;
-
-	controller = kmalloc(sizeof(agp_controller), GFP_KERNEL);
-
-	if (controller == NULL) {
-		return NULL;
-	}
-	memset(controller, 0, sizeof(agp_controller));
-	controller->pid = id;
-
-	return controller;
-}
-
-static int agp_insert_controller(agp_controller * controller)
-{
-	agp_controller *prev_controller;
-
-	prev_controller = agp_fe.controllers;
-	controller->next = prev_controller;
-
-	if (prev_controller != NULL) {
-		prev_controller->prev = controller;
-	}
-	agp_fe.controllers = controller;
-
-	return 0;
-}
-
-static void agp_remove_all_clients(agp_controller * controller)
-{
-	agp_client *client;
-	agp_client *temp;
-
-	client = controller->clients;
-
-	while (client) {
-		agp_file_private *priv;
-
-		temp = client;
-		agp_remove_seg_from_client(temp);
-		priv = agp_find_private(temp->pid);
-
-		if (priv != NULL) {
-			clear_bit(AGP_FF_IS_VALID, &priv->access_flags);
-			clear_bit(AGP_FF_IS_CLIENT, &priv->access_flags);
-		}
-		client = client->next;
-		kfree(temp);
-	}
-}
-
-static void agp_remove_all_memory(agp_controller * controller)
-{
-	agp_memory *memory;
-	agp_memory *temp;
-
-	memory = controller->pool;
-
-	while (memory) {
-		temp = memory;
-		memory = memory->next;
-		agp_free_memory_wrap(temp);
-	}
-}
-
-static int agp_remove_controller(agp_controller * controller)
-{
-	agp_controller *prev_controller;
-	agp_controller *next_controller;
-
-	prev_controller = controller->prev;
-	next_controller = controller->next;
-
-	if (prev_controller != NULL) {
-		prev_controller->next = next_controller;
-		if (next_controller != NULL) {
-			next_controller->prev = prev_controller;
-		}
-	} else {
-		if (next_controller != NULL) {
-			next_controller->prev = NULL;
-		}
-		agp_fe.controllers = next_controller;
-	}
-
-	agp_remove_all_memory(controller);
-	agp_remove_all_clients(controller);
-
-	if (agp_fe.current_controller == controller) {
-		agp_fe.current_controller = NULL;
-		agp_fe.backend_acquired = FALSE;
-		agp_backend_release();
-	}
-	kfree(controller);
-	return 0;
-}
-
-static void agp_controller_make_current(agp_controller * controller)
-{
-	agp_client *clients;
-
-	clients = controller->clients;
-
-	while (clients != NULL) {
-		agp_file_private *priv;
-
-		priv = agp_find_private(clients->pid);
-
-		if (priv != NULL) {
-			set_bit(AGP_FF_IS_VALID, &priv->access_flags);
-			set_bit(AGP_FF_IS_CLIENT, &priv->access_flags);
-		}
-		clients = clients->next;
-	}
-
-	agp_fe.current_controller = controller;
-}
-
-static void agp_controller_release_current(agp_controller * controller,
-				      agp_file_private * controller_priv)
-{
-	agp_client *clients;
-
-	clear_bit(AGP_FF_IS_VALID, &controller_priv->access_flags);
-	clients = controller->clients;
-
-	while (clients != NULL) {
-		agp_file_private *priv;
-
-		priv = agp_find_private(clients->pid);
-
-		if (priv != NULL) {
-			clear_bit(AGP_FF_IS_VALID, &priv->access_flags);
-		}
-		clients = clients->next;
-	}
-
-	agp_fe.current_controller = NULL;
-	agp_fe.used_by_controller = FALSE;
-	agp_backend_release();
-}
-
-/* 
- * Routines for managing client lists -
- * These routines are for managing the list of auth'ed clients.
- */
-
-static agp_client *agp_find_client_in_controller(agp_controller * controller,
-						 pid_t id)
-{
-	agp_client *client;
-
-	if (controller == NULL) {
-		return NULL;
-	}
-	client = controller->clients;
-
-	while (client != NULL) {
-		if (client->pid == id) {
-			return client;
-		}
-		client = client->next;
-	}
-
-	return NULL;
-}
-
-static agp_controller *agp_find_controller_for_client(pid_t id)
-{
-	agp_controller *controller;
-
-	controller = agp_fe.controllers;
-
-	while (controller != NULL) {
-		if ((agp_find_client_in_controller(controller, id)) != NULL) {
-			return controller;
-		}
-		controller = controller->next;
-	}
-
-	return NULL;
-}
-
-static agp_client *agp_find_client_by_pid(pid_t id)
-{
-	agp_client *temp;
-
-	if (agp_fe.current_controller == NULL) {
-		return NULL;
-	}
-	temp = agp_find_client_in_controller(agp_fe.current_controller, id);
-	return temp;
-}
-
-static void agp_insert_client(agp_client * client)
-{
-	agp_client *prev_client;
-
-	prev_client = agp_fe.current_controller->clients;
-	client->next = prev_client;
-
-	if (prev_client != NULL) {
-		prev_client->prev = client;
-	}
-	agp_fe.current_controller->clients = client;
-	agp_fe.current_controller->num_clients++;
-}
-
-static agp_client *agp_create_client(pid_t id)
-{
-	agp_client *new_client;
-
-	new_client = kmalloc(sizeof(agp_client), GFP_KERNEL);
-
-	if (new_client == NULL) {
-		return NULL;
-	}
-	memset(new_client, 0, sizeof(agp_client));
-	new_client->pid = id;
-	agp_insert_client(new_client);
-	return new_client;
-}
-
-static int agp_remove_client(pid_t id)
-{
-	agp_client *client;
-	agp_client *prev_client;
-	agp_client *next_client;
-	agp_controller *controller;
-
-	controller = agp_find_controller_for_client(id);
-
-	if (controller == NULL) {
-		return -EINVAL;
-	}
-	client = agp_find_client_in_controller(controller, id);
-
-	if (client == NULL) {
-		return -EINVAL;
-	}
-	prev_client = client->prev;
-	next_client = client->next;
-
-	if (prev_client != NULL) {
-		prev_client->next = next_client;
-		if (next_client != NULL) {
-			next_client->prev = prev_client;
-		}
-	} else {
-		if (next_client != NULL) {
-			next_client->prev = NULL;
-		}
-		controller->clients = next_client;
-	}
-
-	controller->num_clients--;
-	agp_remove_seg_from_client(client);
-	kfree(client);
-	return 0;
-}
-
-/* End - Routines for managing client lists */
-
-/* File Operations */
-
-static int agp_mmap(struct file *file, struct vm_area_struct *vma)
-{
-	int size;
-	int current_size;
-	unsigned long offset;
-	agp_client *client;
-	agp_file_private *priv = (agp_file_private *) file->private_data;
-	agp_kern_info kerninfo;
-
-	AGP_LOCK();
-
-	if (agp_fe.backend_acquired != TRUE) {
-		AGP_UNLOCK();
-		return -EPERM;
-	}
-	if (!(test_bit(AGP_FF_IS_VALID, &priv->access_flags))) {
-		AGP_UNLOCK();
-		return -EPERM;
-	}
-	agp_copy_info(&kerninfo);
-	size = vma->vm_end - vma->vm_start;
-	current_size = kerninfo.aper_size;
-	current_size = current_size * 0x100000;
-	offset = vma->vm_pgoff << PAGE_SHIFT;
-
-	if (test_bit(AGP_FF_IS_CLIENT, &priv->access_flags)) {
-		if ((size + offset) > current_size) {
-			AGP_UNLOCK();
-			return -EINVAL;
-		}
-		client = agp_find_client_by_pid(current->pid);
-
-		if (client == NULL) {
-			AGP_UNLOCK();
-			return -EPERM;
-		}
-		if (!agp_find_seg_in_client(client, offset,
-					    size, vma->vm_page_prot)) {
-			AGP_UNLOCK();
-			return -EINVAL;
-		}
-		if (remap_page_range(vma, vma->vm_start,
-				     (kerninfo.aper_base + offset),
-				     size, vma->vm_page_prot)) {
-			AGP_UNLOCK();
-			return -EAGAIN;
-		}
-		AGP_UNLOCK();
-		return 0;
-	}
-	if (test_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags)) {
-		if (size != current_size) {
-			AGP_UNLOCK();
-			return -EINVAL;
-		}
-		if (remap_page_range(vma, vma->vm_start, kerninfo.aper_base,
-				     size, vma->vm_page_prot)) {
-			AGP_UNLOCK();
-			return -EAGAIN;
-		}
-		AGP_UNLOCK();
-		return 0;
-	}
-	AGP_UNLOCK();
-	return -EPERM;
-}
-
-static int agp_release(struct inode *inode, struct file *file)
-{
-	agp_file_private *priv = (agp_file_private *) file->private_data;
-
-	AGP_LOCK();
-
-	if (test_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags)) {
-		agp_controller *controller;
-
-		controller = agp_find_controller_by_pid(priv->my_pid);
-
-		if (controller != NULL) {
-			if (controller == agp_fe.current_controller) {
-				agp_controller_release_current(controller,
-							       priv);
-			}
-			agp_remove_controller(controller);
-		}
-	}
-	if (test_bit(AGP_FF_IS_CLIENT, &priv->access_flags)) {
-		agp_remove_client(priv->my_pid);
-	}
-	agp_remove_file_private(priv);
-	kfree(priv);
-	AGP_UNLOCK();
-	return 0;
-}
-
-static int agp_open(struct inode *inode, struct file *file)
-{
-	int minor = minor(inode->i_rdev);
-	agp_file_private *priv;
-	agp_client *client;
-	int rc = -ENXIO;
-
-	AGP_LOCK();
-
-	if (minor != AGPGART_MINOR)
-		goto err_out;
-
-	priv = kmalloc(sizeof(agp_file_private), GFP_KERNEL);
-	if (priv == NULL)
-		goto err_out_nomem;
-
-	memset(priv, 0, sizeof(agp_file_private));
-	set_bit(AGP_FF_ALLOW_CLIENT, &priv->access_flags);
-	priv->my_pid = current->pid;
-
-	if ((current->uid == 0) || (current->suid == 0)) {
-		/* Root priv, can be controller */
-		set_bit(AGP_FF_ALLOW_CONTROLLER, &priv->access_flags);
-	}
-	client = agp_find_client_by_pid(current->pid);
-
-	if (client != NULL) {
-		set_bit(AGP_FF_IS_CLIENT, &priv->access_flags);
-		set_bit(AGP_FF_IS_VALID, &priv->access_flags);
-	}
-	file->private_data = (void *) priv;
-	agp_insert_file_private(priv);
-	AGP_UNLOCK();
-	return 0;
-
-err_out_nomem:
-	rc = -ENOMEM;
-err_out:
-	AGP_UNLOCK();
-	return rc;
-}
-
-
-static ssize_t agp_read(struct file *file, char *buf,
-			size_t count, loff_t * ppos)
-{
-	return -EINVAL;
-}
-
-static ssize_t agp_write(struct file *file, const char *buf,
-			 size_t count, loff_t * ppos)
-{
-	return -EINVAL;
-}
-
-static int agpioc_info_wrap(agp_file_private * priv, unsigned long arg)
-{
-	agp_info userinfo;
-	agp_kern_info kerninfo;
-
-	agp_copy_info(&kerninfo);
-
-	userinfo.version.major = kerninfo.version.major;
-	userinfo.version.minor = kerninfo.version.minor;
-	userinfo.bridge_id = kerninfo.device->vendor |
-	    (kerninfo.device->device << 16);
-	userinfo.agp_mode = kerninfo.mode;
-	userinfo.aper_base = kerninfo.aper_base;
-	userinfo.aper_size = kerninfo.aper_size;
-	userinfo.pg_total = userinfo.pg_system = kerninfo.max_memory;
-	userinfo.pg_used = kerninfo.current_memory;
-
-	if (copy_to_user((void *) arg, &userinfo, sizeof(agp_info))) {
-		return -EFAULT;
-	}
-	return 0;
-}
-
-static int agpioc_acquire_wrap(agp_file_private * priv, unsigned long arg)
-{
-	agp_controller *controller;
-	if (!(test_bit(AGP_FF_ALLOW_CONTROLLER, &priv->access_flags))) {
-		return -EPERM;
-	}
-	if (agp_fe.current_controller != NULL) {
-		return -EBUSY;
-	}
-	if ((agp_backend_acquire()) == 0) {
-		agp_fe.backend_acquired = TRUE;
-	} else {
-		return -EBUSY;
-	}
-
-	controller = agp_find_controller_by_pid(priv->my_pid);
-
-	if (controller != NULL) {
-		agp_controller_make_current(controller);
-	} else {
-		controller = agp_create_controller(priv->my_pid);
-
-		if (controller == NULL) {
-			agp_fe.backend_acquired = FALSE;
-			agp_backend_release();
-			return -ENOMEM;
-		}
-		agp_insert_controller(controller);
-		agp_controller_make_current(controller);
-	}
-
-	set_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags);
-	set_bit(AGP_FF_IS_VALID, &priv->access_flags);
-	return 0;
-}
-
-static int agpioc_release_wrap(agp_file_private * priv, unsigned long arg)
-{
-	agp_controller_release_current(agp_fe.current_controller, priv);
-	return 0;
-}
-
-static int agpioc_setup_wrap(agp_file_private * priv, unsigned long arg)
-{
-	agp_setup mode;
-
-	if (copy_from_user(&mode, (void *) arg, sizeof(agp_setup))) {
-		return -EFAULT;
-	}
-	agp_enable(mode.agp_mode);
-	return 0;
-}
-
-static int agpioc_reserve_wrap(agp_file_private * priv, unsigned long arg)
-{
-	agp_region reserve;
-	agp_client *client;
-	agp_file_private *client_priv;
-
-
-	if (copy_from_user(&reserve, (void *) arg, sizeof(agp_region))) {
-		return -EFAULT;
-	}
-	if ((unsigned) reserve.seg_count >= ~0U/sizeof(agp_segment))
-		return -EFAULT;
-
-	client = agp_find_client_by_pid(reserve.pid);
-
-	if (reserve.seg_count == 0) {
-		/* remove a client */
-		client_priv = agp_find_private(reserve.pid);
-
-		if (client_priv != NULL) {
-			set_bit(AGP_FF_IS_CLIENT,
-				&client_priv->access_flags);
-			set_bit(AGP_FF_IS_VALID,
-				&client_priv->access_flags);
-		}
-		if (client == NULL) {
-			/* client is already removed */
-			return 0;
-		}
-		return agp_remove_client(reserve.pid);
-	} else {
-		agp_segment *segment;
-
-		if (reserve.seg_count >= 16384)
-			return -EINVAL;
-			
-		segment = kmalloc((sizeof(agp_segment) * reserve.seg_count),
-				  GFP_KERNEL);
-
-		if (segment == NULL) {
-			return -ENOMEM;
-		}
-		if (copy_from_user(segment, (void *) reserve.seg_list,
-				   sizeof(agp_segment) * reserve.seg_count)) {
-			kfree(segment);
-			return -EFAULT;
-		}
-		reserve.seg_list = segment;
-
-		if (client == NULL) {
-			/* Create the client and add the segment */
-			client = agp_create_client(reserve.pid);
-
-			if (client == NULL) {
-				kfree(segment);
-				return -ENOMEM;
-			}
-			client_priv = agp_find_private(reserve.pid);
-
-			if (client_priv != NULL) {
-				set_bit(AGP_FF_IS_CLIENT,
-					&client_priv->access_flags);
-				set_bit(AGP_FF_IS_VALID,
-					&client_priv->access_flags);
-			}
-			return agp_create_segment(client, &reserve);
-		} else {
-			return agp_create_segment(client, &reserve);
-		}
-	}
-	/* Will never really happen */
-	return -EINVAL;
-}
-
-static int agpioc_protect_wrap(agp_file_private * priv, unsigned long arg)
-{
-	/* This function is not currently implemented */
-	return -EINVAL;
-}
-
-static int agpioc_allocate_wrap(agp_file_private * priv, unsigned long arg)
-{
-	agp_memory *memory;
-	agp_allocate alloc;
-
-	if (copy_from_user(&alloc, (void *) arg, sizeof(agp_allocate))) {
-		return -EFAULT;
-	}
-	memory = agp_allocate_memory_wrap(alloc.pg_count, alloc.type);
-
-	if (memory == NULL) {
-		return -ENOMEM;
-	}
-	alloc.key = memory->key;
-	alloc.physical = memory->physical;
-
-	if (copy_to_user((void *) arg, &alloc, sizeof(agp_allocate))) {
-		agp_free_memory_wrap(memory);
-		return -EFAULT;
-	}
-	return 0;
-}
-
-static int agpioc_deallocate_wrap(agp_file_private * priv, unsigned long arg)
-{
-	agp_memory *memory;
-
-	memory = agp_find_mem_by_key((int) arg);
-
-	if (memory == NULL) {
-		return -EINVAL;
-	}
-	agp_free_memory_wrap(memory);
-	return 0;
-}
-
-static int agpioc_bind_wrap(agp_file_private * priv, unsigned long arg)
-{
-	agp_bind bind_info;
-	agp_memory *memory;
-
-	if (copy_from_user(&bind_info, (void *) arg, sizeof(agp_bind))) {
-		return -EFAULT;
-	}
-	memory = agp_find_mem_by_key(bind_info.key);
-
-	if (memory == NULL) {
-		return -EINVAL;
-	}
-	return agp_bind_memory(memory, bind_info.pg_start);
-}
-
-static int agpioc_unbind_wrap(agp_file_private * priv, unsigned long arg)
-{
-	agp_memory *memory;
-	agp_unbind unbind;
-
-	if (copy_from_user(&unbind, (void *) arg, sizeof(agp_unbind))) {
-		return -EFAULT;
-	}
-	memory = agp_find_mem_by_key(unbind.key);
-
-	if (memory == NULL) {
-		return -EINVAL;
-	}
-	return agp_unbind_memory(memory);
-}
-
-static int agp_ioctl(struct inode *inode, struct file *file,
-		     unsigned int cmd, unsigned long arg)
-{
-	agp_file_private *curr_priv = (agp_file_private *) file->private_data;
-	int ret_val = -ENOTTY;
-
-	AGP_LOCK();
-
-	if ((agp_fe.current_controller == NULL) &&
-	    (cmd != AGPIOC_ACQUIRE)) {
-		ret_val = -EINVAL;
-	   	goto ioctl_out;
-	}
-	if ((agp_fe.backend_acquired != TRUE) &&
-	    (cmd != AGPIOC_ACQUIRE)) {
-		ret_val = -EBUSY;
-	   	goto ioctl_out;
-	}
-	if (cmd != AGPIOC_ACQUIRE) {
-		if (!(test_bit(AGP_FF_IS_CONTROLLER,
-			       &curr_priv->access_flags))) {
-			ret_val = -EPERM;
-		   	goto ioctl_out;
-		}
-		/* Use the original pid of the controller,
-		 * in case it's threaded */
-
-		if (agp_fe.current_controller->pid != curr_priv->my_pid) {
-			ret_val = -EBUSY;
-		   	goto ioctl_out;
-		}
-	}
-	switch (cmd) {
-	case AGPIOC_INFO:
-		{
-			ret_val = agpioc_info_wrap(curr_priv, arg);
-		   	goto ioctl_out;
-		}
-	case AGPIOC_ACQUIRE:
-		{
-			ret_val = agpioc_acquire_wrap(curr_priv, arg);
-		   	goto ioctl_out;
-		}
-	case AGPIOC_RELEASE:
-		{
-			ret_val = agpioc_release_wrap(curr_priv, arg);
-		   	goto ioctl_out;
-		}
-	case AGPIOC_SETUP:
-		{
-			ret_val = agpioc_setup_wrap(curr_priv, arg);
-		   	goto ioctl_out;
-		}
-	case AGPIOC_RESERVE:
-		{
-			ret_val = agpioc_reserve_wrap(curr_priv, arg);
-		   	goto ioctl_out;
-		}
-	case AGPIOC_PROTECT:
-		{
-			ret_val = agpioc_protect_wrap(curr_priv, arg);
-		   	goto ioctl_out;
-		}
-	case AGPIOC_ALLOCATE:
-		{
-			ret_val = agpioc_allocate_wrap(curr_priv, arg);
-		   	goto ioctl_out;
-		}
-	case AGPIOC_DEALLOCATE:
-		{
-			ret_val = agpioc_deallocate_wrap(curr_priv, arg);
-		   	goto ioctl_out;
-		}
-	case AGPIOC_BIND:
-		{
-			ret_val = agpioc_bind_wrap(curr_priv, arg);
-		   	goto ioctl_out;
-		}
-	case AGPIOC_UNBIND:
-		{
-			ret_val = agpioc_unbind_wrap(curr_priv, arg);
-		   	goto ioctl_out;
-		}
-	}
-   
-ioctl_out:
-	AGP_UNLOCK();
-	return ret_val;
-}
-
-static struct file_operations agp_fops =
-{
-	owner:		THIS_MODULE,
-	llseek:		no_llseek,
-	read:		agp_read,
-	write:		agp_write,
-	ioctl:		agp_ioctl,
-	mmap:		agp_mmap,
-	open:		agp_open,
-	release:	agp_release,
-};
-
-static struct miscdevice agp_miscdev =
-{
-	AGPGART_MINOR,
-	AGPGART_MODULE_NAME,
-	&agp_fops
-};
-
-int __init agp_frontend_initialize(void)
-{
-	memset(&agp_fe, 0, sizeof(struct agp_front_data));
-	AGP_LOCK_INIT();
-
-	if (misc_register(&agp_miscdev)) {
-		printk(KERN_ERR PFX "unable to get minor: %d\n", AGPGART_MINOR);
-		return -EIO;
-	}
-	return 0;
-}
-
-void __exit agp_frontend_cleanup(void)
-{
-	misc_deregister(&agp_miscdev);
-}
-
diff -Nru a/drivers/char/agp/ali.c b/drivers/char/agp/ali.c
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/drivers/char/agp/ali.c	Wed Jul 17 11:29:31 2002
@@ -0,0 +1,265 @@
+/*
+ * AGPGART module version 0.99
+ * Copyright (C) 1999 Jeff Hartmann
+ * Copyright (C) 1999 Precision Insight, Inc.
+ * Copyright (C) 1999 Xi Graphics, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, 
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE 
+ * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * TODO: 
+ * - Allocate more than order 0 pages to avoid too much linear map splitting.
+ */
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/agp_backend.h>
+#include "agp.h"
+
+static int ali_fetch_size(void)
+{
+	int i;
+	u32 temp;
+	struct aper_size_info_32 *values;
+
+	pci_read_config_dword(agp_bridge.dev, ALI_ATTBASE, &temp);
+	temp &= ~(0xfffffff0);
+	values = A_SIZE_32(agp_bridge.aperture_sizes);
+
+	for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
+		if (temp == values[i].size_value) {
+			agp_bridge.previous_size =
+			    agp_bridge.current_size = (void *) (values + i);
+			agp_bridge.aperture_size_idx = i;
+			return values[i].size;
+		}
+	}
+
+	return 0;
+}
+
+static void ali_tlbflush(agp_memory * mem)
+{
+	u32 temp;
+
+	pci_read_config_dword(agp_bridge.dev, ALI_TLBCTRL, &temp);
+// clear tag
+	pci_write_config_dword(agp_bridge.dev, ALI_TAGCTRL,
+			((temp & 0xfffffff0) | 0x00000001|0x00000002));
+}
+
+static void ali_cleanup(void)
+{
+	struct aper_size_info_32 *previous_size;
+	u32 temp;
+
+	previous_size = A_SIZE_32(agp_bridge.previous_size);
+
+	pci_read_config_dword(agp_bridge.dev, ALI_TLBCTRL, &temp);
+// clear tag
+	pci_write_config_dword(agp_bridge.dev, ALI_TAGCTRL,
+			((temp & 0xffffff00) | 0x00000001|0x00000002));
+
+	pci_read_config_dword(agp_bridge.dev,  ALI_ATTBASE, &temp);
+	pci_write_config_dword(agp_bridge.dev, ALI_ATTBASE,
+			((temp & 0x00000ff0) | previous_size->size_value));
+}
+
+static int ali_configure(void)
+{
+	u32 temp;
+	struct aper_size_info_32 *current_size;
+
+	current_size = A_SIZE_32(agp_bridge.current_size);
+
+	/* aperture size and gatt addr */
+	pci_read_config_dword(agp_bridge.dev, ALI_ATTBASE, &temp);
+	temp = (((temp & 0x00000ff0) | (agp_bridge.gatt_bus_addr & 0xfffff000))
+			| (current_size->size_value & 0xf));
+	pci_write_config_dword(agp_bridge.dev, ALI_ATTBASE, temp);
+
+	/* tlb control */
+
+	/*
+	 *	Question: Jeff, ALi's patch deletes this:
+	 *
+	 *	pci_read_config_dword(agp_bridge.dev, ALI_TLBCTRL, &temp);
+	 *	pci_write_config_dword(agp_bridge.dev, ALI_TLBCTRL,
+	 *			       ((temp & 0xffffff00) | 0x00000010));
+	 *
+	 *	and replaces it with the following, which seems to duplicate the
+	 *	next couple of lines below it. I suspect this was an oversight,
+	 *	but you might want to check up on this?
+	 */
+	
+	pci_read_config_dword(agp_bridge.dev, ALI_APBASE, &temp);
+	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+	/* address to map to */
+	pci_read_config_dword(agp_bridge.dev, ALI_APBASE, &temp);
+	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+#if 0
+	if (agp_bridge.type == ALI_M1541) {
+		u32 nlvm_addr = 0;
+
+		switch (current_size->size_value) {
+			case 0:  break;
+			case 1:  nlvm_addr = 0x100000;break;
+			case 2:  nlvm_addr = 0x200000;break;
+			case 3:  nlvm_addr = 0x400000;break;
+			case 4:  nlvm_addr = 0x800000;break;
+			case 6:  nlvm_addr = 0x1000000;break;
+			case 7:  nlvm_addr = 0x2000000;break;
+			case 8:  nlvm_addr = 0x4000000;break;
+			case 9:  nlvm_addr = 0x8000000;break;
+			case 10: nlvm_addr = 0x10000000;break;
+			default: break;
+		}
+		nlvm_addr--;
+		nlvm_addr&=0xfff00000;
+
+		nlvm_addr+= agp_bridge.gart_bus_addr;
+		nlvm_addr|=(agp_bridge.gart_bus_addr>>12);
+		printk(KERN_INFO PFX "nlvm top &base = %8x\n",nlvm_addr);
+	}
+#endif
+
+	pci_read_config_dword(agp_bridge.dev, ALI_TLBCTRL, &temp);
+	temp &= 0xffffff7f;		//enable TLB
+	pci_write_config_dword(agp_bridge.dev, ALI_TLBCTRL, temp);
+
+	return 0;
+}
+
+static unsigned long ali_mask_memory(unsigned long addr, int type)
+{
+	/* Memory type is ignored */
+
+	return addr | agp_bridge.masks[0].mask;
+}
+
+static void ali_cache_flush(void)
+{
+	global_cache_flush();
+
+	if (agp_bridge.type == ALI_M1541) {
+		int i, page_count;
+		u32 temp;
+
+		page_count = 1 << A_SIZE_32(agp_bridge.current_size)->page_order;
+		for (i = 0; i < PAGE_SIZE * page_count; i += PAGE_SIZE) {
+			pci_read_config_dword(agp_bridge.dev, ALI_CACHE_FLUSH_CTRL, &temp);
+			pci_write_config_dword(agp_bridge.dev, ALI_CACHE_FLUSH_CTRL,
+					(((temp & ALI_CACHE_FLUSH_ADDR_MASK) |
+					  (agp_bridge.gatt_bus_addr + i)) |
+					    ALI_CACHE_FLUSH_EN));
+		}
+	}
+}
+
+static void *ali_alloc_page(void)
+{
+	void *adr = agp_generic_alloc_page();
+	u32 temp;
+
+	if (adr == NULL)
+		return NULL;
+
+	if (agp_bridge.type == ALI_M1541) {
+		pci_read_config_dword(agp_bridge.dev, ALI_CACHE_FLUSH_CTRL, &temp);
+		pci_write_config_dword(agp_bridge.dev, ALI_CACHE_FLUSH_CTRL,
+				(((temp & ALI_CACHE_FLUSH_ADDR_MASK) |
+				  virt_to_phys(adr)) |
+				    ALI_CACHE_FLUSH_EN ));
+	}
+	return adr;
+}
+
+static void ali_destroy_page(void * addr)
+{
+	u32 temp;
+
+	if (addr == NULL)
+		return;
+
+	global_cache_flush();
+
+	if (agp_bridge.type == ALI_M1541) {
+		pci_read_config_dword(agp_bridge.dev, ALI_CACHE_FLUSH_CTRL, &temp);
+		pci_write_config_dword(agp_bridge.dev, ALI_CACHE_FLUSH_CTRL,
+				(((temp & ALI_CACHE_FLUSH_ADDR_MASK) |
+				  virt_to_phys(addr)) |
+				    ALI_CACHE_FLUSH_EN));
+	}
+
+	agp_generic_destroy_page(addr);
+}
+
+/* Setup function */
+static struct gatt_mask ali_generic_masks[] =
+{
+	{mask: 0x00000000, type: 0}
+};
+
+static struct aper_size_info_32 ali_generic_sizes[7] =
+{
+	{256, 65536, 6, 10},
+	{128, 32768, 5, 9},
+	{64, 16384, 4, 8},
+	{32, 8192, 3, 7},
+	{16, 4096, 2, 6},
+	{8, 2048, 1, 4},
+	{4, 1024, 0, 3}
+};
+
+int __init ali_generic_setup (struct pci_dev *pdev)
+{
+	agp_bridge.masks = ali_generic_masks;
+	agp_bridge.num_of_masks = 1;
+	agp_bridge.aperture_sizes = (void *) ali_generic_sizes;
+	agp_bridge.size_type = U32_APER_SIZE;
+	agp_bridge.num_aperture_sizes = 7;
+	agp_bridge.dev_private_data = NULL;
+	agp_bridge.needs_scratch_page = FALSE;
+	agp_bridge.configure = ali_configure;
+	agp_bridge.fetch_size = ali_fetch_size;
+	agp_bridge.cleanup = ali_cleanup;
+	agp_bridge.tlb_flush = ali_tlbflush;
+	agp_bridge.mask_memory = ali_mask_memory;
+	agp_bridge.agp_enable = agp_generic_agp_enable;
+	agp_bridge.cache_flush = ali_cache_flush;
+	agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
+	agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
+	agp_bridge.insert_memory = agp_generic_insert_memory;
+	agp_bridge.remove_memory = agp_generic_remove_memory;
+	agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
+	agp_bridge.free_by_type = agp_generic_free_by_type;
+	agp_bridge.agp_alloc_page = ali_alloc_page;
+	agp_bridge.agp_destroy_page = ali_destroy_page;
+	agp_bridge.suspend = agp_generic_suspend;
+	agp_bridge.resume = agp_generic_resume;
+	agp_bridge.cant_use_aperture = 0;
+
+	return 0;
+	
+	(void) pdev; /* unused */
+}
+
diff -Nru a/drivers/char/agp/amd.c b/drivers/char/agp/amd.c
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/drivers/char/agp/amd.c	Wed Jul 17 11:29:31 2002
@@ -0,0 +1,408 @@
+/*
+ * AGPGART module version 0.99
+ * Copyright (C) 1999 Jeff Hartmann
+ * Copyright (C) 1999 Precision Insight, Inc.
+ * Copyright (C) 1999 Xi Graphics, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, 
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE 
+ * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * TODO: 
+ * - Allocate more than order 0 pages to avoid too much linear map splitting.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/agp_backend.h>
+#include "agp.h"
+
+struct amd_page_map {
+	unsigned long *real;
+	unsigned long *remapped;
+};
+
+static struct _amd_irongate_private {
+	volatile u8 *registers;
+	struct amd_page_map **gatt_pages;
+	int num_tables;
+} amd_irongate_private;
+
+static int amd_create_page_map(struct amd_page_map *page_map)
+{
+	int i;
+
+	page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL);
+	if (page_map->real == NULL) {
+		return -ENOMEM;
+	}
+	SetPageReserved(virt_to_page(page_map->real));
+	CACHE_FLUSH();
+	page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real), 
+					    PAGE_SIZE);
+	if (page_map->remapped == NULL) {
+		ClearPageReserved(virt_to_page(page_map->real));
+		free_page((unsigned long) page_map->real);
+		page_map->real = NULL;
+		return -ENOMEM;
+	}
+	CACHE_FLUSH();
+
+	for(i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) {
+		page_map->remapped[i] = agp_bridge.scratch_page;
+	}
+
+	return 0;
+}
+
+static void amd_free_page_map(struct amd_page_map *page_map)
+{
+	iounmap(page_map->remapped);
+	ClearPageReserved(virt_to_page(page_map->real));
+	free_page((unsigned long) page_map->real);
+}
+
+static void amd_free_gatt_pages(void)
+{
+	int i;
+	struct amd_page_map **tables;
+	struct amd_page_map *entry;
+
+	tables = amd_irongate_private.gatt_pages;
+	for(i = 0; i < amd_irongate_private.num_tables; i++) {
+		entry = tables[i];
+		if (entry != NULL) {
+			if (entry->real != NULL) {
+				amd_free_page_map(entry);
+			}
+			kfree(entry);
+		}
+	}
+	kfree(tables);
+}
+
+static int amd_create_gatt_pages(int nr_tables)
+{
+	struct amd_page_map **tables;
+	struct amd_page_map *entry;
+	int retval = 0;
+	int i;
+
+	tables = kmalloc((nr_tables + 1) * sizeof(struct amd_page_map *), 
+			 GFP_KERNEL);
+	if (tables == NULL) {
+		return -ENOMEM;
+	}
+	memset(tables, 0, sizeof(struct amd_page_map *) * (nr_tables + 1));
+	for (i = 0; i < nr_tables; i++) {
+		entry = kmalloc(sizeof(struct amd_page_map), GFP_KERNEL);
+		if (entry == NULL) {
+			retval = -ENOMEM;
+			break;
+		}
+		memset(entry, 0, sizeof(struct amd_page_map));
+		tables[i] = entry;
+		retval = amd_create_page_map(entry);
+		if (retval != 0) break;
+	}
+	amd_irongate_private.num_tables = nr_tables;
+	amd_irongate_private.gatt_pages = tables;
+
+	if (retval != 0) amd_free_gatt_pages();
+
+	return retval;
+}
+
+/* Since we don't need contiguous memory we just try
+ * to get the gatt table once
+ */
+
+#define GET_PAGE_DIR_OFF(addr) (addr >> 22)
+#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \
+	GET_PAGE_DIR_OFF(agp_bridge.gart_bus_addr))
+#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12) 
+#define GET_GATT(addr) (amd_irongate_private.gatt_pages[\
+	GET_PAGE_DIR_IDX(addr)]->remapped)
+
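+/*
+ * Illustrative sketch (hypothetical helper, not used by the driver): with
+ * 4KB pages and 1024 GATT entries per table, bits 31:22 of an aperture
+ * address (relative to gart_bus_addr) select the GATT page via the page
+ * directory, and bits 21:12 select the entry within that page.
+ */
+static inline unsigned long *amd_example_gatt_entry(unsigned long addr)
+{
+	unsigned long *cur_gatt = GET_GATT(addr);
+
+	return &cur_gatt[GET_GATT_OFF(addr)];
+}
+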
+static int amd_create_gatt_table(void)
+{
+	struct aper_size_info_lvl2 *value;
+	struct amd_page_map page_dir;
+	unsigned long addr;
+	int retval;
+	u32 temp;
+	int i;
+
+	value = A_SIZE_LVL2(agp_bridge.current_size);
+	retval = amd_create_page_map(&page_dir);
+	if (retval != 0) {
+		return retval;
+	}
+
+	retval = amd_create_gatt_pages(value->num_entries / 1024);
+	if (retval != 0) {
+		amd_free_page_map(&page_dir);
+		return retval;
+	}
+
+	agp_bridge.gatt_table_real = page_dir.real;
+	agp_bridge.gatt_table = page_dir.remapped;
+	agp_bridge.gatt_bus_addr = virt_to_phys(page_dir.real);
+
+	/* Get the address for the gart region.
+	 * This is a bus address even on the alpha, because it's
+	 * used to program the agp master, not the cpu
+	 */
+
+	pci_read_config_dword(agp_bridge.dev, AMD_APBASE, &temp);
+	addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+	agp_bridge.gart_bus_addr = addr;
+
+	/* Calculate the agp offset */
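+	/* Each page directory entry covers 4MB of aperture (1024 GATT
+	 * entries of 4KB each), hence the 0x00400000 stride; setting bit 0
+	 * marks the directory entry valid. */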
+	for(i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) {
+		page_dir.remapped[GET_PAGE_DIR_OFF(addr)] =
+			virt_to_phys(amd_irongate_private.gatt_pages[i]->real);
+		page_dir.remapped[GET_PAGE_DIR_OFF(addr)] |= 0x00000001;
+	}
+
+	return 0;
+}
+
+static int amd_free_gatt_table(void)
+{
+	struct amd_page_map page_dir;
+   
+	page_dir.real = agp_bridge.gatt_table_real;
+	page_dir.remapped = agp_bridge.gatt_table;
+
+	amd_free_gatt_pages();
+	amd_free_page_map(&page_dir);
+	return 0;
+}
+
+static int amd_irongate_fetch_size(void)
+{
+	int i;
+	u32 temp;
+	struct aper_size_info_lvl2 *values;
+
+	pci_read_config_dword(agp_bridge.dev, AMD_APSIZE, &temp);
+	temp = (temp & 0x0000000e);
+	values = A_SIZE_LVL2(agp_bridge.aperture_sizes);
+	for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
+		if (temp == values[i].size_value) {
+			agp_bridge.previous_size =
+			    agp_bridge.current_size = (void *) (values + i);
+
+			agp_bridge.aperture_size_idx = i;
+			return values[i].size;
+		}
+	}
+
+	return 0;
+}
+
+static int amd_irongate_configure(void)
+{
+	struct aper_size_info_lvl2 *current_size;
+	u32 temp;
+	u16 enable_reg;
+
+	current_size = A_SIZE_LVL2(agp_bridge.current_size);
+
+	/* Get the memory mapped registers */
+	pci_read_config_dword(agp_bridge.dev, AMD_MMBASE, &temp);
+	temp = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+	amd_irongate_private.registers = (volatile u8 *) ioremap(temp, 4096);
+
+	/* Write out the address of the gatt table */
+	OUTREG32(amd_irongate_private.registers, AMD_ATTBASE,
+		 agp_bridge.gatt_bus_addr);
+
+	/* Write the Sync register */
+	pci_write_config_byte(agp_bridge.dev, AMD_MODECNTL, 0x80);
+   
+   	/* Set indexing mode */
+   	pci_write_config_byte(agp_bridge.dev, AMD_MODECNTL2, 0x00);
+
+	/* Write the enable register */
+	enable_reg = INREG16(amd_irongate_private.registers, AMD_GARTENABLE);
+	enable_reg = (enable_reg | 0x0004);
+	OUTREG16(amd_irongate_private.registers, AMD_GARTENABLE, enable_reg);
+
+	/* Write out the size register */
+	pci_read_config_dword(agp_bridge.dev, AMD_APSIZE, &temp);
+	temp = (((temp & ~(0x0000000e)) | current_size->size_value)
+		| 0x00000001);
+	pci_write_config_dword(agp_bridge.dev, AMD_APSIZE, temp);
+
+	/* Flush the tlb */
+	OUTREG32(amd_irongate_private.registers, AMD_TLBFLUSH, 0x00000001);
+
+	return 0;
+}
+
+static void amd_irongate_cleanup(void)
+{
+	struct aper_size_info_lvl2 *previous_size;
+	u32 temp;
+	u16 enable_reg;
+
+	previous_size = A_SIZE_LVL2(agp_bridge.previous_size);
+
+	enable_reg = INREG16(amd_irongate_private.registers, AMD_GARTENABLE);
+	enable_reg = (enable_reg & ~(0x0004));
+	OUTREG16(amd_irongate_private.registers, AMD_GARTENABLE, enable_reg);
+
+	/* Write back the previous size and disable gart translation */
+	pci_read_config_dword(agp_bridge.dev, AMD_APSIZE, &temp);
+	temp = ((temp & ~(0x0000000f)) | previous_size->size_value);
+	pci_write_config_dword(agp_bridge.dev, AMD_APSIZE, temp);
+	iounmap((void *) amd_irongate_private.registers);
+}
+
+/*
+ * This routine could be implemented by taking the addresses
+ * written to the GATT, and flushing them individually.  However,
+ * currently it just flushes the whole table, which is probably
+ * more efficient, since agp_memory blocks can be a large number of
+ * entries.
+ */
+
+static void amd_irongate_tlbflush(agp_memory * temp)
+{
+	OUTREG32(amd_irongate_private.registers, AMD_TLBFLUSH, 0x00000001);
+}
+
+static unsigned long amd_irongate_mask_memory(unsigned long addr, int type)
+{
+	/* Only type 0 is supported by the irongate */
+
+	return addr | agp_bridge.masks[0].mask;
+}
+
+static int amd_insert_memory(agp_memory * mem,
+			     off_t pg_start, int type)
+{
+	int i, j, num_entries;
+	unsigned long *cur_gatt;
+	unsigned long addr;
+
+	num_entries = A_SIZE_LVL2(agp_bridge.current_size)->num_entries;
+
+	if (type != 0 || mem->type != 0) {
+		return -EINVAL;
+	}
+	if ((pg_start + mem->page_count) > num_entries) {
+		return -EINVAL;
+	}
+
+	j = pg_start;
+	while (j < (pg_start + mem->page_count)) {
+		addr = (j * PAGE_SIZE) + agp_bridge.gart_bus_addr;
+		cur_gatt = GET_GATT(addr);
+		if (!PGE_EMPTY(cur_gatt[GET_GATT_OFF(addr)])) {
+			return -EBUSY;
+		}
+		j++;
+	}
+
+	if (mem->is_flushed == FALSE) {
+		CACHE_FLUSH();
+		mem->is_flushed = TRUE;
+	}
+
+	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+		addr = (j * PAGE_SIZE) + agp_bridge.gart_bus_addr;
+		cur_gatt = GET_GATT(addr);
+		cur_gatt[GET_GATT_OFF(addr)] = mem->memory[i];
+	}
+	agp_bridge.tlb_flush(mem);
+	return 0;
+}
+
+static int amd_remove_memory(agp_memory * mem, off_t pg_start,
+			     int type)
+{
+	int i;
+	unsigned long *cur_gatt;
+	unsigned long addr;
+
+	if (type != 0 || mem->type != 0) {
+		return -EINVAL;
+	}
+	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
+		addr = (i * PAGE_SIZE) + agp_bridge.gart_bus_addr;
+		cur_gatt = GET_GATT(addr);
+		cur_gatt[GET_GATT_OFF(addr)] = 
+			(unsigned long) agp_bridge.scratch_page;
+	}
+
+	agp_bridge.tlb_flush(mem);
+	return 0;
+}
+
+static struct aper_size_info_lvl2 amd_irongate_sizes[7] =
+{
+	{2048, 524288, 0x0000000c},
+	{1024, 262144, 0x0000000a},
+	{512, 131072, 0x00000008},
+	{256, 65536, 0x00000006},
+	{128, 32768, 0x00000004},
+	{64, 16384, 0x00000002},
+	{32, 8192, 0x00000000}
+};
+
+static struct gatt_mask amd_irongate_masks[] =
+{
+	{mask: 0x00000001, type: 0}
+};
+
+int __init amd_irongate_setup (struct pci_dev *pdev)
+{
+	agp_bridge.masks = amd_irongate_masks;
+	agp_bridge.num_of_masks = 1;
+	agp_bridge.aperture_sizes = (void *) amd_irongate_sizes;
+	agp_bridge.size_type = LVL2_APER_SIZE;
+	agp_bridge.num_aperture_sizes = 7;
+	agp_bridge.dev_private_data = (void *) &amd_irongate_private;
+	agp_bridge.needs_scratch_page = FALSE;
+	agp_bridge.configure = amd_irongate_configure;
+	agp_bridge.fetch_size = amd_irongate_fetch_size;
+	agp_bridge.cleanup = amd_irongate_cleanup;
+	agp_bridge.tlb_flush = amd_irongate_tlbflush;
+	agp_bridge.mask_memory = amd_irongate_mask_memory;
+	agp_bridge.agp_enable = agp_generic_agp_enable;
+	agp_bridge.cache_flush = global_cache_flush;
+	agp_bridge.create_gatt_table = amd_create_gatt_table;
+	agp_bridge.free_gatt_table = amd_free_gatt_table;
+	agp_bridge.insert_memory = amd_insert_memory;
+	agp_bridge.remove_memory = amd_remove_memory;
+	agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
+	agp_bridge.free_by_type = agp_generic_free_by_type;
+	agp_bridge.agp_alloc_page = agp_generic_alloc_page;
+	agp_bridge.agp_destroy_page = agp_generic_destroy_page;
+	agp_bridge.suspend = agp_generic_suspend;
+	agp_bridge.resume = agp_generic_resume;
+	agp_bridge.cant_use_aperture = 0;
+
+	return 0;
+	
+	(void) pdev; /* unused */
+}
+
diff -Nru a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/drivers/char/agp/frontend.c	Wed Jul 17 11:29:31 2002
@@ -0,0 +1,1086 @@
+/*
+ * AGPGART module frontend version 0.99
+ * Copyright (C) 1999 Jeff Hartmann
+ * Copyright (C) 1999 Precision Insight, Inc.
+ * Copyright (C) 1999 Xi Graphics, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, 
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE 
+ * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mman.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/miscdevice.h>
+#include <linux/agp_backend.h>
+#include <linux/agpgart.h>
+#include <asm/uaccess.h>
+
+#include "agp.h"
+
+static struct agp_front_data agp_fe;
+
+static agp_memory *agp_find_mem_by_key(int key)
+{
+	agp_memory *curr;
+
+	if (agp_fe.current_controller == NULL) {
+		return NULL;
+	}
+	curr = agp_fe.current_controller->pool;
+
+	while (curr != NULL) {
+		if (curr->key == key) {
+			return curr;
+		}
+		curr = curr->next;
+	}
+
+	return NULL;
+}
+
+static void agp_remove_from_pool(agp_memory * temp)
+{
+	agp_memory *prev;
+	agp_memory *next;
+
+	/* Check to see if this is even in the memory pool */
+
+	if (agp_find_mem_by_key(temp->key) != NULL) {
+		next = temp->next;
+		prev = temp->prev;
+
+		if (prev != NULL) {
+			prev->next = next;
+			if (next != NULL) {
+				next->prev = prev;
+			}
+		} else {
+			/* This is the first item on the list */
+			if (next != NULL) {
+				next->prev = NULL;
+			}
+			agp_fe.current_controller->pool = next;
+		}
+	}
+}
+
+/*
+ * Routines for managing each client's segment list -
+ * These routines handle adding and removing segments
+ * for each auth'ed client.
+ */
+
+static agp_segment_priv *agp_find_seg_in_client(const agp_client * client,
+						unsigned long offset,
+					    int size, pgprot_t page_prot)
+{
+	agp_segment_priv *seg;
+	int num_segments, pg_start, pg_count, i;
+
+	pg_start = offset / 4096;
+	pg_count = size / 4096;
+	seg = *(client->segments);
+	num_segments = client->num_segments;
+
+	for (i = 0; i < client->num_segments; i++) {
+		if ((seg[i].pg_start == pg_start) &&
+		    (seg[i].pg_count == pg_count) &&
+		    (pgprot_val(seg[i].prot) == pgprot_val(page_prot))) {
+			return seg + i;
+		}
+	}
+
+	return NULL;
+}
+
+static void agp_remove_seg_from_client(agp_client * client)
+{
+	if (client->segments != NULL) {
+		if (*(client->segments) != NULL) {
+			kfree(*(client->segments));
+		}
+		kfree(client->segments);
+	}
+}
+
+static void agp_add_seg_to_client(agp_client * client,
+			       agp_segment_priv ** seg, int num_segments)
+{
+	agp_segment_priv **prev_seg;
+
+	prev_seg = client->segments;
+
+	if (prev_seg != NULL) {
+		agp_remove_seg_from_client(client);
+	}
+	client->num_segments = num_segments;
+	client->segments = seg;
+}
+
+/* Originally taken from linux/mm/mmap.c from the array
+ * protection_map.
+ * The original really should be exported to modules, or 
+ * some routine which does the conversion for you 
+ */
+
+static const pgprot_t my_protect_map[16] =
+{
+	__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
+	__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
+};
+
+static pgprot_t agp_convert_mmap_flags(int prot)
+{
+#define _trans(x,bit1,bit2) \
+((bit1==bit2)?(x&bit1):(x&bit1)?bit2:0)
+
+	unsigned long prot_bits;
+	pgprot_t temp;
+
+	prot_bits = _trans(prot, PROT_READ, VM_READ) |
+	    _trans(prot, PROT_WRITE, VM_WRITE) |
+	    _trans(prot, PROT_EXEC, VM_EXEC);
+
+	prot_bits |= VM_SHARED;
+
+	temp = my_protect_map[prot_bits & 0x0000000f];
+
+	return temp;
+}
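+
+/*
+ * For example, a client segment created with PROT_READ | PROT_WRITE ends
+ * up with prot_bits = VM_READ | VM_WRITE | VM_SHARED, which (with the
+ * usual VM_* values) indexes my_protect_map[0xb] == __S011, i.e. a shared
+ * read/write mapping.
+ */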
+
+static int agp_create_segment(agp_client * client, agp_region * region)
+{
+	agp_segment_priv **ret_seg;
+	agp_segment_priv *seg;
+	agp_segment *user_seg;
+	int i;
+
+	seg = kmalloc((sizeof(agp_segment_priv) * region->seg_count),
+		      GFP_KERNEL);
+	if (seg == NULL) {
+		kfree(region->seg_list);
+		return -ENOMEM;
+	}
+	memset(seg, 0, (sizeof(agp_segment_priv) * region->seg_count));
+	user_seg = region->seg_list;
+
+	for (i = 0; i < region->seg_count; i++) {
+		seg[i].pg_start = user_seg[i].pg_start;
+		seg[i].pg_count = user_seg[i].pg_count;
+		seg[i].prot = agp_convert_mmap_flags(user_seg[i].prot);
+	}
+	ret_seg = kmalloc(sizeof(void *), GFP_KERNEL);
+	if (ret_seg == NULL) {
+		kfree(region->seg_list);
+		kfree(seg);
+		return -ENOMEM;
+	}
+	*ret_seg = seg;
+	kfree(region->seg_list);
+	agp_add_seg_to_client(client, ret_seg, region->seg_count);
+	return 0;
+}
+
+/* End - Routines for managing each client's segment list */
+
+/* This function must only be called when current_controller != NULL */
+static void agp_insert_into_pool(agp_memory * temp)
+{
+	agp_memory *prev;
+
+	prev = agp_fe.current_controller->pool;
+
+	if (prev != NULL) {
+		prev->prev = temp;
+		temp->next = prev;
+	}
+	agp_fe.current_controller->pool = temp;
+}
+
+
+/* File private list routines */
+
+agp_file_private *agp_find_private(pid_t pid)
+{
+	agp_file_private *curr;
+
+	curr = agp_fe.file_priv_list;
+
+	while (curr != NULL) {
+		if (curr->my_pid == pid) {
+			return curr;
+		}
+		curr = curr->next;
+	}
+
+	return NULL;
+}
+
+void agp_insert_file_private(agp_file_private * priv)
+{
+	agp_file_private *prev;
+
+	prev = agp_fe.file_priv_list;
+
+	if (prev != NULL) {
+		prev->prev = priv;
+	}
+	priv->next = prev;
+	agp_fe.file_priv_list = priv;
+}
+
+void agp_remove_file_private(agp_file_private * priv)
+{
+	agp_file_private *next;
+	agp_file_private *prev;
+
+	next = priv->next;
+	prev = priv->prev;
+
+	if (prev != NULL) {
+		prev->next = next;
+
+		if (next != NULL) {
+			next->prev = prev;
+		}
+	} else {
+		if (next != NULL) {
+			next->prev = NULL;
+		}
+		agp_fe.file_priv_list = next;
+	}
+}
+
+/* End - File flag list routines */
+
+/* 
+ * Wrappers for agp_free_memory & agp_allocate_memory 
+ * These make sure that internal lists are kept updated.
+ */
+static void agp_free_memory_wrap(agp_memory * memory)
+{
+	agp_remove_from_pool(memory);
+	agp_free_memory(memory);
+}
+
+static agp_memory *agp_allocate_memory_wrap(size_t pg_count, u32 type)
+{
+	agp_memory *memory;
+
+	memory = agp_allocate_memory(pg_count, type);
+   	printk(KERN_DEBUG "agp_allocate_memory: %p\n", memory);
+	if (memory == NULL) {
+		return NULL;
+	}
+	agp_insert_into_pool(memory);
+	return memory;
+}
+
+/* Routines for managing the list of controllers -
+ * These routines manage the current controller, and the list of
+ * controllers
+ */
+
+static agp_controller *agp_find_controller_by_pid(pid_t id)
+{
+	agp_controller *controller;
+
+	controller = agp_fe.controllers;
+
+	while (controller != NULL) {
+		if (controller->pid == id) {
+			return controller;
+		}
+		controller = controller->next;
+	}
+
+	return NULL;
+}
+
+static agp_controller *agp_create_controller(pid_t id)
+{
+	agp_controller *controller;
+
+	controller = kmalloc(sizeof(agp_controller), GFP_KERNEL);
+
+	if (controller == NULL) {
+		return NULL;
+	}
+	memset(controller, 0, sizeof(agp_controller));
+	controller->pid = id;
+
+	return controller;
+}
+
+static int agp_insert_controller(agp_controller * controller)
+{
+	agp_controller *prev_controller;
+
+	prev_controller = agp_fe.controllers;
+	controller->next = prev_controller;
+
+	if (prev_controller != NULL) {
+		prev_controller->prev = controller;
+	}
+	agp_fe.controllers = controller;
+
+	return 0;
+}
+
+static void agp_remove_all_clients(agp_controller * controller)
+{
+	agp_client *client;
+	agp_client *temp;
+
+	client = controller->clients;
+
+	while (client) {
+		agp_file_private *priv;
+
+		temp = client;
+		agp_remove_seg_from_client(temp);
+		priv = agp_find_private(temp->pid);
+
+		if (priv != NULL) {
+			clear_bit(AGP_FF_IS_VALID, &priv->access_flags);
+			clear_bit(AGP_FF_IS_CLIENT, &priv->access_flags);
+		}
+		client = client->next;
+		kfree(temp);
+	}
+}
+
+static void agp_remove_all_memory(agp_controller * controller)
+{
+	agp_memory *memory;
+	agp_memory *temp;
+
+	memory = controller->pool;
+
+	while (memory) {
+		temp = memory;
+		memory = memory->next;
+		agp_free_memory_wrap(temp);
+	}
+}
+
+static int agp_remove_controller(agp_controller * controller)
+{
+	agp_controller *prev_controller;
+	agp_controller *next_controller;
+
+	prev_controller = controller->prev;
+	next_controller = controller->next;
+
+	if (prev_controller != NULL) {
+		prev_controller->next = next_controller;
+		if (next_controller != NULL) {
+			next_controller->prev = prev_controller;
+		}
+	} else {
+		if (next_controller != NULL) {
+			next_controller->prev = NULL;
+		}
+		agp_fe.controllers = next_controller;
+	}
+
+	agp_remove_all_memory(controller);
+	agp_remove_all_clients(controller);
+
+	if (agp_fe.current_controller == controller) {
+		agp_fe.current_controller = NULL;
+		agp_fe.backend_acquired = FALSE;
+		agp_backend_release();
+	}
+	kfree(controller);
+	return 0;
+}
+
+static void agp_controller_make_current(agp_controller * controller)
+{
+	agp_client *clients;
+
+	clients = controller->clients;
+
+	while (clients != NULL) {
+		agp_file_private *priv;
+
+		priv = agp_find_private(clients->pid);
+
+		if (priv != NULL) {
+			set_bit(AGP_FF_IS_VALID, &priv->access_flags);
+			set_bit(AGP_FF_IS_CLIENT, &priv->access_flags);
+		}
+		clients = clients->next;
+	}
+
+	agp_fe.current_controller = controller;
+}
+
+static void agp_controller_release_current(agp_controller * controller,
+				      agp_file_private * controller_priv)
+{
+	agp_client *clients;
+
+	clear_bit(AGP_FF_IS_VALID, &controller_priv->access_flags);
+	clients = controller->clients;
+
+	while (clients != NULL) {
+		agp_file_private *priv;
+
+		priv = agp_find_private(clients->pid);
+
+		if (priv != NULL) {
+			clear_bit(AGP_FF_IS_VALID, &priv->access_flags);
+		}
+		clients = clients->next;
+	}
+
+	agp_fe.current_controller = NULL;
+	agp_fe.used_by_controller = FALSE;
+	agp_backend_release();
+}
+
+/* 
+ * Routines for managing client lists -
+ * These routines are for managing the list of auth'ed clients.
+ */
+
+static agp_client *agp_find_client_in_controller(agp_controller * controller,
+						 pid_t id)
+{
+	agp_client *client;
+
+	if (controller == NULL) {
+		return NULL;
+	}
+	client = controller->clients;
+
+	while (client != NULL) {
+		if (client->pid == id) {
+			return client;
+		}
+		client = client->next;
+	}
+
+	return NULL;
+}
+
+static agp_controller *agp_find_controller_for_client(pid_t id)
+{
+	agp_controller *controller;
+
+	controller = agp_fe.controllers;
+
+	while (controller != NULL) {
+		if ((agp_find_client_in_controller(controller, id)) != NULL) {
+			return controller;
+		}
+		controller = controller->next;
+	}
+
+	return NULL;
+}
+
+static agp_client *agp_find_client_by_pid(pid_t id)
+{
+	agp_client *temp;
+
+	if (agp_fe.current_controller == NULL) {
+		return NULL;
+	}
+	temp = agp_find_client_in_controller(agp_fe.current_controller, id);
+	return temp;
+}
+
+static void agp_insert_client(agp_client * client)
+{
+	agp_client *prev_client;
+
+	prev_client = agp_fe.current_controller->clients;
+	client->next = prev_client;
+
+	if (prev_client != NULL) {
+		prev_client->prev = client;
+	}
+	agp_fe.current_controller->clients = client;
+	agp_fe.current_controller->num_clients++;
+}
+
+static agp_client *agp_create_client(pid_t id)
+{
+	agp_client *new_client;
+
+	new_client = kmalloc(sizeof(agp_client), GFP_KERNEL);
+
+	if (new_client == NULL) {
+		return NULL;
+	}
+	memset(new_client, 0, sizeof(agp_client));
+	new_client->pid = id;
+	agp_insert_client(new_client);
+	return new_client;
+}
+
+static int agp_remove_client(pid_t id)
+{
+	agp_client *client;
+	agp_client *prev_client;
+	agp_client *next_client;
+	agp_controller *controller;
+
+	controller = agp_find_controller_for_client(id);
+
+	if (controller == NULL) {
+		return -EINVAL;
+	}
+	client = agp_find_client_in_controller(controller, id);
+
+	if (client == NULL) {
+		return -EINVAL;
+	}
+	prev_client = client->prev;
+	next_client = client->next;
+
+	if (prev_client != NULL) {
+		prev_client->next = next_client;
+		if (next_client != NULL) {
+			next_client->prev = prev_client;
+		}
+	} else {
+		if (next_client != NULL) {
+			next_client->prev = NULL;
+		}
+		controller->clients = next_client;
+	}
+
+	controller->num_clients--;
+	agp_remove_seg_from_client(client);
+	kfree(client);
+	return 0;
+}
+
+/* End - Routines for managing client lists */
+
+/* File Operations */
+
+static int agp_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	int size;
+	int current_size;
+	unsigned long offset;
+	agp_client *client;
+	agp_file_private *priv = (agp_file_private *) file->private_data;
+	agp_kern_info kerninfo;
+
+	AGP_LOCK();
+
+	if (agp_fe.backend_acquired != TRUE) {
+		AGP_UNLOCK();
+		return -EPERM;
+	}
+	if (!(test_bit(AGP_FF_IS_VALID, &priv->access_flags))) {
+		AGP_UNLOCK();
+		return -EPERM;
+	}
+	agp_copy_info(&kerninfo);
+	size = vma->vm_end - vma->vm_start;
+	current_size = kerninfo.aper_size;
+	current_size = current_size * 0x100000;
+	offset = vma->vm_pgoff << PAGE_SHIFT;
+
+	if (test_bit(AGP_FF_IS_CLIENT, &priv->access_flags)) {
+		if ((size + offset) > current_size) {
+			AGP_UNLOCK();
+			return -EINVAL;
+		}
+		client = agp_find_client_by_pid(current->pid);
+
+		if (client == NULL) {
+			AGP_UNLOCK();
+			return -EPERM;
+		}
+		if (!agp_find_seg_in_client(client, offset,
+					    size, vma->vm_page_prot)) {
+			AGP_UNLOCK();
+			return -EINVAL;
+		}
+		if (remap_page_range(vma, vma->vm_start,
+				     (kerninfo.aper_base + offset),
+				     size, vma->vm_page_prot)) {
+			AGP_UNLOCK();
+			return -EAGAIN;
+		}
+		AGP_UNLOCK();
+		return 0;
+	}
+	if (test_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags)) {
+		if (size != current_size) {
+			AGP_UNLOCK();
+			return -EINVAL;
+		}
+		if (remap_page_range(vma, vma->vm_start, kerninfo.aper_base,
+				     size, vma->vm_page_prot)) {
+			AGP_UNLOCK();
+			return -EAGAIN;
+		}
+		AGP_UNLOCK();
+		return 0;
+	}
+	AGP_UNLOCK();
+	return -EPERM;
+}
+
+static int agp_release(struct inode *inode, struct file *file)
+{
+	agp_file_private *priv = (agp_file_private *) file->private_data;
+
+	AGP_LOCK();
+
+	if (test_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags)) {
+		agp_controller *controller;
+
+		controller = agp_find_controller_by_pid(priv->my_pid);
+
+		if (controller != NULL) {
+			if (controller == agp_fe.current_controller) {
+				agp_controller_release_current(controller,
+							       priv);
+			}
+			agp_remove_controller(controller);
+		}
+	}
+	if (test_bit(AGP_FF_IS_CLIENT, &priv->access_flags)) {
+		agp_remove_client(priv->my_pid);
+	}
+	agp_remove_file_private(priv);
+	kfree(priv);
+	AGP_UNLOCK();
+	return 0;
+}
+
+static int agp_open(struct inode *inode, struct file *file)
+{
+	int minor = minor(inode->i_rdev);
+	agp_file_private *priv;
+	agp_client *client;
+	int rc = -ENXIO;
+
+	AGP_LOCK();
+
+	if (minor != AGPGART_MINOR)
+		goto err_out;
+
+	priv = kmalloc(sizeof(agp_file_private), GFP_KERNEL);
+	if (priv == NULL)
+		goto err_out_nomem;
+
+	memset(priv, 0, sizeof(agp_file_private));
+	set_bit(AGP_FF_ALLOW_CLIENT, &priv->access_flags);
+	priv->my_pid = current->pid;
+
+	if ((current->uid == 0) || (current->suid == 0)) {
+		/* Root priv, can be controller */
+		set_bit(AGP_FF_ALLOW_CONTROLLER, &priv->access_flags);
+	}
+	client = agp_find_client_by_pid(current->pid);
+
+	if (client != NULL) {
+		set_bit(AGP_FF_IS_CLIENT, &priv->access_flags);
+		set_bit(AGP_FF_IS_VALID, &priv->access_flags);
+	}
+	file->private_data = (void *) priv;
+	agp_insert_file_private(priv);
+	AGP_UNLOCK();
+	return 0;
+
+err_out_nomem:
+	rc = -ENOMEM;
+err_out:
+	AGP_UNLOCK();
+	return rc;
+}
+
+
+static ssize_t agp_read(struct file *file, char *buf,
+			size_t count, loff_t * ppos)
+{
+	return -EINVAL;
+}
+
+static ssize_t agp_write(struct file *file, const char *buf,
+			 size_t count, loff_t * ppos)
+{
+	return -EINVAL;
+}
+
+static int agpioc_info_wrap(agp_file_private * priv, unsigned long arg)
+{
+	agp_info userinfo;
+	agp_kern_info kerninfo;
+
+	agp_copy_info(&kerninfo);
+
+	userinfo.version.major = kerninfo.version.major;
+	userinfo.version.minor = kerninfo.version.minor;
+	userinfo.bridge_id = kerninfo.device->vendor |
+	    (kerninfo.device->device << 16);
+	userinfo.agp_mode = kerninfo.mode;
+	userinfo.aper_base = kerninfo.aper_base;
+	userinfo.aper_size = kerninfo.aper_size;
+	userinfo.pg_total = userinfo.pg_system = kerninfo.max_memory;
+	userinfo.pg_used = kerninfo.current_memory;
+
+	if (copy_to_user((void *) arg, &userinfo, sizeof(agp_info))) {
+		return -EFAULT;
+	}
+	return 0;
+}
+
+static int agpioc_acquire_wrap(agp_file_private * priv, unsigned long arg)
+{
+	agp_controller *controller;
+	if (!(test_bit(AGP_FF_ALLOW_CONTROLLER, &priv->access_flags))) {
+		return -EPERM;
+	}
+	if (agp_fe.current_controller != NULL) {
+		return -EBUSY;
+	}
+	if ((agp_backend_acquire()) == 0) {
+		agp_fe.backend_acquired = TRUE;
+	} else {
+		return -EBUSY;
+	}
+
+	controller = agp_find_controller_by_pid(priv->my_pid);
+
+	if (controller != NULL) {
+		agp_controller_make_current(controller);
+	} else {
+		controller = agp_create_controller(priv->my_pid);
+
+		if (controller == NULL) {
+			agp_fe.backend_acquired = FALSE;
+			agp_backend_release();
+			return -ENOMEM;
+		}
+		agp_insert_controller(controller);
+		agp_controller_make_current(controller);
+	}
+
+	set_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags);
+	set_bit(AGP_FF_IS_VALID, &priv->access_flags);
+	return 0;
+}
+
+static int agpioc_release_wrap(agp_file_private * priv, unsigned long arg)
+{
+	agp_controller_release_current(agp_fe.current_controller, priv);
+	return 0;
+}
+
+static int agpioc_setup_wrap(agp_file_private * priv, unsigned long arg)
+{
+	agp_setup mode;
+
+	if (copy_from_user(&mode, (void *) arg, sizeof(agp_setup))) {
+		return -EFAULT;
+	}
+	agp_enable(mode.agp_mode);
+	return 0;
+}
+
+static int agpioc_reserve_wrap(agp_file_private * priv, unsigned long arg)
+{
+	agp_region reserve;
+	agp_client *client;
+	agp_file_private *client_priv;
+
+
+	if (copy_from_user(&reserve, (void *) arg, sizeof(agp_region))) {
+		return -EFAULT;
+	}
+	if ((unsigned) reserve.seg_count >= ~0U/sizeof(agp_segment))
+		return -EFAULT;
+
+	client = agp_find_client_by_pid(reserve.pid);
+
+	if (reserve.seg_count == 0) {
+		/* remove a client */
+		client_priv = agp_find_private(reserve.pid);
+
+		if (client_priv != NULL) {
+			set_bit(AGP_FF_IS_CLIENT,
+				&client_priv->access_flags);
+			set_bit(AGP_FF_IS_VALID,
+				&client_priv->access_flags);
+		}
+		if (client == NULL) {
+			/* client is already removed */
+			return 0;
+		}
+		return agp_remove_client(reserve.pid);
+	} else {
+		agp_segment *segment;
+
+		if (reserve.seg_count >= 16384)
+			return -EINVAL;
+			
+		segment = kmalloc((sizeof(agp_segment) * reserve.seg_count),
+				  GFP_KERNEL);
+
+		if (segment == NULL) {
+			return -ENOMEM;
+		}
+		if (copy_from_user(segment, (void *) reserve.seg_list,
+				   sizeof(agp_segment) * reserve.seg_count)) {
+			kfree(segment);
+			return -EFAULT;
+		}
+		reserve.seg_list = segment;
+
+		if (client == NULL) {
+			/* Create the client and add the segment */
+			client = agp_create_client(reserve.pid);
+
+			if (client == NULL) {
+				kfree(segment);
+				return -ENOMEM;
+			}
+			client_priv = agp_find_private(reserve.pid);
+
+			if (client_priv != NULL) {
+				set_bit(AGP_FF_IS_CLIENT,
+					&client_priv->access_flags);
+				set_bit(AGP_FF_IS_VALID,
+					&client_priv->access_flags);
+			}
+			return agp_create_segment(client, &reserve);
+		} else {
+			return agp_create_segment(client, &reserve);
+		}
+	}
+	/* Will never really happen */
+	return -EINVAL;
+}
+
+static int agpioc_protect_wrap(agp_file_private * priv, unsigned long arg)
+{
+	/* This function is not currently implemented */
+	return -EINVAL;
+}
+
+static int agpioc_allocate_wrap(agp_file_private * priv, unsigned long arg)
+{
+	agp_memory *memory;
+	agp_allocate alloc;
+
+	if (copy_from_user(&alloc, (void *) arg, sizeof(agp_allocate))) {
+		return -EFAULT;
+	}
+	memory = agp_allocate_memory_wrap(alloc.pg_count, alloc.type);
+
+	if (memory == NULL) {
+		return -ENOMEM;
+	}
+	alloc.key = memory->key;
+	alloc.physical = memory->physical;
+
+	if (copy_to_user((void *) arg, &alloc, sizeof(agp_allocate))) {
+		agp_free_memory_wrap(memory);
+		return -EFAULT;
+	}
+	return 0;
+}
+
+static int agpioc_deallocate_wrap(agp_file_private * priv, unsigned long arg)
+{
+	agp_memory *memory;
+
+	memory = agp_find_mem_by_key((int) arg);
+
+	if (memory == NULL) {
+		return -EINVAL;
+	}
+	agp_free_memory_wrap(memory);
+	return 0;
+}
+
+static int agpioc_bind_wrap(agp_file_private * priv, unsigned long arg)
+{
+	agp_bind bind_info;
+	agp_memory *memory;
+
+	if (copy_from_user(&bind_info, (void *) arg, sizeof(agp_bind))) {
+		return -EFAULT;
+	}
+	memory = agp_find_mem_by_key(bind_info.key);
+
+	if (memory == NULL) {
+		return -EINVAL;
+	}
+	return agp_bind_memory(memory, bind_info.pg_start);
+}
+
+static int agpioc_unbind_wrap(agp_file_private * priv, unsigned long arg)
+{
+	agp_memory *memory;
+	agp_unbind unbind;
+
+	if (copy_from_user(&unbind, (void *) arg, sizeof(agp_unbind))) {
+		return -EFAULT;
+	}
+	memory = agp_find_mem_by_key(unbind.key);
+
+	if (memory == NULL) {
+		return -EINVAL;
+	}
+	return agp_unbind_memory(memory);
+}
+
+static int agp_ioctl(struct inode *inode, struct file *file,
+		     unsigned int cmd, unsigned long arg)
+{
+	agp_file_private *curr_priv = (agp_file_private *) file->private_data;
+	int ret_val = -ENOTTY;
+
+	AGP_LOCK();
+
+	if ((agp_fe.current_controller == NULL) &&
+	    (cmd != AGPIOC_ACQUIRE)) {
+		ret_val = -EINVAL;
+	   	goto ioctl_out;
+	}
+	if ((agp_fe.backend_acquired != TRUE) &&
+	    (cmd != AGPIOC_ACQUIRE)) {
+		ret_val = -EBUSY;
+	   	goto ioctl_out;
+	}
+	if (cmd != AGPIOC_ACQUIRE) {
+		if (!(test_bit(AGP_FF_IS_CONTROLLER,
+			       &curr_priv->access_flags))) {
+			ret_val = -EPERM;
+		   	goto ioctl_out;
+		}
+		/* Use the original pid of the controller,
+		 * in case it's threaded */
+
+		if (agp_fe.current_controller->pid != curr_priv->my_pid) {
+			ret_val = -EBUSY;
+		   	goto ioctl_out;
+		}
+	}
+	switch (cmd) {
+	case AGPIOC_INFO:
+		{
+			ret_val = agpioc_info_wrap(curr_priv, arg);
+		   	goto ioctl_out;
+		}
+	case AGPIOC_ACQUIRE:
+		{
+			ret_val = agpioc_acquire_wrap(curr_priv, arg);
+		   	goto ioctl_out;
+		}
+	case AGPIOC_RELEASE:
+		{
+			ret_val = agpioc_release_wrap(curr_priv, arg);
+		   	goto ioctl_out;
+		}
+	case AGPIOC_SETUP:
+		{
+			ret_val = agpioc_setup_wrap(curr_priv, arg);
+		   	goto ioctl_out;
+		}
+	case AGPIOC_RESERVE:
+		{
+			ret_val = agpioc_reserve_wrap(curr_priv, arg);
+		   	goto ioctl_out;
+		}
+	case AGPIOC_PROTECT:
+		{
+			ret_val = agpioc_protect_wrap(curr_priv, arg);
+		   	goto ioctl_out;
+		}
+	case AGPIOC_ALLOCATE:
+		{
+			ret_val = agpioc_allocate_wrap(curr_priv, arg);
+		   	goto ioctl_out;
+		}
+	case AGPIOC_DEALLOCATE:
+		{
+			ret_val = agpioc_deallocate_wrap(curr_priv, arg);
+		   	goto ioctl_out;
+		}
+	case AGPIOC_BIND:
+		{
+			ret_val = agpioc_bind_wrap(curr_priv, arg);
+		   	goto ioctl_out;
+		}
+	case AGPIOC_UNBIND:
+		{
+			ret_val = agpioc_unbind_wrap(curr_priv, arg);
+		   	goto ioctl_out;
+		}
+	}
+   
+ioctl_out:
+	AGP_UNLOCK();
+	return ret_val;
+}
+
+static struct file_operations agp_fops =
+{
+	owner:		THIS_MODULE,
+	llseek:		no_llseek,
+	read:		agp_read,
+	write:		agp_write,
+	ioctl:		agp_ioctl,
+	mmap:		agp_mmap,
+	open:		agp_open,
+	release:	agp_release,
+};
+
+static struct miscdevice agp_miscdev =
+{
+	AGPGART_MINOR,
+	AGPGART_MODULE_NAME,
+	&agp_fops
+};
+
+int __init agp_frontend_initialize(void)
+{
+	memset(&agp_fe, 0, sizeof(struct agp_front_data));
+	AGP_LOCK_INIT();
+
+	if (misc_register(&agp_miscdev)) {
+		printk(KERN_ERR PFX "unable to get minor: %d\n", AGPGART_MINOR);
+		return -EIO;
+	}
+	return 0;
+}
+
+void __exit agp_frontend_cleanup(void)
+{
+	misc_deregister(&agp_miscdev);
+}
+
diff -Nru a/drivers/char/agp/hp.c b/drivers/char/agp/hp.c
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/drivers/char/agp/hp.c	Wed Jul 17 11:29:31 2002
@@ -0,0 +1,394 @@
+/*
+ * AGPGART module version 0.99
+ * Copyright (C) 1999 Jeff Hartmann
+ * Copyright (C) 1999 Precision Insight, Inc.
+ * Copyright (C) 1999 Xi Graphics, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, 
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE 
+ * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * TODO: 
+ * - Allocate more than order 0 pages to avoid too much linear map splitting.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/agp_backend.h>
+#include "agp.h"
+
+
+#ifndef log2
+#define log2(x)		ffz(~(x))
+#endif
+
+#define HP_ZX1_IOVA_BASE	GB(1UL)
+#define HP_ZX1_IOVA_SIZE	GB(1UL)
+#define HP_ZX1_GART_SIZE	(HP_ZX1_IOVA_SIZE / 2)
+#define HP_ZX1_SBA_IOMMU_COOKIE	0x0000badbadc0ffeeUL
+
+#define HP_ZX1_PDIR_VALID_BIT	0x8000000000000000UL
+#define HP_ZX1_IOVA_TO_PDIR(va)	((va - hp_private.iova_base) >> \
+					hp_private.io_tlb_shift)
+
+static struct aper_size_info_fixed hp_zx1_sizes[] =
+{
+	{0, 0, 0},		/* filled in by hp_zx1_fetch_size() */
+};
+
+static struct gatt_mask hp_zx1_masks[] =
+{
+	{mask: HP_ZX1_PDIR_VALID_BIT, type: 0}
+};
+
+static struct _hp_private {
+	struct pci_dev *ioc;
+	volatile u8 *registers;
+	u64 *io_pdir;		// PDIR for entire IOVA
+	u64 *gatt;		// PDIR just for GART (subset of above)
+	u64 gatt_entries;
+	u64 iova_base;
+	u64 gart_base;
+	u64 gart_size;
+	u64 io_pdir_size;
+	int io_pdir_owner;	// do we own it, or share it with sba_iommu?
+	int io_page_size;
+	int io_tlb_shift;
+	int io_tlb_ps;		// IOC ps config
+	int io_pages_per_kpage;
+} hp_private;
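+
+/*
+ * Illustrative sketch (hypothetical helper, not used by the driver):
+ * locate the I/O PDIR entry for a GART bus address.  Assuming the
+ * compile-time defaults above (1GB IOVA window at 1GB, upper 512MB used
+ * as GART) and 4KB I/O pages, the first GART page at 0x60000000 maps to
+ * PDIR entry (0x60000000 - 0x40000000) >> 12 = 0x20000.
+ */
+static inline u64 *hp_zx1_pdir_entry(unsigned long va)
+{
+	return &hp_private.io_pdir[HP_ZX1_IOVA_TO_PDIR(va)];
+}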
+
+static int __init hp_zx1_ioc_shared(void)
+{
+	struct _hp_private *hp = &hp_private;
+
+	printk(KERN_INFO PFX "HP ZX1 IOC: IOPDIR shared with sba_iommu\n");
+
+	/*
+	 * IOC already configured by sba_iommu module; just use
+	 * its setup.  We assume:
+	 * 	- IOVA space is 1GB in size
+	 * 	- first 512MB is IOMMU, second 512MB is GART
+	 */
+	hp->io_tlb_ps = INREG64(hp->registers, HP_ZX1_TCNFG);
+	switch (hp->io_tlb_ps) {
+		case 0: hp->io_tlb_shift = 12; break;
+		case 1: hp->io_tlb_shift = 13; break;
+		case 2: hp->io_tlb_shift = 14; break;
+		case 3: hp->io_tlb_shift = 16; break;
+		default:
+			printk(KERN_ERR PFX "Invalid IOTLB page size "
+			       "configuration 0x%x\n", hp->io_tlb_ps);
+			hp->gatt = 0;
+			hp->gatt_entries = 0;
+			return -ENODEV;
+	}
+	hp->io_page_size = 1 << hp->io_tlb_shift;
+	hp->io_pages_per_kpage = PAGE_SIZE / hp->io_page_size;
+
+	hp->iova_base = INREG64(hp->registers, HP_ZX1_IBASE) & ~0x1;
+	hp->gart_base = hp->iova_base + HP_ZX1_IOVA_SIZE - HP_ZX1_GART_SIZE;
+
+	hp->gart_size = HP_ZX1_GART_SIZE;
+	hp->gatt_entries = hp->gart_size / hp->io_page_size;
+
+	hp->io_pdir = phys_to_virt(INREG64(hp->registers, HP_ZX1_PDIR_BASE));
+	hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)];
+
+	if (hp->gatt[0] != HP_ZX1_SBA_IOMMU_COOKIE) {
+	    	hp->gatt = 0;
+		hp->gatt_entries = 0;
+		printk(KERN_ERR PFX "No reserved IO PDIR entry found; "
+		       "GART disabled\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static int __init hp_zx1_ioc_owner(u8 ioc_rev)
+{
+	struct _hp_private *hp = &hp_private;
+
+	printk(KERN_INFO PFX "HP ZX1 IOC: IOPDIR dedicated to GART\n");
+
+	/*
+	 * Select an IOV page size no larger than system page size.
+	 */
+	if (PAGE_SIZE >= KB(64)) {
+		hp->io_tlb_shift = 16;
+		hp->io_tlb_ps = 3;
+	} else if (PAGE_SIZE >= KB(16)) {
+		hp->io_tlb_shift = 14;
+		hp->io_tlb_ps = 2;
+	} else if (PAGE_SIZE >= KB(8)) {
+		hp->io_tlb_shift = 13;
+		hp->io_tlb_ps = 1;
+	} else {
+		hp->io_tlb_shift = 12;
+		hp->io_tlb_ps = 0;
+	}
+	hp->io_page_size = 1 << hp->io_tlb_shift;
+	hp->io_pages_per_kpage = PAGE_SIZE / hp->io_page_size;
+
+	hp->iova_base = HP_ZX1_IOVA_BASE;
+	hp->gart_size = HP_ZX1_GART_SIZE;
+	hp->gart_base = hp->iova_base + HP_ZX1_IOVA_SIZE - hp->gart_size;
+
+	hp->gatt_entries = hp->gart_size / hp->io_page_size;
+	hp->io_pdir_size = (HP_ZX1_IOVA_SIZE / hp->io_page_size) * sizeof(u64);
+
+	return 0;
+}
+
+static int __init hp_zx1_ioc_init(void)
+{
+	struct _hp_private *hp = &hp_private;
+	struct pci_dev *ioc;
+	int i;
+	u8 ioc_rev;
+
+	ioc = pci_find_device(PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_ZX1_IOC, NULL);
+	if (!ioc) {
+		printk(KERN_ERR PFX "Detected HP ZX1 AGP bridge but no IOC\n");
+		return -ENODEV;
+	}
+	hp->ioc = ioc;
+
+	pci_read_config_byte(ioc, PCI_REVISION_ID, &ioc_rev);
+
+	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+		if (pci_resource_flags(ioc, i) == IORESOURCE_MEM) {
+			hp->registers = (u8 *) ioremap(pci_resource_start(ioc,
+									    i),
+						    pci_resource_len(ioc, i));
+			break;
+		}
+	}
+	if (!hp->registers) {
+		printk(KERN_ERR PFX "Detected HP ZX1 AGP bridge but no CSRs\n");
+
+		return -ENODEV;
+	}
+
+	/*
+	 * If the IOTLB is currently disabled, we can take it over.
+	 * Otherwise, we have to share with sba_iommu.
+	 */
+	hp->io_pdir_owner = (INREG64(hp->registers, HP_ZX1_IBASE) & 0x1) == 0;
+
+	if (hp->io_pdir_owner)
+		return hp_zx1_ioc_owner(ioc_rev);
+
+	return hp_zx1_ioc_shared();
+}
+
+static int hp_zx1_fetch_size(void)
+{
+	int size;
+
+	size = hp_private.gart_size / MB(1);
+	hp_zx1_sizes[0].size = size;
+	agp_bridge.current_size = (void *) &hp_zx1_sizes[0];
+	return size;
+}
+
+static int hp_zx1_configure(void)
+{
+	struct _hp_private *hp = &hp_private;
+
+	agp_bridge.gart_bus_addr = hp->gart_base;
+	agp_bridge.capndx = pci_find_capability(agp_bridge.dev, PCI_CAP_ID_AGP);
+	pci_read_config_dword(agp_bridge.dev,
+		agp_bridge.capndx + PCI_AGP_STATUS, &agp_bridge.mode);
+
+	if (hp->io_pdir_owner) {
+		OUTREG64(hp->registers, HP_ZX1_PDIR_BASE,
+			virt_to_phys(hp->io_pdir));
+		OUTREG64(hp->registers, HP_ZX1_TCNFG, hp->io_tlb_ps);
+		OUTREG64(hp->registers, HP_ZX1_IMASK, ~(HP_ZX1_IOVA_SIZE - 1));
+		OUTREG64(hp->registers, HP_ZX1_IBASE, hp->iova_base | 0x1);
+		OUTREG64(hp->registers, HP_ZX1_PCOM,
+			hp->iova_base | log2(HP_ZX1_IOVA_SIZE));
+		INREG64(hp->registers, HP_ZX1_PCOM);
+	}
+
+	return 0;
+}
+
+static void hp_zx1_cleanup(void)
+{
+	struct _hp_private *hp = &hp_private;
+
+	if (hp->io_pdir_owner)
+		OUTREG64(hp->registers, HP_ZX1_IBASE, 0);
+	iounmap((void *) hp->registers);
+}
+
+static void hp_zx1_tlbflush(agp_memory * mem)
+{
+	struct _hp_private *hp = &hp_private;
+
+	OUTREG64(hp->registers, HP_ZX1_PCOM, 
+		hp->gart_base | log2(hp->gart_size));
+	INREG64(hp->registers, HP_ZX1_PCOM);
+}
+
+static int hp_zx1_create_gatt_table(void)
+{
+	struct _hp_private *hp = &hp_private;
+	int i;
+
+	if (hp->io_pdir_owner) {
+		hp->io_pdir = (u64 *) __get_free_pages(GFP_KERNEL,
+						get_order(hp->io_pdir_size));
+		if (!hp->io_pdir) {
+			printk(KERN_ERR PFX "Couldn't allocate contiguous "
+				"memory for I/O PDIR\n");
+			hp->gatt = 0;
+			hp->gatt_entries = 0;
+			return -ENOMEM;
+		}
+		memset(hp->io_pdir, 0, hp->io_pdir_size);
+
+		hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)];
+	}
+
+	for (i = 0; i < hp->gatt_entries; i++) {
+		hp->gatt[i] = (unsigned long) agp_bridge.scratch_page;
+	}
+
+	return 0;
+}
+
+static int hp_zx1_free_gatt_table(void)
+{
+	struct _hp_private *hp = &hp_private;
+	
+	if (hp->io_pdir_owner)
+		free_pages((unsigned long) hp->io_pdir,
+			    get_order(hp->io_pdir_size));
+	else
+		hp->gatt[0] = HP_ZX1_SBA_IOMMU_COOKIE;
+	return 0;
+}
+
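+/*
+ * Note: each kernel page spans io_pages_per_kpage consecutive GATT
+ * entries.  For example, with 16KB kernel pages and 4KB I/O pages a
+ * single agp_memory page fills four entries, which is what the nested
+ * loop below does.
+ */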
+static int hp_zx1_insert_memory(agp_memory * mem, off_t pg_start, int type)
+{
+	struct _hp_private *hp = &hp_private;
+	int i, k;
+	off_t j, io_pg_start;
+	int io_pg_count;
+
+	if (type != 0 || mem->type != 0) {
+		return -EINVAL;
+	}
+
+	io_pg_start = hp->io_pages_per_kpage * pg_start;
+	io_pg_count = hp->io_pages_per_kpage * mem->page_count;
+	if ((io_pg_start + io_pg_count) > hp->gatt_entries) {
+		return -EINVAL;
+	}
+
+	j = io_pg_start;
+	while (j < (io_pg_start + io_pg_count)) {
+		if (hp->gatt[j]) {
+			return -EBUSY;
+		}
+		j++;
+	}
+
+	if (mem->is_flushed == FALSE) {
+		CACHE_FLUSH();
+		mem->is_flushed = TRUE;
+	}
+
+	for (i = 0, j = io_pg_start; i < mem->page_count; i++) {
+		unsigned long paddr;
+
+		paddr = mem->memory[i];
+		for (k = 0;
+		     k < hp->io_pages_per_kpage;
+		     k++, j++, paddr += hp->io_page_size) {
+			hp->gatt[j] = agp_bridge.mask_memory(paddr, type);
+		}
+	}
+
+	agp_bridge.tlb_flush(mem);
+	return 0;
+}
+
+static int hp_zx1_remove_memory(agp_memory * mem, off_t pg_start, int type)
+{
+	struct _hp_private *hp = &hp_private;
+	int i, io_pg_start, io_pg_count;
+
+	if (type != 0 || mem->type != 0) {
+		return -EINVAL;
+	}
+
+	io_pg_start = hp->io_pages_per_kpage * pg_start;
+	io_pg_count = hp->io_pages_per_kpage * mem->page_count;
+	for (i = io_pg_start; i < io_pg_count + io_pg_start; i++) {
+		hp->gatt[i] = agp_bridge.scratch_page;
+	}
+
+	agp_bridge.tlb_flush(mem);
+	return 0;
+}
+
+static unsigned long hp_zx1_mask_memory(unsigned long addr, int type)
+{
+	return HP_ZX1_PDIR_VALID_BIT | addr;
+}
+
+static unsigned long hp_zx1_unmask_memory(unsigned long addr)
+{
+	return addr & ~(HP_ZX1_PDIR_VALID_BIT);
+}
+
+int __init hp_zx1_setup (struct pci_dev *pdev)
+{
+	agp_bridge.masks = hp_zx1_masks;
+	agp_bridge.num_of_masks = 1;
+	agp_bridge.dev_private_data = NULL;
+	agp_bridge.size_type = FIXED_APER_SIZE;
+	agp_bridge.needs_scratch_page = FALSE;
+	agp_bridge.configure = hp_zx1_configure;
+	agp_bridge.fetch_size = hp_zx1_fetch_size;
+	agp_bridge.cleanup = hp_zx1_cleanup;
+	agp_bridge.tlb_flush = hp_zx1_tlbflush;
+	agp_bridge.mask_memory = hp_zx1_mask_memory;
+	agp_bridge.unmask_memory = hp_zx1_unmask_memory;
+	agp_bridge.agp_enable = agp_generic_agp_enable;
+	agp_bridge.cache_flush = global_cache_flush;
+	agp_bridge.create_gatt_table = hp_zx1_create_gatt_table;
+	agp_bridge.free_gatt_table = hp_zx1_free_gatt_table;
+	agp_bridge.insert_memory = hp_zx1_insert_memory;
+	agp_bridge.remove_memory = hp_zx1_remove_memory;
+	agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
+	agp_bridge.free_by_type = agp_generic_free_by_type;
+	agp_bridge.agp_alloc_page = agp_generic_alloc_page;
+	agp_bridge.agp_destroy_page = agp_generic_destroy_page;
+	agp_bridge.cant_use_aperture = 1;
+
+	return hp_zx1_ioc_init();
+
+	(void) pdev; /* unused */
+}
+
diff -Nru a/drivers/char/agp/i460.c b/drivers/char/agp/i460.c
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/drivers/char/agp/i460.c	Wed Jul 17 11:29:31 2002
@@ -0,0 +1,595 @@
+/*
+ * AGPGART module version 0.99
+ * Copyright (C) 1999 Jeff Hartmann
+ * Copyright (C) 1999 Precision Insight, Inc.
+ * Copyright (C) 1999 Xi Graphics, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, 
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE 
+ * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * TODO: 
+ * - Allocate more than order 0 pages to avoid too much linear map splitting.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/agp_backend.h>
+#include "agp.h"
+
+/* BIOS configures the chipset so that one of two apbase registers is used */
+static u8 intel_i460_dynamic_apbase = 0x10;
+
+/* 460 supports multiple GART page sizes, so GART pageshift is dynamic */
+static u8 intel_i460_pageshift = 12;
+static u32 intel_i460_pagesize;
+
+/* Keep track of which is larger, chipset or kernel page size. */
+static u32 intel_i460_cpk = 1;
+
+/* Structure for tracking partial use of 4MB GART pages */
+static u32 **i460_pg_detail = NULL;
+static u32 *i460_pg_count = NULL;
+
+#define I460_CPAGES_PER_KPAGE (PAGE_SIZE >> intel_i460_pageshift)
+#define I460_KPAGES_PER_CPAGE ((1 << intel_i460_pageshift) >> PAGE_SHIFT)
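+/*
+ * For example, assuming 4KB kernel pages: with the 4MB GART page size
+ * (pageshift 22) I460_KPAGES_PER_CPAGE evaluates to 1024 and
+ * I460_CPAGES_PER_KPAGE to 0, while with 4KB GART pages both are 1.
+ */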
+
+#define I460_SRAM_IO_DISABLE		(1 << 4)
+#define I460_BAPBASE_ENABLE		(1 << 3)
+#define I460_AGPSIZ_MASK		0x7
+#define I460_4M_PS			(1 << 1)
+
+#define log2(x)				ffz(~(x))
+
+static inline void intel_i460_read_back (volatile u32 *entry)
+{
+	/*
+	 * The 460 spec says we have to read the last location written to
+	 * make sure that all writes have taken effect
+	 */
+	*entry;
+}
+
+static int intel_i460_fetch_size(void)
+{
+	int i;
+	u8 temp;
+	struct aper_size_info_8 *values;
+
+	/* Determine the GART page size */
+	pci_read_config_byte(agp_bridge.dev, INTEL_I460_GXBCTL, &temp);
+	intel_i460_pageshift = (temp & I460_4M_PS) ? 22 : 12;
+	intel_i460_pagesize = 1UL << intel_i460_pageshift;
+
+	values = A_SIZE_8(agp_bridge.aperture_sizes);
+
+	pci_read_config_byte(agp_bridge.dev, INTEL_I460_AGPSIZ, &temp);
+
+	/* Exit now if the IO drivers for the GART SRAMS are turned off */
+	if (temp & I460_SRAM_IO_DISABLE) {
+		printk(KERN_ERR PFX "GART SRAMS disabled on 460GX chipset\n");
+		printk(KERN_ERR PFX "AGPGART operation not possible\n");
+		return 0;
+	}
+
+	/* Make sure we don't try to create a 2^23-entry GATT */
+	if ((intel_i460_pageshift == 0) && ((temp & I460_AGPSIZ_MASK) == 4)) {
+		printk(KERN_ERR PFX "We can't have a 32GB aperture with 4KB GART pages\n");
+		return 0;
+	}
+
+	/* Determine the proper APBASE register */
+	if (temp & I460_BAPBASE_ENABLE)
+		intel_i460_dynamic_apbase = INTEL_I460_BAPBASE;
+	else
+		intel_i460_dynamic_apbase = INTEL_I460_APBASE;
+
+	for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
+		/*
+		 * Dynamically calculate the proper num_entries and page_order values for
+		 * the defined aperture sizes. Take care not to shift off the end of
+		 * values[i].size.
+		 */
+		values[i].num_entries = (values[i].size << 8) >> (intel_i460_pageshift - 12);
+		values[i].page_order = log2((sizeof(u32)*values[i].num_entries) >> PAGE_SHIFT);
+	}
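+
+	/*
+	 * Worked example of the calculation above (aperture sizes are given
+	 * in MB): a 256MB aperture with 4KB GART pages gives 256 << 8 == 65536
+	 * entries, while the same aperture with 4MB GART pages gives
+	 * 65536 >> 10 == 64 entries.
+	 */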
+
+	for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
+		/* Neglect control bits when matching up size_value */
+		if ((temp & I460_AGPSIZ_MASK) == values[i].size_value) {
+			agp_bridge.previous_size = agp_bridge.current_size = (void *) (values + i);
+			agp_bridge.aperture_size_idx = i;
+			return values[i].size;
+		}
+	}
+
+	return 0;
+}
+
+/* There isn't anything to do here since 460 has no GART TLB. */
+static void intel_i460_tlb_flush(agp_memory * mem)
+{
+	return;
+}
+
+/*
+ * This utility function is needed to prevent corruption of the control bits
+ * which are stored along with the aperture size in 460's AGPSIZ register
+ */
+static void intel_i460_write_agpsiz(u8 size_value)
+{
+	u8 temp;
+
+	pci_read_config_byte(agp_bridge.dev, INTEL_I460_AGPSIZ, &temp);
+	pci_write_config_byte(agp_bridge.dev, INTEL_I460_AGPSIZ,
+			      ((temp & ~I460_AGPSIZ_MASK) | size_value));
+}
+
+static void intel_i460_cleanup(void)
+{
+	struct aper_size_info_8 *previous_size;
+
+	previous_size = A_SIZE_8(agp_bridge.previous_size);
+	intel_i460_write_agpsiz(previous_size->size_value);
+
+	if (intel_i460_cpk == 0) {
+		vfree(i460_pg_detail);
+		vfree(i460_pg_count);
+	}
+}
+
+
+/* Control bits for Out-Of-GART coherency and Burst Write Combining */
+#define I460_GXBCTL_OOG		(1UL << 0)
+#define I460_GXBCTL_BWC		(1UL << 2)
+
+static int intel_i460_configure(void)
+{
+	union {
+		u32 small[2];
+		u64 large;
+	} temp;
+	u8 scratch;
+	int i;
+
+	struct aper_size_info_8 *current_size;
+
+	temp.large = 0;
+
+	current_size = A_SIZE_8(agp_bridge.current_size);
+	intel_i460_write_agpsiz(current_size->size_value);
+
+	/*
+	 * Do the necessary rigmarole to read all eight bytes of APBASE.
+	 * This has to be done since the AGP aperture can be above 4GB on
+	 * 460 based systems.
+	 */
+	pci_read_config_dword(agp_bridge.dev, intel_i460_dynamic_apbase, &(temp.small[0]));
+	pci_read_config_dword(agp_bridge.dev, intel_i460_dynamic_apbase + 4, &(temp.small[1]));
+
+	/* Clear BAR control bits */
+	agp_bridge.gart_bus_addr = temp.large & ~((1UL << 3) - 1);
+
+	pci_read_config_byte(agp_bridge.dev, INTEL_I460_GXBCTL, &scratch);
+	pci_write_config_byte(agp_bridge.dev, INTEL_I460_GXBCTL,
+			      (scratch & 0x02) | I460_GXBCTL_OOG | I460_GXBCTL_BWC);
+
+	/*
+	 * Initialize partial allocation trackers if a GART page is bigger than
+	 * a kernel page.
+	 */
+	if (I460_CPAGES_PER_KPAGE >= 1) {
+		intel_i460_cpk = 1;
+	} else {
+		intel_i460_cpk = 0;
+
+		i460_pg_detail = vmalloc(sizeof(*i460_pg_detail) * current_size->num_entries);
+		i460_pg_count = vmalloc(sizeof(*i460_pg_count) * current_size->num_entries);
+
+		for (i = 0; i < current_size->num_entries; i++) {
+			i460_pg_count[i] = 0;
+			i460_pg_detail[i] = NULL;
+		}
+	}
+	return 0;
+}
+
+static int intel_i460_create_gatt_table(void)
+{
+	char *table;
+	int i;
+	int page_order;
+	int num_entries;
+	void *temp;
+
+	/*
+	 * Load up the fixed address of the GART SRAMS which hold our
+	 * GATT table.
+	 */
+	table = (char *) __va(INTEL_I460_ATTBASE);
+
+	temp = agp_bridge.current_size;
+	page_order = A_SIZE_8(temp)->page_order;
+	num_entries = A_SIZE_8(temp)->num_entries;
+
+	agp_bridge.gatt_table_real = (u32 *) table;
+	agp_bridge.gatt_table = ioremap_nocache(virt_to_phys(table),
+						(PAGE_SIZE * (1 << page_order)));
+	agp_bridge.gatt_bus_addr = virt_to_phys(agp_bridge.gatt_table_real);
+
+	for (i = 0; i < num_entries; i++) {
+		agp_bridge.gatt_table[i] = 0;
+	}
+
+	intel_i460_read_back(agp_bridge.gatt_table + i - 1);
+	return 0;
+}
+
+static int intel_i460_free_gatt_table(void)
+{
+	int num_entries;
+	int i;
+	void *temp;
+
+	temp = agp_bridge.current_size;
+
+	num_entries = A_SIZE_8(temp)->num_entries;
+
+	for (i = 0; i < num_entries; i++) {
+		agp_bridge.gatt_table[i] = 0;
+	}
+
+	intel_i460_read_back(agp_bridge.gatt_table + i - 1);
+
+	iounmap(agp_bridge.gatt_table);
+	return 0;
+}
+
+/* These functions are called when PAGE_SIZE exceeds the GART page size */
+
+static int intel_i460_insert_memory_cpk(agp_memory * mem, off_t pg_start, int type)
+{
+	int i, j, k, num_entries;
+	void *temp;
+	unsigned long paddr;
+
+	/*
+	 * The rest of the kernel will compute page offsets in terms of
+	 * PAGE_SIZE.
+	 */
+	pg_start = I460_CPAGES_PER_KPAGE * pg_start;
+
+	temp = agp_bridge.current_size;
+	num_entries = A_SIZE_8(temp)->num_entries;
+
+	if ((pg_start + I460_CPAGES_PER_KPAGE * mem->page_count) > num_entries) {
+		printk(KERN_ERR PFX "Looks like we're out of AGP memory\n");
+		return -EINVAL;
+	}
+
+	j = pg_start;
+	while (j < (pg_start + I460_CPAGES_PER_KPAGE * mem->page_count)) {
+		if (!PGE_EMPTY(agp_bridge.gatt_table[j])) {
+			return -EBUSY;
+		}
+		j++;
+	}
+
+#if 0
+	/* not necessary since 460 GART is operated in coherent mode... */
+	if (mem->is_flushed == FALSE) {
+		CACHE_FLUSH();
+		mem->is_flushed = TRUE;
+	}
+#endif
+
+	for (i = 0, j = pg_start; i < mem->page_count; i++) {
+		paddr = mem->memory[i];
+		for (k = 0; k < I460_CPAGES_PER_KPAGE; k++, j++, paddr += intel_i460_pagesize)
+			agp_bridge.gatt_table[j] = (u32) agp_bridge.mask_memory(paddr, mem->type);
+	}
+
+	intel_i460_read_back(agp_bridge.gatt_table + j - 1);
+	return 0;
+}
+
+static int intel_i460_remove_memory_cpk(agp_memory * mem, off_t pg_start, int type)
+{
+	int i;
+
+	pg_start = I460_CPAGES_PER_KPAGE * pg_start;
+
+	for (i = pg_start; i < (pg_start + I460_CPAGES_PER_KPAGE * mem->page_count); i++)
+		agp_bridge.gatt_table[i] = 0;
+
+	intel_i460_read_back(agp_bridge.gatt_table + i - 1);
+	return 0;
+}
+
+/*
+ * These functions are called when the GART page size exceeds PAGE_SIZE.
+ *
+ * This situation is interesting since AGP memory allocations that are
+ * smaller than a single GART page are possible.  The structures i460_pg_count
+ * and i460_pg_detail track partial allocation of the large GART pages to
+ * work around this issue.
+ *
+ * i460_pg_count[pg_num] tracks the number of kernel pages in use within
+ * GART page pg_num.  i460_pg_detail[pg_num] is an array containing a
+ * pseudo-GART entry for each of the aforementioned kernel pages.  The whole
+ * of i460_pg_detail is equivalent to a giant GATT with page size equal to
+ * that of the kernel.
+ */
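+
+/*
+ * As a concrete illustration (assuming 4KB kernel pages and 4MB GART
+ * pages): i460_pg_detail[pg] then holds 1024 pseudo-GATT entries and
+ * i460_pg_count[pg] counts how many of them are currently in use.
+ */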
+
+static void *intel_i460_alloc_large_page(int pg_num)
+{
+	int i;
+	void *bp, *bp_end;
+	struct page *page;
+
+	i460_pg_detail[pg_num] = (void *) vmalloc(sizeof(u32) * I460_KPAGES_PER_CPAGE);
+	if (i460_pg_detail[pg_num] == NULL) {
+		printk(KERN_ERR PFX "Out of memory, we're in trouble...\n");
+		return NULL;
+	}
+
+	for (i = 0; i < I460_KPAGES_PER_CPAGE; i++)
+		i460_pg_detail[pg_num][i] = 0;
+
+	bp = (void *) __get_free_pages(GFP_KERNEL, intel_i460_pageshift - PAGE_SHIFT);
+	if (bp == NULL) {
+		printk(KERN_ERR PFX "Couldn't alloc 4M GART page...\n");
+		return NULL;
+	}
+
+	bp_end = bp + ((PAGE_SIZE * (1 << (intel_i460_pageshift - PAGE_SHIFT))) - 1);
+
+	for (page = virt_to_page(bp); page <= virt_to_page(bp_end); page++) {
+		atomic_inc(&agp_bridge.current_memory_agp);
+	}
+	return bp;
+}
+
+static void intel_i460_free_large_page(int pg_num, unsigned long addr)
+{
+	struct page *page;
+	void *bp, *bp_end;
+
+	bp = (void *) __va(addr);
+	bp_end = bp + (PAGE_SIZE * (1 << (intel_i460_pageshift - PAGE_SHIFT)));
+
+	vfree(i460_pg_detail[pg_num]);
+	i460_pg_detail[pg_num] = NULL;
+
+	for (page = virt_to_page(bp); page < virt_to_page(bp_end); page++) {
+		atomic_dec(&agp_bridge.current_memory_agp);
+	}
+
+	free_pages((unsigned long) bp, intel_i460_pageshift - PAGE_SHIFT);
+}
+
+static int intel_i460_insert_memory_kpc(agp_memory * mem, off_t pg_start, int type)
+{
+	int i, pg, start_pg, end_pg, start_offset, end_offset, idx;
+	int num_entries;
+	void *temp;
+	unsigned long paddr;
+
+	temp = agp_bridge.current_size;
+	num_entries = A_SIZE_8(temp)->num_entries;
+
+	/* Figure out what pg_start means in terms of our large GART pages */
+	start_pg 	= pg_start / I460_KPAGES_PER_CPAGE;
+	start_offset 	= pg_start % I460_KPAGES_PER_CPAGE;
+	end_pg 		= (pg_start + mem->page_count - 1) / I460_KPAGES_PER_CPAGE;
+	end_offset 	= (pg_start + mem->page_count - 1) % I460_KPAGES_PER_CPAGE;
+
+	if (end_pg > num_entries) {
+		printk(KERN_ERR PFX "Looks like we're out of AGP memory\n");
+		return -EINVAL;
+	}
+
+	/* Check if the requested region of the aperture is free */
+	for (pg = start_pg; pg <= end_pg; pg++) {
+		/* Allocate new GART pages if necessary */
+		if (i460_pg_detail[pg] == NULL) {
+			temp = intel_i460_alloc_large_page(pg);
+			if (temp == NULL)
+				return -ENOMEM;
+			agp_bridge.gatt_table[pg] = agp_bridge.mask_memory((unsigned long) temp,
+									   0);
+			intel_i460_read_back(agp_bridge.gatt_table + pg);
+		}
+
+		for (idx = ((pg == start_pg) ? start_offset : 0);
+		     idx < ((pg == end_pg) ? (end_offset + 1) : I460_KPAGES_PER_CPAGE);
+		     idx++)
+		{
+			if (i460_pg_detail[pg][idx] != 0)
+				return -EBUSY;
+		}
+	}
+
+#if 0
+	/* not necessary since 460 GART is operated in coherent mode... */
+	if (mem->is_flushed == FALSE) {
+		CACHE_FLUSH();
+		mem->is_flushed = TRUE;
+	}
+#endif
+
+	for (pg = start_pg, i = 0; pg <= end_pg; pg++) {
+		paddr = agp_bridge.unmask_memory(agp_bridge.gatt_table[pg]);
+		for (idx = ((pg == start_pg) ? start_offset : 0);
+		     idx < ((pg == end_pg) ? (end_offset + 1) : I460_KPAGES_PER_CPAGE);
+		     idx++, i++)
+		{
+			mem->memory[i] = paddr + (idx * PAGE_SIZE);
+			i460_pg_detail[pg][idx] = agp_bridge.mask_memory(mem->memory[i],
+									 mem->type);
+			i460_pg_count[pg]++;
+		}
+	}
+
+	return 0;
+}
+
+static int intel_i460_remove_memory_kpc(agp_memory * mem, off_t pg_start, int type)
+{
+	int i, pg, start_pg, end_pg, start_offset, end_offset, idx;
+	int num_entries;
+	void *temp;
+	unsigned long paddr;
+
+	temp = agp_bridge.current_size;
+	num_entries = A_SIZE_8(temp)->num_entries;
+
+	/* Figure out what pg_start means in terms of our large GART pages */
+	start_pg 	= pg_start / I460_KPAGES_PER_CPAGE;
+	start_offset 	= pg_start % I460_KPAGES_PER_CPAGE;
+	end_pg 		= (pg_start + mem->page_count - 1) / I460_KPAGES_PER_CPAGE;
+	end_offset 	= (pg_start + mem->page_count - 1) % I460_KPAGES_PER_CPAGE;
+
+	for (i = 0, pg = start_pg; pg <= end_pg; pg++) {
+		for (idx = ((pg == start_pg) ? start_offset : 0);
+		    idx < ((pg == end_pg) ? (end_offset + 1) : I460_KPAGES_PER_CPAGE);
+		    idx++, i++)
+		{
+			mem->memory[i] = 0;
+			i460_pg_detail[pg][idx] = 0;
+			i460_pg_count[pg]--;
+		}
+
+		/* Free GART pages if they are unused */
+		if (i460_pg_count[pg] == 0) {
+			paddr = agp_bridge.unmask_memory(agp_bridge.gatt_table[pg]);
+			agp_bridge.gatt_table[pg] = agp_bridge.scratch_page;
+			intel_i460_read_back(agp_bridge.gatt_table + pg);
+			intel_i460_free_large_page(pg, paddr);
+		}
+	}
+	return 0;
+}
+
+/* Dummy routines to call the appropriate {cpk,kpc} function */
+
+static int intel_i460_insert_memory(agp_memory * mem, off_t pg_start, int type)
+{
+	if (intel_i460_cpk)
+		return intel_i460_insert_memory_cpk(mem, pg_start, type);
+	else
+		return intel_i460_insert_memory_kpc(mem, pg_start, type);
+}
+
+static int intel_i460_remove_memory(agp_memory * mem, off_t pg_start, int type)
+{
+	if (intel_i460_cpk)
+		return intel_i460_remove_memory_cpk(mem, pg_start, type);
+	else
+		return intel_i460_remove_memory_kpc(mem, pg_start, type);
+}
+
+/*
+ * If the kernel page size is smaller than the chipset page size, we don't
+ * want to allocate memory until we know where it is to be bound in the
+ * aperture (a multi-kernel-page alloc might fit inside of an already
+ * allocated GART page).  Consequently, don't allocate or free anything
+ * if i460_cpk (meaning chipset pages per kernel page) isn't set.
+ *
+ * Let's just hope nobody counts on the allocated AGP memory being there
+ * before bind time (I don't think current drivers do)...
+ */
+static void * intel_i460_alloc_page(void)
+{
+	if (intel_i460_cpk)
+		return agp_generic_alloc_page();
+
+	/* Returning NULL would cause problems */
+	/* AK: really dubious code. */
+	return (void *)~0UL;
+}
+
+static void intel_i460_destroy_page(void *page)
+{
+	if (intel_i460_cpk)
+		agp_generic_destroy_page(page);
+}
+
+static struct gatt_mask intel_i460_masks[] =
+{
+	{
+	  mask: INTEL_I460_GATT_VALID | INTEL_I460_GATT_COHERENT,
+	  type: 0
+	}
+};
+
+static unsigned long intel_i460_mask_memory(unsigned long addr, int type)
+{
+	/* Make sure the returned address is a valid GATT entry */
+	return (agp_bridge.masks[0].mask
+		| (((addr & ~((1 << intel_i460_pageshift) - 1)) & 0xffffff000) >> 12));
+}
+
+static unsigned long intel_i460_unmask_memory(unsigned long addr)
+{
+	/* Turn a GATT entry into a physical address */
+	return ((addr & 0xffffff) << 12);
+}
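+
+/*
+ * Illustration of the entry format above: with 4KB GART pages, a physical
+ * address such as 0x12345000 is stored as (masks[0].mask | 0x12345), and
+ * unmasking shifts the low 24 bits back up by 12 to recover the address.
+ */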
+
+static struct aper_size_info_8 intel_i460_sizes[3] =
+{
+	/*
+	 * The 32GB aperture is only available with a 4M GART page size.
+	 * Due to the dynamic GART page size, we can't figure out page_order
+	 * or num_entries until runtime.
+	 */
+	{32768, 0, 0, 4},
+	{1024, 0, 0, 2},
+	{256, 0, 0, 1}
+};
+
+int __init intel_i460_setup (struct pci_dev *pdev __attribute__((unused)))
+{
+	agp_bridge.masks = intel_i460_masks;
+	agp_bridge.aperture_sizes = (void *) intel_i460_sizes;
+	agp_bridge.size_type = U8_APER_SIZE;
+	agp_bridge.num_aperture_sizes = 3;
+	agp_bridge.dev_private_data = NULL;
+	agp_bridge.needs_scratch_page = FALSE;
+	agp_bridge.configure = intel_i460_configure;
+	agp_bridge.fetch_size = intel_i460_fetch_size;
+	agp_bridge.cleanup = intel_i460_cleanup;
+	agp_bridge.tlb_flush = intel_i460_tlb_flush;
+	agp_bridge.mask_memory = intel_i460_mask_memory;
+	agp_bridge.unmask_memory = intel_i460_unmask_memory;
+	agp_bridge.agp_enable = agp_generic_agp_enable;
+	agp_bridge.cache_flush = global_cache_flush;
+	agp_bridge.create_gatt_table = intel_i460_create_gatt_table;
+	agp_bridge.free_gatt_table = intel_i460_free_gatt_table;
+	agp_bridge.insert_memory = intel_i460_insert_memory;
+	agp_bridge.remove_memory = intel_i460_remove_memory;
+	agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
+	agp_bridge.free_by_type = agp_generic_free_by_type;
+	agp_bridge.agp_alloc_page = intel_i460_alloc_page;
+	agp_bridge.agp_destroy_page = intel_i460_destroy_page;
+	agp_bridge.suspend = agp_generic_suspend;
+	agp_bridge.resume = agp_generic_resume;
+	agp_bridge.cant_use_aperture = 1;
+	return 0;
+}
+
diff -Nru a/drivers/char/agp/i810.c b/drivers/char/agp/i810.c
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/drivers/char/agp/i810.c	Wed Jul 17 11:29:31 2002
@@ -0,0 +1,594 @@
+/*
+ * AGPGART module version 0.99
+ * Copyright (C) 1999 Jeff Hartmann
+ * Copyright (C) 1999 Precision Insight, Inc.
+ * Copyright (C) 1999 Xi Graphics, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, 
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE 
+ * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * TODO: 
+ * - Allocate more than order 0 pages to avoid too much linear map splitting.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/agp_backend.h>
+#include "agp.h"
+
+static struct aper_size_info_fixed intel_i810_sizes[] =
+{
+	{64, 16384, 4},
+     /* The 32M mode still requires a 64k gatt */
+	{32, 8192, 4}
+};
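+
+/*
+ * Rough arithmetic behind the table above (a sketch of the assumed
+ * {size, num_entries, page_order} layout): 64MB / 4KB == 16384 GTT
+ * entries, and 16384 entries * 4 bytes == 64KB, hence the order-4 (64KB)
+ * GATT used by both modes.
+ */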
+
+#define AGP_DCACHE_MEMORY 1
+#define AGP_PHYS_MEMORY   2
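+/*
+ * AGP_DCACHE_MEMORY refers to the i810's dedicated/local video RAM (see
+ * intel_i810_alloc_by_type below); AGP_PHYS_MEMORY is a single page whose
+ * physical address is handed directly to hardware, e.g. for the cursor.
+ */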
+
+static struct gatt_mask intel_i810_masks[] =
+{
+	{mask: I810_PTE_VALID, type: 0},
+	{mask: (I810_PTE_VALID | I810_PTE_LOCAL), type: AGP_DCACHE_MEMORY},
+	{mask: I810_PTE_VALID, type: 0}
+};
+
+static struct _intel_i810_private {
+	struct pci_dev *i810_dev;	/* device one */
+	volatile u8 *registers;
+	int num_dcache_entries;
+} intel_i810_private;
+
+static int intel_i810_fetch_size(void)
+{
+	u32 smram_miscc;
+	struct aper_size_info_fixed *values;
+
+	pci_read_config_dword(agp_bridge.dev, I810_SMRAM_MISCC, &smram_miscc);
+	values = A_SIZE_FIX(agp_bridge.aperture_sizes);
+
+	if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
+		printk(KERN_WARNING PFX "i810 is disabled\n");
+		return 0;
+	}
+	if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
+		agp_bridge.previous_size =
+		    agp_bridge.current_size = (void *) (values + 1);
+		agp_bridge.aperture_size_idx = 1;
+		return values[1].size;
+	} else {
+		agp_bridge.previous_size =
+		    agp_bridge.current_size = (void *) (values);
+		agp_bridge.aperture_size_idx = 0;
+		return values[0].size;
+	}
+
+	return 0;
+}
+
+static int intel_i810_configure(void)
+{
+	struct aper_size_info_fixed *current_size;
+	u32 temp;
+	int i;
+
+	current_size = A_SIZE_FIX(agp_bridge.current_size);
+
+	pci_read_config_dword(intel_i810_private.i810_dev, I810_MMADDR, &temp);
+	temp &= 0xfff80000;
+
+	intel_i810_private.registers =
+	    (volatile u8 *) ioremap(temp, 128 * 4096);
+
+	if ((INREG32(intel_i810_private.registers, I810_DRAM_CTL)
+	     & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
+		/* This will need to be dynamically assigned */
+		printk(KERN_INFO PFX "detected 4MB dedicated video ram.\n");
+		intel_i810_private.num_dcache_entries = 1024;
+	}
+	pci_read_config_dword(intel_i810_private.i810_dev, I810_GMADDR, &temp);
+	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+	OUTREG32(intel_i810_private.registers, I810_PGETBL_CTL,
+		 agp_bridge.gatt_bus_addr | I810_PGETBL_ENABLED);
+	CACHE_FLUSH();
+
+	if (agp_bridge.needs_scratch_page == TRUE) {
+		for (i = 0; i < current_size->num_entries; i++) {
+			OUTREG32(intel_i810_private.registers,
+				 I810_PTE_BASE + (i * 4),
+				 agp_bridge.scratch_page);
+		}
+	}
+	return 0;
+}
+
+static void intel_i810_cleanup(void)
+{
+	OUTREG32(intel_i810_private.registers, I810_PGETBL_CTL, 0);
+	iounmap((void *) intel_i810_private.registers);
+}
+
+static void intel_i810_tlbflush(agp_memory * mem)
+{
+	return;
+}
+
+static void intel_i810_agp_enable(u32 mode)
+{
+	return;
+}
+
+static int intel_i810_insert_entries(agp_memory * mem, off_t pg_start,
+				     int type)
+{
+	int i, j, num_entries;
+	void *temp;
+
+	temp = agp_bridge.current_size;
+	num_entries = A_SIZE_FIX(temp)->num_entries;
+
+	if ((pg_start + mem->page_count) > num_entries) {
+		return -EINVAL;
+	}
+	for (j = pg_start; j < (pg_start + mem->page_count); j++) {
+		if (!PGE_EMPTY(agp_bridge.gatt_table[j])) {
+			return -EBUSY;
+		}
+	}
+
+	if (type != 0 || mem->type != 0) {
+		if ((type == AGP_DCACHE_MEMORY) &&
+		    (mem->type == AGP_DCACHE_MEMORY)) {
+			/* special insert */
+			CACHE_FLUSH();
+			for (i = pg_start;
+			     i < (pg_start + mem->page_count); i++) {
+				OUTREG32(intel_i810_private.registers,
+					 I810_PTE_BASE + (i * 4),
+					 (i * 4096) | I810_PTE_LOCAL |
+					 I810_PTE_VALID);
+			}
+			CACHE_FLUSH();
+			agp_bridge.tlb_flush(mem);
+			return 0;
+		}
+	        if((type == AGP_PHYS_MEMORY) &&
+		   (mem->type == AGP_PHYS_MEMORY)) {
+		   goto insert;
+		}
+		return -EINVAL;
+	}
+
+insert:
+   	CACHE_FLUSH();
+	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+		OUTREG32(intel_i810_private.registers,
+			 I810_PTE_BASE + (j * 4), mem->memory[i]);
+	}
+	CACHE_FLUSH();
+
+	agp_bridge.tlb_flush(mem);
+	return 0;
+}
+
+static int intel_i810_remove_entries(agp_memory * mem, off_t pg_start,
+				     int type)
+{
+	int i;
+
+	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
+		OUTREG32(intel_i810_private.registers,
+			 I810_PTE_BASE + (i * 4),
+			 agp_bridge.scratch_page);
+	}
+
+	CACHE_FLUSH();
+	agp_bridge.tlb_flush(mem);
+	return 0;
+}
+
+static agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type)
+{
+	agp_memory *new;
+
+	if (type == AGP_DCACHE_MEMORY) {
+		if (pg_count != intel_i810_private.num_dcache_entries) {
+			return NULL;
+		}
+		new = agp_create_memory(1);
+
+		if (new == NULL) {
+			return NULL;
+		}
+		new->type = AGP_DCACHE_MEMORY;
+		new->page_count = pg_count;
+		new->num_scratch_pages = 0;
+		vfree(new->memory);
+	   	MOD_INC_USE_COUNT;
+		return new;
+	}
+	if(type == AGP_PHYS_MEMORY) {
+		void *addr;
+		/* The i810 requires a physical address to program
+		 * its mouse pointer into hardware.  However, the
+		 * X server still writes to it through the AGP
+		 * aperture.
+		 */
+	   	if (pg_count != 1) {
+		   	return NULL;
+		}
+	   	new = agp_create_memory(1);
+
+		if (new == NULL) {
+			return NULL;
+		}
+	   	MOD_INC_USE_COUNT;
+		addr = agp_bridge.agp_alloc_page();
+
+		if (addr == NULL) {
+			/* Free this structure */
+			agp_free_memory(new);
+			return NULL;
+		}
+		new->memory[0] = agp_bridge.mask_memory(virt_to_phys(addr), type);
+		new->page_count = 1;
+	   	new->num_scratch_pages = 1;
+	   	new->type = AGP_PHYS_MEMORY;
+	        new->physical = virt_to_phys((void *) new->memory[0]);
+	   	return new;
+	}
+   
+	return NULL;
+}
+
+static void intel_i810_free_by_type(agp_memory * curr)
+{
+	agp_free_key(curr->key);
+   	if(curr->type == AGP_PHYS_MEMORY) {
+	   	agp_bridge.agp_destroy_page(
+				 phys_to_virt(curr->memory[0]));
+		vfree(curr->memory);
+	}
+	kfree(curr);
+   	MOD_DEC_USE_COUNT;
+}
+
+static unsigned long intel_i810_mask_memory(unsigned long addr, int type)
+{
+	/* Type checking must be done elsewhere */
+	return addr | agp_bridge.masks[type].mask;
+}
+
+int __init intel_i810_setup(struct pci_dev *i810_dev)
+{
+	intel_i810_private.i810_dev = i810_dev;
+
+	agp_bridge.masks = intel_i810_masks;
+	agp_bridge.num_of_masks = 2;
+	agp_bridge.aperture_sizes = (void *) intel_i810_sizes;
+	agp_bridge.size_type = FIXED_APER_SIZE;
+	agp_bridge.num_aperture_sizes = 2;
+	agp_bridge.dev_private_data = (void *) &intel_i810_private;
+	agp_bridge.needs_scratch_page = TRUE;
+	agp_bridge.configure = intel_i810_configure;
+	agp_bridge.fetch_size = intel_i810_fetch_size;
+	agp_bridge.cleanup = intel_i810_cleanup;
+	agp_bridge.tlb_flush = intel_i810_tlbflush;
+	agp_bridge.mask_memory = intel_i810_mask_memory;
+	agp_bridge.agp_enable = intel_i810_agp_enable;
+	agp_bridge.cache_flush = global_cache_flush;
+	agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
+	agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
+	agp_bridge.insert_memory = intel_i810_insert_entries;
+	agp_bridge.remove_memory = intel_i810_remove_entries;
+	agp_bridge.alloc_by_type = intel_i810_alloc_by_type;
+	agp_bridge.free_by_type = intel_i810_free_by_type;
+	agp_bridge.agp_alloc_page = agp_generic_alloc_page;
+	agp_bridge.agp_destroy_page = agp_generic_destroy_page;
+	agp_bridge.suspend = agp_generic_suspend;
+	agp_bridge.resume = agp_generic_resume;
+	agp_bridge.cant_use_aperture = 0;
+
+	return 0;
+}
+
+static struct aper_size_info_fixed intel_i830_sizes[] =
+{
+	{128, 32768, 5},
+	/* The 64M mode still requires a 128k gatt */
+	{64, 16384, 5}
+};
+
+static struct _intel_i830_private {
+	struct pci_dev *i830_dev;   /* device one */
+	volatile u8 *registers;
+	int gtt_entries;
+} intel_i830_private;
+
+static void intel_i830_init_gtt_entries(void)
+{
+	u16 gmch_ctrl;
+	int gtt_entries;
+	u8 rdct;
+	static const int ddt[4] = { 0, 16, 32, 64 };
+
+	pci_read_config_word(agp_bridge.dev,I830_GMCH_CTRL,&gmch_ctrl);
+
+	switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
+	case I830_GMCH_GMS_STOLEN_512:
+		gtt_entries = KB(512) - KB(132);
+		printk(KERN_INFO PFX "detected %dK stolen memory.\n",gtt_entries / KB(1));
+		break;
+	case I830_GMCH_GMS_STOLEN_1024:
+		gtt_entries = MB(1) - KB(132);
+		printk(KERN_INFO PFX "detected %dK stolen memory.\n",gtt_entries / KB(1));
+		break;
+	case I830_GMCH_GMS_STOLEN_8192:
+		gtt_entries = MB(8) - KB(132);
+		printk(KERN_INFO PFX "detected %dK stolen memory.\n",gtt_entries / KB(1));
+		break;
+	case I830_GMCH_GMS_LOCAL:
+		rdct = INREG8(intel_i830_private.registers,I830_RDRAM_CHANNEL_TYPE);
+		gtt_entries = (I830_RDRAM_ND(rdct) + 1) * MB(ddt[I830_RDRAM_DDT(rdct)]);
+		printk(KERN_INFO PFX "detected %dK local memory.\n",gtt_entries / KB(1));
+		break;
+	default:
+		printk(KERN_INFO PFX "no video memory detected.\n");
+		gtt_entries = 0;
+		break;
+	}
+
+	gtt_entries /= KB(4);
+
+	intel_i830_private.gtt_entries = gtt_entries;
+}
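+
+/*
+ * Example, assuming the KB()/MB() helpers expand to byte counts: the 8MB
+ * stolen-memory case above works out to MB(8) - KB(132) == 8060KB, i.e.
+ * 2015 GTT entries of 4KB each after the final division by KB(4).
+ */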
+
+/* The intel i830 automatically initializes the agp aperture during POST.
+ * Use the memory already set aside for it in the GTT.
+ */
+static int intel_i830_create_gatt_table(void)
+{
+	int page_order;
+	struct aper_size_info_fixed *size;
+	int num_entries;
+	u32 temp;
+
+	size = agp_bridge.current_size;
+	page_order = size->page_order;
+	num_entries = size->num_entries;
+	agp_bridge.gatt_table_real = 0;
+
+	pci_read_config_dword(intel_i830_private.i830_dev,I810_MMADDR,&temp);
+	temp &= 0xfff80000;
+
+	intel_i830_private.registers = (volatile u8 *) ioremap(temp,128 * 4096);
+	if (!intel_i830_private.registers) return (-ENOMEM);
+
+	temp = INREG32(intel_i830_private.registers,I810_PGETBL_CTL) & 0xfffff000;
+	CACHE_FLUSH();
+
+	/* we have to call this as early as possible after the MMIO base address is known */
+	intel_i830_init_gtt_entries();
+
+	agp_bridge.gatt_table = NULL;
+
+	agp_bridge.gatt_bus_addr = temp;
+
+	return(0);
+}
+
+/* Return the gatt table to a sane state. Use the top of stolen
+ * memory for the GTT.
+ */
+static int intel_i830_free_gatt_table(void)
+{
+	return(0);
+}
+
+static int intel_i830_fetch_size(void)
+{
+	u16 gmch_ctrl;
+	struct aper_size_info_fixed *values;
+
+	pci_read_config_word(agp_bridge.dev,I830_GMCH_CTRL,&gmch_ctrl);
+	values = A_SIZE_FIX(agp_bridge.aperture_sizes);
+
+	if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_128M) {
+		agp_bridge.previous_size = agp_bridge.current_size = (void *) values;
+		agp_bridge.aperture_size_idx = 0;
+		return(values[0].size);
+	} else {
+		agp_bridge.previous_size = agp_bridge.current_size = (void *) values;
+		agp_bridge.aperture_size_idx = 1;
+		return(values[1].size);
+	}
+
+	return(0);
+}
+
+static int intel_i830_configure(void)
+{
+	struct aper_size_info_fixed *current_size;
+	u32 temp;
+	u16 gmch_ctrl;
+	int i;
+
+	current_size = A_SIZE_FIX(agp_bridge.current_size);
+
+	pci_read_config_dword(intel_i830_private.i830_dev,I810_GMADDR,&temp);
+	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+	pci_read_config_word(agp_bridge.dev,I830_GMCH_CTRL,&gmch_ctrl);
+	gmch_ctrl |= I830_GMCH_ENABLED;
+	pci_write_config_word(agp_bridge.dev,I830_GMCH_CTRL,gmch_ctrl);
+
+	OUTREG32(intel_i830_private.registers,I810_PGETBL_CTL,agp_bridge.gatt_bus_addr | I810_PGETBL_ENABLED);
+	CACHE_FLUSH();
+
+	if (agp_bridge.needs_scratch_page == TRUE)
+		for (i = intel_i830_private.gtt_entries; i < current_size->num_entries; i++)
+			OUTREG32(intel_i830_private.registers,I810_PTE_BASE + (i * 4),agp_bridge.scratch_page);
+
+	return (0);
+}
+
+static void intel_i830_cleanup(void)
+{
+	iounmap((void *) intel_i830_private.registers);
+}
+
+static int intel_i830_insert_entries(agp_memory *mem,off_t pg_start,int type)
+{
+	int i,j,num_entries;
+	void *temp;
+
+	temp = agp_bridge.current_size;
+	num_entries = A_SIZE_FIX(temp)->num_entries;
+
+	if (pg_start < intel_i830_private.gtt_entries) {
+		printk (KERN_DEBUG "pg_start == 0x%.8lx,intel_i830_private.gtt_entries == 0x%.8x\n",
+				pg_start,intel_i830_private.gtt_entries);
+
+		printk ("Trying to insert into local/stolen memory\n");
+		return (-EINVAL);
+	}
+
+	if ((pg_start + mem->page_count) > num_entries)
+		return (-EINVAL);
+
+	/* The i830 can't check the GTT for entries since it's read-only,
+	 * so depend on the caller to make the correct offset decisions.
+	 */
+
+	if ((type != 0 && type != AGP_PHYS_MEMORY) ||
+		(mem->type != 0 && mem->type != AGP_PHYS_MEMORY))
+		return (-EINVAL);
+
+	CACHE_FLUSH();
+
+	for (i = 0, j = pg_start; i < mem->page_count; i++, j++)
+		OUTREG32(intel_i830_private.registers,I810_PTE_BASE + (j * 4),mem->memory[i]);
+
+	CACHE_FLUSH();
+
+	agp_bridge.tlb_flush(mem);
+
+	return(0);
+}
+
+static int intel_i830_remove_entries(agp_memory *mem,off_t pg_start,int type)
+{
+	int i;
+
+	CACHE_FLUSH ();
+
+	if (pg_start < intel_i830_private.gtt_entries) {
+		printk ("Trying to disable local/stolen memory\n");
+		return (-EINVAL);
+	}
+
+	for (i = pg_start; i < (mem->page_count + pg_start); i++)
+		OUTREG32(intel_i830_private.registers,I810_PTE_BASE + (i * 4),agp_bridge.scratch_page);
+
+	CACHE_FLUSH();
+
+	agp_bridge.tlb_flush(mem);
+
+	return (0);
+}
+
+static agp_memory *intel_i830_alloc_by_type(size_t pg_count,int type)
+{
+	agp_memory *nw;
+
+	/* always return NULL for now */
+	if (type == AGP_DCACHE_MEMORY) return(NULL);
+
+	if (type == AGP_PHYS_MEMORY) {
+		void *addr;
+
+		/* The i830 requires a physical address to program
+		 * its mouse pointer into hardware.  However, the
+		 * X server still writes to it through the AGP
+		 * aperture.
+		 */
+
+		if (pg_count != 1) return(NULL);
+
+		nw = agp_create_memory(1);
+
+		if (nw == NULL) return(NULL);
+
+		MOD_INC_USE_COUNT;
+		addr = agp_bridge.agp_alloc_page();
+		if (addr == NULL) {
+			/* free this structure */
+			agp_free_memory(nw);
+			return(NULL);
+		}
+
+		nw->memory[0] = agp_bridge.mask_memory(virt_to_phys(addr),type);
+		nw->page_count = 1;
+		nw->num_scratch_pages = 1;
+		nw->type = AGP_PHYS_MEMORY;
+		nw->physical = virt_to_phys(addr);
+		return(nw);
+	}
+
+	return(NULL);
+}
+
+int __init intel_i830_setup(struct pci_dev *i830_dev)
+{
+	intel_i830_private.i830_dev = i830_dev;
+
+	agp_bridge.masks = intel_i810_masks;
+	agp_bridge.num_of_masks = 3;
+	agp_bridge.aperture_sizes = (void *) intel_i830_sizes;
+	agp_bridge.size_type = FIXED_APER_SIZE;
+	agp_bridge.num_aperture_sizes = 2;
+
+	agp_bridge.dev_private_data = (void *) &intel_i830_private;
+	agp_bridge.needs_scratch_page = TRUE;
+
+	agp_bridge.configure = intel_i830_configure;
+	agp_bridge.fetch_size = intel_i830_fetch_size;
+	agp_bridge.cleanup = intel_i830_cleanup;
+	agp_bridge.tlb_flush = intel_i810_tlbflush;
+	agp_bridge.mask_memory = intel_i810_mask_memory;
+	agp_bridge.agp_enable = intel_i810_agp_enable;
+	agp_bridge.cache_flush = global_cache_flush;
+
+	agp_bridge.create_gatt_table = intel_i830_create_gatt_table;
+	agp_bridge.free_gatt_table = intel_i830_free_gatt_table;
+
+	agp_bridge.insert_memory = intel_i830_insert_entries;
+	agp_bridge.remove_memory = intel_i830_remove_entries;
+	agp_bridge.alloc_by_type = intel_i830_alloc_by_type;
+	agp_bridge.free_by_type = intel_i810_free_by_type;
+	agp_bridge.agp_alloc_page = agp_generic_alloc_page;
+	agp_bridge.agp_destroy_page = agp_generic_destroy_page;
+
+	agp_bridge.suspend = agp_generic_suspend;
+	agp_bridge.resume = agp_generic_resume;
+	agp_bridge.cant_use_aperture = 0;
+
+	return(0);
+}
+
diff -Nru a/drivers/char/agp/i8x0.c b/drivers/char/agp/i8x0.c
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/drivers/char/agp/i8x0.c	Wed Jul 17 11:29:31 2002
@@ -0,0 +1,726 @@
+/*
+ * AGPGART module version 0.99
+ * Copyright (C) 1999 Jeff Hartmann
+ * Copyright (C) 1999 Precision Insight, Inc.
+ * Copyright (C) 1999 Xi Graphics, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, 
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE 
+ * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * TODO: 
+ * - Allocate more than order 0 pages to avoid too much linear map splitting.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/agp_backend.h>
+#include "agp.h"
+
+
+static int intel_fetch_size(void)
+{
+	int i;
+	u16 temp;
+	struct aper_size_info_16 *values;
+
+	pci_read_config_word(agp_bridge.dev, INTEL_APSIZE, &temp);
+	values = A_SIZE_16(agp_bridge.aperture_sizes);
+
+	for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
+		if (temp == values[i].size_value) {
+			agp_bridge.previous_size =
+			    agp_bridge.current_size = (void *) (values + i);
+			agp_bridge.aperture_size_idx = i;
+			return values[i].size;
+		}
+	}
+
+	return 0;
+}
+
+static int intel_8xx_fetch_size(void)
+{
+	int i;
+	u8 temp;
+	struct aper_size_info_8 *values;
+
+	pci_read_config_byte(agp_bridge.dev, INTEL_APSIZE, &temp);
+
+	/* Intel 815 chipsets have a _weird_ APSIZE register with only
+	 * one non-reserved bit, so mask the others out ... */
+	if (agp_bridge.type == INTEL_I815)
+		temp &= (1 << 3);
+
+	values = A_SIZE_8(agp_bridge.aperture_sizes);
+
+	for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
+		if (temp == values[i].size_value) {
+			agp_bridge.previous_size =
+				agp_bridge.current_size = (void *) (values + i);
+			agp_bridge.aperture_size_idx = i;
+			return values[i].size;
+		}
+	}
+	return 0;
+}
+
+
+static void intel_tlbflush(agp_memory * mem)
+{
+	pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x2200);
+	pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x2280);
+}
+
+
+static void intel_8xx_tlbflush(agp_memory * mem)
+{
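+  /* Toggle bit 7 of AGPCTRL (presumably the GTLB enable bit) off and
+   * back on to force the chipset to flush its GART TLB. */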
+  u32 temp;
+  pci_read_config_dword(agp_bridge.dev, INTEL_AGPCTRL, &temp);
+  pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, temp & ~(1 << 7));
+  pci_read_config_dword(agp_bridge.dev, INTEL_AGPCTRL, &temp);
+  pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, temp | (1 << 7));
+}
+
+
+static void intel_cleanup(void)
+{
+	u16 temp;
+	struct aper_size_info_16 *previous_size;
+
+	previous_size = A_SIZE_16(agp_bridge.previous_size);
+	pci_read_config_word(agp_bridge.dev, INTEL_NBXCFG, &temp);
+	pci_write_config_word(agp_bridge.dev, INTEL_NBXCFG, temp & ~(1 << 9));
+	pci_write_config_word(agp_bridge.dev, INTEL_APSIZE,
+			      previous_size->size_value);
+}
+
+
+static void intel_8xx_cleanup(void)
+{
+	u16 temp;
+	struct aper_size_info_8 *previous_size;
+
+	previous_size = A_SIZE_8(agp_bridge.previous_size);
+	pci_read_config_word(agp_bridge.dev, INTEL_NBXCFG, &temp);
+	pci_write_config_word(agp_bridge.dev, INTEL_NBXCFG, temp & ~(1 << 9));
+	pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE,
+			      previous_size->size_value);
+}
+
+
+static int intel_configure(void)
+{
+	u32 temp;
+	u16 temp2;
+	struct aper_size_info_16 *current_size;
+
+	current_size = A_SIZE_16(agp_bridge.current_size);
+
+	/* aperture size */
+	pci_write_config_word(agp_bridge.dev, INTEL_APSIZE,
+			      current_size->size_value);
+
+	/* address to map to */
+	pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp);
+	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+	/* attbase - aperture base */
+	pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE,
+			       agp_bridge.gatt_bus_addr);
+
+	/* agpctrl */
+	pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x2280);
+
+	/* paccfg/nbxcfg */
+	pci_read_config_word(agp_bridge.dev, INTEL_NBXCFG, &temp2);
+	pci_write_config_word(agp_bridge.dev, INTEL_NBXCFG,
+			      (temp2 & ~(1 << 10)) | (1 << 9));
+	/* clear any possible error conditions */
+	pci_write_config_byte(agp_bridge.dev, INTEL_ERRSTS + 1, 7);
+	return 0;
+}
+
+static int intel_815_configure(void)
+{
+	u32 temp, addr;
+	u8 temp2;
+	struct aper_size_info_8 *current_size;
+
+	current_size = A_SIZE_8(agp_bridge.current_size);
+
+	/* aperture size */
+	pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE,
+			current_size->size_value); 
+
+	/* address to map to */
+	pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp);
+	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+	/* attbase - aperture base */
+	/* the Intel 815 chipset spec says that bits 29-31 in the
+	 * ATTBASE register are reserved, so try not to write them */
+	if (agp_bridge.gatt_bus_addr &  INTEL_815_ATTBASE_MASK)
+		panic("gatt bus addr too high");
+	pci_read_config_dword(agp_bridge.dev, INTEL_ATTBASE, &addr);
+	addr &= INTEL_815_ATTBASE_MASK;
+	addr |= agp_bridge.gatt_bus_addr;
+	pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE, addr);
+
+	/* agpctrl */
+	pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x0000); 
+
+	/* apcont */
+	pci_read_config_byte(agp_bridge.dev, INTEL_815_APCONT, &temp2);
+	pci_write_config_byte(agp_bridge.dev, INTEL_815_APCONT, temp2 | (1 << 1));
+
+	/* clear any possible error conditions */
+	/* Oddness : this chipset seems to have no ERRSTS register ! */
+	return 0;
+}
+
+static void intel_820_tlbflush(agp_memory * mem)
+{
+  return;
+}
+
+static void intel_820_cleanup(void)
+{
+	u8 temp;
+	struct aper_size_info_8 *previous_size;
+
+	previous_size = A_SIZE_8(agp_bridge.previous_size);
+	pci_read_config_byte(agp_bridge.dev, INTEL_I820_RDCR, &temp);
+	pci_write_config_byte(agp_bridge.dev, INTEL_I820_RDCR, 
+			      temp & ~(1 << 1));
+	pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE,
+			      previous_size->size_value);
+}
+
+
+static int intel_820_configure(void)
+{
+	u32 temp;
+ 	u8 temp2; 
+	struct aper_size_info_8 *current_size;
+
+	current_size = A_SIZE_8(agp_bridge.current_size);
+
+	/* aperture size */
+	pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE,
+			      current_size->size_value); 
+
+	/* address to map to */
+	pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp);
+	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+	/* attbase - aperture base */
+	pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE,
+			       agp_bridge.gatt_bus_addr); 
+
+	/* agpctrl */
+	pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x0000); 
+
+	/* global enable aperture access */
+	/* This flag is not accessed through the MCHCFG register as in
+	 * the i850 chipset. */
+	pci_read_config_byte(agp_bridge.dev, INTEL_I820_RDCR, &temp2);
+	pci_write_config_byte(agp_bridge.dev, INTEL_I820_RDCR, 
+			      temp2 | (1 << 1));
+	/* clear any possible AGP-related error conditions */
+	pci_write_config_word(agp_bridge.dev, INTEL_I820_ERRSTS, 0x001c); 
+	return 0;
+}
+
+static int intel_840_configure(void)
+{
+	u32 temp;
+	u16 temp2;
+	struct aper_size_info_8 *current_size;
+
+	current_size = A_SIZE_8(agp_bridge.current_size);
+
+	/* aperture size */
+	pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE,
+			      current_size->size_value); 
+
+	/* address to map to */
+	pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp);
+	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+	/* attbase - aperture base */
+	pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE,
+			       agp_bridge.gatt_bus_addr); 
+
+	/* agpctrl */
+	pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x0000); 
+
+	/* mchcfg */
+	pci_read_config_word(agp_bridge.dev, INTEL_I840_MCHCFG, &temp2);
+	pci_write_config_word(agp_bridge.dev, INTEL_I840_MCHCFG,
+			      temp2 | (1 << 9));
+	/* clear any possible error conditions */
+	pci_write_config_word(agp_bridge.dev, INTEL_I840_ERRSTS, 0xc000); 
+	return 0;
+}
+
+static int intel_845_configure(void)
+{
+	u32 temp;
+	u8 temp2;
+	struct aper_size_info_8 *current_size;
+
+	current_size = A_SIZE_8(agp_bridge.current_size);
+
+	/* aperture size */
+	pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE,
+			      current_size->size_value); 
+
+	/* address to map to */
+	pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp);
+	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+	/* attbase - aperture base */
+	pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE,
+			       agp_bridge.gatt_bus_addr); 
+
+	/* agpctrl */
+	pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x0000); 
+
+	/* agpm */
+	pci_read_config_byte(agp_bridge.dev, INTEL_I845_AGPM, &temp2);
+	pci_write_config_byte(agp_bridge.dev, INTEL_I845_AGPM,
+			      temp2 | (1 << 1));
+	/* clear any possible error conditions */
+	pci_write_config_word(agp_bridge.dev, INTEL_I845_ERRSTS, 0x001c); 
+	return 0;
+}
+
+static int intel_850_configure(void)
+{
+	u32 temp;
+	u16 temp2;
+	struct aper_size_info_8 *current_size;
+
+	current_size = A_SIZE_8(agp_bridge.current_size);
+
+	/* aperture size */
+	pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE,
+			      current_size->size_value); 
+
+	/* address to map to */
+	pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp);
+	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+	/* attbase - aperture base */
+	pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE,
+			       agp_bridge.gatt_bus_addr); 
+
+	/* agpctrl */
+	pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x0000); 
+
+	/* mchcfg */
+	pci_read_config_word(agp_bridge.dev, INTEL_I850_MCHCFG, &temp2);
+	pci_write_config_word(agp_bridge.dev, INTEL_I850_MCHCFG,
+			      temp2 | (1 << 9));
+	/* clear any possible AGP-related error conditions */
+	pci_write_config_word(agp_bridge.dev, INTEL_I850_ERRSTS, 0x001c); 
+	return 0;
+}
+
+static int intel_860_configure(void)
+{
+	u32 temp;
+	u16 temp2;
+	struct aper_size_info_8 *current_size;
+
+	current_size = A_SIZE_8(agp_bridge.current_size);
+
+	/* aperture size */
+	pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE,
+			      current_size->size_value);
+
+	/* address to map to */
+	pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp);
+	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+	/* attbase - aperture base */
+	pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE,
+			       agp_bridge.gatt_bus_addr);
+
+	/* agpctrl */
+	pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x0000);
+
+	/* mchcfg */
+	pci_read_config_word(agp_bridge.dev, INTEL_I860_MCHCFG, &temp2);
+	pci_write_config_word(agp_bridge.dev, INTEL_I860_MCHCFG,
+			      temp2 | (1 << 9));
+	/* clear any possible AGP-related error conditions */
+	pci_write_config_word(agp_bridge.dev, INTEL_I860_ERRSTS, 0xf700);
+	return 0;
+}
+
+static int intel_830mp_configure(void)
+{
+	u32 temp;
+	u16 temp2;
+	struct aper_size_info_8 *current_size;
+
+	current_size = A_SIZE_8(agp_bridge.current_size);
+
+	/* aperture size */
+	pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE,
+			      current_size->size_value);
+
+	/* address to map to */
+	pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp);
+	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+	/* attbase - aperture base */
+	pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE,
+			       agp_bridge.gatt_bus_addr);
+
+	/* agpctrl */
+	pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x0000);
+
+	/* gmch */
+	pci_read_config_word(agp_bridge.dev, INTEL_NBXCFG, &temp2);
+	pci_write_config_word(agp_bridge.dev, INTEL_NBXCFG,
+			      temp2 | (1 << 9));
+	/* clear any possible AGP-related error conditions */
+	pci_write_config_word(agp_bridge.dev, INTEL_I830_ERRSTS, 0x1c);
+	return 0;
+}
+
+static unsigned long intel_mask_memory(unsigned long addr, int type)
+{
+	/* Memory type is ignored */
+
+	return addr | agp_bridge.masks[0].mask;
+}
+
+static void intel_resume(void)
+{
+	intel_configure();
+}
+
+/* Setup function */
+static struct gatt_mask intel_generic_masks[] =
+{
+	{mask: 0x00000017, type: 0}
+};
+
+static struct aper_size_info_8 intel_815_sizes[2] =
+{
+	{64, 16384, 4, 0},
+	{32, 8192, 3, 8},
+};
+	
+static struct aper_size_info_8 intel_8xx_sizes[7] =
+{
+	{256, 65536, 6, 0},
+	{128, 32768, 5, 32},
+	{64, 16384, 4, 48},
+	{32, 8192, 3, 56},
+	{16, 4096, 2, 60},
+	{8, 2048, 1, 62},
+	{4, 1024, 0, 63}
+};
+
+static struct aper_size_info_16 intel_generic_sizes[7] =
+{
+	{256, 65536, 6, 0},
+	{128, 32768, 5, 32},
+	{64, 16384, 4, 48},
+	{32, 8192, 3, 56},
+	{16, 4096, 2, 60},
+	{8, 2048, 1, 62},
+	{4, 1024, 0, 63}
+};
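+
+/*
+ * The tables above appear to follow the pattern {aperture size in MB,
+ * number of 4KB GATT entries, page order of the GATT, APSIZE size_value};
+ * e.g. 256MB / 4KB == 65536 entries, and 65536 entries * 4 bytes == 256KB,
+ * an order-6 allocation.
+ */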
+
+static struct aper_size_info_8 intel_830mp_sizes[4] = 
+{
+  {256, 65536, 6, 0},
+  {128, 32768, 5, 32},
+  {64, 16384, 4, 48},
+  {32, 8192, 3, 56}
+};
+
+int __init intel_generic_setup (struct pci_dev *pdev)
+{
+	agp_bridge.masks = intel_generic_masks;
+	agp_bridge.num_of_masks = 1;
+	agp_bridge.aperture_sizes = (void *) intel_generic_sizes;
+	agp_bridge.size_type = U16_APER_SIZE;
+	agp_bridge.num_aperture_sizes = 7;
+	agp_bridge.dev_private_data = NULL;
+	agp_bridge.needs_scratch_page = FALSE;
+	agp_bridge.configure = intel_configure;
+	agp_bridge.fetch_size = intel_fetch_size;
+	agp_bridge.cleanup = intel_cleanup;
+	agp_bridge.tlb_flush = intel_tlbflush;
+	agp_bridge.mask_memory = intel_mask_memory;
+	agp_bridge.agp_enable = agp_generic_agp_enable;
+	agp_bridge.cache_flush = global_cache_flush;
+	agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
+	agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
+	agp_bridge.insert_memory = agp_generic_insert_memory;
+	agp_bridge.remove_memory = agp_generic_remove_memory;
+	agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
+	agp_bridge.free_by_type = agp_generic_free_by_type;
+	agp_bridge.agp_alloc_page = agp_generic_alloc_page;
+	agp_bridge.agp_destroy_page = agp_generic_destroy_page;
+	agp_bridge.suspend = agp_generic_suspend;
+	agp_bridge.resume = intel_resume;
+	agp_bridge.cant_use_aperture = 0;
+
+	return 0;
+	
+	(void) pdev; /* unused */
+}
+
+int __init intel_815_setup (struct pci_dev *pdev)
+{
+	agp_bridge.masks = intel_generic_masks;
+	agp_bridge.num_of_masks = 1;
+	agp_bridge.aperture_sizes = (void *) intel_815_sizes;
+	agp_bridge.size_type = U8_APER_SIZE;
+	agp_bridge.num_aperture_sizes = 2;
+	agp_bridge.dev_private_data = NULL;
+	agp_bridge.needs_scratch_page = FALSE;
+	agp_bridge.configure = intel_815_configure;
+	agp_bridge.fetch_size = intel_8xx_fetch_size;
+	agp_bridge.cleanup = intel_8xx_cleanup;
+	agp_bridge.tlb_flush = intel_8xx_tlbflush;
+	agp_bridge.mask_memory = intel_mask_memory;
+	agp_bridge.agp_enable = agp_generic_agp_enable;
+	agp_bridge.cache_flush = global_cache_flush;
+	agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
+	agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
+	agp_bridge.insert_memory = agp_generic_insert_memory;
+	agp_bridge.remove_memory = agp_generic_remove_memory;
+	agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
+	agp_bridge.free_by_type = agp_generic_free_by_type;
+	agp_bridge.agp_alloc_page = agp_generic_alloc_page;
+	agp_bridge.agp_destroy_page = agp_generic_destroy_page;
+	agp_bridge.suspend = agp_generic_suspend;
+	agp_bridge.resume = agp_generic_resume;
+	agp_bridge.cant_use_aperture = 0;
+
+	return 0;
+}
+
+
+int __init intel_820_setup (struct pci_dev *pdev)
+{
+       agp_bridge.masks = intel_generic_masks;
+       agp_bridge.num_of_masks = 1;
+       agp_bridge.aperture_sizes = (void *) intel_8xx_sizes;
+       agp_bridge.size_type = U8_APER_SIZE;
+       agp_bridge.num_aperture_sizes = 7;
+       agp_bridge.dev_private_data = NULL;
+       agp_bridge.needs_scratch_page = FALSE;
+       agp_bridge.configure = intel_820_configure;
+       agp_bridge.fetch_size = intel_8xx_fetch_size;
+       agp_bridge.cleanup = intel_820_cleanup;
+       agp_bridge.tlb_flush = intel_820_tlbflush;
+       agp_bridge.mask_memory = intel_mask_memory;
+       agp_bridge.agp_enable = agp_generic_agp_enable;
+       agp_bridge.cache_flush = global_cache_flush;
+       agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
+       agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
+       agp_bridge.insert_memory = agp_generic_insert_memory;
+       agp_bridge.remove_memory = agp_generic_remove_memory;
+       agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
+       agp_bridge.free_by_type = agp_generic_free_by_type;
+       agp_bridge.agp_alloc_page = agp_generic_alloc_page;
+       agp_bridge.agp_destroy_page = agp_generic_destroy_page;
+       agp_bridge.suspend = agp_generic_suspend;
+       agp_bridge.resume = agp_generic_resume;
+       agp_bridge.cant_use_aperture = 0;
+
+       return 0;
+
+       (void) pdev; /* unused */
+}
+
+int __init intel_830mp_setup (struct pci_dev *pdev)
+{
+       agp_bridge.masks = intel_generic_masks;
+       agp_bridge.num_of_masks = 1;
+       agp_bridge.aperture_sizes = (void *) intel_830mp_sizes;
+       agp_bridge.size_type = U8_APER_SIZE;
+       agp_bridge.num_aperture_sizes = 4;
+       agp_bridge.dev_private_data = NULL;
+       agp_bridge.needs_scratch_page = FALSE;
+       agp_bridge.configure = intel_830mp_configure;
+       agp_bridge.fetch_size = intel_8xx_fetch_size;
+       agp_bridge.cleanup = intel_8xx_cleanup;
+       agp_bridge.tlb_flush = intel_8xx_tlbflush;
+       agp_bridge.mask_memory = intel_mask_memory;
+       agp_bridge.agp_enable = agp_generic_agp_enable;
+       agp_bridge.cache_flush = global_cache_flush;
+       agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
+       agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
+       agp_bridge.insert_memory = agp_generic_insert_memory;
+       agp_bridge.remove_memory = agp_generic_remove_memory;
+       agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
+       agp_bridge.free_by_type = agp_generic_free_by_type;
+       agp_bridge.agp_alloc_page = agp_generic_alloc_page;
+       agp_bridge.agp_destroy_page = agp_generic_destroy_page;
+       agp_bridge.suspend = agp_generic_suspend;
+       agp_bridge.resume = agp_generic_resume;
+       agp_bridge.cant_use_aperture = 0;
+
+       return 0;
+
+       (void) pdev; /* unused */
+}
+
+int __init intel_840_setup (struct pci_dev *pdev)
+{
+	agp_bridge.masks = intel_generic_masks;
+	agp_bridge.num_of_masks = 1;
+	agp_bridge.aperture_sizes = (void *) intel_8xx_sizes;
+	agp_bridge.size_type = U8_APER_SIZE;
+	agp_bridge.num_aperture_sizes = 7;
+	agp_bridge.dev_private_data = NULL;
+	agp_bridge.needs_scratch_page = FALSE;
+	agp_bridge.configure = intel_840_configure;
+	agp_bridge.fetch_size = intel_8xx_fetch_size;
+	agp_bridge.cleanup = intel_8xx_cleanup;
+	agp_bridge.tlb_flush = intel_8xx_tlbflush;
+	agp_bridge.mask_memory = intel_mask_memory;
+	agp_bridge.agp_enable = agp_generic_agp_enable;
+	agp_bridge.cache_flush = global_cache_flush;
+	agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
+	agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
+	agp_bridge.insert_memory = agp_generic_insert_memory;
+	agp_bridge.remove_memory = agp_generic_remove_memory;
+	agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
+	agp_bridge.free_by_type = agp_generic_free_by_type;
+	agp_bridge.agp_alloc_page = agp_generic_alloc_page;
+	agp_bridge.agp_destroy_page = agp_generic_destroy_page;
+	agp_bridge.suspend = agp_generic_suspend;
+	agp_bridge.resume = agp_generic_resume;
+	agp_bridge.cant_use_aperture = 0;
+
+	return 0;
+	
+	(void) pdev; /* unused */
+}
+
+int __init intel_845_setup (struct pci_dev *pdev)
+{
+	agp_bridge.masks = intel_generic_masks;
+	agp_bridge.num_of_masks = 1;
+	agp_bridge.aperture_sizes = (void *) intel_8xx_sizes;
+	agp_bridge.size_type = U8_APER_SIZE;
+	agp_bridge.num_aperture_sizes = 7;
+	agp_bridge.dev_private_data = NULL;
+	agp_bridge.needs_scratch_page = FALSE;
+	agp_bridge.configure = intel_845_configure;
+	agp_bridge.fetch_size = intel_8xx_fetch_size;
+	agp_bridge.cleanup = intel_8xx_cleanup;
+	agp_bridge.tlb_flush = intel_8xx_tlbflush;
+	agp_bridge.mask_memory = intel_mask_memory;
+	agp_bridge.agp_enable = agp_generic_agp_enable;
+	agp_bridge.cache_flush = global_cache_flush;
+	agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
+	agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
+	agp_bridge.insert_memory = agp_generic_insert_memory;
+	agp_bridge.remove_memory = agp_generic_remove_memory;
+	agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
+	agp_bridge.free_by_type = agp_generic_free_by_type;
+	agp_bridge.agp_alloc_page = agp_generic_alloc_page;
+	agp_bridge.agp_destroy_page = agp_generic_destroy_page;
+	agp_bridge.suspend = agp_generic_suspend;
+	agp_bridge.resume = agp_generic_resume;
+	agp_bridge.cant_use_aperture = 0;
+
+	return 0;
+	
+	(void) pdev; /* unused */
+}
+
+int __init intel_850_setup (struct pci_dev *pdev)
+{
+	agp_bridge.masks = intel_generic_masks;
+	agp_bridge.num_of_masks = 1;
+	agp_bridge.aperture_sizes = (void *) intel_8xx_sizes;
+	agp_bridge.size_type = U8_APER_SIZE;
+	agp_bridge.num_aperture_sizes = 7;
+	agp_bridge.dev_private_data = NULL;
+	agp_bridge.needs_scratch_page = FALSE;
+	agp_bridge.configure = intel_850_configure;
+	agp_bridge.fetch_size = intel_8xx_fetch_size;
+	agp_bridge.cleanup = intel_8xx_cleanup;
+	agp_bridge.tlb_flush = intel_8xx_tlbflush;
+	agp_bridge.mask_memory = intel_mask_memory;
+	agp_bridge.agp_enable = agp_generic_agp_enable;
+	agp_bridge.cache_flush = global_cache_flush;
+	agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
+	agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
+	agp_bridge.insert_memory = agp_generic_insert_memory;
+	agp_bridge.remove_memory = agp_generic_remove_memory;
+	agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
+	agp_bridge.free_by_type = agp_generic_free_by_type;
+	agp_bridge.agp_alloc_page = agp_generic_alloc_page;
+	agp_bridge.agp_destroy_page = agp_generic_destroy_page;
+	agp_bridge.suspend = agp_generic_suspend;
+	agp_bridge.resume = agp_generic_resume;
+	agp_bridge.cant_use_aperture = 0;
+
+	return 0;
+	
+	(void) pdev; /* unused */
+}
+
+int __init intel_860_setup (struct pci_dev *pdev)
+{
+	agp_bridge.masks = intel_generic_masks;
+	agp_bridge.num_of_masks = 1;
+	agp_bridge.aperture_sizes = (void *) intel_8xx_sizes;
+	agp_bridge.size_type = U8_APER_SIZE;
+	agp_bridge.num_aperture_sizes = 7;
+	agp_bridge.dev_private_data = NULL;
+	agp_bridge.needs_scratch_page = FALSE;
+	agp_bridge.configure = intel_860_configure;
+	agp_bridge.fetch_size = intel_8xx_fetch_size;
+	agp_bridge.cleanup = intel_8xx_cleanup;
+	agp_bridge.tlb_flush = intel_8xx_tlbflush;
+	agp_bridge.mask_memory = intel_mask_memory;
+	agp_bridge.agp_enable = agp_generic_agp_enable;
+	agp_bridge.cache_flush = global_cache_flush;
+	agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
+	agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
+	agp_bridge.insert_memory = agp_generic_insert_memory;
+	agp_bridge.remove_memory = agp_generic_remove_memory;
+	agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
+	agp_bridge.free_by_type = agp_generic_free_by_type;
+	agp_bridge.agp_alloc_page = agp_generic_alloc_page;
+	agp_bridge.agp_destroy_page = agp_generic_destroy_page;
+	agp_bridge.suspend = agp_generic_suspend;
+	agp_bridge.resume = agp_generic_resume;
+	agp_bridge.cant_use_aperture = 0;
+
+	return 0;
+
+	(void) pdev; /* unused */
+}
+
diff -Nru a/drivers/char/agp/sis.c b/drivers/char/agp/sis.c
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/drivers/char/agp/sis.c	Wed Jul 17 11:29:31 2002
@@ -0,0 +1,142 @@
+/*
+ * AGPGART module version 0.99
+ * Copyright (C) 1999 Jeff Hartmann
+ * Copyright (C) 1999 Precision Insight, Inc.
+ * Copyright (C) 1999 Xi Graphics, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, 
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE 
+ * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * TODO: 
+ * - Allocate more than order 0 pages to avoid too much linear map splitting.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/agp_backend.h>
+#include "agp.h"
+
+static int sis_fetch_size(void)
+{
+	u8 temp_size;
+	int i;
+	struct aper_size_info_8 *values;
+
+	pci_read_config_byte(agp_bridge.dev, SIS_APSIZE, &temp_size);
+	values = A_SIZE_8(agp_bridge.aperture_sizes);
+	for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
+		if ((temp_size == values[i].size_value) ||
+		    ((temp_size & ~(0x03)) ==
+		     (values[i].size_value & ~(0x03)))) {
+			agp_bridge.previous_size =
+			    agp_bridge.current_size = (void *) (values + i);
+
+			agp_bridge.aperture_size_idx = i;
+			return values[i].size;
+		}
+	}
+
+	return 0;
+}
+
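+/*
+ * In the lookup above, the low two bits of SIS_APSIZE appear to carry
+ * control flags rather than size bits: every size_value in
+ * sis_generic_sizes below has them set, the comparison retries with
+ * them masked off, and sis_cleanup() restores the size with them
+ * cleared.
+ */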
+
+static void sis_tlbflush(agp_memory * mem)
+{
+	pci_write_config_byte(agp_bridge.dev, SIS_TLBFLUSH, 0x02);
+}
+
+static int sis_configure(void)
+{
+	u32 temp;
+	struct aper_size_info_8 *current_size;
+
+	current_size = A_SIZE_8(agp_bridge.current_size);
+	pci_write_config_byte(agp_bridge.dev, SIS_TLBCNTRL, 0x05);
+	pci_read_config_dword(agp_bridge.dev, SIS_APBASE, &temp);
+	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+	pci_write_config_dword(agp_bridge.dev, SIS_ATTBASE,
+			       agp_bridge.gatt_bus_addr);
+	pci_write_config_byte(agp_bridge.dev, SIS_APSIZE,
+			      current_size->size_value);
+	return 0;
+}
+
+static void sis_cleanup(void)
+{
+	struct aper_size_info_8 *previous_size;
+
+	previous_size = A_SIZE_8(agp_bridge.previous_size);
+	pci_write_config_byte(agp_bridge.dev, SIS_APSIZE,
+			      (previous_size->size_value & ~(0x03)));
+}
+
+static unsigned long sis_mask_memory(unsigned long addr, int type)
+{
+	/* Memory type is ignored */
+
+	return addr | agp_bridge.masks[0].mask;
+}
+
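+/*
+ * Each entry below gives the aperture size in MB, the number of 4 KB
+ * GATT entries, the page order needed to hold that GATT (65536 4-byte
+ * entries = 64 pages = order 6 for the 256 MB case), and the value
+ * written to SIS_APSIZE by sis_configure() for that size.
+ */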
+static struct aper_size_info_8 sis_generic_sizes[7] =
+{
+	{256, 65536, 6, 99},
+	{128, 32768, 5, 83},
+	{64, 16384, 4, 67},
+	{32, 8192, 3, 51},
+	{16, 4096, 2, 35},
+	{8, 2048, 1, 19},
+	{4, 1024, 0, 3}
+};
+
+static struct gatt_mask sis_generic_masks[] =
+{
+	{mask: 0x00000000, type: 0}
+};
+
+int __init sis_generic_setup (struct pci_dev *pdev)
+{
+	agp_bridge.masks = sis_generic_masks;
+	agp_bridge.num_of_masks = 1;
+	agp_bridge.aperture_sizes = (void *) sis_generic_sizes;
+	agp_bridge.size_type = U8_APER_SIZE;
+	agp_bridge.num_aperture_sizes = 7;
+	agp_bridge.dev_private_data = NULL;
+	agp_bridge.needs_scratch_page = FALSE;
+	agp_bridge.configure = sis_configure;
+	agp_bridge.fetch_size = sis_fetch_size;
+	agp_bridge.cleanup = sis_cleanup;
+	agp_bridge.tlb_flush = sis_tlbflush;
+	agp_bridge.mask_memory = sis_mask_memory;
+	agp_bridge.agp_enable = agp_generic_agp_enable;
+	agp_bridge.cache_flush = global_cache_flush;
+	agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
+	agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
+	agp_bridge.insert_memory = agp_generic_insert_memory;
+	agp_bridge.remove_memory = agp_generic_remove_memory;
+	agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
+	agp_bridge.free_by_type = agp_generic_free_by_type;
+	agp_bridge.agp_alloc_page = agp_generic_alloc_page;
+	agp_bridge.agp_destroy_page = agp_generic_destroy_page;
+	agp_bridge.suspend = agp_generic_suspend;
+	agp_bridge.resume = agp_generic_resume;
+	agp_bridge.cant_use_aperture = 0;
+
+	return 0;
+}
+
diff -Nru a/drivers/char/agp/sworks.c b/drivers/char/agp/sworks.c
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/drivers/char/agp/sworks.c	Wed Jul 17 11:29:31 2002
@@ -0,0 +1,626 @@
+/*
+ * AGPGART module version 0.99
+ * Copyright (C) 1999 Jeff Hartmann
+ * Copyright (C) 1999 Precision Insight, Inc.
+ * Copyright (C) 1999 Xi Graphics, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, 
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE 
+ * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * TODO: 
+ * - Allocate more than order 0 pages to avoid too much linear map splitting.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/agp_backend.h>
+#include "agp.h"
+
+struct serverworks_page_map {
+	unsigned long *real;
+	unsigned long *remapped;
+};
+
+static struct _serverworks_private {
+	struct pci_dev *svrwrks_dev;	/* device one */
+	volatile u8 *registers;
+	struct serverworks_page_map **gatt_pages;
+	int num_tables;
+	struct serverworks_page_map scratch_dir;
+
+	int gart_addr_ofs;
+	int mm_addr_ofs;
+} serverworks_private;
+
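+/*
+ * The ServerWorks GART is a two-level table: a single page directory
+ * whose entries point at individual GATT pages (gatt_pages above),
+ * with scratch_dir standing in for directory slots that have no real
+ * GATT page.  Each page_map pairs the kernel page (real) with an
+ * uncached mapping of it (remapped) so GATT updates are not held in
+ * the CPU cache.
+ */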
+static int serverworks_create_page_map(struct serverworks_page_map *page_map)
+{
+	int i;
+
+	page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL);
+	if (page_map->real == NULL) {
+		return -ENOMEM;
+	}
+	SetPageReserved(virt_to_page(page_map->real));
+	CACHE_FLUSH();
+	page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real), 
+					    PAGE_SIZE);
+	if (page_map->remapped == NULL) {
+		ClearPageReserved(virt_to_page(page_map->real));
+		free_page((unsigned long) page_map->real);
+		page_map->real = NULL;
+		return -ENOMEM;
+	}
+	CACHE_FLUSH();
+
+	for(i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) {
+		page_map->remapped[i] = agp_bridge.scratch_page;
+	}
+
+	return 0;
+}
+
+static void serverworks_free_page_map(struct serverworks_page_map *page_map)
+{
+	iounmap(page_map->remapped);
+	ClearPageReserved(virt_to_page(page_map->real));
+	free_page((unsigned long) page_map->real);
+}
+
+static void serverworks_free_gatt_pages(void)
+{
+	int i;
+	struct serverworks_page_map **tables;
+	struct serverworks_page_map *entry;
+
+	tables = serverworks_private.gatt_pages;
+	for(i = 0; i < serverworks_private.num_tables; i++) {
+		entry = tables[i];
+		if (entry != NULL) {
+			if (entry->real != NULL) {
+				serverworks_free_page_map(entry);
+			}
+			kfree(entry);
+		}
+	}
+	kfree(tables);
+}
+
+static int serverworks_create_gatt_pages(int nr_tables)
+{
+	struct serverworks_page_map **tables;
+	struct serverworks_page_map *entry;
+	int retval = 0;
+	int i;
+
+	tables = kmalloc((nr_tables + 1) * sizeof(struct serverworks_page_map *), 
+			 GFP_KERNEL);
+	if (tables == NULL) {
+		return -ENOMEM;
+	}
+	memset(tables, 0, sizeof(struct serverworks_page_map *) * (nr_tables + 1));
+	for (i = 0; i < nr_tables; i++) {
+		entry = kmalloc(sizeof(struct serverworks_page_map), GFP_KERNEL);
+		if (entry == NULL) {
+			retval = -ENOMEM;
+			break;
+		}
+		memset(entry, 0, sizeof(struct serverworks_page_map));
+		tables[i] = entry;
+		retval = serverworks_create_page_map(entry);
+		if (retval != 0) break;
+	}
+	serverworks_private.num_tables = nr_tables;
+	serverworks_private.gatt_pages = tables;
+
+	if (retval != 0) serverworks_free_gatt_pages();
+
+	return retval;
+}
+
+#define SVRWRKS_GET_GATT(addr) (serverworks_private.gatt_pages[\
+	GET_PAGE_DIR_IDX(addr)]->remapped)
+
+#ifndef GET_PAGE_DIR_OFF
+#define GET_PAGE_DIR_OFF(addr) (addr >> 22)
+#endif
+
+#ifndef GET_PAGE_DIR_IDX
+#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \
+	GET_PAGE_DIR_OFF(agp_bridge.gart_bus_addr))
+#endif
+
+#ifndef GET_GATT_OFF
+#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
+#endif
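+/*
+ * Worked example of the lookup above, assuming the aperture base is
+ * 4 MB aligned: for addr = gart_bus_addr + 0x500000 (5 MB into the
+ * aperture), GET_PAGE_DIR_IDX(addr) = 1 (second 4 MB directory slot)
+ * and GET_GATT_OFF(addr) = 0x100000 >> 12 = 256, so SVRWRKS_GET_GATT
+ * returns the uncached mapping of directory page 1 and the entry of
+ * interest is cur_gatt[256].
+ */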
+
+static int serverworks_create_gatt_table(void)
+{
+	struct aper_size_info_lvl2 *value;
+	struct serverworks_page_map page_dir;
+	int retval;
+	u32 temp;
+	int i;
+
+	value = A_SIZE_LVL2(agp_bridge.current_size);
+	retval = serverworks_create_page_map(&page_dir);
+	if (retval != 0) {
+		return retval;
+	}
+	retval = serverworks_create_page_map(&serverworks_private.scratch_dir);
+	if (retval != 0) {
+		serverworks_free_page_map(&page_dir);
+		return retval;
+	}
+	/* Create a fake scratch directory */
+	for(i = 0; i < 1024; i++) {
+		serverworks_private.scratch_dir.remapped[i] = (unsigned long) agp_bridge.scratch_page;
+		page_dir.remapped[i] =
+			virt_to_phys(serverworks_private.scratch_dir.real);
+		page_dir.remapped[i] |= 0x00000001;
+	}
+
+	retval = serverworks_create_gatt_pages(value->num_entries / 1024);
+	if (retval != 0) {
+		serverworks_free_page_map(&page_dir);
+		serverworks_free_page_map(&serverworks_private.scratch_dir);
+		return retval;
+	}
+
+	agp_bridge.gatt_table_real = page_dir.real;
+	agp_bridge.gatt_table = page_dir.remapped;
+	agp_bridge.gatt_bus_addr = virt_to_phys(page_dir.real);
+
+	/* Get the address for the GART region.
+	 * This is a bus address even on the Alpha, because it's
+	 * used to program the AGP master, not the CPU.
+	 */
+
+	pci_read_config_dword(agp_bridge.dev,
+			      serverworks_private.gart_addr_ofs,
+			      &temp);
+	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+	/* Point the page directory entries at the real GATT pages */
+
+	for(i = 0; i < value->num_entries / 1024; i++) {
+		page_dir.remapped[i] =
+			virt_to_phys(serverworks_private.gatt_pages[i]->real);
+		page_dir.remapped[i] |= 0x00000001;
+	}
+
+	return 0;
+}
+
+static int serverworks_free_gatt_table(void)
+{
+	struct serverworks_page_map page_dir;
+   
+	page_dir.real = agp_bridge.gatt_table_real;
+	page_dir.remapped = agp_bridge.gatt_table;
+
+	serverworks_free_gatt_pages();
+	serverworks_free_page_map(&page_dir);
+	serverworks_free_page_map(&serverworks_private.scratch_dir);
+	return 0;
+}
+
+static int serverworks_fetch_size(void)
+{
+	int i;
+	u32 temp;
+	u32 temp2;
+	struct aper_size_info_lvl2 *values;
+
+	values = A_SIZE_LVL2(agp_bridge.aperture_sizes);
+	pci_read_config_dword(agp_bridge.dev,
+			      serverworks_private.gart_addr_ofs,
+			      &temp);
+	pci_write_config_dword(agp_bridge.dev,
+			       serverworks_private.gart_addr_ofs,
+			       SVWRKS_SIZE_MASK);
+	pci_read_config_dword(agp_bridge.dev,
+			      serverworks_private.gart_addr_ofs,
+			      &temp2);
+	pci_write_config_dword(agp_bridge.dev,
+			       serverworks_private.gart_addr_ofs,
+			       temp);
+	temp2 &= SVWRKS_SIZE_MASK;
+
+	for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
+		if (temp2 == values[i].size_value) {
+			agp_bridge.previous_size =
+			    agp_bridge.current_size = (void *) (values + i);
+
+			agp_bridge.aperture_size_idx = i;
+			return values[i].size;
+		}
+	}
+
+	return 0;
+}
+
+static int serverworks_configure(void)
+{
+	struct aper_size_info_lvl2 *current_size;
+	u32 temp;
+	u8 enable_reg;
+	u8 cap_ptr;
+	u32 cap_id;
+	u16 cap_reg;
+
+	current_size = A_SIZE_LVL2(agp_bridge.current_size);
+
+	/* Get the memory mapped registers */
+	pci_read_config_dword(agp_bridge.dev,
+			      serverworks_private.mm_addr_ofs,
+			      &temp);
+	temp = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+	serverworks_private.registers = (volatile u8 *) ioremap(temp, 4096);
+
+	OUTREG8(serverworks_private.registers, SVWRKS_GART_CACHE, 0x0a);
+
+	OUTREG32(serverworks_private.registers, SVWRKS_GATTBASE, 
+		 agp_bridge.gatt_bus_addr);
+
+	cap_reg = INREG16(serverworks_private.registers, SVWRKS_COMMAND);
+	cap_reg &= ~0x0007;
+	cap_reg |= 0x4;
+	OUTREG16(serverworks_private.registers, SVWRKS_COMMAND, cap_reg);
+
+	pci_read_config_byte(serverworks_private.svrwrks_dev,
+			     SVWRKS_AGP_ENABLE, &enable_reg);
+	enable_reg |= 0x1; /* Agp Enable bit */
+	pci_write_config_byte(serverworks_private.svrwrks_dev,
+			      SVWRKS_AGP_ENABLE, enable_reg);
+	agp_bridge.tlb_flush(NULL);
+
+	pci_read_config_byte(serverworks_private.svrwrks_dev, 0x34, &cap_ptr);
+	if (cap_ptr != 0x00) {
+		do {
+			pci_read_config_dword(serverworks_private.svrwrks_dev,
+					      cap_ptr, &cap_id);
+
+			if ((cap_id & 0xff) != 0x02)
+				cap_ptr = (cap_id >> 8) & 0xff;
+		}
+		while (((cap_id & 0xff) != 0x02) && (cap_ptr != 0x00));
+	}
+	agp_bridge.capndx = cap_ptr;
+
+	/* Fill in the mode register */
+	pci_read_config_dword(serverworks_private.svrwrks_dev,
+			      agp_bridge.capndx + 4,
+			      &agp_bridge.mode);
+
+	pci_read_config_byte(agp_bridge.dev,
+			     SVWRKS_CACHING,
+			     &enable_reg);
+	enable_reg &= ~0x3;
+	pci_write_config_byte(agp_bridge.dev,
+			      SVWRKS_CACHING,
+			      enable_reg);
+
+	pci_read_config_byte(agp_bridge.dev,
+			     SVWRKS_FEATURE,
+			     &enable_reg);
+	enable_reg |= (1<<6);
+	pci_write_config_byte(agp_bridge.dev,
+			      SVWRKS_FEATURE,
+			      enable_reg);
+
+	return 0;
+}
+
+static void serverworks_cleanup(void)
+{
+	iounmap((void *) serverworks_private.registers);
+}
+
+/*
+ * This routine could be implemented by taking the addresses
+ * written to the GATT and flushing them individually.  However,
+ * it currently just flushes the whole table, which is probably
+ * more efficient, since agp_memory blocks can span a large
+ * number of entries.
+ */
+
+static void serverworks_tlbflush(agp_memory * temp)
+{
+	unsigned long end;
+
+	OUTREG8(serverworks_private.registers, SVWRKS_POSTFLUSH, 0x01);
+	end = jiffies + 3*HZ;
+	while(INREG8(serverworks_private.registers, 
+		     SVWRKS_POSTFLUSH) == 0x01) {
+		if((signed)(end - jiffies) <= 0) {
+			printk(KERN_ERR "Posted write buffer flush took more"
+			       "then 3 seconds\n");
+		}
+	}
+	OUTREG32(serverworks_private.registers, SVWRKS_DIRFLUSH, 0x00000001);
+	end = jiffies + 3*HZ;
+	while(INREG32(serverworks_private.registers, 
+		     SVWRKS_DIRFLUSH) == 0x00000001) {
+		if((signed)(end - jiffies) <= 0) {
+			printk(KERN_ERR "TLB flush took more"
+			       "then 3 seconds\n");
+		}
+	}
+}
+
+static unsigned long serverworks_mask_memory(unsigned long addr, int type)
+{
+	/* Only type 0 is supported by the serverworks chipsets */
+
+	return addr | agp_bridge.masks[0].mask;
+}
+
+static int serverworks_insert_memory(agp_memory * mem,
+			     off_t pg_start, int type)
+{
+	int i, j, num_entries;
+	unsigned long *cur_gatt;
+	unsigned long addr;
+
+	num_entries = A_SIZE_LVL2(agp_bridge.current_size)->num_entries;
+
+	if (type != 0 || mem->type != 0) {
+		return -EINVAL;
+	}
+	if ((pg_start + mem->page_count) > num_entries) {
+		return -EINVAL;
+	}
+
+	j = pg_start;
+	while (j < (pg_start + mem->page_count)) {
+		addr = (j * PAGE_SIZE) + agp_bridge.gart_bus_addr;
+		cur_gatt = SVRWRKS_GET_GATT(addr);
+		if (!PGE_EMPTY(cur_gatt[GET_GATT_OFF(addr)])) {
+			return -EBUSY;
+		}
+		j++;
+	}
+
+	if (mem->is_flushed == FALSE) {
+		CACHE_FLUSH();
+		mem->is_flushed = TRUE;
+	}
+
+	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+		addr = (j * PAGE_SIZE) + agp_bridge.gart_bus_addr;
+		cur_gatt = SVRWRKS_GET_GATT(addr);
+		cur_gatt[GET_GATT_OFF(addr)] = mem->memory[i];
+	}
+	agp_bridge.tlb_flush(mem);
+	return 0;
+}
+
+static int serverworks_remove_memory(agp_memory * mem, off_t pg_start,
+			     int type)
+{
+	int i;
+	unsigned long *cur_gatt;
+	unsigned long addr;
+
+	if (type != 0 || mem->type != 0) {
+		return -EINVAL;
+	}
+
+	CACHE_FLUSH();
+	agp_bridge.tlb_flush(mem);
+
+	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
+		addr = (i * PAGE_SIZE) + agp_bridge.gart_bus_addr;
+		cur_gatt = SVRWRKS_GET_GATT(addr);
+		cur_gatt[GET_GATT_OFF(addr)] = 
+			(unsigned long) agp_bridge.scratch_page;
+	}
+
+	agp_bridge.tlb_flush(mem);
+	return 0;
+}
+
+static struct gatt_mask serverworks_masks[] =
+{
+	{mask: 0x00000001, type: 0}
+};
+
+static struct aper_size_info_lvl2 serverworks_sizes[7] =
+{
+	{2048, 524288, 0x80000000},
+	{1024, 262144, 0xc0000000},
+	{512, 131072, 0xe0000000},
+	{256, 65536, 0xf0000000},
+	{128, 32768, 0xf8000000},
+	{64, 16384, 0xfc000000},
+	{32, 8192, 0xfe000000}
+};
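+/*
+ * Each entry above gives the aperture size in MB, the number of 4 KB
+ * GATT entries, and the value matched against the size bits read back
+ * from the aperture BAR in serverworks_fetch_size().
+ */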
+
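+/*
+ * serverworks_agp_enable() below negotiates the AGP command word using
+ * the standard AGP status/command bit layout: bits 31:24 request queue
+ * depth, bit 9 side-band addressing (SBA), bit 8 AGP enable, bit 4
+ * fast writes (FW), and bits 2:0 the 4X/2X/1X data rate.
+ */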
+static void serverworks_agp_enable(u32 mode)
+{
+	struct pci_dev *device = NULL;
+	u32 command, scratch, cap_id;
+	u8 cap_ptr;
+
+	pci_read_config_dword(serverworks_private.svrwrks_dev,
+			      agp_bridge.capndx + 4,
+			      &command);
+
+	/*
+	 * PASS1: go through all devices that claim to be
+	 *        AGP devices and collect their data.
+	 */
+
+
+	pci_for_each_dev(device) {
+		cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP);
+		if (cap_ptr != 0x00) {
+			do {
+				pci_read_config_dword(device,
+						      cap_ptr, &cap_id);
+
+				if ((cap_id & 0xff) != 0x02)
+					cap_ptr = (cap_id >> 8) & 0xff;
+			}
+			while (((cap_id & 0xff) != 0x02) && (cap_ptr != 0x00));
+		}
+		if (cap_ptr != 0x00) {
+			/*
+			 * OK, here we have an AGP device.  Disable impossible
+			 * settings, and adjust the read queue to the minimum.
+			 */
+
+			pci_read_config_dword(device, cap_ptr + 4, &scratch);
+
+			/* adjust RQ depth */
+			command =
+			    ((command & ~0xff000000) |
+			     min_t(u32, (mode & 0xff000000),
+				 min_t(u32, (command & 0xff000000),
+				     (scratch & 0xff000000))));
+
+			/* disable SBA if it's not supported */
+			if (!((command & 0x00000200) &&
+			      (scratch & 0x00000200) &&
+			      (mode & 0x00000200)))
+				command &= ~0x00000200;
+
+			/* disable FW */
+			command &= ~0x00000010;
+
+			command &= ~0x00000008;
+
+			if (!((command & 4) &&
+			      (scratch & 4) &&
+			      (mode & 4)))
+				command &= ~0x00000004;
+
+			if (!((command & 2) &&
+			      (scratch & 2) &&
+			      (mode & 2)))
+				command &= ~0x00000002;
+
+			if (!((command & 1) &&
+			      (scratch & 1) &&
+			      (mode & 1)))
+				command &= ~0x00000001;
+		}
+	}
+	/*
+	 * PASS2: Figure out the 4X/2X/1X setting and enable the
+	 *        target (our motherboard chipset).
+	 */
+
+	if (command & 4) {
+		command &= ~3;	/* 4X */
+	}
+	if (command & 2) {
+		command &= ~5;	/* 2X */
+	}
+	if (command & 1) {
+		command &= ~6;	/* 1X */
+	}
+	command |= 0x00000100;
+
+	pci_write_config_dword(serverworks_private.svrwrks_dev,
+			       agp_bridge.capndx + 8,
+			       command);
+
+	/*
+	 * PASS3: Go through all AGP devices and update the
+	 *        command registers.
+	 */
+
+	pci_for_each_dev(device) {
+		cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP);
+		if (cap_ptr != 0x00)
+			pci_write_config_dword(device, cap_ptr + 8, command);
+	}
+}
+
+int __init serverworks_setup (struct pci_dev *pdev)
+{
+	u32 temp;
+	u32 temp2;
+
+	serverworks_private.svrwrks_dev = pdev;
+
+	agp_bridge.masks = serverworks_masks;
+	agp_bridge.num_of_masks = 1;
+	agp_bridge.aperture_sizes = (void *) serverworks_sizes;
+	agp_bridge.size_type = LVL2_APER_SIZE;
+	agp_bridge.num_aperture_sizes = 7;
+	agp_bridge.dev_private_data = (void *) &serverworks_private;
+	agp_bridge.needs_scratch_page = TRUE;
+	agp_bridge.configure = serverworks_configure;
+	agp_bridge.fetch_size = serverworks_fetch_size;
+	agp_bridge.cleanup = serverworks_cleanup;
+	agp_bridge.tlb_flush = serverworks_tlbflush;
+	agp_bridge.mask_memory = serverworks_mask_memory;
+	agp_bridge.agp_enable = serverworks_agp_enable;
+	agp_bridge.cache_flush = global_cache_flush;
+	agp_bridge.create_gatt_table = serverworks_create_gatt_table;
+	agp_bridge.free_gatt_table = serverworks_free_gatt_table;
+	agp_bridge.insert_memory = serverworks_insert_memory;
+	agp_bridge.remove_memory = serverworks_remove_memory;
+	agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
+	agp_bridge.free_by_type = agp_generic_free_by_type;
+	agp_bridge.agp_alloc_page = agp_generic_alloc_page;
+	agp_bridge.agp_destroy_page = agp_generic_destroy_page;
+	agp_bridge.suspend = agp_generic_suspend;
+	agp_bridge.resume = agp_generic_resume;
+	agp_bridge.cant_use_aperture = 0;
+
+	pci_read_config_dword(agp_bridge.dev,
+			      SVWRKS_APSIZE,
+			      &temp);
+
+	serverworks_private.gart_addr_ofs = 0x10;
+
+	if(temp & PCI_BASE_ADDRESS_MEM_TYPE_64) {
+		pci_read_config_dword(agp_bridge.dev,
+				      SVWRKS_APSIZE + 4,
+				      &temp2);
+		if(temp2 != 0) {
+			printk("Detected 64 bit aperture address, but top "
+			       "bits are not zero.  Disabling agp\n");
+			return -ENODEV;
+		}
+		serverworks_private.mm_addr_ofs = 0x18;
+	} else {
+		serverworks_private.mm_addr_ofs = 0x14;
+	}
+
+	pci_read_config_dword(agp_bridge.dev,
+			      serverworks_private.mm_addr_ofs,
+			      &temp);
+	if(temp & PCI_BASE_ADDRESS_MEM_TYPE_64) {
+		pci_read_config_dword(agp_bridge.dev,
+				      serverworks_private.mm_addr_ofs + 4,
+				      &temp2);
+		if(temp2 != 0) {
+			printk("Detected 64 bit MMIO address, but top "
+			       "bits are not zero.  Disabling agp\n");
+			return -ENODEV;
+		}
+	}
+
+	return 0;
+}
+
diff -Nru a/drivers/char/agp/via.c b/drivers/char/agp/via.c
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/drivers/char/agp/via.c	Wed Jul 17 11:29:31 2002
@@ -0,0 +1,151 @@
+/*
+ * AGPGART module version 0.99
+ * Copyright (C) 1999 Jeff Hartmann
+ * Copyright (C) 1999 Precision Insight, Inc.
+ * Copyright (C) 1999 Xi Graphics, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, 
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE 
+ * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * TODO: 
+ * - Allocate more than order 0 pages to avoid too much linear map splitting.
+ */
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/agp_backend.h>
+#include "agp.h"
+
+
+static int via_fetch_size(void)
+{
+	int i;
+	u8 temp;
+	struct aper_size_info_8 *values;
+
+	values = A_SIZE_8(agp_bridge.aperture_sizes);
+	pci_read_config_byte(agp_bridge.dev, VIA_APSIZE, &temp);
+	for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
+		if (temp == values[i].size_value) {
+			agp_bridge.previous_size =
+			    agp_bridge.current_size = (void *) (values + i);
+			agp_bridge.aperture_size_idx = i;
+			return values[i].size;
+		}
+	}
+
+	return 0;
+}
+
+static int via_configure(void)
+{
+	u32 temp;
+	struct aper_size_info_8 *current_size;
+
+	current_size = A_SIZE_8(agp_bridge.current_size);
+	/* aperture size */
+	pci_write_config_byte(agp_bridge.dev, VIA_APSIZE,
+			      current_size->size_value);
+	/* address to map to */
+	pci_read_config_dword(agp_bridge.dev, VIA_APBASE, &temp);
+	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+	/* GART control register */
+	pci_write_config_dword(agp_bridge.dev, VIA_GARTCTRL, 0x0000000f);
+
+	/* attbase - aperture GATT base */
+	pci_write_config_dword(agp_bridge.dev, VIA_ATTBASE,
+			    (agp_bridge.gatt_bus_addr & 0xfffff000) | 3);
+	return 0;
+}
+
+static void via_cleanup(void)
+{
+	struct aper_size_info_8 *previous_size;
+
+	previous_size = A_SIZE_8(agp_bridge.previous_size);
+	pci_write_config_byte(agp_bridge.dev, VIA_APSIZE,
+			      previous_size->size_value);
+	/* Do not disable by writing 0 to VIA_ATTBASE, it screws things up
+	 * during reinitialization.
+	 */
+}
+
+static void via_tlbflush(agp_memory * mem)
+{
+	pci_write_config_dword(agp_bridge.dev, VIA_GARTCTRL, 0x0000008f);
+	pci_write_config_dword(agp_bridge.dev, VIA_GARTCTRL, 0x0000000f);
+}
+
+static unsigned long via_mask_memory(unsigned long addr, int type)
+{
+	/* Memory type is ignored */
+
+	return addr | agp_bridge.masks[0].mask;
+}
+
+static struct aper_size_info_8 via_generic_sizes[7] =
+{
+	{256, 65536, 6, 0},
+	{128, 32768, 5, 128},
+	{64, 16384, 4, 192},
+	{32, 8192, 3, 224},
+	{16, 4096, 2, 240},
+	{8, 2048, 1, 248},
+	{4, 1024, 0, 252}
+};
+
+static struct gatt_mask via_generic_masks[] =
+{
+	{mask: 0x00000000, type: 0}
+};
+
+int __init via_generic_setup (struct pci_dev *pdev)
+{
+	agp_bridge.masks = via_generic_masks;
+	agp_bridge.num_of_masks = 1;
+	agp_bridge.aperture_sizes = (void *) via_generic_sizes;
+	agp_bridge.size_type = U8_APER_SIZE;
+	agp_bridge.num_aperture_sizes = 7;
+	agp_bridge.dev_private_data = NULL;
+	agp_bridge.needs_scratch_page = FALSE;
+	agp_bridge.configure = via_configure;
+	agp_bridge.fetch_size = via_fetch_size;
+	agp_bridge.cleanup = via_cleanup;
+	agp_bridge.tlb_flush = via_tlbflush;
+	agp_bridge.mask_memory = via_mask_memory;
+	agp_bridge.agp_enable = agp_generic_agp_enable;
+	agp_bridge.cache_flush = global_cache_flush;
+	agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
+	agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
+	agp_bridge.insert_memory = agp_generic_insert_memory;
+	agp_bridge.remove_memory = agp_generic_remove_memory;
+	agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
+	agp_bridge.free_by_type = agp_generic_free_by_type;
+	agp_bridge.agp_alloc_page = agp_generic_alloc_page;
+	agp_bridge.agp_destroy_page = agp_generic_destroy_page;
+	agp_bridge.suspend = agp_generic_suspend;
+	agp_bridge.resume = agp_generic_resume;
+	agp_bridge.cant_use_aperture = 0;
+
+	return 0;
+	
+	(void) pdev; /* unused */
+}
