/*
 * AGPGART driver.
 * Copyright (C) 2004 Silicon Graphics, Inc.
 * Copyright (C) 2002-2005 Dave Jones.
 * Copyright (C) 1999 Jeff Hartmann.
 * Copyright (C) 1999 Precision Insight, Inc.
 * Copyright (C) 1999 Xi Graphics, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
 * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * TODO:
 * - Allocate more than order 0 pages to avoid too much linear map splitting.
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pagemap.h>
#include <linux/miscdevice.h>
#include <linux/pm.h>
#include <linux/agp_backend.h>
#include <linux/vmalloc.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <asm/cacheflush.h>
#include <asm/pgtable.h>
#include "agp.h"

__u32 *agp_gatt_table;
int agp_memory_reserved;

/*
 * Needed by the Nforce GART driver for the time being. Would be
 * nice to do this some other way instead of needing this export.
 */
EXPORT_SYMBOL_GPL(agp_memory_reserved);

/*
 * Generic routines for handling agp_memory structures -
 * They use the basic page allocation routines to do the brunt of the work.
 */

void agp_free_key(int key)
{
	if (key < 0)
		return;

	if (key < MAXKEY)
		clear_bit(key, agp_bridge->key_list);
}
EXPORT_SYMBOL(agp_free_key);

static int agp_get_key(void)
{
	int bit;

	bit = find_first_zero_bit(agp_bridge->key_list, MAXKEY);
	if (bit < MAXKEY) {
		set_bit(bit, agp_bridge->key_list);
		return bit;
	}
	return -1;
}

/*
 * Use kmalloc if possible for the page list. Otherwise fall back to
 * vmalloc. This speeds things up and also saves memory for small AGP
 * regions.
 */
void agp_alloc_page_array(size_t size, struct agp_memory *mem)
{
	mem->pages = kvmalloc(size, GFP_KERNEL);
}
EXPORT_SYMBOL(agp_alloc_page_array);
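/*
 * Illustrative note (not part of the original file): the array allocated
 * above is sized in bytes, so callers pass num_pages * sizeof(struct page *),
 * and it is released with the matching agp_free_page_array() helper
 * (assumed here to wrap kvfree()), as done in agp_free_memory() below.
 */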
static struct agp_memory *agp_create_user_memory(unsigned long num_agp_pages)
{
	struct agp_memory *new;
	unsigned long alloc_size = num_agp_pages*sizeof(struct page *);

	if (INT_MAX/sizeof(struct page *) < num_agp_pages)
		return NULL;

	new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL);
	if (new == NULL)
		return NULL;

	new->key = agp_get_key();

	if (new->key < 0) {
		kfree(new);
		return NULL;
	}

	agp_alloc_page_array(alloc_size, new);

	if (new->pages == NULL) {
		agp_free_key(new->key);
		kfree(new);
		return NULL;
	}
	new->num_scratch_pages = 0;
	return new;
}

struct agp_memory *agp_create_memory(int scratch_pages)
{
	struct agp_memory *new;

	new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL);
	if (new == NULL)
		return NULL;

	new->key = agp_get_key();

	if (new->key < 0) {
		kfree(new);
		return NULL;
	}

	agp_alloc_page_array(PAGE_SIZE * scratch_pages, new);

	if (new->pages == NULL) {
		agp_free_key(new->key);
		kfree(new);
		return NULL;
	}
	new->num_scratch_pages = scratch_pages;
	new->type = AGP_NORMAL_MEMORY;
	return new;
}
EXPORT_SYMBOL(agp_create_memory);

/**
 * agp_free_memory - free memory associated with an agp_memory pointer.
 *
 * @curr:	agp_memory pointer to be freed.
 *
 * It is the only function that can be called when the backend is not owned
 * by the caller.  (So it can free memory on client death.)
 */
void agp_free_memory(struct agp_memory *curr)
{
	size_t i;

	if (curr == NULL)
		return;

	if (curr->is_bound)
		agp_unbind_memory(curr);

	if (curr->type >= AGP_USER_TYPES) {
		agp_generic_free_by_type(curr);
		return;
	}

	if (curr->type != 0) {
		curr->bridge->driver->free_by_type(curr);
		return;
	}

	if (curr->page_count != 0) {
		if (curr->bridge->driver->agp_destroy_pages) {
			curr->bridge->driver->agp_destroy_pages(curr);
		} else {
			for (i = 0; i < curr->page_count; i++) {
				curr->bridge->driver->agp_destroy_page(
					curr->pages[i],
					AGP_PAGE_DESTROY_UNMAP);
			}
			for (i = 0; i < curr->page_count; i++) {
				curr->bridge->driver->agp_destroy_page(
					curr->pages[i],
					AGP_PAGE_DESTROY_FREE);
			}
		}
	}
	agp_free_key(curr->key);
	agp_free_page_array(curr);
	kfree(curr);
}
EXPORT_SYMBOL(agp_free_memory);

#define ENTRIES_PER_PAGE	(PAGE_SIZE / sizeof(unsigned long))
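/*
 * Worked example (illustrative, assuming a 4 KiB PAGE_SIZE and 8-byte
 * pointers): ENTRIES_PER_PAGE is 4096 / 8 = 512, so a request for 256
 * aperture pages (1 MiB) in agp_allocate_memory() below rounds up to a
 * single scratch page, while 1024 pages would need two.
 */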
/**
 * agp_allocate_memory - allocate a group of pages of a certain type.
 *
 * @bridge:	an agp_bridge_data struct allocated for the AGP host bridge.
 * @page_count:	size_t argument of the number of pages.
 * @type:	u32 argument of the type of memory to be allocated.
 *
 * Every agp bridge device will allow you to allocate AGP_NORMAL_MEMORY which
 * maps to physical ram.  Any other type is device dependent.
 *
 * It returns NULL whenever memory is unavailable.
 */
struct agp_memory *agp_allocate_memory(struct agp_bridge_data *bridge,
				       size_t page_count, u32 type)
{
	int scratch_pages;
	struct agp_memory *new;
	size_t i;
	int cur_memory;

	if (!bridge)
		return NULL;

	cur_memory = atomic_read(&bridge->current_memory_agp);
	if ((cur_memory + page_count > bridge->max_memory_agp) ||
	    (cur_memory + page_count < page_count))
		return NULL;

	if (type >= AGP_USER_TYPES) {
		new = agp_generic_alloc_user(page_count, type);
		if (new)
			new->bridge = bridge;
		return new;
	}

	if (type != 0) {
		new = bridge->driver->alloc_by_type(page_count, type);
		if (new)
			new->bridge = bridge;
		return new;
	}

	scratch_pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;

	new = agp_create_memory(scratch_pages);

	if (new == NULL)
		return NULL;

	if (bridge->driver->agp_alloc_pages) {
		if (bridge->driver->agp_alloc_pages(bridge, new, page_count)) {
			agp_free_memory(new);
			return NULL;
		}
		new->bridge = bridge;
		return new;
	}

	for (i = 0; i < page_count; i++) {
		struct page *page = bridge->driver->agp_alloc_page(bridge);

		if (page == NULL) {
			agp_free_memory(new);
			return NULL;
		}
		new->pages[i] = page;
		new->page_count++;
	}
	new->bridge = bridge;

	return new;
}
EXPORT_SYMBOL(agp_allocate_memory);
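/*
 * Illustrative lifecycle sketch (not part of the original file): an
 * in-kernel user such as a DRM driver typically pairs these calls roughly
 * as follows.  The bridge pointer and page offset are placeholders.
 *
 *	struct agp_memory *mem;
 *
 *	mem = agp_allocate_memory(bridge, 16, AGP_NORMAL_MEMORY);
 *	if (!mem)
 *		return -ENOMEM;
 *	if (agp_bind_memory(mem, pg_start))	// see agp_bind_memory() below
 *		goto out_free;
 *	// ...use the aperture range...
 *	agp_unbind_memory(mem);
 * out_free:
 *	agp_free_memory(mem);
 */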
/* End - Generic routines for handling agp_memory structures */

static int agp_return_size(void)
{
	int current_size;
	void *temp;

	temp = agp_bridge->current_size;

	switch (agp_bridge->driver->size_type) {
	case U8_APER_SIZE:
		current_size = A_SIZE_8(temp)->size;
		break;
	case U16_APER_SIZE:
		current_size = A_SIZE_16(temp)->size;
		break;
	case U32_APER_SIZE:
		current_size = A_SIZE_32(temp)->size;
		break;
	case LVL2_APER_SIZE:
		current_size = A_SIZE_LVL2(temp)->size;
		break;
	case FIXED_APER_SIZE:
		current_size = A_SIZE_FIX(temp)->size;
		break;
	default:
		current_size = 0;
		break;
	}

	current_size -= (agp_memory_reserved / (1024*1024));
	if (current_size < 0)
		current_size = 0;
	return current_size;
}

int agp_num_entries(void)
{
	int num_entries;
	void *temp;

	temp = agp_bridge->current_size;

	switch (agp_bridge->driver->size_type) {
	case U8_APER_SIZE:
		num_entries = A_SIZE_8(temp)->num_entries;
		break;
	case U16_APER_SIZE:
		num_entries = A_SIZE_16(temp)->num_entries;
		break;
	case U32_APER_SIZE:
		num_entries = A_SIZE_32(temp)->num_entries;
		break;
	case LVL2_APER_SIZE:
		num_entries = A_SIZE_LVL2(temp)->num_entries;
		break;
	case FIXED_APER_SIZE:
		num_entries = A_SIZE_FIX(temp)->num_entries;
		break;
	default:
		num_entries = 0;
		break;
	}

	num_entries -= agp_memory_reserved >> PAGE_SHIFT;
	if (num_entries < 0)
		num_entries = 0;
	return num_entries;
}
EXPORT_SYMBOL_GPL(agp_num_entries);

/**
 * agp_copy_info - copy bridge state information
 *
 * @bridge:	an agp_bridge_data struct allocated for the AGP host bridge.
 * @info:	agp_kern_info pointer.  The caller should ensure that this pointer is valid.
 *
 * This function copies information about the agp bridge device and the state of
 * the agp backend into an agp_kern_info pointer.
 */
int agp_copy_info(struct agp_bridge_data *bridge, struct agp_kern_info *info)
{
	memset(info, 0, sizeof(struct agp_kern_info));
	if (!bridge) {
		info->chipset = NOT_SUPPORTED;
		return -EIO;
	}

	info->version.major = bridge->version->major;
	info->version.minor = bridge->version->minor;
	info->chipset = SUPPORTED;
	info->device = bridge->dev;
	if (bridge->mode & AGPSTAT_MODE_3_0)
		info->mode = bridge->mode & ~AGP3_RESERVED_MASK;
	else
		info->mode = bridge->mode & ~AGP2_RESERVED_MASK;
	info->aper_base = bridge->gart_bus_addr;
	info->aper_size = agp_return_size();
	info->max_memory = bridge->max_memory_agp;
	info->current_memory = atomic_read(&bridge->current_memory_agp);
	info->cant_use_aperture = bridge->driver->cant_use_aperture;
	info->vm_ops = bridge->vm_ops;
	info->page_mask = ~0UL;
	return 0;
}
EXPORT_SYMBOL(agp_copy_info);
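/*
 * Illustrative sketch (not from the original source): a consumer such as a
 * DRM driver typically snapshots the bridge state like this; aper_size is
 * reported in megabytes by agp_return_size() above.
 *
 *	struct agp_kern_info info;
 *
 *	if (agp_copy_info(bridge, &info) == 0)
 *		pr_info("aperture at %#lx, %lu MB\n",
 *			(unsigned long)info.aper_base,
 *			(unsigned long)info.aper_size);
 */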
/* End - Routine to copy over information structure */

/*
 * Routines for handling swapping of agp_memory into the GATT -
 * These routines take agp_memory and insert them into the GATT.
 * They call device specific routines to actually write to the GATT.
 */

/**
 * agp_bind_memory - Bind an agp_memory structure into the GATT.
 *
 * @curr:	agp_memory pointer
 * @pg_start:	an offset into the graphics aperture translation table
 *
 * It returns -EINVAL if the pointer == NULL.
 * It returns -EBUSY if the area of the table requested is already in use.
 */
int agp_bind_memory(struct agp_memory *curr, off_t pg_start)
{
	int ret_val;

	if (curr == NULL)
		return -EINVAL;

	if (curr->is_bound) {
		printk(KERN_INFO PFX "memory %p is already bound!\n", curr);
		return -EINVAL;
	}
	if (!curr->is_flushed) {
		curr->bridge->driver->cache_flush();
		curr->is_flushed = true;
	}

	ret_val = curr->bridge->driver->insert_memory(curr, pg_start, curr->type);

	if (ret_val != 0)
		return ret_val;

	curr->is_bound = true;
	curr->pg_start = pg_start;
	spin_lock(&agp_bridge->mapped_lock);
	list_add(&curr->mapped_list, &agp_bridge->mapped_list);
	spin_unlock(&agp_bridge->mapped_lock);

	return 0;
}
EXPORT_SYMBOL(agp_bind_memory);

/**
 * agp_unbind_memory - Removes an agp_memory structure from the GATT
 *
 * @curr:	agp_memory pointer to be removed from the GATT.
 *
 * It returns -EINVAL if this piece of agp_memory is not currently bound to
 * the graphics aperture translation table or if the agp_memory pointer == NULL
 */
int agp_unbind_memory(struct agp_memory *curr)
{
	int ret_val;

	if (curr == NULL)
		return -EINVAL;

	if (!curr->is_bound) {
		printk(KERN_INFO PFX "memory %p was not bound!\n", curr);
		return -EINVAL;
	}

	ret_val = curr->bridge->driver->remove_memory(curr, curr->pg_start, curr->type);

	if (ret_val != 0)
		return ret_val;

	curr->is_bound = false;
	curr->pg_start = 0;
	spin_lock(&curr->bridge->mapped_lock);
	list_del(&curr->mapped_list);
	spin_unlock(&curr->bridge->mapped_lock);
	return 0;
}
EXPORT_SYMBOL(agp_unbind_memory);
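/*
 * Illustrative note (not part of the original file): pg_start is an entry
 * index into the GATT, not a byte offset.  On a bridge with 4 KiB aperture
 * pages, binding at pg_start = 16 therefore maps the memory starting
 * 64 KiB into the aperture, i.e. at gart_bus_addr + 16 * PAGE_SIZE.
 */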
/* End - Routines for handling swapping of agp_memory into the GATT */

/* Generic Agp routines - Start */
static void agp_v2_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat)
{
	u32 tmp;

	if (*requested_mode & AGP2_RESERVED_MASK) {
		printk(KERN_INFO PFX "reserved bits set (%x) in mode 0x%x. Fixed.\n",
			*requested_mode & AGP2_RESERVED_MASK, *requested_mode);
		*requested_mode &= ~AGP2_RESERVED_MASK;
	}

	/*
	 * Some dumb bridges are programmed to disobey the AGP2 spec.
	 * This is likely a BIOS misprogramming rather than poweron default, or
	 * it would be a lot more common.
	 * https://bugs.freedesktop.org/show_bug.cgi?id=8816
	 * AGPv2 spec 6.1.9 states:
	 *   The RATE field indicates the data transfer rates supported by this
	 *   device. A.G.P. devices must report all that apply.
	 * Fix them up as best we can.
	 */
	switch (*bridge_agpstat & 7) {
	case 4:
		*bridge_agpstat |= (AGPSTAT2_2X | AGPSTAT2_1X);
		printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x4 rate. "
			"Fixing up support for x2 & x1\n");
		break;
	case 2:
		*bridge_agpstat |= AGPSTAT2_1X;
		printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x2 rate. "
			"Fixing up support for x1\n");
		break;
	default:
		break;
	}

	/* Check the speed bits make sense. Only one should be set. */
	tmp = *requested_mode & 7;
	switch (tmp) {
	case 0:
		printk(KERN_INFO PFX "%s tried to set rate=x0. Setting to x1 mode.\n", current->comm);
		*requested_mode |= AGPSTAT2_1X;
		break;
	case 1:
	case 2:
		break;
	case 3:
		*requested_mode &= ~(AGPSTAT2_1X);	/* rate=2 */
		break;
	case 4:
		break;
	case 5:
	case 6:
	case 7:
		*requested_mode &= ~(AGPSTAT2_1X|AGPSTAT2_2X); /* rate=4*/
		break;
	}

	/* disable SBA if it's not supported */
	if (!((*bridge_agpstat & AGPSTAT_SBA) && (*vga_agpstat & AGPSTAT_SBA) && (*requested_mode & AGPSTAT_SBA)))
		*bridge_agpstat &= ~AGPSTAT_SBA;

	/* Set rate */
	if (!((*bridge_agpstat & AGPSTAT2_4X) && (*vga_agpstat & AGPSTAT2_4X) && (*requested_mode & AGPSTAT2_4X)))
		*bridge_agpstat &= ~AGPSTAT2_4X;

	if (!((*bridge_agpstat & AGPSTAT2_2X) && (*vga_agpstat & AGPSTAT2_2X) && (*requested_mode & AGPSTAT2_2X)))
		*bridge_agpstat &= ~AGPSTAT2_2X;

	if (!((*bridge_agpstat & AGPSTAT2_1X) && (*vga_agpstat & AGPSTAT2_1X) && (*requested_mode & AGPSTAT2_1X)))
		*bridge_agpstat &= ~AGPSTAT2_1X;

	/* Now we know what mode it should be, clear out the unwanted bits. */
	if (*bridge_agpstat & AGPSTAT2_4X)
		*bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_2X);	/* 4X */

	if (*bridge_agpstat & AGPSTAT2_2X)
		*bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_4X);	/* 2X */

	if (*bridge_agpstat & AGPSTAT2_1X)
		*bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);	/* 1X */

	/* Apply any errata. */
	if (agp_bridge->flags & AGP_ERRATA_FASTWRITES)
		*bridge_agpstat &= ~AGPSTAT_FW;

	if (agp_bridge->flags & AGP_ERRATA_SBA)
		*bridge_agpstat &= ~AGPSTAT_SBA;

	if (agp_bridge->flags & AGP_ERRATA_1X) {
		*bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);
		*bridge_agpstat |= AGPSTAT2_1X;
	}

	/* If we've dropped down to 1X, disable fast writes. */
	if (*bridge_agpstat & AGPSTAT2_1X)
		*bridge_agpstat &= ~AGPSTAT_FW;
}

/*
 * requested_mode = Mode requested by (typically) X.
 * bridge_agpstat = PCI_AGP_STATUS from agp bridge.
 * vga_agpstat = PCI_AGP_STATUS from graphic card.
 */
static void agp_v3_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat)
{
	u32 origbridge = *bridge_agpstat, origvga = *vga_agpstat;
	u32 tmp;

	if (*requested_mode & AGP3_RESERVED_MASK) {
		printk(KERN_INFO PFX "reserved bits set (%x) in mode 0x%x. Fixed.\n",
			*requested_mode & AGP3_RESERVED_MASK, *requested_mode);
		*requested_mode &= ~AGP3_RESERVED_MASK;
	}

	/* Check the speed bits make sense. */
	tmp = *requested_mode & 7;
	if (tmp == 0) {
		printk(KERN_INFO PFX "%s tried to set rate=x0. Setting to AGP3 x4 mode.\n", current->comm);
		*requested_mode |= AGPSTAT3_4X;
	}
	if (tmp >= 3) {
		printk(KERN_INFO PFX "%s tried to set rate=x%d. Setting to AGP3 x8 mode.\n", current->comm, tmp * 4);
		*requested_mode = (*requested_mode & ~7) | AGPSTAT3_8X;
	}

	/* ARQSZ - Set the value to the maximum one.
	 * Don't allow the mode register to override values. */
	*bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_ARQSZ) |
		max_t(u32, (*bridge_agpstat & AGPSTAT_ARQSZ), (*vga_agpstat & AGPSTAT_ARQSZ)));

	/* Calibration cycle.
	 * Don't allow the mode register to override values. */
	*bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_CAL_MASK) |
		min_t(u32, (*bridge_agpstat & AGPSTAT_CAL_MASK), (*vga_agpstat & AGPSTAT_CAL_MASK)));

	/* SBA *must* be supported for AGP v3 */
	*bridge_agpstat |= AGPSTAT_SBA;

	/*
	 * Set speed.
	 * Check for invalid speeds. This can happen when applications
	 * written before the AGP 3.0 standard pass AGP2.x modes to AGP3 hardware
	 */
	if (*requested_mode & AGPSTAT_MODE_3_0) {
		/*
		 * Caller hasn't a clue what it is doing. Bridge is in 3.0 mode,
		 * have been passed a 3.0 mode, but with 2.x speed bits set.
		 * AGP2.x 4x -> AGP3.0 4x.
		 */
		if (*requested_mode & AGPSTAT2_4X) {
			printk(KERN_INFO PFX "%s passes broken AGP3 flags (%x). Fixed.\n",
				current->comm, *requested_mode);
			*requested_mode &= ~AGPSTAT2_4X;
			*requested_mode |= AGPSTAT3_4X;
		}
	} else {
		/*
		 * The caller doesn't know what they are doing. We are in 3.0 mode,
		 * but have been passed an AGP 2.x mode.
		 * Convert AGP 1x,2x,4x -> AGP 3.0 4x.
		 */
		printk(KERN_INFO PFX "%s passes broken AGP2 flags (%x) in AGP3 mode. Fixed.\n",
			current->comm, *requested_mode);
		*requested_mode &= ~(AGPSTAT2_4X | AGPSTAT2_2X | AGPSTAT2_1X);
		*requested_mode |= AGPSTAT3_4X;
	}

	if (*requested_mode & AGPSTAT3_8X) {
		if (!(*bridge_agpstat & AGPSTAT3_8X)) {
			*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
			*bridge_agpstat |= AGPSTAT3_4X;
			printk(KERN_INFO PFX "%s requested AGPx8 but bridge not capable.\n", current->comm);
			return;
		}
		if (!(*vga_agpstat & AGPSTAT3_8X)) {
			*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
			*bridge_agpstat |= AGPSTAT3_4X;
			printk(KERN_INFO PFX "%s requested AGPx8 but graphic card not capable.\n", current->comm);
			return;
		}
		/* All set, bridge & device can do AGP x8*/
		*bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
		goto done;

	} else if (*requested_mode & AGPSTAT3_4X) {
		*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
		*bridge_agpstat |= AGPSTAT3_4X;
		goto done;

	} else {
		/*
		 * If we didn't specify an AGP mode, we see if both
		 * the graphics card, and the bridge can do x8, and use it if so.
		 * If not, we fall back to x4 mode.
		 */
		if ((*bridge_agpstat & AGPSTAT3_8X) && (*vga_agpstat & AGPSTAT3_8X)) {
			printk(KERN_INFO PFX "No AGP mode specified. Setting to highest mode "
				"supported by bridge & card (x8).\n");
			*bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
			*vga_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
		} else {
			printk(KERN_INFO PFX "Fell back to AGPx4 mode because ");
			if (!(*bridge_agpstat & AGPSTAT3_8X)) {
				printk(KERN_INFO PFX "bridge couldn't do x8. bridge_agpstat:%x (orig=%x)\n",
					*bridge_agpstat, origbridge);
				*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
				*bridge_agpstat |= AGPSTAT3_4X;
			}
			if (!(*vga_agpstat & AGPSTAT3_8X)) {
				printk(KERN_INFO PFX "graphics card couldn't do x8. vga_agpstat:%x (orig=%x)\n",
					*vga_agpstat, origvga);
				*vga_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
				*vga_agpstat |= AGPSTAT3_4X;
			}
		}
	}

done:
	/* Apply any errata. */
	if (agp_bridge->flags & AGP_ERRATA_FASTWRITES)
		*bridge_agpstat &= ~AGPSTAT_FW;

	if (agp_bridge->flags & AGP_ERRATA_SBA)
		*bridge_agpstat &= ~AGPSTAT_SBA;

	if (agp_bridge->flags & AGP_ERRATA_1X) {
		*bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);
		*bridge_agpstat |= AGPSTAT2_1X;
	}
}

/**
 * agp_collect_device_status - determine correct agp_cmd from various agp_stat's
 * @bridge: an agp_bridge_data struct allocated for the AGP host bridge.
 * @requested_mode: requested agp_stat from userspace (Typically from X)
 * @bridge_agpstat: current agp_stat from AGP bridge.
 *
 * This function will hunt for an AGP graphics card, and try to match
 * the requested mode to the capabilities of both the bridge and the card.
 */
u32 agp_collect_device_status(struct agp_bridge_data *bridge, u32 requested_mode, u32 bridge_agpstat)
{
	struct pci_dev *device = NULL;
	u32 vga_agpstat;
	u8 cap_ptr;

	for (;;) {
		device = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, device);
		if (!device) {
			printk(KERN_INFO PFX "Couldn't find an AGP VGA controller.\n");
			return 0;
		}
		cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP);
		if (cap_ptr)
			break;
	}

	/*
	 * Ok, here we have an AGP device. Disable impossible
	 * settings, and adjust the readqueue to the minimum.
	 */
	pci_read_config_dword(device, cap_ptr+PCI_AGP_STATUS, &vga_agpstat);

	/* adjust RQ depth */
	bridge_agpstat = ((bridge_agpstat & ~AGPSTAT_RQ_DEPTH) |
		min_t(u32, (requested_mode & AGPSTAT_RQ_DEPTH),
			min_t(u32, (bridge_agpstat & AGPSTAT_RQ_DEPTH), (vga_agpstat & AGPSTAT_RQ_DEPTH))));

	/* disable FW if it's not supported */
	if (!((bridge_agpstat & AGPSTAT_FW) &&
	      (vga_agpstat & AGPSTAT_FW) &&
	      (requested_mode & AGPSTAT_FW)))
		bridge_agpstat &= ~AGPSTAT_FW;

	/* Check to see if we are operating in 3.0 mode */
	if (agp_bridge->mode & AGPSTAT_MODE_3_0)
		agp_v3_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat);
	else
		agp_v2_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat);

	pci_dev_put(device);
	return bridge_agpstat;
}
EXPORT_SYMBOL(agp_collect_device_status);
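/*
 * Illustrative example (not part of the original file): the negotiated word
 * is roughly the bitwise intersection of the three status words plus the
 * fix-ups above.  If both bridge and card advertise AGPSTAT2_4X and
 * AGPSTAT_FW but the requested mode only sets AGPSTAT2_4X, the result keeps
 * the x4 rate and drops fast writes, and agp_v2_parse_one() then clears the
 * remaining x1/x2 bits so that exactly one rate bit survives.
 */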
void agp_device_command(u32 bridge_agpstat, bool agp_v3)
{
	struct pci_dev *device = NULL;
	int mode;

	mode = bridge_agpstat & 0x7;
	if (agp_v3)
		mode *= 4;

	for_each_pci_dev(device) {
		u8 agp = pci_find_capability(device, PCI_CAP_ID_AGP);
		if (!agp)
			continue;

		dev_info(&device->dev, "putting AGP V%d device into %dx mode\n",
			 agp_v3 ? 3 : 2, mode);
		pci_write_config_dword(device, agp + PCI_AGP_COMMAND, bridge_agpstat);
	}
}
EXPORT_SYMBOL(agp_device_command);
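/*
 * Illustrative note (not from the original source): the low three bits of
 * the command word encode the rate directly for AGP 2.x (1, 2 or 4), while
 * AGP 3.0 encodes multiples of four, hence the "mode *= 4" above: a raw
 * value of 2 means x2 on a v2 bridge but x8 on a v3 bridge.
 */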
void get_agp_version(struct agp_bridge_data *bridge)
{
	u32 ncapid;

	/* Exit early if already set by errata workarounds. */
	if (bridge->major_version != 0)
		return;

	pci_read_config_dword(bridge->dev, bridge->capndx, &ncapid);
	bridge->major_version = (ncapid >> AGP_MAJOR_VERSION_SHIFT) & 0xf;
	bridge->minor_version = (ncapid >> AGP_MINOR_VERSION_SHIFT) & 0xf;
}
EXPORT_SYMBOL(get_agp_version);

void agp_generic_enable(struct agp_bridge_data *bridge, u32 requested_mode)
{
	u32 bridge_agpstat, temp;

	get_agp_version(agp_bridge);

	dev_info(&agp_bridge->dev->dev, "AGP %d.%d bridge\n",
		 agp_bridge->major_version, agp_bridge->minor_version);

	pci_read_config_dword(agp_bridge->dev,
			      agp_bridge->capndx + PCI_AGP_STATUS, &bridge_agpstat);

	bridge_agpstat = agp_collect_device_status(agp_bridge, requested_mode, bridge_agpstat);
	if (bridge_agpstat == 0)
		/* Something bad happened. FIXME: Return error code? */
		return;

	bridge_agpstat |= AGPSTAT_AGP_ENABLE;

	/* Do AGP version specific frobbing. */
	if (bridge->major_version >= 3) {
		if (bridge->mode & AGPSTAT_MODE_3_0) {
			/* If we have 3.5, we can do the isoch stuff. */
			if (bridge->minor_version >= 5)
				agp_3_5_enable(bridge);
			agp_device_command(bridge_agpstat, true);
			return;
		} else {
			/* Disable calibration cycle in RX91<1> when not in AGP3.0 mode of operation.*/
			bridge_agpstat &= ~(7<<10);
			pci_read_config_dword(bridge->dev,
					      bridge->capndx+AGPCTRL, &temp);
			temp |= (1<<9);
			pci_write_config_dword(bridge->dev,
					       bridge->capndx+AGPCTRL, temp);

			dev_info(&bridge->dev->dev, "bridge is in legacy mode, falling back to 2.x\n");
		}
	}

	/* AGP v<3 */
	agp_device_command(bridge_agpstat, false);
}
EXPORT_SYMBOL(agp_generic_enable);

int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
{
	char *table;
	char *table_end;
	int size;
	int page_order;
	int num_entries;
	int i;
	void *temp;
	struct page *page;

	/* The generic routines can't handle 2 level gatt's */
	if (bridge->driver->size_type == LVL2_APER_SIZE)
		return -EINVAL;

	table = NULL;
	i = bridge->aperture_size_idx;
	temp = bridge->current_size;
	size = page_order = num_entries = 0;

	if (bridge->driver->size_type != FIXED_APER_SIZE) {
		do {
			switch (bridge->driver->size_type) {
			case U8_APER_SIZE:
				size = A_SIZE_8(temp)->size;
				page_order =
				    A_SIZE_8(temp)->page_order;
				num_entries =
				    A_SIZE_8(temp)->num_entries;
				break;
			case U16_APER_SIZE:
				size = A_SIZE_16(temp)->size;
				page_order = A_SIZE_16(temp)->page_order;
				num_entries = A_SIZE_16(temp)->num_entries;
				break;
			case U32_APER_SIZE:
				size = A_SIZE_32(temp)->size;
				page_order = A_SIZE_32(temp)->page_order;
				num_entries = A_SIZE_32(temp)->num_entries;
				break;
			/* This case will never really happen. */
			case FIXED_APER_SIZE:
			case LVL2_APER_SIZE:
			default:
				size = page_order = num_entries = 0;
				break;
			}

			table = alloc_gatt_pages(page_order);

			if (table == NULL) {
				i++;
				switch (bridge->driver->size_type) {
				case U8_APER_SIZE:
					bridge->current_size = A_IDX8(bridge);
					break;
				case U16_APER_SIZE:
					bridge->current_size = A_IDX16(bridge);
					break;
				case U32_APER_SIZE:
					bridge->current_size = A_IDX32(bridge);
					break;
				/* These cases will never really happen. */
				case FIXED_APER_SIZE:
				case LVL2_APER_SIZE:
				default:
					break;
				}
				temp = bridge->current_size;
			} else {
				bridge->aperture_size_idx = i;
			}
		} while (!table && (i < bridge->driver->num_aperture_sizes));
	} else {
		size = ((struct aper_size_info_fixed *) temp)->size;
		page_order = ((struct aper_size_info_fixed *) temp)->page_order;
		num_entries = ((struct aper_size_info_fixed *) temp)->num_entries;
		table = alloc_gatt_pages(page_order);
	}

	if (table == NULL)
		return -ENOMEM;

	table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);

	for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
		SetPageReserved(page);

	bridge->gatt_table_real = (u32 *) table;
	agp_gatt_table = (void *)table;

	bridge->driver->cache_flush();
#ifdef CONFIG_X86
	if (set_memory_uc((unsigned long)table, 1 << page_order))
		printk(KERN_WARNING "Could not set GATT table memory to UC!\n");

	bridge->gatt_table = (u32 __iomem *)table;
#else
	bridge->gatt_table = ioremap_nocache(virt_to_phys(table),
					(PAGE_SIZE * (1 << page_order)));
	bridge->driver->cache_flush();
#endif

	if (bridge->gatt_table == NULL) {
		for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
			ClearPageReserved(page);

		free_gatt_pages(table, page_order);

		return -ENOMEM;
	}
	bridge->gatt_bus_addr = virt_to_phys(bridge->gatt_table_real);

	/* AK: bogus, should encode addresses > 4GB */
	for (i = 0; i < num_entries; i++) {
		writel(bridge->scratch_page, bridge->gatt_table+i);
		readl(bridge->gatt_table+i);	/* PCI Posting. */
	}

	return 0;
}
EXPORT_SYMBOL(agp_generic_create_gatt_table);
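/*
 * Worked example (illustrative): with 4-byte GATT entries and 4 KiB pages,
 * a 64 MB aperture needs 64 MB / 4 KiB = 16384 entries, i.e. 64 KiB of
 * table, so alloc_gatt_pages() is called with page_order = 4 (16 pages).
 * This matches the {64, 16384, 4, 0xf30} row of agp3_generic_sizes below.
 */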
int agp_generic_free_gatt_table(struct agp_bridge_data *bridge)
{
	int page_order;
	char *table, *table_end;
	void *temp;
	struct page *page;

	temp = bridge->current_size;

	switch (bridge->driver->size_type) {
	case U8_APER_SIZE:
		page_order = A_SIZE_8(temp)->page_order;
		break;
	case U16_APER_SIZE:
		page_order = A_SIZE_16(temp)->page_order;
		break;
	case U32_APER_SIZE:
		page_order = A_SIZE_32(temp)->page_order;
		break;
	case FIXED_APER_SIZE:
		page_order = A_SIZE_FIX(temp)->page_order;
		break;
	case LVL2_APER_SIZE:
		/* The generic routines can't deal with 2 level gatt's */
		return -EINVAL;
	default:
		page_order = 0;
		break;
	}

	/* Do not worry about freeing memory, because if this is
	 * called, then all agp memory is deallocated and removed
	 * from the table. */

#ifdef CONFIG_X86
	set_memory_wb((unsigned long)bridge->gatt_table, 1 << page_order);
#else
	iounmap(bridge->gatt_table);
#endif
	table = (char *) bridge->gatt_table_real;
	table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);

	for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
		ClearPageReserved(page);

	free_gatt_pages(bridge->gatt_table_real, page_order);

	agp_gatt_table = NULL;
	bridge->gatt_table = NULL;
	bridge->gatt_table_real = NULL;
	bridge->gatt_bus_addr = 0;

	return 0;
}
EXPORT_SYMBOL(agp_generic_free_gatt_table);

int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type)
{
	int num_entries;
	size_t i;
	off_t j;
	void *temp;
	struct agp_bridge_data *bridge;
	int mask_type;

	bridge = mem->bridge;
	if (!bridge)
		return -EINVAL;

	if (mem->page_count == 0)
		return 0;

	temp = bridge->current_size;

	switch (bridge->driver->size_type) {
	case U8_APER_SIZE:
		num_entries = A_SIZE_8(temp)->num_entries;
		break;
	case U16_APER_SIZE:
		num_entries = A_SIZE_16(temp)->num_entries;
		break;
	case U32_APER_SIZE:
		num_entries = A_SIZE_32(temp)->num_entries;
		break;
	case FIXED_APER_SIZE:
		num_entries = A_SIZE_FIX(temp)->num_entries;
		break;
	case LVL2_APER_SIZE:
		/* The generic routines can't deal with 2 level gatt's */
		return -EINVAL;
	default:
		num_entries = 0;
		break;
	}

	num_entries -= agp_memory_reserved/PAGE_SIZE;
	if (num_entries < 0)
		num_entries = 0;

	if (type != mem->type)
		return -EINVAL;

	mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
	if (mask_type != 0) {
		/* The generic routines know nothing of memory types */
		return -EINVAL;
	}

	if (((pg_start + mem->page_count) > num_entries) ||
	    ((pg_start + mem->page_count) < pg_start))
		return -EINVAL;

	j = pg_start;

	while (j < (pg_start + mem->page_count)) {
		if (!PGE_EMPTY(bridge, readl(bridge->gatt_table+j)))
			return -EBUSY;
		j++;
	}

	if (!mem->is_flushed) {
		bridge->driver->cache_flush();
		mem->is_flushed = true;
	}

	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		writel(bridge->driver->mask_memory(bridge,
						   page_to_phys(mem->pages[i]),
						   mask_type),
		       bridge->gatt_table+j);
	}
	readl(bridge->gatt_table+j-1);	/* PCI Posting. */

	bridge->driver->tlb_flush(mem);
	return 0;
}
EXPORT_SYMBOL(agp_generic_insert_memory);

int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
{
	size_t i;
	struct agp_bridge_data *bridge;
	int mask_type, num_entries;

	bridge = mem->bridge;
	if (!bridge)
		return -EINVAL;

	if (mem->page_count == 0)
		return 0;

	if (type != mem->type)
		return -EINVAL;

	num_entries = agp_num_entries();
	if (((pg_start + mem->page_count) > num_entries) ||
	    ((pg_start + mem->page_count) < pg_start))
		return -EINVAL;

	mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
	if (mask_type != 0) {
		/* The generic routines know nothing of memory types */
		return -EINVAL;
	}

	/* AK: bogus, should encode addresses > 4GB */
	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
		writel(bridge->scratch_page, bridge->gatt_table+i);
	}
	readl(bridge->gatt_table+i-1);	/* PCI Posting. */

	bridge->driver->tlb_flush(mem);
	return 0;
}
EXPORT_SYMBOL(agp_generic_remove_memory);

struct agp_memory *agp_generic_alloc_by_type(size_t page_count, int type)
{
	return NULL;
}
EXPORT_SYMBOL(agp_generic_alloc_by_type);

void agp_generic_free_by_type(struct agp_memory *curr)
{
	agp_free_page_array(curr);
	agp_free_key(curr->key);
	kfree(curr);
}
EXPORT_SYMBOL(agp_generic_free_by_type);

struct agp_memory *agp_generic_alloc_user(size_t page_count, int type)
{
	struct agp_memory *new;
	int i;
	int pages;

	pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;
	new = agp_create_user_memory(page_count);
	if (new == NULL)
		return NULL;

	for (i = 0; i < page_count; i++)
		new->pages[i] = NULL;
	new->page_count = 0;
	new->type = type;
	new->num_scratch_pages = pages;

	return new;
}
EXPORT_SYMBOL(agp_generic_alloc_user);

/*
 * Basic Page Allocation Routines -
 * These routines handle page allocation and by default they reserve the allocated
 * memory.  They also handle incrementing the current_memory_agp value, which is checked
 * against a maximum value.
 */

int agp_generic_alloc_pages(struct agp_bridge_data *bridge, struct agp_memory *mem, size_t num_pages)
{
	struct page *page;
	int i, ret = -ENOMEM;

	for (i = 0; i < num_pages; i++) {
		page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
		/* agp_free_memory() needs gart address */
		if (page == NULL)
			goto out;

#ifndef CONFIG_X86
		map_page_into_agp(page);
#endif
		get_page(page);
		atomic_inc(&agp_bridge->current_memory_agp);

		mem->pages[i] = page;
		mem->page_count++;
	}

#ifdef CONFIG_X86
	set_pages_array_uc(mem->pages, num_pages);
#endif
	ret = 0;
out:
	return ret;
}
EXPORT_SYMBOL(agp_generic_alloc_pages);

struct page *agp_generic_alloc_page(struct agp_bridge_data *bridge)
{
	struct page *page;

	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
	if (page == NULL)
		return NULL;

	map_page_into_agp(page);

	get_page(page);
	atomic_inc(&agp_bridge->current_memory_agp);
	return page;
}
EXPORT_SYMBOL(agp_generic_alloc_page);

void agp_generic_destroy_pages(struct agp_memory *mem)
{
	int i;
	struct page *page;

	if (!mem)
		return;

#ifdef CONFIG_X86
	set_pages_array_wb(mem->pages, mem->page_count);
#endif

	for (i = 0; i < mem->page_count; i++) {
		page = mem->pages[i];

#ifndef CONFIG_X86
		unmap_page_from_agp(page);
#endif
		put_page(page);
		__free_page(page);
		atomic_dec(&agp_bridge->current_memory_agp);
		mem->pages[i] = NULL;
	}
}
EXPORT_SYMBOL(agp_generic_destroy_pages);

void agp_generic_destroy_page(struct page *page, int flags)
{
	if (page == NULL)
		return;

	if (flags & AGP_PAGE_DESTROY_UNMAP)
		unmap_page_from_agp(page);

	if (flags & AGP_PAGE_DESTROY_FREE) {
		put_page(page);
		__free_page(page);
		atomic_dec(&agp_bridge->current_memory_agp);
	}
}
EXPORT_SYMBOL(agp_generic_destroy_page);

/* End Basic Page Allocation Routines */

/**
 * agp_enable - initialise the agp point-to-point connection.
 *
 * @bridge:	an agp_bridge_data struct allocated for the AGP host bridge.
 * @mode:	agp mode register value to configure with.
 */
void agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
	if (!bridge)
		return;
	bridge->driver->agp_enable(bridge, mode);
}
EXPORT_SYMBOL(agp_enable);
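/*
 * Illustrative sketch (not part of the original file): a consumer normally
 * acquires the backend before enabling it, e.g.
 *
 *	bridge = agp_backend_acquire(pdev);
 *	if (bridge) {
 *		agp_enable(bridge, mode);
 *		// ...
 *		agp_backend_release(bridge);
 *	}
 *
 * agp_backend_acquire()/agp_backend_release() live in the AGP backend code.
 */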
/* When we remove the global variable agp_bridge from all drivers
 * then agp_alloc_bridge and agp_generic_find_bridge need to be updated
 */
struct agp_bridge_data *agp_generic_find_bridge(struct pci_dev *pdev)
{
	if (list_empty(&agp_bridges))
		return NULL;

	return agp_bridge;
}

static void ipi_handler(void *null)
{
	flush_agp_cache();
}

void global_cache_flush(void)
{
	if (on_each_cpu(ipi_handler, NULL, 1) != 0)
		panic(PFX "timed out waiting for the other CPUs!\n");
}
EXPORT_SYMBOL(global_cache_flush);

unsigned long agp_generic_mask_memory(struct agp_bridge_data *bridge,
				      dma_addr_t addr, int type)
{
	/* memory type is ignored in the generic routine */
	if (bridge->driver->masks)
		return addr | bridge->driver->masks[0].mask;
	else
		return addr;
}
EXPORT_SYMBOL(agp_generic_mask_memory);

int agp_generic_type_to_mask_type(struct agp_bridge_data *bridge,
				  int type)
{
	if (type >= AGP_USER_TYPES)
		return 0;
	return type;
}
EXPORT_SYMBOL(agp_generic_type_to_mask_type);
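/*
 * Illustrative note (not from the original source): mask_memory() turns a
 * physical/DMA address into a GATT entry.  With a hypothetical driver mask
 * of 0x00000001 (a "valid" bit), a page at 0x12345000 would be written into
 * the table as 0x12345001; drivers with per-type masks override this hook
 * and use the mask_type selected by agp_type_to_mask_type() above.
 */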
/*
 * These functions are implemented according to the AGPv3 spec,
 * which covers implementation details that had previously been
 * left open.
 */

int agp3_generic_fetch_size(void)
{
	u16 temp_size;
	int i;
	struct aper_size_info_16 *values;

	pci_read_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, &temp_size);
	values = A_SIZE_16(agp_bridge->driver->aperture_sizes);

	for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
		if (temp_size == values[i].size_value) {
			agp_bridge->previous_size =
				agp_bridge->current_size = (void *) (values + i);

			agp_bridge->aperture_size_idx = i;
			return values[i].size;
		}
	}
	return 0;
}
EXPORT_SYMBOL(agp3_generic_fetch_size);

void agp3_generic_tlbflush(struct agp_memory *mem)
{
	u32 ctrl;
	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl);
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_GTLBEN);
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl);
}
EXPORT_SYMBOL(agp3_generic_tlbflush);

int agp3_generic_configure(void)
{
	u32 temp;
	struct aper_size_info_16 *current_size;

	current_size = A_SIZE_16(agp_bridge->current_size);

	agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
						    AGP_APERTURE_BAR);

	/* set aperture size */
	pci_write_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, current_size->size_value);
	/* set gart pointer */
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPGARTLO, agp_bridge->gatt_bus_addr);
	/* enable aperture and GTLB */
	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &temp);
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, temp | AGPCTRL_APERENB | AGPCTRL_GTLBEN);
	return 0;
}
EXPORT_SYMBOL(agp3_generic_configure);

void agp3_generic_cleanup(void)
{
	u32 ctrl;
	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl);
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_APERENB);
}
EXPORT_SYMBOL(agp3_generic_cleanup);

const struct aper_size_info_16 agp3_generic_sizes[AGP_GENERIC_SIZES_ENTRIES] =
{
	{4096, 1048576, 10, 0x000},
	{2048,  524288,  9, 0x800},
	{1024,  262144,  8, 0xc00},
	{ 512,  131072,  7, 0xe00},
	{ 256,   65536,  6, 0xf00},
	{ 128,   32768,  5, 0xf20},
	{  64,   16384,  4, 0xf30},
	{  32,    8192,  3, 0xf38},
	{  16,    4096,  2, 0xf3c},
	{   8,    2048,  1, 0xf3e},
	{   4,    1024,  0, 0xf3f}
};
EXPORT_SYMBOL(agp3_generic_sizes);
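/*
 * Illustrative note (not part of the original file): each row above is
 * { aperture size in MB, GATT entries, GATT page order, AGPAPSIZE value }.
 * The columns are consistent: 256 MB / 4 KiB pages = 65536 entries, and
 * 65536 entries * 4 bytes = 256 KiB of table = 64 pages, i.e. order 6,
 * matching the {256, 65536, 6, 0xf00} row.
 */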