/*
 * $Id: GroebnerBaseDistributedHybridMPI.java 5797 2018-03-26 09:48:51Z kredel $
 */

package edu.jas.gb;


import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.ListIterator;
import java.util.concurrent.atomic.AtomicInteger;

import mpi.Comm;
import mpi.MPIException;

import org.apache.log4j.Logger;

import edu.jas.kern.MPIEngine;
import edu.jas.poly.ExpVector;
import edu.jas.poly.GenPolynomial;
import edu.jas.structure.RingElem;
import edu.jas.util.DistHashTableMPI;
import edu.jas.util.MPIChannel;
import edu.jas.util.Terminator;
import edu.jas.util.ThreadPool;


/**
 * Groebner Base distributed hybrid algorithm with MPI. Implements a distributed
 * memory with multi-core CPUs parallel version of Groebner bases with MPI.
 * Using pairlist class, distributed multi-threaded tasks do reduction, one
 * communication channel per remote node.
 * @param <C> coefficient type
 * @author Heinz Kredel
 */

public class GroebnerBaseDistributedHybridMPI<C extends RingElem<C>> extends GroebnerBaseAbstract<C> {


    private static final Logger logger = Logger.getLogger(GroebnerBaseDistributedHybridMPI.class);


    // cached flag, avoids repeated logger.isDebugEnabled() calls in hot loops
    public final boolean debug = logger.isDebugEnabled();


    /**
     * Number of threads to use. Must equal the number of MPI processes (one
     * server thread per remote node); checked in the main constructor.
     */
    protected final int threads;


    /**
     * Default number of threads.
     */
    protected static final int DEFAULT_THREADS = 2;


    /**
     * Number of threads per node to use.
     */
    protected final int threadsPerNode;


    /**
     * Default number of threads per compute node.
     */
    protected static final int DEFAULT_THREADS_PER_NODE = 1;


    /**
     * Pool of threads to use. Runs one HybridReducerServerMPI per remote node
     * on the master; also used by minimalGB for threaded reduction.
     */
    //protected final ExecutorService pool; // not for single node tests
    protected transient final ThreadPool pool;


    /*
     * Underlying MPI engine (communicator shared by all ranks).
     */
    protected transient final Comm engine;


    /**
     * Message tag for pairs.
     */
    public static final int pairTag = GroebnerBaseDistributedHybridEC.pairTag.intValue();


    /**
     * Message tag for results.
     */
    public static final int resultTag = GroebnerBaseDistributedHybridEC.resultTag.intValue();


    /**
     * Message tag for acknowledgments.
     */
    public static final int ackTag = GroebnerBaseDistributedHybridEC.ackTag.intValue();


    /**
     * Constructor.
     * @throws IOException if the MPI communicator cannot be obtained.
     */
    public GroebnerBaseDistributedHybridMPI() throws IOException {
        this(DEFAULT_THREADS);
    }


    /**
     * Constructor.
     * @param threads number of threads to use.
     * @throws IOException if the MPI communicator cannot be obtained.
     */
    public GroebnerBaseDistributedHybridMPI(int threads) throws IOException {
        this(threads, new ThreadPool(threads));
    }


    /**
     * Constructor.
     * @param threads number of threads to use.
     * @param threadsPerNode threads per node to use.
     * @throws IOException if the MPI communicator cannot be obtained.
     */
    public GroebnerBaseDistributedHybridMPI(int threads, int threadsPerNode) throws IOException {
        this(threads, threadsPerNode, new ThreadPool(threads));
    }


    /**
     * Constructor.
     * @param threads number of threads to use.
     * @param pool ThreadPool to use.
     * @throws IOException if the MPI communicator cannot be obtained.
     */
    public GroebnerBaseDistributedHybridMPI(int threads, ThreadPool pool) throws IOException {
        this(threads, DEFAULT_THREADS_PER_NODE, pool);
    }


    /**
     * Constructor.
     * @param threads number of threads to use.
     * @param threadsPerNode threads per node to use.
     * @param pl pair selection strategy
     * @throws IOException if the MPI communicator cannot be obtained.
     */
    public GroebnerBaseDistributedHybridMPI(int threads, int threadsPerNode, PairList<C> pl)
                    throws IOException {
        this(threads, threadsPerNode, new ThreadPool(threads), pl);
    }


    /**
     * Constructor.
     * @param threads number of threads to use.
     * @param threadsPerNode threads per node to use.
     * @param pool ThreadPool to use.
     * @throws IOException if the MPI communicator cannot be obtained.
     */
    public GroebnerBaseDistributedHybridMPI(int threads, int threadsPerNode, ThreadPool pool)
                    throws IOException {
        this(threads, threadsPerNode, pool, new OrderedPairlist<C>());
    }


    /**
     * Constructor.
     * @param threads number of threads to use.
     * @param threadsPerNode threads per node to use.
     * @param pool ThreadPool to use.
     * @param pl pair selection strategy
     * @throws IOException if the MPI communicator cannot be obtained; wraps MPIException.
     */
    public GroebnerBaseDistributedHybridMPI(int threads, int threadsPerNode, ThreadPool pool, PairList<C> pl)
                    throws IOException {
        super(new ReductionPar<C>(), pl);
        int size = 0;
        try {
            engine = MPIEngine.getCommunicator();
            size = engine.Size();
        } catch (MPIException e) {
            throw new IOException(e);
        }
        if (size < 2) {
            throw new IllegalArgumentException("Minimal 2 MPI processes required, not " + size);
        }
        // one server thread (and one pool slot) is required per MPI process
        if (threads != size || pool.getNumber() != size) {
            throw new IllegalArgumentException("threads != size: " + threads + " != " + size + ", #pool "
                            + pool.getNumber());
        }
        this.threads = threads;
        this.pool = pool;
        this.threadsPerNode = threadsPerNode;
        //logger.info("generated pool: " + pool);
    }


    /**
     * Cleanup and terminate.
     */
    @Override
    public void terminate() {
        if (pool == null) {
            return;
        }
        //pool.terminate();
        pool.cancel();
    }


    /**
     * Distributed Groebner base.
     * @param modv number of module variables.
     * @param F polynomial list.
     * @return GB(F) a Groebner base of F or null, if an IOException occurs or on
     *         MPI client part.
     */
    public List<GenPolynomial<C>> GB(int modv, List<GenPolynomial<C>> F) {
        // rank 0 acts as master; all other ranks run the client part and return null
        try {
            if (engine.Rank() == 0) {
                return GBmaster(modv, F);
            }
        } catch (MPIException e) {
            logger.info("GBmaster: " + e);
            e.printStackTrace();
            return null;
        } catch (IOException e) {
            logger.info("GBmaster: " + e);
            e.printStackTrace();
            return null;
        }
        pool.terminate(); // not used on clients
        try {
            clientPart(0); // only 0
        } catch (IOException e) {
            logger.info("clientPart: " + e);
            e.printStackTrace();
        } catch (MPIException e) {
            logger.info("clientPart: " + e);
            e.printStackTrace();
        }
        return null;
    }


    /**
     * Distributed hybrid Groebner base.
     * @param modv number of module variables.
     * @param F polynomial list.
     * @return GB(F) a Groebner base of F or null, if an IOException occurs.
     * @throws MPIException on MPI communication failure.
     * @throws IOException on channel communication failure.
     */
    public List<GenPolynomial<C>> GBmaster(int modv, List<GenPolynomial<C>> F) throws MPIException,
                    IOException {
        long t = System.currentTimeMillis();
        GenPolynomial<C> p;
        List<GenPolynomial<C>> G = new ArrayList<GenPolynomial<C>>();
        PairList<C> pairlist = null;
        boolean oneInGB = false;
        //int l = F.size();
        int unused = 0;
        // normalize input: make monic, detect 1 in GB, fill pair list
        ListIterator<GenPolynomial<C>> it = F.listIterator();
        while (it.hasNext()) {
            p = it.next();
            if (p.length() > 0) {
                p = p.monic();
                if (p.isONE()) {
                    oneInGB = true;
                    G.clear();
                    G.add(p);
                    //return G; must signal termination to others
                }
                if (!oneInGB) {
                    G.add(p);
                }
                if (pairlist == null) {
                    //pairlist = new OrderedPairlist<C>(modv, p.ring);
                    pairlist = strategy.create(modv, p.ring);
                    if (!p.ring.coFac.isField()) {
                        throw new IllegalArgumentException("coefficients not from a field");
                    }
                }
                // theList not updated here
                if (p.isONE()) {
                    unused = pairlist.putOne();
                } else {
                    unused = pairlist.put(p);
                }
            } else {
                //l--;
            }
        }
        //if (l <= 1) {
        //return G; must signal termination to others
        //}
        logger.info("pairlist " + pairlist + ": " + unused);

        // distributed hash table shared with all client ranks
        logger.debug("looking for clients");
        DistHashTableMPI<Integer, GenPolynomial<C>> theList = new DistHashTableMPI<Integer, GenPolynomial<C>>(
                        engine);
        theList.init();

        // publish the initial polynomials under their pair list indices
        List<GenPolynomial<C>> al = pairlist.getList();
        for (int i = 0; i < al.size(); i++) {
            // no wait required
            GenPolynomial<C> nn = theList.put(Integer.valueOf(i), al.get(i));
            if (nn != null) {
                logger.info("double polynomials " + i + ", nn = " + nn + ", al(i) = " + al.get(i));
            }
        }

        // one reducer-server job per remote rank; terminator counts all remote worker threads
        Terminator finner = new Terminator((threads - 1) * threadsPerNode);
        HybridReducerServerMPI<C> R;
        logger.info("using pool = " + pool);
        for (int i = 1; i < threads; i++) {
            MPIChannel chan = new MPIChannel(engine, i); // closed in server
            R = new HybridReducerServerMPI<C>(i, threadsPerNode, finner, chan, theList, pairlist);
            pool.addJob(R);
            //logger.info("server submitted " + R);
        }
        logger.info("main loop waiting " + finner);
        finner.waitDone();
        int ps = theList.size();
        logger.info("#distributed list = " + ps);
        // make sure all polynomials arrived: not needed in master
        G = pairlist.getList();
        if (ps != G.size()) {
            logger.info("#distributed list = " + theList.size() + " #pairlist list = " + G.size());
        }
        for (GenPolynomial<C> q : theList.getValueList()) {
            if (q != null && !q.isZERO()) {
                logger.debug("final q = " + q.leadingExpVector());
            }
        }
        logger.debug("distributed list end");
        long time = System.currentTimeMillis();
        List<GenPolynomial<C>> Gp;
        Gp = minimalGB(G); // not yet distributed but threaded
        time = System.currentTimeMillis() - time;
        logger.debug("parallel gbmi time = " + time);
        G = Gp;
        logger.info("server theList.terminate() " + theList.size());
        theList.terminate();
        t = System.currentTimeMillis() - t;
        logger.info("server GB end, time = " + t + ", " + pairlist.toString());
        return G;
    }


    /**
     * GB distributed client. Runs threadsPerNode reducer workers against the
     * master at the given rank and blocks until they finish.
     * @param rank of the MPI process where the server runs on; only 0 is supported.
     * @throws IOException on channel communication failure.
     * @throws MPIException on MPI communication failure.
     */
    public void clientPart(int rank) throws IOException, MPIException {
        if (rank != 0) {
            throw new UnsupportedOperationException("only master at rank 0 implemented: " + rank);
        }
        Comm engine = MPIEngine.getCommunicator();

        DistHashTableMPI<Integer, GenPolynomial<C>> theList = new DistHashTableMPI<Integer, GenPolynomial<C>>(
                        engine);
        theList.init();

        MPIChannel chan = new MPIChannel(engine, rank);

        // local pool, distinct from the master's pool field
        ThreadPool pool = new ThreadPool(threadsPerNode);
        logger.info("client using pool = " + pool);
        for (int i = 0; i < threadsPerNode; i++) {
            HybridReducerClientMPI<C> Rr = new HybridReducerClientMPI<C>(chan, theList); // i
            pool.addJob(Rr);
        }
        if (debug) {
            logger.info("clients submitted");
        }
        pool.terminate();
        logger.info("client pool.terminate()");
        chan.close();
        theList.terminate();
        return;
    }


    /**
     * Minimal ordered groebner basis.
     * @param Fp a Groebner base.
     * @return a reduced Groebner base of Fp.
383 */ 384 @SuppressWarnings("unchecked") 385 @Override 386 public List<GenPolynomial<C>> minimalGB(List<GenPolynomial<C>> Fp) { 387 GenPolynomial<C> a; 388 ArrayList<GenPolynomial<C>> G; 389 G = new ArrayList<GenPolynomial<C>>(Fp.size()); 390 ListIterator<GenPolynomial<C>> it = Fp.listIterator(); 391 while (it.hasNext()) { 392 a = it.next(); 393 if (a.length() != 0) { // always true 394 // already monic a = a.monic(); 395 G.add(a); 396 } 397 } 398 if (G.size() <= 1) { 399 return G; 400 } 401 402 ExpVector e; 403 ExpVector f; 404 GenPolynomial<C> p; 405 ArrayList<GenPolynomial<C>> F; 406 F = new ArrayList<GenPolynomial<C>>(G.size()); 407 boolean mt; 408 409 while (G.size() > 0) { 410 a = G.remove(0); 411 e = a.leadingExpVector(); 412 413 it = G.listIterator(); 414 mt = false; 415 while (it.hasNext() && !mt) { 416 p = it.next(); 417 f = p.leadingExpVector(); 418 mt = e.multipleOf(f); 419 } 420 it = F.listIterator(); 421 while (it.hasNext() && !mt) { 422 p = it.next(); 423 f = p.leadingExpVector(); 424 mt = e.multipleOf(f); 425 } 426 if (!mt) { 427 F.add(a); 428 } else { 429 // System.out.println("dropped " + a.length()); 430 } 431 } 432 G = F; 433 if (G.size() <= 1) { 434 return G; 435 } 436 Collections.reverse(G); // important for lex GB 437 438 MiMPIReducerServer<C>[] mirs = (MiMPIReducerServer<C>[]) new MiMPIReducerServer[G.size()]; 439 int i = 0; 440 F = new ArrayList<GenPolynomial<C>>(G.size()); 441 while (G.size() > 0) { 442 a = G.remove(0); 443 // System.out.println("doing " + a.length()); 444 List<GenPolynomial<C>> R = new ArrayList<GenPolynomial<C>>(G.size() + F.size()); 445 R.addAll(G); 446 R.addAll(F); 447 mirs[i] = new MiMPIReducerServer<C>(R, a); 448 pool.addJob(mirs[i]); 449 i++; 450 F.add(a); 451 } 452 G = F; 453 F = new ArrayList<GenPolynomial<C>>(G.size()); 454 for (i = 0; i < mirs.length; i++) { 455 a = mirs[i].getNF(); 456 F.add(a); 457 } 458 return F; 459 } 460 461} 462 463 464/** 465 * Distributed server reducing worker proxy threads. 
 * @param <C> coefficient type
 */

class HybridReducerServerMPI<C extends RingElem<C>> implements Runnable {


    private static final Logger logger = Logger.getLogger(HybridReducerServerMPI.class);


    // cached flag, avoids repeated logger.isDebugEnabled() calls
    public final boolean debug = logger.isDebugEnabled();


    // termination coordinator shared with all reducer servers and receivers
    private final Terminator finner;


    // channel to one remote node; closed at the end of run()
    private final MPIChannel pairChannel;


    //protected transient final Comm engine;


    // distributed hash table of the intermediate basis polynomials
    private final DistHashTableMPI<Integer, GenPolynomial<C>> theList;


    private final PairList<C> pairlist;


    private final int threadsPerNode;


    // MPI rank of the partner node this server talks to
    final int rank;


    /**
     * Message tag for pairs.
     */
    public static final int pairTag = GroebnerBaseDistributedHybridMPI.pairTag;


    /**
     * Constructor.
     * @param r MPI rank of partner.
     * @param tpn number of threads per node
     * @param fin terminator
     * @param chan MPIChannel
     * @param dl distributed hash table
     * @param L ordered pair list
     */
    HybridReducerServerMPI(int r, int tpn, Terminator fin, MPIChannel chan,
                    DistHashTableMPI<Integer, GenPolynomial<C>> dl, PairList<C> L) {
        rank = r;
        threadsPerNode = tpn;
        finner = fin;
        this.pairChannel = chan;
        theList = dl;
        pairlist = L;
        //logger.info("reducer server created " + this);
    }


    /**
     * Work loop: receive a pair request from the remote node, select the next
     * critical pair from the pair list and send it back; results come in
     * asynchronously via a HybridReducerReceiverMPI started here. Terminates
     * when no pairs and no running jobs remain, then sends end marks.
     * @see java.lang.Runnable#run()
     */
    @Override
    @SuppressWarnings("unchecked")
    public void run() {
        //logger.info("reducer server running with " + engine);
        // try {
        //     pairChannel = new MPIChannel(engine, rank); //,pairTag
        // } catch (IOException e) {
        //     e.printStackTrace();
        //     return;
        // } catch (MPIException e) {
        //     e.printStackTrace();
        //     return;
        // }
        if (logger.isInfoEnabled()) {
            logger.info("reducer server running: pairChannel = " + pairChannel);
        }
        // record idle remote workers (minus one?)
        //finner.beIdle(threadsPerNode-1);
        finner.initIdle(threadsPerNode);
        // count of pairs sent but not yet answered, shared with the receiver
        AtomicInteger active = new AtomicInteger(0);

        // start receiver
        HybridReducerReceiverMPI<C> receiver = new HybridReducerReceiverMPI<C>(rank, finner, active,
                        pairChannel, theList, pairlist);
        receiver.start();

        Pair<C> pair;
        //boolean set = false;
        boolean goon = true;
        //int polIndex = -1;
        int red = 0;
        int sleeps = 0;

        // while more requests
        while (goon) {
            // receive request if thread is reported inactive
            logger.debug("receive request");
            Object req = null;
            try {
                req = pairChannel.receive(pairTag);
                //} catch (InterruptedException e) {
                //goon = false;
                //e.printStackTrace();
            } catch (IOException e) {
                goon = false;
                e.printStackTrace();
            } catch (MPIException e) {
                e.printStackTrace();
                return;
            } catch (ClassNotFoundException e) {
                goon = false;
                e.printStackTrace();
            }
            logger.debug("received request, req = " + req);
            if (req == null) {
                goon = false;
                break;
            }
            if (!(req instanceof GBTransportMessReq)) {
                goon = false;
                break;
            }

            // find pair and manage termination status
            logger.debug("find pair");
            while (!pairlist.hasNext()) { // wait
                if (!finner.hasJobs() && !pairlist.hasNext()) {
                    goon = false;
                    break;
                }
                try {
                    sleeps++;
                    if (sleeps % 3 == 0) {
                        logger.info("waiting for reducers, remaining = " + finner.getJobs());
                    }
                    Thread.sleep(100);
                } catch (InterruptedException e) {
                    goon = false;
                    break;
                }
            }
            if (!pairlist.hasNext() && !finner.hasJobs()) {
                logger.info("termination detection: no pairs and no jobs left");
                goon = false;
                break; //continue; //break?
            }
            finner.notIdle(); // before pairlist get!!
            pair = pairlist.removeNext();
            // send pair to client, even if null
            if (debug) {
                logger.info("active count = " + active.get());
                logger.info("send pair = " + pair);
            }
            GBTransportMess msg = null;
            if (pair != null) {
                msg = new GBTransportMessPairIndex(pair);
            } else {
                msg = new GBTransportMess(); //not End(); at this time
                // goon ?= false;
            }
            try {
                red++;
                pairChannel.send(pairTag, msg);
                @SuppressWarnings("unused")
                int a = active.getAndIncrement();
            } catch (IOException e) {
                e.printStackTrace();
                goon = false;
                break;
            } catch (MPIException e) {
                e.printStackTrace();
                goon = false;
                break;
            }
            //logger.debug("#distributed list = " + theList.size());
        }
        logger.info("terminated, send " + red + " reduction pairs");

        /*
         * send end mark to clients
         */
        logger.debug("send end");
        try {
            for (int i = 0; i < threadsPerNode; i++) { // -1
                pairChannel.send(pairTag, new GBTransportMessEnd());
            }
            logger.info("sent end to clients");
            // send also end to receiver, no more
            //pairChannel.send(resultTag, new GBTransportMessEnd(), engine.Rank());
        } catch (IOException e) {
            if (logger.isDebugEnabled()) {
                e.printStackTrace();
            }
        } catch (MPIException e) {
            e.printStackTrace();
        }
        int d = active.get();
        if (d > 0) {
            logger.info("remaining active tasks = " + d);
        }
        receiver.terminate();
        //logger.info("terminated, send " + red + " reduction pairs");
        pairChannel.close();
        logger.info("redServ pairChannel.close()");
        finner.release();
    }
}


/**
 * Distributed server receiving worker thread.
 * @param <C> coefficient type
 */

class HybridReducerReceiverMPI<C extends RingElem<C>> extends Thread {


    private static final Logger logger = Logger.getLogger(HybridReducerReceiverMPI.class);


    // cached flag, avoids repeated logger.isDebugEnabled() calls
    public final boolean debug = logger.isDebugEnabled();


    // distributed hash table of the intermediate basis polynomials
    private final DistHashTableMPI<Integer, GenPolynomial<C>> theList;


    private final PairList<C> pairlist;


    // channel shared with the corresponding HybridReducerServerMPI
    private final MPIChannel pairChannel;


    // MPI rank of the partner node
    final int rank;


    // termination coordinator; idle counts are restored here after each result
    private final Terminator finner;


    //private final int threadsPerNode;


    // count of outstanding pairs, decremented when a result arrives
    private final AtomicInteger active;


    // loop flag; volatile because terminate() is called from another thread
    private volatile boolean goon;


    /**
     * Message tag for results.
     */
    public static final int resultTag = GroebnerBaseDistributedHybridMPI.resultTag;


    /**
     * Message tag for acknowledgments.
     */
    public static final int ackTag = GroebnerBaseDistributedHybridMPI.ackTag;


    /**
     * Constructor.
     * @param r MPI rank of partner.
     * @param fin terminator
     * @param a active remote tasks count
     * @param pc tagged socket channel
     * @param dl distributed hash table
     * @param L ordered pair list
     */
    HybridReducerReceiverMPI(int r, Terminator fin, AtomicInteger a, MPIChannel pc,
                    DistHashTableMPI<Integer, GenPolynomial<C>> dl, PairList<C> L) {
        rank = r;
        active = a;
        //threadsPerNode = tpn;
        finner = fin;
        pairChannel = pc;
        theList = dl;
        pairlist = L;
        goon = true;
        //logger.info("reducer server created " + this);
    }


    /**
     * Work loop: receive reduction results (resultTag) from the remote node,
     * record non-zero polynomials in the pair list and the distributed hash
     * table, then mark the worker idle and send an acknowledgment (ackTag).
     * @see java.lang.Thread#run()
     */
    @Override
    @SuppressWarnings("unchecked")
    public void run() {
        //Pair<C> pair = null;
        GenPolynomial<C> H = null;
        int red = 0;
        int polIndex = -1;
        //Integer senderId; // obsolete

        // while more requests
        while (goon) {
            // receive request
            logger.debug("receive result");
            //senderId = null;
            Object rh = null;
            try {
                rh = pairChannel.receive(resultTag);
                @SuppressWarnings("unused")
                int i = active.getAndDecrement();
                //} catch (InterruptedException e) {
                //goon = false;
                ////e.printStackTrace();
                ////?? finner.initIdle(1);
                //break;
            } catch (IOException e) {
                e.printStackTrace();
                goon = false;
                finner.initIdle(1);
                break;
            } catch (MPIException e) {
                e.printStackTrace();
                goon = false;
                finner.initIdle(1);
                break;
            } catch (ClassNotFoundException e) {
                e.printStackTrace();
                goon = false;
                finner.initIdle(1);
                break;
            }
            logger.info("received result");
            if (rh == null) {
                if (this.isInterrupted()) {
                    goon = false;
                    finner.initIdle(1);
                    break;
                }
                //finner.initIdle(1);
            } else if (rh instanceof GBTransportMessEnd) { // should only happen from server
                logger.info("received GBTransportMessEnd");
                goon = false;
                //?? finner.initIdle(1);
                break;
            } else if (rh instanceof GBTransportMessPoly) {
                // update pair list
                red++;
                GBTransportMessPoly<C> mpi = (GBTransportMessPoly<C>) rh;
                H = mpi.pol;
                //senderId = mpi.threadId;
                if (H != null) {
                    if (logger.isInfoEnabled()) { // debug
                        logger.info("H = " + H.leadingExpVector());
                    }
                    if (!H.isZERO()) {
                        if (H.isONE()) {
                            polIndex = pairlist.putOne();
                            //GenPolynomial<C> nn =
                            theList.putWait(Integer.valueOf(polIndex), H);
                            //goon = false; must wait for other clients
                            //finner.initIdle(1);
                            //break;
                        } else {
                            polIndex = pairlist.put(H);
                            // use putWait ? but still not all distributed
                            //GenPolynomial<C> nn =
                            theList.putWait(Integer.valueOf(polIndex), H);
                        }
                    }
                }
            }
            // only after recording in pairlist !
            finner.initIdle(1);
            try {
                pairChannel.send(ackTag, new GBTransportMess());
                logger.debug("send acknowledgement");
            } catch (IOException e) {
                e.printStackTrace();
                goon = false;
                break;
            } catch (MPIException e) {
                e.printStackTrace();
                goon = false;
                break;
            }
        } // end while
        goon = false;
        logger.info("terminated, received " + red + " reductions");
    }


    /**
     * Terminate. Signals the work loop to stop and waits for this thread to
     * finish.
     */
    public void terminate() {
        goon = false;
        try {
            this.join();
            //this.interrupt();
        } catch (InterruptedException e) {
            // not needed: Thread.currentThread().interrupt();
        }
        logger.info("terminate end");
    }

}


/**
 * Distributed clients reducing worker threads.
 * @param <C> coefficient type
 */

class HybridReducerClientMPI<C extends RingElem<C>> implements Runnable {


    private static final Logger logger = Logger.getLogger(HybridReducerClientMPI.class);


    // cached flag, avoids repeated logger.isDebugEnabled() calls
    public final boolean debug = logger.isDebugEnabled();


    // channel to the master node, shared by all client worker threads
    private final MPIChannel pairChannel;


    // distributed hash table of the intermediate basis polynomials
    private final DistHashTableMPI<Integer, GenPolynomial<C>> theList;


    // reduction engine used for S-polynomials and normal forms
    private final ReductionPar<C> red;


    //private final int threadsPerNode;


    /*
     * Identification number for this thread.
     */
    //public final Integer threadId; // obsolete


    /**
     * Message tag for pairs.
     */
    public static final int pairTag = GroebnerBaseDistributedHybridMPI.pairTag;


    /**
     * Message tag for results.
     */
    public static final int resultTag = GroebnerBaseDistributedHybridMPI.resultTag;


    /**
     * Message tag for acknowledgments.
     */
    public static final int ackTag = GroebnerBaseDistributedHybridMPI.ackTag;


    /**
     * Constructor.
     * @param tc tagged socket channel
     * @param dl distributed hash table
     */
    HybridReducerClientMPI(MPIChannel tc, DistHashTableMPI<Integer, GenPolynomial<C>> dl) {
        //this.threadsPerNode = tpn;
        pairChannel = tc;
        //threadId = 100 + tid; // keep distinct from other tags
        theList = dl;
        red = new ReductionPar<C>();
    }


    /**
     * Work loop. Protocol per iteration: request a pair (pairTag), receive a
     * pair or end mark, compute the S-polynomial and its normal form, send the
     * result (resultTag), then wait for the acknowledgment (ackTag).
     * @see java.lang.Runnable#run()
     */
    @Override
    @SuppressWarnings("unchecked")
    public void run() {
        if (debug) {
            logger.info("pairChannel = " + pairChannel + " reducer client running");
        }
        Pair<C> pair = null;
        GenPolynomial<C> pi, pj, ps;
        GenPolynomial<C> S;
        GenPolynomial<C> H = null;
        //boolean set = false;
        boolean goon = true;
        boolean doEnd = true;
        int reduction = 0;
        //int sleeps = 0;
        Integer pix, pjx, psx;

        while (goon) {
            /* protocol:
             * request pair, process pair, send result, receive acknowledgment
             */
            // pair = (Pair) pairlist.removeNext();
            Object req = new GBTransportMessReq();
            logger.debug("send request = " + req);
            try {
                pairChannel.send(pairTag, req);
            } catch (IOException e) {
                goon = false;
                if (debug) {
                    e.printStackTrace();
                }
                logger.info("receive pair, exception ");
                break;
            } catch (MPIException e) {
                goon = false;
                if (debug) {
                    e.printStackTrace();
                }
                logger.info("receive pair, exception ");
                break;
            }
            logger.debug("receive pair, goon = " + goon);
            doEnd = true;
            Object pp = null;
            try {
                pp = pairChannel.receive(pairTag);
                //} catch (InterruptedException e) {
                //goon = false;
                //e.printStackTrace();
            } catch (IOException e) {
                goon = false;
                if (debug) {
                    e.printStackTrace();
                }
                break;
            } catch (MPIException e) {
                goon = false;
                if (debug) {
                    e.printStackTrace();
                }
                break;
            } catch (ClassNotFoundException e) {
                goon = false;
                e.printStackTrace();
            }
            if (debug) {
                logger.info("received pair = " + pp);
            }
            H = null;
            if (pp == null) { // should not happen
                continue;
            }
            if (pp instanceof GBTransportMessEnd) {
                goon = false;
                //doEnd = false; // bug
                continue;
            }
            if (pp instanceof GBTransportMessPair || pp instanceof GBTransportMessPairIndex) {
                pi = pj = ps = null;
                if (pp instanceof GBTransportMessPair) {
                    pair = ((GBTransportMessPair<C>) pp).pair;
                    if (pair != null) {
                        pi = pair.pi;
                        pj = pair.pj;
                        //logger.debug("pair: pix = " + pair.i
                        //           + ", pjx = " + pair.j);
                    }
                }
                if (pp instanceof GBTransportMessPairIndex) {
                    // pair arrives as indices; fetch polynomials from the DHT,
                    // blocking until they have been distributed
                    pix = ((GBTransportMessPairIndex) pp).i;
                    pjx = ((GBTransportMessPairIndex) pp).j;
                    psx = ((GBTransportMessPairIndex) pp).s;
                    pi = theList.getWait(pix);
                    pj = theList.getWait(pjx);
                    ps = theList.getWait(psx);
                    //logger.info("pix = " + pix + ", pjx = " +pjx + ", psx = " +psx);
                }

                if (pi != null && pj != null) {
                    S = red.SPolynomial(pi, pj);
                    //System.out.println("S = " + S);
                    logger.info("ht(S) = " + S.leadingExpVector());
                    if (S.isZERO()) {
                        // pair.setZero(); does not work in dist
                        H = S;
                    } else {
                        if (debug) {
                            logger.debug("ht(S) = " + S.leadingExpVector());
                        }
                        H = red.normalform(theList, S);
                        reduction++;
                        if (H.isZERO()) {
                            // pair.setZero(); does not work in dist
                        } else {
                            H = H.monic();
                            if (logger.isInfoEnabled()) {
                                logger.info("ht(H) = " + H.leadingExpVector());
                            }
                        }
                    }
                } else {
                    logger.info("pi = " + pi + ", pj = " + pj + ", ps = " + ps);
                }
            }
            if (pp instanceof GBTransportMess) {
                logger.debug("null pair results in null H poly");
            }

            // send H or must send null, if not at end
            if (debug) {
                logger.debug("#distributed list = " + theList.size());
                logger.debug("send H polynomial = " + H);
            }
            try {
                pairChannel.send(resultTag, new GBTransportMessPoly<C>(H)); //,threadId));
                doEnd = false;
            } catch (IOException e) {
                goon = false;
                e.printStackTrace();
            } catch (MPIException e) {
                goon = false;
                e.printStackTrace();
            }
            logger.debug("done send poly message of " + pp);
            try {
                pp = pairChannel.receive(ackTag);
                //} catch (InterruptedException e) {
                //goon = false;
                //e.printStackTrace();
            } catch (IOException e) {
                goon = false;
                if (debug) {
                    e.printStackTrace();
                }
                break;
            } catch (MPIException e) {
                goon = false;
                if (debug) {
                    e.printStackTrace();
                }
                break;
            } catch (ClassNotFoundException e) {
                goon = false;
                e.printStackTrace();
            }
            if (!(pp instanceof GBTransportMess)) {
                logger.error("invalid acknowledgement " + pp);
            }
            logger.debug("received acknowledgment " + pp);
        }
        logger.info("terminated, done " + reduction + " reductions");
        // tell the receiver we are gone if no result message was in flight
        if (doEnd) {
            try {
                pairChannel.send(resultTag, new GBTransportMessEnd());
            } catch (IOException e) {
                //e.printStackTrace();
            } catch (MPIException e) {
                //e.printStackTrace();
            }
            logger.info("terminated, send done");
        }
    }
}