From 6e6e6c53b7b6745a06a75025a1a89cb4f56d2d9b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pavel=20Jir=C3=A1sek?= <pavel.jirasek@vsb.cz>
Date: Wed, 25 Jan 2017 12:03:58 +0100
Subject: [PATCH] Fix broken documentation links

---
 .../software/mpi/mpi4py-mpi-for-python.md                     | 2 +-
 docs.it4i/salomon/software/intel-suite/intel-advisor.md       | 4 ++--
 .../intel-suite/intel-trace-analyzer-and-collector.md         | 4 ++--
 docs.it4i/salomon/software/mpi/mpi4py-mpi-for-python.md       | 2 +-
 4 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/docs.it4i/anselm-cluster-documentation/software/mpi/mpi4py-mpi-for-python.md b/docs.it4i/anselm-cluster-documentation/software/mpi/mpi4py-mpi-for-python.md
index 6c79215c0..df186ef65 100644
--- a/docs.it4i/anselm-cluster-documentation/software/mpi/mpi4py-mpi-for-python.md
+++ b/docs.it4i/anselm-cluster-documentation/software/mpi/mpi4py-mpi-for-python.md
@@ -92,4 +92,4 @@ Execute the above code as:
     $ mpiexec -bycore -bind-to-core python hello_world.py
 ```
 
-In this example, we run MPI4Py enabled code on 4 nodes, 16 cores per node (total of 64 processes), each python process is bound to a different core. More examples and documentation can be found on [MPI for Python webpage](https://pythonhosted.org/mpi4py/usrman/index.html).
+In this example, we run MPI4Py-enabled code on 4 nodes with 16 cores per node (64 processes in total); each Python process is bound to a different core. More examples and documentation can be found on the [MPI for Python webpage](https://pypi.python.org/pypi/mpi4py).
diff --git a/docs.it4i/salomon/software/intel-suite/intel-advisor.md b/docs.it4i/salomon/software/intel-suite/intel-advisor.md
index cf25a765c..3d074032a 100644
--- a/docs.it4i/salomon/software/intel-suite/intel-advisor.md
+++ b/docs.it4i/salomon/software/intel-suite/intel-advisor.md
@@ -27,6 +27,6 @@ In the left pane, you can switch between Vectorization and Threading workflows.
 
 References
 ----------
-1.  [Intel® Advisor 2015 Tutorial: Find Where to Add Parallelism - C++ Sample](https://software.intel.com/en-us/advisorxe_2015_tut_lin_c)
-2.  [Product     page](https://software.intel.com/en-us/intel-advisor-xe)
+1.  [Intel® Advisor 2015 Tutorial: Find Where to Add Parallelism - C++ Sample](https://software.intel.com/en-us/intel-advisor-tutorial-vectorization-windows-cplusplus)
+2.  [Product page](https://software.intel.com/en-us/intel-advisor-xe)
 3.  [Documentation](https://software.intel.com/en-us/intel-advisor-2016-user-guide-linux)
diff --git a/docs.it4i/salomon/software/intel-suite/intel-trace-analyzer-and-collector.md b/docs.it4i/salomon/software/intel-suite/intel-trace-analyzer-and-collector.md
index e88fff56b..62fe24fee 100644
--- a/docs.it4i/salomon/software/intel-suite/intel-trace-analyzer-and-collector.md
+++ b/docs.it4i/salomon/software/intel-suite/intel-trace-analyzer-and-collector.md
@@ -29,7 +29,7 @@ To view and analyze the trace, open ITAC GUI in a [graphical environment](../../
     $ traceanalyzer
 ```
 
-The GUI will launch and you can open the produced *.stf file.
+The GUI will launch and you can open the produced `*.stf` file.
 
 ![](../../../img/Snmekobrazovky20151204v15.35.12.png)
 
@@ -38,5 +38,5 @@ Please refer to Intel documenation about usage of the GUI tool.
 References
 ----------
 1.  [Getting Started with Intel® Trace Analyzer and Collector](https://software.intel.com/en-us/get-started-with-itac-for-linux)
-2.  [Intel® Trace Analyzer and Collector - Documentation](http://Intel®%20Trace%20Analyzer%20and%20Collector%20-%20Documentation)
+2.  [Intel® Trace Analyzer and Collector - Documentation](https://software.intel.com/en-us/intel-trace-analyzer)
 
diff --git a/docs.it4i/salomon/software/mpi/mpi4py-mpi-for-python.md b/docs.it4i/salomon/software/mpi/mpi4py-mpi-for-python.md
index 490b2cfc8..00762481d 100644
--- a/docs.it4i/salomon/software/mpi/mpi4py-mpi-for-python.md
+++ b/docs.it4i/salomon/software/mpi/mpi4py-mpi-for-python.md
@@ -93,4 +93,4 @@ Execute the above code as:
     $ mpiexec --map-by core --bind-to core python hello_world.py
 ```
 
-In this example, we run MPI4Py enabled code on 4 nodes, 24 cores per node (total of 96 processes), each python process is bound to a different core. More examples and documentation can be found on [MPI for Python webpage](https://pythonhosted.org/mpi4py/usrman/index.md).
+In this example, we run MPI4Py-enabled code on 4 nodes with 24 cores per node (96 processes in total); each Python process is bound to a different core. More examples and documentation can be found on the [MPI for Python webpage](https://pypi.python.org/pypi/mpi4py).
-- 
GitLab